Skip to content

Commit c462174

Browse files
committed
Store signatures in c/i/docker/daemon/signatures
Whether or not we are verifying signatures, download them and store them in docker/daemon/signatures. Note that this means that containers/image/docker is now involved on _every_ pull; failures of the c/i/docker client, or inability to download (possibly incorrectly configured but unused) signatures are now fatal. Alternatively, we could make the storing of signatures to c/i/d/d/s silently fail in such cases. WARNING: This DOES NOT BUILD because it references sirupsen/logrus, not Sirupsen/logrus.
1 parent 86d4ca2 commit c462174

14 files changed

Lines changed: 472 additions & 18 deletions

File tree

distribution/pull_v2.go

Lines changed: 18 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ import (
1414
"github.com/Sirupsen/logrus"
1515
cimagedocker "github.com/containers/image/docker"
1616
"github.com/containers/image/signature"
17+
"github.com/containers/image/types"
1718
"github.com/docker/distribution"
1819
"github.com/docker/distribution/digest"
1920
"github.com/docker/distribution/manifest/manifestlist"
@@ -99,8 +100,12 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (e
99100
var layersDownloaded bool
100101
if !reference.IsNameOnly(ref) {
101102
var err error
103+
ciImage, err := p.ciImage(ctx, ref)
104+
if err != nil {
105+
return err
106+
}
102107
if p.config.SignatureCheck {
103-
ref, err = p.checkTrusted(ctx, ref)
108+
ref, err = p.checkTrusted(ref, ciImage)
104109
if err != nil {
105110
if err == cimagedocker.ErrV1NotSupported {
106111
return fmt.Errorf("unable to pull from V1 Docker registries with image signature verification enabled. If you need to accept this risk and disable signature verification (for ALL images), run the docker daemon with --signature-enabled=false")
@@ -109,7 +114,7 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (e
109114
return err
110115
}
111116
}
112-
layersDownloaded, err = p.pullV2Tag(ctx, ref)
117+
layersDownloaded, err = p.pullV2Tag(ctx, ref, ciImage)
113118
if err != nil {
114119
return err
115120
}
@@ -135,8 +140,12 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (e
135140
}
136141
var ref reference.Named
137142
ref = tagRef
143+
ciImage, err := p.ciImage(ctx, ref)
144+
if err != nil {
145+
return err
146+
}
138147
if p.config.SignatureCheck {
139-
trustedRef, err := p.checkTrusted(ctx, tagRef)
148+
trustedRef, err := p.checkTrusted(tagRef, ciImage)
140149
if err != nil {
141150
p.originalRef = nil
142151
if err == cimagedocker.ErrV1NotSupported {
@@ -146,7 +155,7 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (e
146155
}
147156
ref = trustedRef
148157
}
149-
pulledNew, err := p.pullV2Tag(ctx, ref)
158+
pulledNew, err := p.pullV2Tag(ctx, ref, ciImage)
150159
if err != nil {
151160
// Since this is the pull-all-tags case, don't
152161
// allow an error pulling a particular tag to
@@ -366,7 +375,7 @@ func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
366375
ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
367376
}
368377

369-
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
378+
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, ciImage types.Image) (tagUpdated bool, err error) {
370379
manSvc, err := p.repo.Manifests(ctx)
371380
if err != nil {
372381
return false, err
@@ -443,6 +452,10 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat
443452

444453
progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
445454

455+
if err = p.storeSignatures(ctx, ciImage); err != nil {
456+
return false, err
457+
}
458+
446459
oldTagImageID, err := p.config.ReferenceStore.Get(ref)
447460
if err == nil {
448461
if oldTagImageID == imageID {

distribution/pull_v2_unix.go

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import (
77
"path/filepath"
88

99
"github.com/containers/image/docker"
10+
"github.com/containers/image/docker/daemon/signatures"
1011
containersImageRef "github.com/containers/image/docker/reference"
1112
"github.com/containers/image/manifest"
1213
"github.com/containers/image/signature"
@@ -43,8 +44,8 @@ func configurePolicyContext() (*signature.PolicyContext, error) {
4344
return pc, nil
4445
}
4546

46-
func (p *v2Puller) checkTrusted(c gctx.Context, ref reference.Named) (reference.Named, error) {
47-
p.originalRef = ref
47+
// ciImage returns a containers/image/types.Image for ref.
48+
func (p *v2Puller) ciImage(c gctx.Context, ref reference.Named) (types.Image, error) {
4849
// we can't use upstream docker/docker/reference since in projectatomic/docker
4950
// we modified docker/docker/reference and it's not doing any normalization.
5051
// we instead forked docker/docker/reference in containers/image and we need
@@ -76,7 +77,12 @@ func (p *v2Puller) checkTrusted(c gctx.Context, ref reference.Named) (reference.
7677
if err != nil {
7778
return nil, err
7879
}
79-
allowed, err := p.policyContext.IsRunningImageAllowed(img)
80+
return img, nil
81+
}
82+
83+
func (p *v2Puller) checkTrusted(ref reference.Named, ciImage types.Image) (reference.Named, error) {
84+
p.originalRef = ref
85+
allowed, err := p.policyContext.IsRunningImageAllowed(ciImage)
8086
if !allowed {
8187
if err != nil {
8288
return nil, fmt.Errorf("%s isn't allowed: %v", ref.String(), err)
@@ -86,7 +92,7 @@ func (p *v2Puller) checkTrusted(c gctx.Context, ref reference.Named) (reference.
8692
if err != nil {
8793
return nil, err
8894
}
89-
mfst, _, err := img.Manifest()
95+
mfst, _, err := ciImage.Manifest()
9096
if err != nil {
9197
return nil, err
9298
}
@@ -100,3 +106,9 @@ func (p *v2Puller) checkTrusted(c gctx.Context, ref reference.Named) (reference.
100106
}
101107
return ref, nil
102108
}
109+
110+
// storeSignatures records the signatures of ciImage in a containers/image
// docker/daemon/signatures store.
// NewStore(nil) — presumably selects the default store location; TODO confirm
// against containers/image. NOTE(review): any updating of the tag in
// ciImage.Reference() would happen inside RecordImage, not here — verify.
func (p *v2Puller) storeSignatures(c gctx.Context, ciImage types.Image) error {
	store := signatures.NewStore(nil)
	return store.RecordImage(c, ciImage)
}

distribution/pull_v2_windows.go

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import (
99
"os"
1010

1111
"github.com/containers/image/signature"
12+
"github.com/containers/image/types"
1213
"github.com/docker/distribution"
1314
"github.com/docker/distribution/context"
1415
"github.com/docker/distribution/manifest/schema1"
@@ -75,6 +76,14 @@ func configurePolicyContext() (*signature.PolicyContext, error) {
7576
return nil, nil
7677
}
7778

78-
func (p *v2Puller) checkTrusted(c gctx.Context, ref reference.Named) (reference.Named, error) {
79+
// ciImage is a stub on Windows: it always returns a nil image and no error,
// so the containers/image machinery is never engaged on this platform.
func (p *v2Puller) ciImage(c gctx.Context, ref reference.Named) (types.Image, error) {
	return nil, nil
}
82+
83+
// checkTrusted is a no-op on Windows: the reference is returned unchanged and
// no signature policy is evaluated (ciImage is ignored).
func (p *v2Puller) checkTrusted(ref reference.Named, ciImage types.Image) (reference.Named, error) {
	return ref, nil
}
86+
87+
// storeSignatures is a no-op on Windows: signatures are neither downloaded
// nor stored on this platform (ciImage is ignored).
func (p *v2Puller) storeSignatures(c gctx.Context, ciImage types.Image) error {
	return nil
}
Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
1+
package signatures
2+
3+
// dataBucket stores the original manifests and signatures for an image.
4+
5+
// dataBucket keys are (config digest, manifest digest), and contains a sub-bucket for each key.
6+
// This sub-bucket stores a manifest in manifestKey, and individual signatures in
7+
// (signatureKeyPrefix + zero-based index.)
8+
9+
// Safety WRT concurrent access to data in a dataBucket sub-bucket:
10+
// The bucket is identified by the manifest digest, so no substantial updates to the
11+
// manifest are ever expected.
12+
// Readers expect signatures stored within a sub-bucket to be replaced/updated atomically
13+
// to ensure the bucket contents are consistent.
14+
15+
import (
16+
"bytes"
17+
"fmt"
18+
"strconv"
19+
20+
"github.com/boltdb/bolt"
21+
digest "github.com/opencontainers/go-digest"
22+
)
23+
24+
var (
	// dataBucket is the name of the top-level bucket holding one sub-bucket
	// per (config digest, manifest digest) pair; see dataBucketKey.
	dataBucket = []byte("config+manifest->bucket")
	// manifestKey is the sub-bucket key under which the original manifest is stored.
	manifestKey = []byte("manifest")
	// signatureKeyPrefix followed by a zero-based decimal index forms the
	// sub-bucket key for each stored signature.
	signatureKeyPrefix = []byte("sig")
)
29+
30+
// dataBucketKey returns a key for use in dataBucket.
31+
func dataBucketKey(configDigest, manifestDigest digest.Digest) ([]byte, error) {
32+
configBytes, err := stringToNonNULBytes(configDigest.String())
33+
if err != nil {
34+
return nil, err
35+
}
36+
manifestBytes, err := stringToNonNULBytes(manifestDigest.String())
37+
if err != nil {
38+
return nil, err
39+
}
40+
return bytes.Join([][]byte{configBytes, manifestBytes}, []byte{0}), nil
41+
}
42+
43+
// copyBytes returns a freshly allocated clone of input.
// Cloning is required because byte slices handed out by boltdb become invalid
// once their transaction ends.
func copyBytes(input []byte) []byte {
	return append(make([]byte, 0, len(input)), input...)
}
50+
51+
// readManifest returns the original manifest stored in b, or nil if not available.
52+
func readManifest(b *bolt.Bucket) []byte {
53+
m := b.Get(manifestKey)
54+
if m == nil {
55+
return nil
56+
}
57+
return copyBytes(m)
58+
}
59+
60+
// writeManifest stores the original manifest to b under manifestKey,
// replacing any previously stored value.
func writeManifest(b *bolt.Bucket, manifest []byte) error {
	return b.Put(manifestKey, manifest)
}
64+
65+
// readSignatures returns the original signatures stored in bucket, which is dataKey.
//
// Signatures live under keys (signatureKeyPrefix + zero-based decimal index) and
// are returned ordered by index; indices must be consecutive starting at 0, or
// an error is returned. dataKey is used only for error messages.
func readSignatures(bucket *bolt.Bucket, dataKey []byte) ([][]byte, error) {
	// Iterate through all keys in the sub-bucket; we need all of them except for manifestKey, so it seems fastest to read them in the database order and then reorder in memory.
	signatureMap := map[int][]byte{}
	if err := bucket.ForEach(func(k, v []byte) error {
		if !bytes.HasPrefix(k, signatureKeyPrefix) {
			// Skip manifestKey (and anything else that is not a signature key).
			return nil
		}
		i, err := strconv.Atoi(string(bytes.TrimPrefix(k, signatureKeyPrefix)))
		if err != nil {
			return err
		}
		if _, ok := signatureMap[i]; ok {
			// Can only happen for non-canonical index encodings (e.g. "sig01" vs. "sig1").
			return fmt.Errorf("Internal error: Duplicate key %q in dataBucket key %q", k, dataKey)
		}
		// Copy: boltdb-returned data is invalid after the transaction ends.
		signatureMap[i] = copyBytes(v)
		return nil
	}); err != nil {
		return nil, err
	}

	// Reassemble the signatures in index order 0..n-1.
	signatures := [][]byte{}
	for i := 0; ; i++ {
		signature, ok := signatureMap[i]
		if !ok {
			break
		}
		signatures = append(signatures, signature)
	}
	if len(signatures) != len(signatureMap) {
		// The use of transactions to update signatures should prevent this from happening
		return nil, fmt.Errorf("Internal error: Non-consecutive signatures in dataBucket key %q", dataKey)
	}
	return signatures, nil
}
100+
101+
// writeSignatures stores the original signatures to b, which is dataKey.
102+
func writeSignatures(b *bolt.Bucket, dataKey []byte, signatures [][]byte) error {
103+
if len(signatures) == 0 {
104+
return nil // Don't bother reading the old ones.
105+
}
106+
107+
existingSigs, err := readSignatures(b, dataKey)
108+
if err != nil {
109+
return err
110+
}
111+
112+
nextIndex := len(existingSigs)
113+
sigExists:
114+
for _, sig := range signatures {
115+
for _, existingSig := range existingSigs {
116+
if bytes.Equal(sig, existingSig) {
117+
continue sigExists
118+
}
119+
}
120+
121+
key := bytes.Join([][]byte{signatureKeyPrefix, []byte(strconv.Itoa(nextIndex))}, []byte{})
122+
if err := b.Put(key, sig); err != nil {
123+
return err
124+
}
125+
nextIndex++
126+
}
127+
return nil
128+
}

0 commit comments

Comments
 (0)