diff --git a/cli/command/image/trust.go b/cli/command/image/trust.go index 8d94164e5acf..ddbb8580d81c 100644 --- a/cli/command/image/trust.go +++ b/cli/command/image/trust.go @@ -158,7 +158,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository // (based on whether we have the signing key and whether the role's path allows // us to). // If there are no delegation roles, we add to the targets role. -func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { +func addTargetToAllSignableRoles(repo *client.Repository, target *client.Target) error { var signableRoles []string // translate the full key names, which includes the GUN, into just the key IDs @@ -183,7 +183,7 @@ func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.T // We do not support signing any delegation role that isn't a direct child of the targets role. // Also don't bother checking the keys if we can't add the target // to this role due to path restrictions - if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { + if data.RoleName(path.Dir(delegationRole.Name)) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { continue } @@ -228,7 +228,7 @@ func trustedPull(ctx context.Context, cli command.Cli, repoInfo *registry.Reposi if tagged, isTagged := ref.(reference.NamedTagged); !isTagged { // List all targets - targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) + targets, err := notaryRepo.ListTargets(data.RoleName(trust.ReleasesRole), data.CanonicalTargetsRole) if err != nil { return trust.NotaryError(ref.Name(), err) } @@ -240,7 +240,7 @@ func trustedPull(ctx context.Context, cli command.Cli, repoInfo *registry.Reposi } // Only list tags in the top level targets role or the releases delegation role - ignore // all other delegation roles - if tgt.Role != trust.ReleasesRole && tgt.Role != 
data.CanonicalTargetsRole { + if tgt.Role != data.RoleName(trust.ReleasesRole) && data.RoleName(tgt.Role) != data.CanonicalTargetsRole { continue } refs = append(refs, t) @@ -249,13 +249,13 @@ func trustedPull(ctx context.Context, cli command.Cli, repoInfo *registry.Reposi return trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name())) } } else { - t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + t, err := notaryRepo.GetTargetByName(tagged.Tag(), data.RoleName(trust.ReleasesRole), data.CanonicalTargetsRole) if err != nil { return trust.NotaryError(ref.Name(), err) } // Only get the tag if it's in the top level targets role or the releases delegation role // ignore it if it's in any other delegation roles - if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + if t.Role != data.RoleName(trust.ReleasesRole) && t.Role != data.CanonicalTargetsRole { return trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag())) } @@ -341,13 +341,13 @@ func TrustedReference(ctx context.Context, cli command.Cli, ref reference.NamedT return nil, err } - t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + t, err := notaryRepo.GetTargetByName(ref.Tag(), data.RoleName(trust.ReleasesRole), data.CanonicalTargetsRole) if err != nil { return nil, trust.NotaryError(repoInfo.Name.Name(), err) } // Only list tags in the top level targets role or the releases delegation role - ignore // all other delegation roles - if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + if t.Role != data.RoleName(trust.ReleasesRole) && t.Role != data.CanonicalTargetsRole { return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", ref.Tag())) } r, err := convertTarget(t.Target) diff --git a/cli/command/service/trust.go b/cli/command/service/trust.go index e1be1e2030d5..f3874291abee 100644 --- 
a/cli/command/service/trust.go +++ b/cli/command/service/trust.go @@ -9,7 +9,6 @@ import ( "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/registry" "github.com/docker/notary/tuf/data" - "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -64,13 +63,13 @@ func trustedResolveDigest(ctx context.Context, cli command.Cli, ref reference.Na return nil, errors.Wrap(err, "error establishing connection to trust repository") } - t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + t, err := notaryRepo.GetTargetByName(ref.Tag(), data.RoleName(trust.ReleasesRole), data.CanonicalTargetsRole) if err != nil { return nil, trust.NotaryError(repoInfo.Name.Name(), err) } // Only get the tag if it's in the top level targets role or the releases delegation role // ignore it if it's in any other delegation roles - if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + if t.Role != data.RoleName(trust.ReleasesRole) && data.RoleName(t.Role) != data.CanonicalTargetsRole { return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", reference.FamiliarString(ref))) } diff --git a/cli/trust/trust.go b/cli/trust/trust.go index 13192fe11238..ae5b268529d8 100644 --- a/cli/trust/trust.go +++ b/cli/trust/trust.go @@ -33,7 +33,7 @@ import ( var ( // ReleasesRole is the role named "releases" - ReleasesRole = path.Join(data.CanonicalTargetsRole, "releases") + ReleasesRole = path.Join(data.CanonicalTargetsRole.String(), "releases") ) func trustDirectory() string { @@ -86,7 +86,7 @@ func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) { // GetNotaryRepository returns a NotaryRepository which stores all the // information needed to operate on a notary repository. // It creates an HTTP transport providing authentication support. 
-func GetNotaryRepository(streams command.Streams, repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, actions ...string) (*client.NotaryRepository, error) { +func GetNotaryRepository(streams command.Streams, repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, actions ...string) (client.Repository, error) { server, err := Server(repoInfo.Index) if err != nil { return nil, err @@ -164,9 +164,9 @@ func GetNotaryRepository(streams command.Streams, repoInfo *registry.RepositoryI modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) tr := transport.NewTransport(base, modifiers...) - return client.NewNotaryRepository( + return client.NewFileCachedRepository( trustDirectory(), - repoInfo.Name.Name(), + data.GUN(repoInfo.Name.Name()), server, tr, getPassphraseRetriever(streams), @@ -193,7 +193,7 @@ func getPassphraseRetriever(streams command.Streams) notary.PassRetriever { return v, numAttempts > 1, nil } // For non-root roles, we can also try the "default" alias if it is specified - if v := env["default"]; v != "" && alias != data.CanonicalRootRole { + if v := env["default"]; v != "" && alias != data.CanonicalRootRole.String() { return v, numAttempts > 1, nil } return baseRetriever(keyName, alias, createNew, numAttempts) diff --git a/vendor.conf b/vendor.conf index 4e4d16523873..01d47e4e621f 100755 --- a/vendor.conf +++ b/vendor.conf @@ -4,7 +4,7 @@ github.com/coreos/etcd 824277cb3a577a0e8c829ca9ec557b973fe06d20 github.com/cpuguy83/go-md2man a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c -github.com/docker/docker 184cea5ff710abde25547749e5608b24a255ba09 +github.com/docker/docker 184cea5ff710abde25547749e5608b24a255ba09 github.com/docker/docker-credential-helpers v0.5.1 # the docker/go package contains a customized version of canonical/json @@ -14,7 +14,7 @@ 
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a -github.com/docker/notary v0.4.2-sirupsen https://github.com/simonferquel/notary.git +github.com/docker/notary 87c591965429960d1a109d9aa95741a73b2e1c58 github.com/docker/swarmkit 0554c9bc9a485025e89b8e5c2c1f0d75961906a2 github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff github.com/gogo/protobuf v0.4 @@ -29,7 +29,7 @@ github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 github.com/moby/buildkit da2b9dc7dab99e824b2b1067ad7d0523e32dd2d9 https://github.com/dmcgowan/buildkit.git github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty -github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 +github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/runc d40db12e72a40109dfcf28539f5ee0930d2f0277 github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 @@ -45,10 +45,10 @@ github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d -golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 +golang.org/x/crypto 5bcd134fee4dd1475da17714aac19c0aa0142e2f golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c -golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f +golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f golang.org/x/text 
f72d8390a633d5dfb0cc84043294db9f6c935756 golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 diff --git a/vendor/github.com/docker/notary/LICENSE b/vendor/github.com/docker/notary/LICENSE index 6daf85e9de18..ad9500955cc9 100644 --- a/vendor/github.com/docker/notary/LICENSE +++ b/vendor/github.com/docker/notary/LICENSE @@ -1,4 +1,4 @@ -Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/github.com/docker/notary/README.md b/vendor/github.com/docker/notary/README.md index e81a91c10870..01af5d9b306b 100644 --- a/vendor/github.com/docker/notary/README.md +++ b/vendor/github.com/docker/notary/README.md @@ -78,18 +78,21 @@ to use `notary` with Docker images. ## Building Notary -Prerequisites: +Note that our [latest stable release](https://github.com/docker/notary/releases) is at the head of the +[releases branch](https://github.com/docker/notary/tree/releases). The master branch is the development +branch and contains features for the next release. -- Go >= 1.7 +Prerequisites: +- Go >= 1.7.1 - [godep](https://github.com/tools/godep) installed - libtool development headers installed - Ubuntu: `apt-get install libltdl-dev` - CentOS/RedHat: `yum install libtool-ltdl-devel` - Mac OS ([Homebrew](http://brew.sh/)): `brew install libtool` -Run `make binaries`, which creates the Notary Client CLI binary at `bin/notary`. -Note that `make binaries` assumes a standard Go directory structure, in which +Run `make client`, which creates the Notary Client CLI binary at `bin/notary`. +Note that `make client` assumes a standard Go directory structure, in which Notary is checked out to the `src` directory in your `GOPATH`. For example: ``` $GOPATH/ @@ -98,3 +101,5 @@ $GOPATH/ docker/ notary/ ``` + +To build the server and signer, please run `docker-compose build`. 
\ No newline at end of file diff --git a/vendor/github.com/docker/notary/client/changelist/change.go b/vendor/github.com/docker/notary/client/changelist/change.go index 3e872e681ed5..f9fa552d00f1 100644 --- a/vendor/github.com/docker/notary/client/changelist/change.go +++ b/vendor/github.com/docker/notary/client/changelist/change.go @@ -8,10 +8,8 @@ import ( // Unfortunately because of targets delegations, we can only // cover the base roles. const ( - ScopeRoot = "root" - ScopeTargets = "targets" - ScopeSnapshot = "snapshot" - ScopeTimestamp = "timestamp" + ScopeRoot = "root" + ScopeTargets = "targets" ) // Types for TUFChanges are namespaced by the Role they @@ -20,7 +18,7 @@ const ( // all changes in Snapshot and Timestamp are programmatically // generated base on Root and Targets changes. const ( - TypeRootRole = "role" + TypeBaseRole = "role" TypeTargetsTarget = "target" TypeTargetsDelegation = "delegation" TypeWitness = "witness" @@ -29,22 +27,22 @@ const ( // TUFChange represents a change to a TUF repo type TUFChange struct { // Abbreviated because Go doesn't permit a field and method of the same name - Actn string `json:"action"` - Role string `json:"role"` - ChangeType string `json:"type"` - ChangePath string `json:"path"` - Data []byte `json:"data"` + Actn string `json:"action"` + Role data.RoleName `json:"role"` + ChangeType string `json:"type"` + ChangePath string `json:"path"` + Data []byte `json:"data"` } // TUFRootData represents a modification of the keys associated // with a role that appears in the root.json type TUFRootData struct { - Keys data.KeyList `json:"keys"` - RoleName string `json:"role"` + Keys data.KeyList `json:"keys"` + RoleName data.RoleName `json:"role"` } // NewTUFChange initializes a TUFChange object -func NewTUFChange(action string, role, changeType, changePath string, content []byte) *TUFChange { +func NewTUFChange(action string, role data.RoleName, changeType, changePath string, content []byte) *TUFChange { return &TUFChange{ 
Actn: action, Role: role, @@ -60,7 +58,7 @@ func (c TUFChange) Action() string { } // Scope returns c.Role -func (c TUFChange) Scope() string { +func (c TUFChange) Scope() data.RoleName { return c.Role } @@ -83,17 +81,17 @@ func (c TUFChange) Content() []byte { // this includes creating a delegations. This format is used to avoid // unexpected race conditions between humans modifying the same delegation type TUFDelegation struct { - NewName string `json:"new_name,omitempty"` - NewThreshold int `json:"threshold, omitempty"` - AddKeys data.KeyList `json:"add_keys, omitempty"` - RemoveKeys []string `json:"remove_keys,omitempty"` - AddPaths []string `json:"add_paths,omitempty"` - RemovePaths []string `json:"remove_paths,omitempty"` - ClearAllPaths bool `json:"clear_paths,omitempty"` + NewName data.RoleName `json:"new_name,omitempty"` + NewThreshold int `json:"threshold, omitempty"` + AddKeys data.KeyList `json:"add_keys, omitempty"` + RemoveKeys []string `json:"remove_keys,omitempty"` + AddPaths []string `json:"add_paths,omitempty"` + RemovePaths []string `json:"remove_paths,omitempty"` + ClearAllPaths bool `json:"clear_paths,omitempty"` } // ToNewRole creates a fresh role object from the TUFDelegation data -func (td TUFDelegation) ToNewRole(scope string) (*data.Role, error) { +func (td TUFDelegation) ToNewRole(scope data.RoleName) (*data.Role, error) { name := scope if td.NewName != "" { name = td.NewName diff --git a/vendor/github.com/docker/notary/client/changelist/changelist.go b/vendor/github.com/docker/notary/client/changelist/changelist.go index 9b52981add0b..30cf69459841 100644 --- a/vendor/github.com/docker/notary/client/changelist/changelist.go +++ b/vendor/github.com/docker/notary/client/changelist/changelist.go @@ -21,6 +21,11 @@ func (cl *memChangelist) Add(c Change) error { return nil } +// Location returns the string "memory" +func (cl memChangelist) Location() string { + return "memory" +} + // Remove deletes the changes found at the given indices func 
(cl *memChangelist) Remove(idxs []int) error { remove := make(map[int]struct{}) diff --git a/vendor/github.com/docker/notary/client/changelist/file_changelist.go b/vendor/github.com/docker/notary/client/changelist/file_changelist.go index d3bc55c1be71..ab1b200e27fd 100644 --- a/vendor/github.com/docker/notary/client/changelist/file_changelist.go +++ b/vendor/github.com/docker/notary/client/changelist/file_changelist.go @@ -5,12 +5,12 @@ import ( "fmt" "io/ioutil" "os" + "path/filepath" "sort" "time" - "github.com/sirupsen/logrus" "github.com/docker/distribution/uuid" - "path/filepath" + "github.com/sirupsen/logrus" ) // FileChangelist stores all the changes as files @@ -137,6 +137,11 @@ func (cl FileChangelist) Close() error { return nil } +// Location returns the file path to the changelist +func (cl FileChangelist) Location() string { + return cl.dir +} + // NewIterator creates an iterator from FileChangelist func (cl FileChangelist) NewIterator() (ChangeIterator, error) { fileInfos, err := getFileNames(cl.dir) diff --git a/vendor/github.com/docker/notary/client/changelist/interface.go b/vendor/github.com/docker/notary/client/changelist/interface.go index a319b7b89850..70dc0a2d0e98 100644 --- a/vendor/github.com/docker/notary/client/changelist/interface.go +++ b/vendor/github.com/docker/notary/client/changelist/interface.go @@ -1,5 +1,7 @@ package changelist +import "github.com/docker/notary/tuf/data" + // Changelist is the interface for all TUF change lists type Changelist interface { // List returns the ordered list of changes @@ -25,6 +27,9 @@ type Changelist interface { // NewIterator returns an iterator for walking through the list // of changes currently stored NewIterator() (ChangeIterator, error) + + // Location returns the place the changelist is stores + Location() string } const ( @@ -43,7 +48,7 @@ type Change interface { // Where the change should be made. 
// For TUF this will be the role - Scope() string + Scope() data.RoleName // The content type being affected. // For TUF this will be "target", or "delegation". diff --git a/vendor/github.com/docker/notary/client/client.go b/vendor/github.com/docker/notary/client/client.go index 2b36a335f0f1..c7e473b43a12 100644 --- a/vendor/github.com/docker/notary/client/client.go +++ b/vendor/github.com/docker/notary/client/client.go @@ -9,134 +9,145 @@ import ( "net/url" "os" "path/filepath" - "strings" + "regexp" "time" - "github.com/sirupsen/logrus" + canonicaljson "github.com/docker/go/canonical/json" "github.com/docker/notary" "github.com/docker/notary/client/changelist" "github.com/docker/notary/cryptoservice" store "github.com/docker/notary/storage" - "github.com/docker/notary/trustmanager" "github.com/docker/notary/trustpinning" "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" ) -func init() { - data.SetDefaultExpiryTimes(notary.NotaryDefaultExpiries) -} +const ( + tufDir = "tuf" -// ErrRepoNotInitialized is returned when trying to publish an uninitialized -// notary repository -type ErrRepoNotInitialized struct{} + // SignWithAllOldVersions is a sentinel constant for LegacyVersions flag + SignWithAllOldVersions = -1 +) -func (err ErrRepoNotInitialized) Error() string { - return "repository has not been initialized" +func init() { + data.SetDefaultExpiryTimes(data.NotaryDefaultExpiries) } -// ErrInvalidRemoteRole is returned when the server is requested to manage -// a key type that is not permitted -type ErrInvalidRemoteRole struct { - Role string +// repository stores all the information needed to operate on a notary repository. 
+type repository struct { + baseDir string + gun data.GUN + baseURL string + changelist changelist.Changelist + cache store.MetadataStore + remoteStore store.RemoteStore + cryptoService signed.CryptoService + tufRepo *tuf.Repo + invalid *tuf.Repo // known data that was parsable but deemed invalid + roundTrip http.RoundTripper + trustPinning trustpinning.TrustPinConfig + LegacyVersions int // number of versions back to fetch roots to sign with } -func (err ErrInvalidRemoteRole) Error() string { - return fmt.Sprintf( - "notary does not permit the server managing the %s key", err.Role) -} +// NewFileCachedRepository is a wrapper for NewRepository that initializes +// a file cache from the provided repository, local config information and a crypto service. +// It also retrieves the remote store associated to the base directory under where all the +// trust files will be stored and the specified GUN. +// +// In case of a nil RoundTripper, a default offline store is used instead. +func NewFileCachedRepository(baseDir string, gun data.GUN, baseURL string, rt http.RoundTripper, + retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) (Repository, error) { -// ErrInvalidLocalRole is returned when the client wants to manage -// a key type that is not permitted -type ErrInvalidLocalRole struct { - Role string -} + cache, err := store.NewFileStore( + filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()), "metadata"), + "json", + ) + if err != nil { + return nil, err + } -func (err ErrInvalidLocalRole) Error() string { - return fmt.Sprintf( - "notary does not permit the client managing the %s key", err.Role) -} + keyStores, err := getKeyStores(baseDir, retriever) + if err != nil { + return nil, err + } -// ErrRepositoryNotExist is returned when an action is taken on a remote -// repository that doesn't exist -type ErrRepositoryNotExist struct { - remote string - gun string -} + cryptoService := cryptoservice.NewCryptoService(keyStores...) 
-func (err ErrRepositoryNotExist) Error() string { - return fmt.Sprintf("%s does not have trust data for %s", err.remote, err.gun) -} + remoteStore, err := getRemoteStore(baseURL, gun, rt) + if err != nil { + // baseURL is syntactically invalid + return nil, err + } -const ( - tufDir = "tuf" -) + cl, err := changelist.NewFileChangelist(filepath.Join( + filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()), "changelist"), + )) + if err != nil { + return nil, err + } -// NotaryRepository stores all the information needed to operate on a notary -// repository. -type NotaryRepository struct { - baseDir string - gun string - baseURL string - tufRepoPath string - fileStore store.MetadataStore - CryptoService signed.CryptoService - tufRepo *tuf.Repo - invalid *tuf.Repo // known data that was parsable but deemed invalid - roundTrip http.RoundTripper - trustPinning trustpinning.TrustPinConfig -} - -// repositoryFromKeystores is a helper function for NewNotaryRepository that -// takes some basic NotaryRepository parameters as well as keystores (in order -// of usage preference), and returns a NotaryRepository. -func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper, - keyStores []trustmanager.KeyStore, trustPin trustpinning.TrustPinConfig) (*NotaryRepository, error) { + return NewRepository(baseDir, gun, baseURL, remoteStore, cache, trustPinning, cryptoService, cl) +} - cryptoService := cryptoservice.NewCryptoService(keyStores...) +// NewRepository is the base method that returns a new notary repository. +// It takes the base directory under where all the trust files will be stored +// (This is normally defaults to "~/.notary" or "~/.docker/trust" when enabling +// docker content trust). +// It expects an initialized cache. In case of a nil remote store, a default +// offline store is used. 
+func NewRepository(baseDir string, gun data.GUN, baseURL string, remoteStore store.RemoteStore, cache store.MetadataStore, + trustPinning trustpinning.TrustPinConfig, cryptoService signed.CryptoService, cl changelist.Changelist) (Repository, error) { - nRepo := &NotaryRepository{ - gun: gun, - baseDir: baseDir, - baseURL: baseURL, - tufRepoPath: filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)), - CryptoService: cryptoService, - roundTrip: rt, - trustPinning: trustPin, + // Repo's remote store is either a valid remote store or an OfflineStore + if remoteStore == nil { + remoteStore = store.OfflineStore{} } - fileStore, err := store.NewFilesystemStore( - nRepo.tufRepoPath, - "metadata", - "json", - ) - if err != nil { - return nil, err + if cache == nil { + return nil, fmt.Errorf("got an invalid cache (nil metadata store)") + } + + nRepo := &repository{ + gun: gun, + baseURL: baseURL, + baseDir: baseDir, + changelist: cl, + cache: cache, + remoteStore: remoteStore, + cryptoService: cryptoService, + trustPinning: trustPinning, + LegacyVersions: 0, // By default, don't sign with legacy roles } - nRepo.fileStore = fileStore return nRepo, nil } +// GetGUN is a getter for the GUN object from a Repository +func (r *repository) GetGUN() data.GUN { + return r.gun +} + // Target represents a simplified version of the data TUF operates on, so external // applications don't have to depend on TUF data types. 
type Target struct { - Name string // the name of the target - Hashes data.Hashes // the hash of the target - Length int64 // the size in bytes of the target + Name string // the name of the target + Hashes data.Hashes // the hash of the target + Length int64 // the size in bytes of the target + Custom *canonicaljson.RawMessage // the custom data provided to describe the file at TARGETPATH } // TargetWithRole represents a Target that exists in a particular role - this is // produced by ListTargets and GetTargetByName type TargetWithRole struct { Target - Role string + Role data.RoleName } // NewTarget is a helper method that returns a Target -func NewTarget(targetName string, targetPath string) (*Target, error) { +func NewTarget(targetName, targetPath string, targetCustom *canonicaljson.RawMessage) (*Target, error) { b, err := ioutil.ReadFile(targetPath) if err != nil { return nil, err @@ -147,10 +158,11 @@ func NewTarget(targetName string, targetPath string) (*Target, error) { return nil, err } - return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length}, nil + return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length, Custom: targetCustom}, nil } -func rootCertKey(gun string, privKey data.PrivateKey) (data.PublicKey, error) { +// rootCertKey generates the corresponding certificate for the private key given the privKey and repo's GUN +func rootCertKey(gun data.GUN, privKey data.PrivateKey) (data.PublicKey, error) { // Hard-coded policy: the generated certificate expires in 10 years. 
startTime := time.Now() cert, err := cryptoservice.GenerateCertificate( @@ -161,47 +173,39 @@ func rootCertKey(gun string, privKey data.PrivateKey) (data.PublicKey, error) { x509PublicKey := utils.CertToKey(cert) if x509PublicKey == nil { - return nil, fmt.Errorf( - "cannot use regenerated certificate: format %s", cert.PublicKeyAlgorithm) + return nil, fmt.Errorf("cannot generate public key from private key with id: %v and algorithm: %v", privKey.ID(), privKey.Algorithm()) } return x509PublicKey, nil } -// Initialize creates a new repository by using rootKey as the root Key for the -// TUF repository. The server must be reachable (and is asked to generate a -// timestamp key and possibly other serverManagedRoles), but the created repository -// result is only stored on local disk, not published to the server. To do that, -// use r.Publish() eventually. -func (r *NotaryRepository) Initialize(rootKeyIDs []string, serverManagedRoles ...string) error { - privKeys := make([]data.PrivateKey, 0, len(rootKeyIDs)) - for _, keyID := range rootKeyIDs { - privKey, _, err := r.CryptoService.GetPrivateKey(keyID) - if err != nil { - return err - } - privKeys = append(privKeys, privKey) - } +// GetCryptoService is the getter for the repository's CryptoService +func (r *repository) GetCryptoService() signed.CryptoService { + return r.cryptoService +} + +// initialize initializes the notary repository with a set of rootkeys, root certificates and roles. +func (r *repository) initialize(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error { // currently we only support server managing timestamps and snapshots, and // nothing else - timestamps are always managed by the server, and implicit // (do not have to be passed in as part of `serverManagedRoles`, so that // the API of Initialize doesn't change). 
var serverManagesSnapshot bool - locallyManagedKeys := []string{ + locallyManagedKeys := []data.RoleName{ data.CanonicalTargetsRole, data.CanonicalSnapshotRole, // root is also locally managed, but that should have been created // already } - remotelyManagedKeys := []string{data.CanonicalTimestampRole} + remotelyManagedKeys := []data.RoleName{data.CanonicalTimestampRole} for _, role := range serverManagedRoles { switch role { case data.CanonicalTimestampRole: continue // timestamp is already in the right place case data.CanonicalSnapshotRole: // because we put Snapshot last - locallyManagedKeys = []string{data.CanonicalTargetsRole} + locallyManagedKeys = []data.RoleName{data.CanonicalTargetsRole} remotelyManagedKeys = append( remotelyManagedKeys, data.CanonicalSnapshotRole) serverManagesSnapshot = true @@ -210,105 +214,223 @@ func (r *NotaryRepository) Initialize(rootKeyIDs []string, serverManagedRoles .. } } - rootKeys := make([]data.PublicKey, 0, len(privKeys)) + // gets valid public keys corresponding to the rootKeyIDs or generate if necessary + var publicKeys []data.PublicKey + var err error + if len(rootCerts) == 0 { + publicKeys, err = r.createNewPublicKeyFromKeyIDs(rootKeyIDs) + } else { + publicKeys, err = r.publicKeysOfKeyIDs(rootKeyIDs, rootCerts) + } + if err != nil { + return err + } + + //initialize repo with public keys + rootRole, targetsRole, snapshotRole, timestampRole, err := r.initializeRoles( + publicKeys, + locallyManagedKeys, + remotelyManagedKeys, + ) + if err != nil { + return err + } + + r.tufRepo = tuf.NewRepo(r.GetCryptoService()) + + if err := r.tufRepo.InitRoot( + rootRole, + timestampRole, + snapshotRole, + targetsRole, + false, + ); err != nil { + logrus.Debug("Error on InitRoot: ", err.Error()) + return err + } + if _, err := r.tufRepo.InitTargets(data.CanonicalTargetsRole); err != nil { + logrus.Debug("Error on InitTargets: ", err.Error()) + return err + } + if err := r.tufRepo.InitSnapshot(); err != nil { + logrus.Debug("Error on 
InitSnapshot: ", err.Error()) + return err + } + + return r.saveMetadata(serverManagesSnapshot) +} + +// createNewPublicKeyFromKeyIDs generates a set of public keys corresponding to the given list of +// key IDs existing in the repository's CryptoService. +// the public keys returned are ordered to correspond to the keyIDs +func (r *repository) createNewPublicKeyFromKeyIDs(keyIDs []string) ([]data.PublicKey, error) { + publicKeys := []data.PublicKey{} + + privKeys, err := getAllPrivKeys(keyIDs, r.GetCryptoService()) + if err != nil { + return nil, err + } + for _, privKey := range privKeys { rootKey, err := rootCertKey(r.gun, privKey) + if err != nil { + return nil, err + } + publicKeys = append(publicKeys, rootKey) + } + return publicKeys, nil +} + +// publicKeysOfKeyIDs confirms that the public key and private keys (by Key IDs) forms valid, strictly ordered key pairs +// (eg. keyIDs[0] must match pubKeys[0] and keyIDs[1] must match certs[1] and so on). +// Or throw error when they mismatch. 
+func (r *repository) publicKeysOfKeyIDs(keyIDs []string, pubKeys []data.PublicKey) ([]data.PublicKey, error) { + if len(keyIDs) != len(pubKeys) { + err := fmt.Errorf("require matching number of keyIDs and public keys but got %d IDs and %d public keys", len(keyIDs), len(pubKeys)) + return nil, err + } + + if err := matchKeyIdsWithPubKeys(r, keyIDs, pubKeys); err != nil { + return nil, fmt.Errorf("could not obtain public key from IDs: %v", err) + } + return pubKeys, nil +} + +// matchKeyIdsWithPubKeys validates that the private keys (represented by their IDs) and the public keys +// forms matching key pairs +func matchKeyIdsWithPubKeys(r *repository, ids []string, pubKeys []data.PublicKey) error { + for i := 0; i < len(ids); i++ { + privKey, _, err := r.GetCryptoService().GetPrivateKey(ids[i]) + if err != nil { + return fmt.Errorf("could not get the private key matching id %v: %v", ids[i], err) + } + + pubKey := pubKeys[i] + err = signed.VerifyPublicKeyMatchesPrivateKey(privKey, pubKey) if err != nil { return err } - rootKeys = append(rootKeys, rootKey) } + return nil +} + +// Initialize creates a new repository by using rootKey as the root Key for the +// TUF repository. The server must be reachable (and is asked to generate a +// timestamp key and possibly other serverManagedRoles), but the created repository +// result is only stored on local disk, not published to the server. To do that, +// use r.Publish() eventually. +func (r *repository) Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error { + return r.initialize(rootKeyIDs, nil, serverManagedRoles...) 
+} + +type errKeyNotFound struct{} + +func (errKeyNotFound) Error() string { + return fmt.Sprintf("cannot find matching private key id") +} + +// keyExistsInList returns the id of the private key in ids that matches the public key +// otherwise return empty string +func keyExistsInList(cert data.PublicKey, ids map[string]bool) error { + pubKeyID, err := utils.CanonicalKeyID(cert) + if err != nil { + return fmt.Errorf("failed to obtain the public key id from the given certificate: %v", err) + } + if _, ok := ids[pubKeyID]; ok { + return nil + } + return errKeyNotFound{} +} + +// InitializeWithCertificate initializes the repository with root keys and their corresponding certificates +func (r *repository) InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey, + serverManagedRoles ...data.RoleName) error { + + // If we explicitly pass in certificate(s) but not key, then look keys up using certificate + if len(rootKeyIDs) == 0 && len(rootCerts) != 0 { + rootKeyIDs = []string{} + availableRootKeyIDs := make(map[string]bool) + for _, k := range r.GetCryptoService().ListKeys(data.CanonicalRootRole) { + availableRootKeyIDs[k] = true + } + + for _, cert := range rootCerts { + if err := keyExistsInList(cert, availableRootKeyIDs); err != nil { + return fmt.Errorf("error initializing repository with certificate: %v", err) + } + keyID, _ := utils.CanonicalKeyID(cert) + rootKeyIDs = append(rootKeyIDs, keyID) + } + } + return r.initialize(rootKeyIDs, rootCerts, serverManagedRoles...) 
+} - var ( - rootRole = data.NewBaseRole( - data.CanonicalRootRole, - notary.MinThreshold, - rootKeys..., - ) - timestampRole data.BaseRole - snapshotRole data.BaseRole - targetsRole data.BaseRole +func (r *repository) initializeRoles(rootKeys []data.PublicKey, localRoles, remoteRoles []data.RoleName) ( + root, targets, snapshot, timestamp data.BaseRole, err error) { + root = data.NewBaseRole( + data.CanonicalRootRole, + notary.MinThreshold, + rootKeys..., ) // we want to create all the local keys first so we don't have to // make unnecessary network calls - for _, role := range locallyManagedKeys { + for _, role := range localRoles { // This is currently hardcoding the keys to ECDSA. - key, err := r.CryptoService.Create(role, r.gun, data.ECDSAKey) + var key data.PublicKey + key, err = r.GetCryptoService().Create(role, r.gun, data.ECDSAKey) if err != nil { - return err + return } switch role { case data.CanonicalSnapshotRole: - snapshotRole = data.NewBaseRole( + snapshot = data.NewBaseRole( role, notary.MinThreshold, key, ) case data.CanonicalTargetsRole: - targetsRole = data.NewBaseRole( + targets = data.NewBaseRole( role, notary.MinThreshold, key, ) } } - for _, role := range remotelyManagedKeys { + + remote := r.getRemoteStore() + + for _, role := range remoteRoles { // This key is generated by the remote server. 
- key, err := getRemoteKey(r.baseURL, r.gun, role, r.roundTrip) + var key data.PublicKey + key, err = getRemoteKey(role, remote) if err != nil { - return err + return } logrus.Debugf("got remote %s %s key with keyID: %s", role, key.Algorithm(), key.ID()) switch role { case data.CanonicalSnapshotRole: - snapshotRole = data.NewBaseRole( + snapshot = data.NewBaseRole( role, notary.MinThreshold, key, ) case data.CanonicalTimestampRole: - timestampRole = data.NewBaseRole( + timestamp = data.NewBaseRole( role, notary.MinThreshold, key, ) } } - - r.tufRepo = tuf.NewRepo(r.CryptoService) - - err := r.tufRepo.InitRoot( - rootRole, - timestampRole, - snapshotRole, - targetsRole, - false, - ) - if err != nil { - logrus.Debug("Error on InitRoot: ", err.Error()) - return err - } - _, err = r.tufRepo.InitTargets(data.CanonicalTargetsRole) - if err != nil { - logrus.Debug("Error on InitTargets: ", err.Error()) - return err - } - err = r.tufRepo.InitSnapshot() - if err != nil { - logrus.Debug("Error on InitSnapshot: ", err.Error()) - return err - } - - return r.saveMetadata(serverManagesSnapshot) + return root, targets, snapshot, timestamp, nil } // adds a TUF Change template to the given roles -func addChange(cl *changelist.FileChangelist, c changelist.Change, roles ...string) error { - +func addChange(cl changelist.Changelist, c changelist.Change, roles ...data.RoleName) error { if len(roles) == 0 { - roles = []string{data.CanonicalTargetsRole} + roles = []data.RoleName{data.CanonicalTargetsRole} } var changes []changelist.Change @@ -342,19 +464,13 @@ func addChange(cl *changelist.FileChangelist, c changelist.Change, roles ...stri // AddTarget creates new changelist entries to add a target to the given roles // in the repository when the changelist gets applied at publish time. 
// If roles are unspecified, the default role is "targets" -func (r *NotaryRepository) AddTarget(target *Target, roles ...string) error { - +func (r *repository) AddTarget(target *Target, roles ...data.RoleName) error { if len(target.Hashes) == 0 { return fmt.Errorf("no hashes specified for target \"%s\"", target.Name) } - cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) - if err != nil { - return err - } - defer cl.Close() logrus.Debugf("Adding target \"%s\" with sha256 \"%x\" and size %d bytes.\n", target.Name, target.Hashes["sha256"], target.Length) - meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes} + meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes, Custom: target.Custom} metaJSON, err := json.Marshal(meta) if err != nil { return err @@ -363,22 +479,17 @@ func (r *NotaryRepository) AddTarget(target *Target, roles ...string) error { template := changelist.NewTUFChange( changelist.ActionCreate, "", changelist.TypeTargetsTarget, target.Name, metaJSON) - return addChange(cl, template, roles...) + return addChange(r.changelist, template, roles...) } // RemoveTarget creates new changelist entries to remove a target from the given // roles in the repository when the changelist gets applied at publish time. // If roles are unspecified, the default role is "target". -func (r *NotaryRepository) RemoveTarget(targetName string, roles ...string) error { - - cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) - if err != nil { - return err - } +func (r *repository) RemoveTarget(targetName string, roles ...data.RoleName) error { logrus.Debugf("Removing target \"%s\"", targetName) template := changelist.NewTUFChange(changelist.ActionDelete, "", changelist.TypeTargetsTarget, targetName, nil) - return addChange(cl, template, roles...) + return addChange(r.changelist, template, roles...) } // ListTargets lists all targets for the current repository. 
The list of @@ -389,18 +500,18 @@ func (r *NotaryRepository) RemoveTarget(targetName string, roles ...string) erro // its entries will be strictly shadowed by those in other parts of the "targets/a" // subtree and also the "targets/x" subtree, as we will defer parsing it until // we explicitly reach it in our iteration of the provided list of roles. -func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, error) { +func (r *repository) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) { if err := r.Update(false); err != nil { return nil, err } if len(roles) == 0 { - roles = []string{data.CanonicalTargetsRole} + roles = []data.RoleName{data.CanonicalTargetsRole} } targets := make(map[string]*TargetWithRole) for _, role := range roles { // Define an array of roles to skip for this walk (see IMPORTANT comment above) - skipRoles := utils.StrSliceRemove(roles, role) + skipRoles := utils.RoleNameSliceRemove(roles, role) // Define a visitor function to populate the targets map in priority order listVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} { @@ -416,6 +527,7 @@ func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, erro Name: targetName, Hashes: targetMeta.Hashes, Length: targetMeta.Length, + Custom: targetMeta.Custom, }, Role: validRole.Name, } @@ -441,7 +553,7 @@ func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, erro // the target entry found in the subtree of the highest priority role // will be returned. // See the IMPORTANT section on ListTargets above. Those roles also apply here. 
-func (r *NotaryRepository) GetTargetByName(name string, roles ...string) (*TargetWithRole, error) { +func (r *repository) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) { if err := r.Update(false); err != nil { return nil, err } @@ -450,11 +562,11 @@ func (r *NotaryRepository) GetTargetByName(name string, roles ...string) (*Targe roles = append(roles, data.CanonicalTargetsRole) } var resultMeta data.FileMeta - var resultRoleName string + var resultRoleName data.RoleName var foundTarget bool for _, role := range roles { // Define an array of roles to skip for this walk (see IMPORTANT comment above) - skipRoles := utils.StrSliceRemove(roles, role) + skipRoles := utils.RoleNameSliceRemove(roles, role) // Define a visitor function to find the specified target getTargetVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} { @@ -471,10 +583,10 @@ func (r *NotaryRepository) GetTargetByName(name string, roles ...string) (*Targe } // Check that we didn't error, and that we assigned to our target if err := r.tufRepo.WalkTargets(name, role, getTargetVisitorFunc, skipRoles...); err == nil && foundTarget { - return &TargetWithRole{Target: Target{Name: name, Hashes: resultMeta.Hashes, Length: resultMeta.Length}, Role: resultRoleName}, nil + return &TargetWithRole{Target: Target{Name: name, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom}, Role: resultRoleName}, nil } } - return nil, fmt.Errorf("No trust data for %s", name) + return nil, ErrNoSuchTarget(name) } @@ -485,10 +597,17 @@ type TargetSignedStruct struct { Signatures []data.Signature } +//ErrNoSuchTarget is returned when no valid trust data is found. 
+type ErrNoSuchTarget string + +func (f ErrNoSuchTarget) Error() string { + return fmt.Sprintf("No valid trust data for %s", string(f)) +} + // GetAllTargetMetadataByName searches the entire delegation role tree to find the specified target by name for all // roles, and returns a list of TargetSignedStructs for each time it finds the specified target. // If given an empty string for a target name, it will return back all targets signed into the repository in every role -func (r *NotaryRepository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) { +func (r *repository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) { if err := r.Update(false); err != nil { return nil, err } @@ -515,7 +634,7 @@ func (r *NotaryRepository) GetAllTargetMetadataByName(name string) ([]TargetSign for targetName, resultMeta := range targetMetaToAdd { targetInfo := TargetSignedStruct{ Role: validRole, - Target: Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length}, + Target: Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom}, Signatures: tgt.Signatures, } targetInfoList = append(targetInfoList, targetInfo) @@ -529,20 +648,26 @@ func (r *NotaryRepository) GetAllTargetMetadataByName(name string) ([]TargetSign return nil, err } if len(targetInfoList) == 0 { - return nil, fmt.Errorf("No valid trust data for %s", name) + return nil, ErrNoSuchTarget(name) } return targetInfoList, nil } // GetChangelist returns the list of the repository's unpublished changes -func (r *NotaryRepository) GetChangelist() (changelist.Changelist, error) { - changelistDir := filepath.Join(r.tufRepoPath, "changelist") - cl, err := changelist.NewFileChangelist(changelistDir) - if err != nil { - logrus.Debug("Error initializing changelist") - return nil, err +func (r *repository) GetChangelist() (changelist.Changelist, error) { + return r.changelist, nil +} + +// getRemoteStore returns the remoteStore 
of a repository if valid or +// or an OfflineStore otherwise +func (r *repository) getRemoteStore() store.RemoteStore { + if r.remoteStore != nil { + return r.remoteStore } - return cl, nil + + r.remoteStore = &store.OfflineStore{} + + return r.remoteStore } // RoleWithSignatures is a Role with its associated signatures @@ -553,7 +678,7 @@ type RoleWithSignatures struct { // ListRoles returns a list of RoleWithSignatures objects for this repo // This represents the latest metadata for each role in this repo -func (r *NotaryRepository) ListRoles() ([]RoleWithSignatures, error) { +func (r *repository) ListRoles() ([]RoleWithSignatures, error) { // Update to latest repo state if err := r.Update(false); err != nil { return nil, err @@ -592,41 +717,39 @@ func (r *NotaryRepository) ListRoles() ([]RoleWithSignatures, error) { // Publish pushes the local changes in signed material to the remote notary-server // Conceptually it performs an operation similar to a `git rebase` -func (r *NotaryRepository) Publish() error { - cl, err := r.GetChangelist() - if err != nil { - return err - } - if err = r.publish(cl); err != nil { +func (r *repository) Publish() error { + if err := r.publish(r.changelist); err != nil { return err } - if err = cl.Clear(""); err != nil { + if err := r.changelist.Clear(""); err != nil { // This is not a critical problem when only a single host is pushing // but will cause weird behaviour if changelist cleanup is failing // and there are multiple hosts writing to the repo. - logrus.Warn("Unable to clear changelist. You may want to manually delete the folder ", filepath.Join(r.tufRepoPath, "changelist")) + logrus.Warn("Unable to clear changelist. 
You may want to manually delete the folder ", r.changelist.Location()) } return nil } // publish pushes the changes in the given changelist to the remote notary-server // Conceptually it performs an operation similar to a `git rebase` -func (r *NotaryRepository) publish(cl changelist.Changelist) error { +func (r *repository) publish(cl changelist.Changelist) error { var initialPublish bool // update first before publishing if err := r.Update(true); err != nil { // If the remote is not aware of the repo, then this is being published - // for the first time. Try to load from disk instead for publishing. + // for the first time. Try to initialize the repository before publishing. if _, ok := err.(ErrRepositoryNotExist); ok { err := r.bootstrapRepo() + if _, ok := err.(store.ErrMetaNotFound); ok { + logrus.Infof("No TUF data found locally or remotely - initializing repository %s for the first time", r.gun.String()) + err = r.Initialize(nil) + } + if err != nil { - logrus.Debugf("Unable to load repository from local files: %s", - err.Error()) - if _, ok := err.(store.ErrMetaNotFound); ok { - return ErrRepoNotInitialized{} - } + logrus.WithError(err).Debugf("Unable to load or initialize repository during first publish: %s", err.Error()) return err } + // Ensure we will push the initial root and targets file. Either or // both of the root and targets may not be marked as Dirty, since // there may not be any changes that update them, so use a @@ -646,34 +769,23 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error { // these are the TUF files we will need to update, serialized as JSON before // we send anything to remote - updatedFiles := make(map[string][]byte) + updatedFiles := make(map[data.RoleName][]byte) + + // Fetch old keys to support old clients + legacyKeys, err := r.oldKeysForLegacyClientSupport(r.LegacyVersions, initialPublish) + if err != nil { + return err + } // check if our root file is nearing expiry or dirty. Resign if it is. 
If // root is not dirty but we are publishing for the first time, then just // publish the existing root we have. - if nearExpiry(r.tufRepo.Root.Signed.SignedCommon) || r.tufRepo.Root.Dirty { - rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole) - if err != nil { - return err - } - updatedFiles[data.CanonicalRootRole] = rootJSON - } else if initialPublish { - rootJSON, err := r.tufRepo.Root.MarshalJSON() - if err != nil { - return err - } - updatedFiles[data.CanonicalRootRole] = rootJSON + if err := signRootIfNecessary(updatedFiles, r.tufRepo, legacyKeys, initialPublish); err != nil { + return err } - // iterate through all the targets files - if they are dirty, sign and update - for roleName, roleObj := range r.tufRepo.Targets { - if roleObj.Dirty || (roleName == data.CanonicalTargetsRole && initialPublish) { - targetsJSON, err := serializeCanonicalRole(r.tufRepo, roleName) - if err != nil { - return err - } - updatedFiles[roleName] = targetsJSON - } + if err := signTargets(updatedFiles, r.tufRepo, initialPublish); err != nil { + return err } // if we initialized the repo while designating the server as the snapshot @@ -685,10 +797,8 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error { } } - snapshotJSON, err := serializeCanonicalRole( - r.tufRepo, data.CanonicalSnapshotRole) - - if err == nil { + if snapshotJSON, err := serializeCanonicalRole( + r.tufRepo, data.CanonicalSnapshotRole, nil); err == nil { // Only update the snapshot if we've successfully signed it. 
updatedFiles[data.CanonicalSnapshotRole] = snapshotJSON } else if signErr, ok := err.(signed.ErrInsufficientSignatures); ok && signErr.FoundKeys == 0 { @@ -702,12 +812,116 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error { return err } - remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip) + remote := r.getRemoteStore() + + return remote.SetMulti(data.MetadataRoleMapToStringMap(updatedFiles)) +} + +func signRootIfNecessary(updates map[data.RoleName][]byte, repo *tuf.Repo, extraSigningKeys data.KeyList, initialPublish bool) error { + if len(extraSigningKeys) > 0 { + repo.Root.Dirty = true + } + if nearExpiry(repo.Root.Signed.SignedCommon) || repo.Root.Dirty { + rootJSON, err := serializeCanonicalRole(repo, data.CanonicalRootRole, extraSigningKeys) + if err != nil { + return err + } + updates[data.CanonicalRootRole] = rootJSON + } else if initialPublish { + rootJSON, err := repo.Root.MarshalJSON() + if err != nil { + return err + } + updates[data.CanonicalRootRole] = rootJSON + } + return nil +} + +// Fetch back a `legacyVersions` number of roots files, collect the root public keys +// This includes old `root` roles as well as legacy versioned root roles, e.g. 
`1.root` +func (r *repository) oldKeysForLegacyClientSupport(legacyVersions int, initialPublish bool) (data.KeyList, error) { + if initialPublish { + return nil, nil + } + + var oldestVersion int + prevVersion := r.tufRepo.Root.Signed.Version + + if legacyVersions == SignWithAllOldVersions { + oldestVersion = 1 + } else { + oldestVersion = r.tufRepo.Root.Signed.Version - legacyVersions + } + + if oldestVersion < 1 { + oldestVersion = 1 + } + + if prevVersion <= 1 || oldestVersion == prevVersion { + return nil, nil + } + oldKeys := make(map[string]data.PublicKey) + + c, err := r.bootstrapClient(true) + // require a server connection to fetch old roots if err != nil { - return err + return nil, err + } + + for v := prevVersion; v >= oldestVersion; v-- { + logrus.Debugf("fetching old keys from version %d", v) + // fetch old root version + versionedRole := fmt.Sprintf("%d.%s", v, data.CanonicalRootRole.String()) + + raw, err := c.remote.GetSized(versionedRole, -1) + if err != nil { + logrus.Debugf("error downloading %s: %s", versionedRole, err) + continue + } + + signedOldRoot := &data.Signed{} + if err := json.Unmarshal(raw, signedOldRoot); err != nil { + return nil, err + } + oldRootVersion, err := data.RootFromSigned(signedOldRoot) + if err != nil { + return nil, err + } + + // extract legacy versioned root keys + oldRootVersionKeys := getOldRootPublicKeys(oldRootVersion) + for _, oldKey := range oldRootVersionKeys { + oldKeys[oldKey.ID()] = oldKey + } + } + oldKeyList := make(data.KeyList, 0, len(oldKeys)) + for _, key := range oldKeys { + oldKeyList = append(oldKeyList, key) } + return oldKeyList, nil +} - return remote.SetMulti(updatedFiles) +// get all the saved previous roles keys < the current root version +func getOldRootPublicKeys(root *data.SignedRoot) data.KeyList { + rootRole, err := root.BuildBaseRole(data.CanonicalRootRole) + if err != nil { + return nil + } + return rootRole.ListKeys() +} + +func signTargets(updates map[data.RoleName][]byte, repo 
*tuf.Repo, initialPublish bool) error { + // iterate through all the targets files - if they are dirty, sign and update + for roleName, roleObj := range repo.Targets { + if roleObj.Dirty || (roleName == data.CanonicalTargetsRole && initialPublish) { + targetsJSON, err := serializeCanonicalRole(repo, roleName, nil) + if err != nil { + return err + } + updates[roleName] = targetsJSON + } + } + return nil } // bootstrapRepo loads the repository from the local file system (i.e. @@ -715,13 +929,13 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error { // r.tufRepo. This attempts to load metadata for all roles. Since server // snapshots are supported, if the snapshot metadata fails to load, that's ok. // This assumes that bootstrapRepo is only used by Publish() or RotateKey() -func (r *NotaryRepository) bootstrapRepo() error { - b := tuf.NewRepoBuilder(r.gun, r.CryptoService, r.trustPinning) +func (r *repository) bootstrapRepo() error { + b := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), r.trustPinning) logrus.Debugf("Loading trusted collection.") for _, role := range data.BaseRoles { - jsonBytes, err := r.fileStore.GetSized(role, store.NoSizeLimit) + jsonBytes, err := r.cache.GetSized(role.String(), store.NoSizeLimit) if err != nil { if _, ok := err.(store.ErrMetaNotFound); ok && // server snapshots are supported, and server timestamp management @@ -746,19 +960,19 @@ func (r *NotaryRepository) bootstrapRepo() error { // saveMetadata saves contents of r.tufRepo onto the local disk, creating // signatures as necessary, possibly prompting for passphrases. 
-func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error { +func (r *repository) saveMetadata(ignoreSnapshot bool) error { logrus.Debugf("Saving changes to Trusted Collection.") - rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole) + rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole, nil) if err != nil { return err } - err = r.fileStore.Set(data.CanonicalRootRole, rootJSON) + err = r.cache.Set(data.CanonicalRootRole.String(), rootJSON) if err != nil { return err } - targetsToSave := make(map[string][]byte) + targetsToSave := make(map[data.RoleName][]byte) for t := range r.tufRepo.Targets { signedTargets, err := r.tufRepo.SignTargets(t, data.DefaultExpires(data.CanonicalTargetsRole)) if err != nil { @@ -772,26 +986,25 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error { } for role, blob := range targetsToSave { - parentDir := filepath.Dir(role) - os.MkdirAll(parentDir, 0755) - r.fileStore.Set(role, blob) + // If the parent directory does not exist, the cache.Set will create it + r.cache.Set(role.String(), blob) } if ignoreSnapshot { return nil } - snapshotJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalSnapshotRole) + snapshotJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalSnapshotRole, nil) if err != nil { return err } - return r.fileStore.Set(data.CanonicalSnapshotRole, snapshotJSON) + return r.cache.Set(data.CanonicalSnapshotRole.String(), snapshotJSON) } // returns a properly constructed ErrRepositoryNotExist error based on this // repo's information -func (r *NotaryRepository) errRepositoryNotExist() error { +func (r *repository) errRepositoryNotExist() error { host := r.baseURL parsed, err := url.Parse(r.baseURL) if err == nil { @@ -802,7 +1015,7 @@ func (r *NotaryRepository) errRepositoryNotExist() error { // Update bootstraps a trust anchor (root.json) before updating all the // metadata from the repo. 
-func (r *NotaryRepository) Update(forWrite bool) error { +func (r *repository) Update(forWrite bool) error { c, err := r.bootstrapClient(forWrite) if err != nil { if _, ok := err.(store.ErrMetaNotFound); ok { @@ -812,10 +1025,11 @@ func (r *NotaryRepository) Update(forWrite bool) error { } repo, invalid, err := c.Update() if err != nil { - // notFound.Resource may include a checksum so when the role is root, - // it will be root or root.. Therefore best we can - // do it match a "root." prefix - if notFound, ok := err.(store.ErrMetaNotFound); ok && strings.HasPrefix(notFound.Resource, data.CanonicalRootRole+".") { + // notFound.Resource may include a version or checksum so when the role is root, + // it will be root, .root or root.. + notFound, ok := err.(store.ErrMetaNotFound) + isRoot, _ := regexp.MatchString(`\.?`+data.CanonicalRootRole.String()+`\.?`, notFound.Resource) + if ok && isRoot { return r.errRepositoryNotExist() } return err @@ -834,8 +1048,11 @@ func (r *NotaryRepository) Update(forWrite bool) error { // is initialized or not. If set to true, we will always attempt to download // and return an error if the remote repository errors. // -// Populates a tuf.RepoBuilder with this root metadata (only use -// TUFClient.Update to load the rest). +// Populates a tuf.RepoBuilder with this root metadata. If the root metadata +// downloaded is a newer version than what is on disk, then intermediate +// versions will be downloaded and verified in order to rotate trusted keys +// properly. Newer root metadata must always be signed with the previous +// threshold and keys. // // Fails if the remote server is reachable and does not know the repo // (i.e. before the first r.Publish()), in which case the error is @@ -844,20 +1061,20 @@ func (r *NotaryRepository) Update(forWrite bool) error { // // Returns a TUFClient for the remote server, which may not be actually // operational (if the URL is invalid but a root.json is cached). 
-func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, error) { +func (r *repository) bootstrapClient(checkInitialized bool) (*tufClient, error) { minVersion := 1 // the old root on disk should not be validated against any trust pinning configuration // because if we have an old root, it itself is the thing that pins trust - oldBuilder := tuf.NewRepoBuilder(r.gun, r.CryptoService, trustpinning.TrustPinConfig{}) + oldBuilder := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), trustpinning.TrustPinConfig{}) // by default, we want to use the trust pinning configuration on any new root that we download - newBuilder := tuf.NewRepoBuilder(r.gun, r.CryptoService, r.trustPinning) + newBuilder := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), r.trustPinning) // Try to read root from cache first. We will trust this root until we detect a problem // during update which will cause us to download a new root and perform a rotation. // If we have an old root, and it's valid, then we overwrite the newBuilder to be one // preloaded with the old root or one which uses the old root for trust bootstrapping. 
- if rootJSON, err := r.fileStore.GetSized(data.CanonicalRootRole, store.NoSizeLimit); err == nil { + if rootJSON, err := r.cache.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit); err == nil { // if we can't load the cached root, fail hard because that is how we pin trust if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil { return nil, err @@ -865,7 +1082,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, e // again, the root on disk is the source of trust pinning, so use an empty trust // pinning configuration - newBuilder = tuf.NewRepoBuilder(r.gun, r.CryptoService, trustpinning.TrustPinConfig{}) + newBuilder = tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), trustpinning.TrustPinConfig{}) if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil { // Ok, the old root is expired - we want to download a new one. But we want to use the @@ -876,16 +1093,15 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, e } } - remote, remoteErr := getRemoteStore(r.baseURL, r.gun, r.roundTrip) - if remoteErr != nil { - logrus.Error(remoteErr) - } else if !newBuilder.IsLoaded(data.CanonicalRootRole) || checkInitialized { + remote := r.getRemoteStore() + + if !newBuilder.IsLoaded(data.CanonicalRootRole) || checkInitialized { // remoteErr was nil and we were not able to load a root from cache or // are specifically checking for initialization of the repo. // if remote store successfully set up, try and get root from remote // We don't have any local data to determine the size of root, so try the maximum (though it is restricted at 100MB) - tmpJSON, err := remote.GetSized(data.CanonicalRootRole, store.NoSizeLimit) + tmpJSON, err := remote.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit) if err != nil { // we didn't have a root in cache and were unable to load one from // the server. Nothing we can do but error. 
@@ -898,7 +1114,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, e return nil, err } - err = r.fileStore.Set(data.CanonicalRootRole, tmpJSON) + err = r.cache.Set(data.CanonicalRootRole.String(), tmpJSON) if err != nil { // if we can't write cache we should still continue, just log error logrus.Errorf("could not save root to cache: %s", err.Error()) @@ -912,71 +1128,122 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, e return nil, ErrRepoNotInitialized{} } - return NewTUFClient(oldBuilder, newBuilder, remote, r.fileStore), nil + return newTufClient(oldBuilder, newBuilder, remote, r.cache), nil } -// RotateKey removes all existing keys associated with the role, and either -// creates and adds one new key or delegates managing the key to the server. +// RotateKey removes all existing keys associated with the role. If no keys are +// specified in keyList, then this creates and adds one new key or delegates +// managing the key to the server. If key(s) are specified by keyList, then they are +// used for signing the role. // These changes are staged in a changelist until publish is called. 
-func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error { - // We currently support remotely managing timestamp and snapshot keys - canBeRemoteKey := role == data.CanonicalTimestampRole || role == data.CanonicalSnapshotRole - // And locally managing root, targets, and snapshot keys - canBeLocalKey := (role == data.CanonicalSnapshotRole || role == data.CanonicalTargetsRole || - role == data.CanonicalRootRole) +func (r *repository) RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error { + if err := checkRotationInput(role, serverManagesKey); err != nil { + return err + } - switch { - case !data.ValidRole(role) || data.IsDelegation(role): - return fmt.Errorf("notary does not currently permit rotating the %s key", role) - case serverManagesKey && !canBeRemoteKey: - return ErrInvalidRemoteRole{Role: role} - case !serverManagesKey && !canBeLocalKey: - return ErrInvalidLocalRole{Role: role} + pubKeyList, err := r.pubKeyListForRotation(role, serverManagesKey, keyList) + if err != nil { + return err } - var ( - pubKey data.PublicKey - err error - errFmtMsg string - ) - switch serverManagesKey { - case true: - pubKey, err = rotateRemoteKey(r.baseURL, r.gun, role, r.roundTrip) - errFmtMsg = "unable to rotate remote key: %s" - default: - pubKey, err = r.CryptoService.Create(role, r.gun, data.ECDSAKey) - errFmtMsg = "unable to generate key: %s" + cl := changelist.NewMemChangelist() + if err := r.rootFileKeyChange(cl, role, changelist.ActionCreate, pubKeyList); err != nil { + return err + } + return r.publish(cl) +} + +// Given a set of new keys to rotate to and a set of keys to drop, returns the list of current keys to use +func (r *repository) pubKeyListForRotation(role data.RoleName, serverManaged bool, newKeys []string) (pubKeyList data.KeyList, err error) { + var pubKey data.PublicKey + + // If server manages the key being rotated, request a rotation and return the new key + if serverManaged { + remote := r.getRemoteStore() + 
pubKey, err = rotateRemoteKey(role, remote) + pubKeyList = make(data.KeyList, 0, 1) + pubKeyList = append(pubKeyList, pubKey) + if err != nil { + return nil, fmt.Errorf("unable to rotate remote key: %s", err) + } + return pubKeyList, nil } + // If no new keys are passed in, we generate one + if len(newKeys) == 0 { + pubKeyList = make(data.KeyList, 0, 1) + pubKey, err = r.GetCryptoService().Create(role, r.gun, data.ECDSAKey) + pubKeyList = append(pubKeyList, pubKey) + } if err != nil { - return fmt.Errorf(errFmtMsg, err) + return nil, fmt.Errorf("unable to generate key: %s", err) + } + + // If a list of keys to rotate to are provided, we add those + if len(newKeys) > 0 { + pubKeyList = make(data.KeyList, 0, len(newKeys)) + for _, keyID := range newKeys { + pubKey = r.GetCryptoService().GetKey(keyID) + if pubKey == nil { + return nil, fmt.Errorf("unable to find key: %s", keyID) + } + pubKeyList = append(pubKeyList, pubKey) + } + } + + // Convert to certs (for root keys) + if pubKeyList, err = r.pubKeysToCerts(role, pubKeyList); err != nil { + return nil, err + } + + return pubKeyList, nil +} + +func (r *repository) pubKeysToCerts(role data.RoleName, pubKeyList data.KeyList) (data.KeyList, error) { + // only generate certs for root keys + if role != data.CanonicalRootRole { + return pubKeyList, nil } - // if this is a root role, generate a root cert for the public key - if role == data.CanonicalRootRole { - privKey, _, err := r.CryptoService.GetPrivateKey(pubKey.ID()) + for i, pubKey := range pubKeyList { + privKey, loadedRole, err := r.GetCryptoService().GetPrivateKey(pubKey.ID()) if err != nil { - return err + return nil, err + } + if loadedRole != role { + return nil, fmt.Errorf("attempted to load root key but given %s key instead", loadedRole) } pubKey, err = rootCertKey(r.gun, privKey) if err != nil { - return err + return nil, err } + pubKeyList[i] = pubKey } + return pubKeyList, nil +} - cl := changelist.NewMemChangelist() - if err := r.rootFileKeyChange(cl, 
role, changelist.ActionCreate, pubKey); err != nil { - return err +func checkRotationInput(role data.RoleName, serverManaged bool) error { + // We currently support remotely managing timestamp and snapshot keys + canBeRemoteKey := role == data.CanonicalTimestampRole || role == data.CanonicalSnapshotRole + // And locally managing root, targets, and snapshot keys + canBeLocalKey := role == data.CanonicalSnapshotRole || role == data.CanonicalTargetsRole || + role == data.CanonicalRootRole + + switch { + case !data.ValidRole(role) || data.IsDelegation(role): + return fmt.Errorf("notary does not currently permit rotating the %s key", role) + case serverManaged && !canBeRemoteKey: + return ErrInvalidRemoteRole{Role: role} + case !serverManaged && !canBeLocalKey: + return ErrInvalidLocalRole{Role: role} } - return r.publish(cl) + return nil } -func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, action string, key data.PublicKey) error { - kl := make(data.KeyList, 0, 1) - kl = append(kl, key) +func (r *repository) rootFileKeyChange(cl changelist.Changelist, role data.RoleName, action string, keyList []data.PublicKey) error { meta := changelist.TUFRootData{ RoleName: role, - Keys: kl, + Keys: keyList, } metaJSON, err := json.Marshal(meta) if err != nil { @@ -986,8 +1253,8 @@ func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, act c := changelist.NewTUFChange( action, changelist.ScopeRoot, - changelist.TypeRootRole, - role, + changelist.TypeBaseRole, + role.String(), metaJSON, ) return cl.Add(c) @@ -995,15 +1262,17 @@ func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, act // DeleteTrustData removes the trust data stored for this repo in the TUF cache on the client side // Note that we will not delete any private key material from local storage -func (r *NotaryRepository) DeleteTrustData(deleteRemote bool) error { +func DeleteTrustData(baseDir string, gun data.GUN, URL string, rt http.RoundTripper, 
deleteRemote bool) error {
+	localRepo := filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()))
 	// Remove the tufRepoPath directory, which includes local TUF metadata files and changelist information
-	if err := os.RemoveAll(r.tufRepoPath); err != nil {
+	if err := os.RemoveAll(localRepo); err != nil {
 		return fmt.Errorf("error clearing TUF repo data: %v", err)
 	}
-	// Note that this will require admin permission in this NotaryRepository's roundtripper
+	// Note that this will require admin permission for the gun in the roundtripper
 	if deleteRemote {
-		remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
+		remote, err := getRemoteStore(URL, gun, rt)
 		if err != nil {
+			logrus.Errorf("unable to instantiate a remote store: %v", err)
 			return err
 		}
 		if err := remote.RemoveAll(); err != nil {
@@ -1012,3 +1281,9 @@ func (r *NotaryRepository) DeleteTrustData(deleteRemote bool) error {
 	}
 	return nil
 }
+
+// SetLegacyVersions allows the number of legacy versions of the root
+// to be inspected for old signing keys to be configured.
+func (r *repository) SetLegacyVersions(n int) {
+	r.LegacyVersions = n
+}
diff --git a/vendor/github.com/docker/notary/client/delegations.go b/vendor/github.com/docker/notary/client/delegations.go
index aa83d8d32827..99a764680fc4 100644
--- a/vendor/github.com/docker/notary/client/delegations.go
+++ b/vendor/github.com/docker/notary/client/delegations.go
@@ -3,19 +3,18 @@ package client
 import (
 	"encoding/json"
 	"fmt"
-	"path/filepath"
 
-	"github.com/sirupsen/logrus"
 	"github.com/docker/notary"
 	"github.com/docker/notary/client/changelist"
 	store "github.com/docker/notary/storage"
 	"github.com/docker/notary/tuf/data"
 	"github.com/docker/notary/tuf/utils"
+	"github.com/sirupsen/logrus"
 )
 
 // AddDelegation creates changelist entries to add provided delegation public keys and paths.
 // This method composes AddDelegationRoleAndKeys and AddDelegationPaths (each creates one changelist if called).
-func (r *NotaryRepository) AddDelegation(name string, delegationKeys []data.PublicKey, paths []string) error { +func (r *repository) AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error { if len(delegationKeys) > 0 { err := r.AddDelegationRoleAndKeys(name, delegationKeys) if err != nil { @@ -34,18 +33,12 @@ func (r *NotaryRepository) AddDelegation(name string, delegationKeys []data.Publ // AddDelegationRoleAndKeys creates a changelist entry to add provided delegation public keys. // This method is the simplest way to create a new delegation, because the delegation must have at least // one key upon creation to be valid since we will reject the changelist while validating the threshold. -func (r *NotaryRepository) AddDelegationRoleAndKeys(name string, delegationKeys []data.PublicKey) error { +func (r *repository) AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} } - cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) - if err != nil { - return err - } - defer cl.Close() - logrus.Debugf(`Adding delegation "%s" with threshold %d, and %d keys\n`, name, notary.MinThreshold, len(delegationKeys)) @@ -59,23 +52,17 @@ func (r *NotaryRepository) AddDelegationRoleAndKeys(name string, delegationKeys } template := newCreateDelegationChange(name, tdJSON) - return addChange(cl, template, name) + return addChange(r.changelist, template, name) } // AddDelegationPaths creates a changelist entry to add provided paths to an existing delegation. // This method cannot create a new delegation itself because the role must meet the key threshold upon creation. 
-func (r *NotaryRepository) AddDelegationPaths(name string, paths []string) error { +func (r *repository) AddDelegationPaths(name data.RoleName, paths []string) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} } - cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) - if err != nil { - return err - } - defer cl.Close() - logrus.Debugf(`Adding %s paths to delegation %s\n`, paths, name) tdJSON, err := json.Marshal(&changelist.TUFDelegation{ @@ -86,12 +73,12 @@ func (r *NotaryRepository) AddDelegationPaths(name string, paths []string) error } template := newCreateDelegationChange(name, tdJSON) - return addChange(cl, template, name) + return addChange(r.changelist, template, name) } // RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and paths. // This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one changelist if called). -func (r *NotaryRepository) RemoveDelegationKeysAndPaths(name string, keyIDs, paths []string) error { +func (r *repository) RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error { if len(paths) > 0 { err := r.RemoveDelegationPaths(name, paths) if err != nil { @@ -108,37 +95,25 @@ func (r *NotaryRepository) RemoveDelegationKeysAndPaths(name string, keyIDs, pat } // RemoveDelegationRole creates a changelist to remove all paths and keys from a role, and delete the role in its entirety. 
-func (r *NotaryRepository) RemoveDelegationRole(name string) error { +func (r *repository) RemoveDelegationRole(name data.RoleName) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} } - cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) - if err != nil { - return err - } - defer cl.Close() - logrus.Debugf(`Removing delegation "%s"\n`, name) template := newDeleteDelegationChange(name, nil) - return addChange(cl, template, name) + return addChange(r.changelist, template, name) } // RemoveDelegationPaths creates a changelist entry to remove provided paths from an existing delegation. -func (r *NotaryRepository) RemoveDelegationPaths(name string, paths []string) error { +func (r *repository) RemoveDelegationPaths(name data.RoleName, paths []string) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} } - cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) - if err != nil { - return err - } - defer cl.Close() - logrus.Debugf(`Removing %s paths from delegation "%s"\n`, paths, name) tdJSON, err := json.Marshal(&changelist.TUFDelegation{ @@ -149,7 +124,7 @@ func (r *NotaryRepository) RemoveDelegationPaths(name string, paths []string) er } template := newUpdateDelegationChange(name, tdJSON) - return addChange(cl, template, name) + return addChange(r.changelist, template, name) } // RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation. @@ -157,18 +132,12 @@ func (r *NotaryRepository) RemoveDelegationPaths(name string, paths []string) er // the role itself will be deleted in its entirety. // It can also delete a key from all delegations under a parent using a name // with a wildcard at the end. 
-func (r *NotaryRepository) RemoveDelegationKeys(name string, keyIDs []string) error { +func (r *repository) RemoveDelegationKeys(name data.RoleName, keyIDs []string) error { if !data.IsDelegation(name) && !data.IsWildDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} } - cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) - if err != nil { - return err - } - defer cl.Close() - logrus.Debugf(`Removing %s keys from delegation "%s"\n`, keyIDs, name) tdJSON, err := json.Marshal(&changelist.TUFDelegation{ @@ -179,22 +148,16 @@ func (r *NotaryRepository) RemoveDelegationKeys(name string, keyIDs []string) er } template := newUpdateDelegationChange(name, tdJSON) - return addChange(cl, template, name) + return addChange(r.changelist, template, name) } // ClearDelegationPaths creates a changelist entry to remove all paths from an existing delegation. -func (r *NotaryRepository) ClearDelegationPaths(name string) error { +func (r *repository) ClearDelegationPaths(name data.RoleName) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} } - cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) - if err != nil { - return err - } - defer cl.Close() - logrus.Debugf(`Removing all paths from delegation "%s"\n`, name) tdJSON, err := json.Marshal(&changelist.TUFDelegation{ @@ -205,10 +168,10 @@ func (r *NotaryRepository) ClearDelegationPaths(name string) error { } template := newUpdateDelegationChange(name, tdJSON) - return addChange(cl, template, name) + return addChange(r.changelist, template, name) } -func newUpdateDelegationChange(name string, content []byte) *changelist.TUFChange { +func newUpdateDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange { return changelist.NewTUFChange( changelist.ActionUpdate, name, @@ -218,7 +181,7 @@ func newUpdateDelegationChange(name string, content 
[]byte) *changelist.TUFChang ) } -func newCreateDelegationChange(name string, content []byte) *changelist.TUFChange { +func newCreateDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange { return changelist.NewTUFChange( changelist.ActionCreate, name, @@ -228,7 +191,7 @@ func newCreateDelegationChange(name string, content []byte) *changelist.TUFChang ) } -func newDeleteDelegationChange(name string, content []byte) *changelist.TUFChange { +func newDeleteDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange { return changelist.NewTUFChange( changelist.ActionDelete, name, @@ -240,7 +203,7 @@ func newDeleteDelegationChange(name string, content []byte) *changelist.TUFChang // GetDelegationRoles returns the keys and roles of the repository's delegations // Also converts key IDs to canonical key IDs to keep consistent with signing prompts -func (r *NotaryRepository) GetDelegationRoles() ([]data.Role, error) { +func (r *repository) GetDelegationRoles() ([]data.Role, error) { // Update state of the repo to latest if err := r.Update(false); err != nil { return nil, err @@ -249,7 +212,7 @@ func (r *NotaryRepository) GetDelegationRoles() ([]data.Role, error) { // All top level delegations (ex: targets/level1) are stored exclusively in targets.json _, ok := r.tufRepo.Targets[data.CanonicalTargetsRole] if !ok { - return nil, store.ErrMetaNotFound{Resource: data.CanonicalTargetsRole} + return nil, store.ErrMetaNotFound{Resource: data.CanonicalTargetsRole.String()} } // make a copy for traversing nested delegations diff --git a/vendor/github.com/docker/notary/client/errors.go b/vendor/github.com/docker/notary/client/errors.go new file mode 100644 index 000000000000..ba7759c4f790 --- /dev/null +++ b/vendor/github.com/docker/notary/client/errors.go @@ -0,0 +1,48 @@ +package client + +import ( + "fmt" + + "github.com/docker/notary/tuf/data" +) + +// ErrRepoNotInitialized is returned when trying to publish an uninitialized +// notary 
repository +type ErrRepoNotInitialized struct{} + +func (err ErrRepoNotInitialized) Error() string { + return "repository has not been initialized" +} + +// ErrInvalidRemoteRole is returned when the server is requested to manage +// a key type that is not permitted +type ErrInvalidRemoteRole struct { + Role data.RoleName +} + +func (err ErrInvalidRemoteRole) Error() string { + return fmt.Sprintf( + "notary does not permit the server managing the %s key", err.Role.String()) +} + +// ErrInvalidLocalRole is returned when the client wants to manage +// a key type that is not permitted +type ErrInvalidLocalRole struct { + Role data.RoleName +} + +func (err ErrInvalidLocalRole) Error() string { + return fmt.Sprintf( + "notary does not permit the client managing the %s key", err.Role) +} + +// ErrRepositoryNotExist is returned when an action is taken on a remote +// repository that doesn't exist +type ErrRepositoryNotExist struct { + remote string + gun data.GUN +} + +func (err ErrRepositoryNotExist) Error() string { + return fmt.Sprintf("%s does not have trust data for %s", err.remote, err.gun.String()) +} diff --git a/vendor/github.com/docker/notary/client/helpers.go b/vendor/github.com/docker/notary/client/helpers.go index 31b98dd76995..186547eec8ee 100644 --- a/vendor/github.com/docker/notary/client/helpers.go +++ b/vendor/github.com/docker/notary/client/helpers.go @@ -10,14 +10,15 @@ import ( store "github.com/docker/notary/storage" "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/utils" "github.com/sirupsen/logrus" ) // Use this to initialize remote HTTPStores from the config settings -func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStore, error) { +func getRemoteStore(baseURL string, gun data.GUN, rt http.RoundTripper) (store.RemoteStore, error) { s, err := store.NewHTTPStore( - baseURL+"/v2/"+gun+"/_trust/tuf/", + 
baseURL+"/v2/"+gun.String()+"/_trust/tuf/", "", "json", "key", @@ -26,7 +27,7 @@ func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStor if err != nil { return store.OfflineStore{}, err } - return s, err + return s, nil } func applyChangelist(repo *tuf.Repo, invalid *tuf.Repo, cl changelist.Changelist) error { @@ -47,7 +48,7 @@ func applyChangelist(repo *tuf.Repo, invalid *tuf.Repo, cl changelist.Changelist case c.Scope() == changelist.ScopeRoot: err = applyRootChange(repo, c) default: - return fmt.Errorf("scope not supported: %s", c.Scope()) + return fmt.Errorf("scope not supported: %s", c.Scope().String()) } if err != nil { logrus.Debugf("error attempting to apply change #%d: %s, on scope: %s path: %s type: %s", index, c.Action(), c.Scope(), c.Path(), c.Type()) @@ -165,7 +166,7 @@ func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error { func applyRootChange(repo *tuf.Repo, c changelist.Change) error { var err error switch c.Type() { - case changelist.TypeRootRole: + case changelist.TypeBaseRole: err = applyRootRoleChange(repo, c) default: err = fmt.Errorf("type of root change not yet supported: %s", c.Type()) @@ -218,11 +219,7 @@ func warnRolesNearExpiry(r *tuf.Repo) { } // Fetches a public key from a remote store, given a gun and role -func getRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey, error) { - remote, err := getRemoteStore(url, gun, rt) - if err != nil { - return nil, err - } +func getRemoteKey(role data.RoleName, remote store.RemoteStore) (data.PublicKey, error) { rawPubKey, err := remote.GetKey(role) if err != nil { return nil, err @@ -237,11 +234,7 @@ func getRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey, } // Rotates a private key in a remote store and returns the public key component -func rotateRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey, error) { - remote, err := getRemoteStore(url, gun, rt) - if err != nil { - return nil, err - } +func 
rotateRemoteKey(role data.RoleName, remote store.RemoteStore) (data.PublicKey, error) { rawPubKey, err := remote.RotateKey(role) if err != nil { return nil, err @@ -256,11 +249,11 @@ func rotateRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKe } // signs and serializes the metadata for a canonical role in a TUF repo to JSON -func serializeCanonicalRole(tufRepo *tuf.Repo, role string) (out []byte, err error) { +func serializeCanonicalRole(tufRepo *tuf.Repo, role data.RoleName, extraSigningKeys data.KeyList) (out []byte, err error) { var s *data.Signed switch { case role == data.CanonicalRootRole: - s, err = tufRepo.SignRoot(data.DefaultExpires(role)) + s, err = tufRepo.SignRoot(data.DefaultExpires(role), extraSigningKeys) case role == data.CanonicalSnapshotRole: s, err = tufRepo.SignSnapshot(data.DefaultExpires(role)) case tufRepo.Targets[role] != nil: @@ -276,3 +269,38 @@ func serializeCanonicalRole(tufRepo *tuf.Repo, role string) (out []byte, err err return json.Marshal(s) } + +func getAllPrivKeys(rootKeyIDs []string, cryptoService signed.CryptoService) ([]data.PrivateKey, error) { + if cryptoService == nil { + return nil, fmt.Errorf("no crypto service available to get private keys from") + } + + privKeys := make([]data.PrivateKey, 0, len(rootKeyIDs)) + for _, keyID := range rootKeyIDs { + privKey, _, err := cryptoService.GetPrivateKey(keyID) + if err != nil { + return nil, err + } + privKeys = append(privKeys, privKey) + } + if len(privKeys) == 0 { + var rootKeyID string + rootKeyList := cryptoService.ListKeys(data.CanonicalRootRole) + if len(rootKeyList) == 0 { + rootPublicKey, err := cryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) + if err != nil { + return nil, err + } + rootKeyID = rootPublicKey.ID() + } else { + rootKeyID = rootKeyList[0] + } + privKey, _, err := cryptoService.GetPrivateKey(rootKeyID) + if err != nil { + return nil, err + } + privKeys = append(privKeys, privKey) + } + + return privKeys, nil +} diff --git 
a/vendor/github.com/docker/notary/client/interface.go b/vendor/github.com/docker/notary/client/interface.go new file mode 100644 index 000000000000..ca09fb4ebc63 --- /dev/null +++ b/vendor/github.com/docker/notary/client/interface.go @@ -0,0 +1,47 @@ +package client + +import ( + "github.com/docker/notary/client/changelist" + "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/signed" +) + +// Repository represents the set of options that must be supported over a TUF repo. +type Repository interface { + // General management operations + Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error + InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error + Publish() error + + // Target Operations + AddTarget(target *Target, roles ...data.RoleName) error + RemoveTarget(targetName string, roles ...data.RoleName) error + ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) + GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) + GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) + + // Changelist operations + GetChangelist() (changelist.Changelist, error) + + // Role operations + ListRoles() ([]RoleWithSignatures, error) + GetDelegationRoles() ([]data.Role, error) + AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error + AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error + AddDelegationPaths(name data.RoleName, paths []string) error + RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error + RemoveDelegationRole(name data.RoleName) error + RemoveDelegationPaths(name data.RoleName, paths []string) error + RemoveDelegationKeys(name data.RoleName, keyIDs []string) error + ClearDelegationPaths(name data.RoleName) error + + // Witness and other re-signing operations + Witness(roles ...data.RoleName) ([]data.RoleName, error) + + // Key 
Operations + RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error + + GetCryptoService() signed.CryptoService + SetLegacyVersions(int) + GetGUN() data.GUN +} diff --git a/vendor/github.com/docker/notary/client/repo.go b/vendor/github.com/docker/notary/client/repo.go index 27f857616159..953fda10c337 100644 --- a/vendor/github.com/docker/notary/client/repo.go +++ b/vendor/github.com/docker/notary/client/repo.go @@ -4,26 +4,15 @@ package client import ( "fmt" - "net/http" "github.com/docker/notary" "github.com/docker/notary/trustmanager" - "github.com/docker/notary/trustpinning" ) -// NewNotaryRepository is a helper method that returns a new notary repository. -// It takes the base directory under where all the trust files will be stored -// (This is normally defaults to "~/.notary" or "~/.docker/trust" when enabling -// docker content trust). -func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper, - retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) ( - *NotaryRepository, error) { - +func getKeyStores(baseDir string, retriever notary.PassRetriever) ([]trustmanager.KeyStore, error) { fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever) if err != nil { return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir) } - - return repositoryFromKeystores(baseDir, gun, baseURL, rt, - []trustmanager.KeyStore{fileKeyStore}, trustPinning) + return []trustmanager.KeyStore{fileKeyStore}, nil } diff --git a/vendor/github.com/docker/notary/client/repo_pkcs11.go b/vendor/github.com/docker/notary/client/repo_pkcs11.go index bbbe4762ff91..3eccc2f7f2e6 100644 --- a/vendor/github.com/docker/notary/client/repo_pkcs11.go +++ b/vendor/github.com/docker/notary/client/repo_pkcs11.go @@ -4,21 +4,13 @@ package client import ( "fmt" - "net/http" "github.com/docker/notary" "github.com/docker/notary/trustmanager" "github.com/docker/notary/trustmanager/yubikey" - 
"github.com/docker/notary/trustpinning" ) -// NewNotaryRepository is a helper method that returns a new notary repository. -// It takes the base directory under where all the trust files will be stored -// (usually ~/.docker/trust/). -func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper, - retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) ( - *NotaryRepository, error) { - +func getKeyStores(baseDir string, retriever notary.PassRetriever) ([]trustmanager.KeyStore, error) { fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever) if err != nil { return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir) @@ -29,6 +21,5 @@ func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper, if yubiKeyStore != nil { keyStores = []trustmanager.KeyStore{yubiKeyStore, fileKeyStore} } - - return repositoryFromKeystores(baseDir, gun, baseURL, rt, keyStores, trustPinning) + return keyStores, nil } diff --git a/vendor/github.com/docker/notary/client/tufclient.go b/vendor/github.com/docker/notary/client/tufclient.go index 3a1ef0eb89f0..2cf91e2723dd 100644 --- a/vendor/github.com/docker/notary/client/tufclient.go +++ b/vendor/github.com/docker/notary/client/tufclient.go @@ -2,26 +2,28 @@ package client import ( "encoding/json" + "fmt" - "github.com/sirupsen/logrus" "github.com/docker/notary" store "github.com/docker/notary/storage" + "github.com/docker/notary/trustpinning" "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" + "github.com/sirupsen/logrus" ) -// TUFClient is a usability wrapper around a raw TUF repo -type TUFClient struct { +// tufClient is a usability wrapper around a raw TUF repo +type tufClient struct { remote store.RemoteStore cache store.MetadataStore oldBuilder tuf.RepoBuilder newBuilder tuf.RepoBuilder } -// NewTUFClient initialized a TUFClient with the given repo, remote source of content, and cache -func 
NewTUFClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *TUFClient { - return &TUFClient{ +// newTufClient initialized a tufClient with the given repo, remote source of content, and cache +func newTufClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *tufClient { + return &tufClient{ oldBuilder: oldBuilder, newBuilder: newBuilder, remote: remote, @@ -30,7 +32,7 @@ func NewTUFClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteSto } // Update performs an update to the TUF repo as defined by the TUF spec -func (c *TUFClient) Update() (*tuf.Repo, *tuf.Repo, error) { +func (c *tufClient) Update() (*tuf.Repo, *tuf.Repo, error) { // 1. Get timestamp // a. If timestamp error (verification, expired, etc...) download new root and return to 1. // 2. Check if local snapshot is up to date @@ -47,8 +49,8 @@ func (c *TUFClient) Update() (*tuf.Repo, *tuf.Repo, error) { c.newBuilder = c.newBuilder.BootstrapNewBuilder() - if err := c.downloadRoot(); err != nil { - logrus.Debug("Client Update (Root):", err) + if err := c.updateRoot(); err != nil { + logrus.Debug("Client Update (Root): ", err) return nil, nil, err } // If we error again, we now have the latest root and just want to fail @@ -61,7 +63,7 @@ func (c *TUFClient) Update() (*tuf.Repo, *tuf.Repo, error) { return c.newBuilder.Finish() } -func (c *TUFClient) update() error { +func (c *tufClient) update() error { if err := c.downloadTimestamp(); err != nil { logrus.Debugf("Client Update (Timestamp): %s", err.Error()) return err @@ -78,37 +80,103 @@ func (c *TUFClient) update() error { return nil } -// downloadRoot is responsible for downloading the root.json -func (c *TUFClient) downloadRoot() error { - role := data.CanonicalRootRole - consistentInfo := c.newBuilder.GetConsistentInfo(role) +// updateRoot checks if there is a newer version of the root available, and if so +// downloads all intermediate root files to 
allow proper key rotation. +func (c *tufClient) updateRoot() error { + // Get current root version + currentRootConsistentInfo := c.oldBuilder.GetConsistentInfo(data.CanonicalRootRole) + currentVersion := c.oldBuilder.GetLoadedVersion(currentRootConsistentInfo.RoleName) - // We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle - // since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch - if !consistentInfo.ChecksumKnown() { - logrus.Debugf("Loading root with no expected checksum") + // Get new root version + raw, err := c.downloadRoot() - // get the cached root, if it exists, just for version checking - cachedRoot, _ := c.cache.GetSized(role, -1) - // prefer to download a new root - _, remoteErr := c.tryLoadRemote(consistentInfo, cachedRoot) - return remoteErr + switch err.(type) { + case *trustpinning.ErrRootRotationFail: + // Rotation errors are okay since we haven't yet downloaded + // all intermediate root files + break + case nil: + // No error updating root - we were at most 1 version behind + return nil + default: + // Return any non-rotation error. 
+ return err } - _, err := c.tryLoadCacheThenRemote(consistentInfo) - return err + // Load current version into newBuilder + currentRaw, err := c.cache.GetSized(data.CanonicalRootRole.String(), -1) + if err != nil { + logrus.Debugf("error loading %d.%s: %s", currentVersion, data.CanonicalRootRole, err) + return err + } + if err := c.newBuilder.LoadRootForUpdate(currentRaw, currentVersion, false); err != nil { + logrus.Debugf("%d.%s is invalid: %s", currentVersion, data.CanonicalRootRole, err) + return err + } + + // Extract newest version number + signedRoot := &data.Signed{} + if err := json.Unmarshal(raw, signedRoot); err != nil { + return err + } + newestRoot, err := data.RootFromSigned(signedRoot) + if err != nil { + return err + } + newestVersion := newestRoot.Signed.SignedCommon.Version + + // Update from current + 1 (current already loaded) to newest - 1 (newest loaded below) + if err := c.updateRootVersions(currentVersion+1, newestVersion-1); err != nil { + return err + } + + // Already downloaded newest, verify it against newest - 1 + if err := c.newBuilder.LoadRootForUpdate(raw, newestVersion, true); err != nil { + logrus.Debugf("downloaded %d.%s is invalid: %s", newestVersion, data.CanonicalRootRole, err) + return err + } + logrus.Debugf("successfully verified downloaded %d.%s", newestVersion, data.CanonicalRootRole) + + // Write newest to cache + if err := c.cache.Set(data.CanonicalRootRole.String(), raw); err != nil { + logrus.Debugf("unable to write %d.%s to cache: %s", newestVersion, data.CanonicalRootRole, err) + } + logrus.Debugf("finished updating root files") + return nil +} + +// updateRootVersions updates the root from its current version to a target, rotating keys +// as they are found +func (c *tufClient) updateRootVersions(fromVersion, toVersion int) error { + for v := fromVersion; v <= toVersion; v++ { + logrus.Debugf("updating root from version %d to version %d, currently fetching %d", fromVersion, toVersion, v) + + versionedRole := 
fmt.Sprintf("%d.%s", v, data.CanonicalRootRole) + + raw, err := c.remote.GetSized(versionedRole, -1) + if err != nil { + logrus.Debugf("error downloading %s: %s", versionedRole, err) + return err + } + if err := c.newBuilder.LoadRootForUpdate(raw, v, false); err != nil { + logrus.Debugf("downloaded %s is invalid: %s", versionedRole, err) + return err + } + logrus.Debugf("successfully verified downloaded %s", versionedRole) + } + return nil } // downloadTimestamp is responsible for downloading the timestamp.json // Timestamps are special in that we ALWAYS attempt to download and only // use cache if the download fails (and the cache is still valid). -func (c *TUFClient) downloadTimestamp() error { +func (c *tufClient) downloadTimestamp() error { logrus.Debug("Loading timestamp...") role := data.CanonicalTimestampRole consistentInfo := c.newBuilder.GetConsistentInfo(role) // always get the remote timestamp, since it supersedes the local one - cachedTS, cachedErr := c.cache.GetSized(role, notary.MaxTimestampSize) + cachedTS, cachedErr := c.cache.GetSized(role.String(), notary.MaxTimestampSize) _, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS) // check that there was no remote error, or if there was a network problem @@ -138,7 +206,7 @@ func (c *TUFClient) downloadTimestamp() error { } // downloadSnapshot is responsible for downloading the snapshot.json -func (c *TUFClient) downloadSnapshot() error { +func (c *tufClient) downloadSnapshot() error { logrus.Debug("Loading snapshot...") role := data.CanonicalSnapshotRole consistentInfo := c.newBuilder.GetConsistentInfo(role) @@ -150,7 +218,7 @@ func (c *TUFClient) downloadSnapshot() error { // downloadTargets downloads all targets and delegated targets for the repository. // It uses a pre-order tree traversal as it's necessary to download parents first // to obtain the keys to validate children. 
-func (c *TUFClient) downloadTargets() error { +func (c *tufClient) downloadTargets() error { toDownload := []data.DelegationRole{{ BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole}, Paths: []string{""}, @@ -183,7 +251,7 @@ func (c *TUFClient) downloadTargets() error { return nil } -func (c TUFClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) { +func (c tufClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) { logrus.Debugf("Loading %s...", role.Name) tgs := &data.SignedTargets{} @@ -198,8 +266,26 @@ func (c TUFClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInf return tgs.GetValidDelegations(role), nil } -func (c *TUFClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) { - cachedTS, err := c.cache.GetSized(consistentInfo.RoleName, consistentInfo.Length()) +// downloadRoot is responsible for downloading the root.json +func (c *tufClient) downloadRoot() ([]byte, error) { + role := data.CanonicalRootRole + consistentInfo := c.newBuilder.GetConsistentInfo(role) + + // We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle + // since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch + if !consistentInfo.ChecksumKnown() { + logrus.Debugf("Loading root with no expected checksum") + + // get the cached root, if it exists, just for version checking + cachedRoot, _ := c.cache.GetSized(role.String(), -1) + // prefer to download a new root + return c.tryLoadRemote(consistentInfo, cachedRoot) + } + return c.tryLoadCacheThenRemote(consistentInfo) +} + +func (c *tufClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) { + cachedTS, err := c.cache.GetSized(consistentInfo.RoleName.String(), consistentInfo.Length()) if err != nil { logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName) 
return c.tryLoadRemote(consistentInfo, nil) @@ -214,7 +300,7 @@ func (c *TUFClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([ return c.tryLoadRemote(consistentInfo, cachedTS) } -func (c *TUFClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) { +func (c *tufClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) { consistentName := consistentInfo.ConsistentName() raw, err := c.remote.GetSized(consistentName, consistentInfo.Length()) if err != nil { @@ -232,7 +318,7 @@ func (c *TUFClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) return raw, err } logrus.Debugf("successfully verified downloaded %s", consistentName) - if err := c.cache.Set(consistentInfo.RoleName, raw); err != nil { + if err := c.cache.Set(consistentInfo.RoleName.String(), raw); err != nil { logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err) } return raw, nil diff --git a/vendor/github.com/docker/notary/client/witness.go b/vendor/github.com/docker/notary/client/witness.go index 21a42aac467a..b52239baae62 100644 --- a/vendor/github.com/docker/notary/client/witness.go +++ b/vendor/github.com/docker/notary/client/witness.go @@ -1,8 +1,6 @@ package client import ( - "path/filepath" - "github.com/docker/notary/client/changelist" "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" @@ -10,14 +8,9 @@ import ( // Witness creates change objects to witness (i.e. re-sign) the given // roles on the next publish. 
One change is created per role -func (r *NotaryRepository) Witness(roles ...string) ([]string, error) { - cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) - if err != nil { - return nil, err - } - defer cl.Close() - - successful := make([]string, 0, len(roles)) +func (r *repository) Witness(roles ...data.RoleName) ([]data.RoleName, error) { + var err error + successful := make([]data.RoleName, 0, len(roles)) for _, role := range roles { // scope is role c := changelist.NewTUFChange( @@ -27,7 +20,7 @@ func (r *NotaryRepository) Witness(roles ...string) ([]string, error) { "", nil, ) - err = cl.Add(c) + err = r.changelist.Add(c) if err != nil { break } @@ -36,7 +29,7 @@ func (r *NotaryRepository) Witness(roles ...string) ([]string, error) { return successful, err } -func witnessTargets(repo *tuf.Repo, invalid *tuf.Repo, role string) error { +func witnessTargets(repo *tuf.Repo, invalid *tuf.Repo, role data.RoleName) error { if r, ok := repo.Targets[role]; ok { // role is already valid, mark for re-signing/updating r.Dirty = true diff --git a/vendor/github.com/docker/notary/const.go b/vendor/github.com/docker/notary/const.go index 0c4d4037b4ed..a1bf3588cdb5 100644 --- a/vendor/github.com/docker/notary/const.go +++ b/vendor/github.com/docker/notary/const.go @@ -1,6 +1,8 @@ package notary -import "time" +import ( + "time" +) // application wide constants const ( @@ -12,14 +14,10 @@ const ( MinRSABitSize = 2048 // MinThreshold requires a minimum of one threshold for roles; currently we do not support a higher threshold MinThreshold = 1 - // PrivKeyPerms are the file permissions to use when writing private keys to disk - PrivKeyPerms = 0700 - // PubCertPerms are the file permissions to use when writing public certificates to disk - PubCertPerms = 0755 - // Sha256HexSize is how big a Sha256 hex is in number of characters - Sha256HexSize = 64 - // Sha512HexSize is how big a Sha512 hex is in number of characters - Sha512HexSize = 128 + // 
SHA256HexSize is how big a SHA256 hex is in number of characters + SHA256HexSize = 64 + // SHA512HexSize is how big a SHA512 hex is in number of characters + SHA512HexSize = 128 // SHA256 is the name of SHA256 hash algorithm SHA256 = "sha256" // SHA512 is the name of SHA512 hash algorithm @@ -29,8 +27,10 @@ const ( // PrivDir is the directory, under the notary repo base directory, where private keys are stored PrivDir = "private" // RootKeysSubdir is the subdirectory under PrivDir where root private keys are stored + // DEPRECATED: The only reason we need this constant is compatibility with older versions RootKeysSubdir = "root_keys" // NonRootKeysSubdir is the subdirectory under PrivDir where non-root private keys are stored + // DEPRECATED: The only reason we need this constant is compatibility with older versions NonRootKeysSubdir = "tuf_keys" // KeyExtension is the file extension to use for private key files KeyExtension = "key" @@ -54,17 +54,42 @@ const ( MySQLBackend = "mysql" MemoryBackend = "memory" + PostgresBackend = "postgres" SQLiteBackend = "sqlite3" RethinkDBBackend = "rethinkdb" + FileBackend = "file" DefaultImportRole = "delegation" + + // HealthCheckKeyManagement and HealthCheckSigner are the grpc service name + // for "KeyManagement" and "Signer" respectively which used for health check. + // The "Overall" indicates the querying for overall status of the server. + HealthCheckKeyManagement = "grpc.health.v1.Health.KeyManagement" + HealthCheckSigner = "grpc.health.v1.Health.Signer" + HealthCheckOverall = "grpc.health.v1.Health.Overall" + + // PrivExecPerms indicates the file permissions for directory + // and PrivNoExecPerms for file. 
+ PrivExecPerms = 0700 + PrivNoExecPerms = 0600 + + // DefaultPageSize is the default number of records to return from the changefeed + DefaultPageSize = 100 +) + +// enum to use for setting and retrieving values from contexts +const ( + CtxKeyMetaStore CtxKey = iota + CtxKeyKeyAlgo + CtxKeyCryptoSvc + CtxKeyRepo ) -// NotaryDefaultExpiries is the construct used to configure the default expiry times of -// the various role files. -var NotaryDefaultExpiries = map[string]time.Duration{ - "root": NotaryRootExpiry, - "targets": NotaryTargetsExpiry, - "snapshot": NotarySnapshotExpiry, - "timestamp": NotaryTimestampExpiry, +// NotarySupportedBackends contains the backends we would like to support at present +var NotarySupportedBackends = []string{ + MemoryBackend, + MySQLBackend, + SQLiteBackend, + RethinkDBBackend, + PostgresBackend, } diff --git a/vendor/github.com/docker/notary/cryptoservice/certificate.go b/vendor/github.com/docker/notary/cryptoservice/certificate.go index 805a169af7f5..26de51039289 100644 --- a/vendor/github.com/docker/notary/cryptoservice/certificate.go +++ b/vendor/github.com/docker/notary/cryptoservice/certificate.go @@ -12,17 +12,17 @@ import ( ) // GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval -func GenerateCertificate(rootKey data.PrivateKey, gun string, startTime, endTime time.Time) (*x509.Certificate, error) { +func GenerateCertificate(rootKey data.PrivateKey, gun data.GUN, startTime, endTime time.Time) (*x509.Certificate, error) { signer := rootKey.CryptoSigner() if signer == nil { - return nil, fmt.Errorf("key type not supported for Certificate generation: %s\n", rootKey.Algorithm()) + return nil, fmt.Errorf("key type not supported for Certificate generation: %s", rootKey.Algorithm()) } return generateCertificate(signer, gun, startTime, endTime) } -func generateCertificate(signer crypto.Signer, gun string, startTime, endTime time.Time) (*x509.Certificate, error) { - template, err := 
utils.NewCertificate(gun, startTime, endTime) +func generateCertificate(signer crypto.Signer, gun data.GUN, startTime, endTime time.Time) (*x509.Certificate, error) { + template, err := utils.NewCertificate(gun.String(), startTime, endTime) if err != nil { return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err) } diff --git a/vendor/github.com/docker/notary/cryptoservice/crypto_service.go b/vendor/github.com/docker/notary/cryptoservice/crypto_service.go index 0c696d51f934..0773f1bfadcd 100644 --- a/vendor/github.com/docker/notary/cryptoservice/crypto_service.go +++ b/vendor/github.com/docker/notary/cryptoservice/crypto_service.go @@ -1,17 +1,16 @@ package cryptoservice import ( - "crypto/rand" - "fmt" - "crypto/x509" "encoding/pem" "errors" - "github.com/sirupsen/logrus" + "fmt" + "github.com/docker/notary" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" ) var ( @@ -36,47 +35,23 @@ func NewCryptoService(keyStores ...trustmanager.KeyStore) *CryptoService { } // Create is used to generate keys for targets, snapshots and timestamps -func (cs *CryptoService) Create(role, gun, algorithm string) (data.PublicKey, error) { - var privKey data.PrivateKey - var err error - - switch algorithm { - case data.RSAKey: - privKey, err = utils.GenerateRSAKey(rand.Reader, notary.MinRSABitSize) - if err != nil { - return nil, fmt.Errorf("failed to generate RSA key: %v", err) - } - case data.ECDSAKey: - privKey, err = utils.GenerateECDSAKey(rand.Reader) - if err != nil { - return nil, fmt.Errorf("failed to generate EC key: %v", err) - } - case data.ED25519Key: - privKey, err = utils.GenerateED25519Key(rand.Reader) - if err != nil { - return nil, fmt.Errorf("failed to generate ED25519 key: %v", err) - } - default: - return nil, fmt.Errorf("private key type not supported for key generation: %s", algorithm) +func (cs *CryptoService) Create(role data.RoleName, 
gun data.GUN, algorithm string) (data.PublicKey, error) { + if algorithm == data.RSAKey { + return nil, fmt.Errorf("%s keys can only be imported", data.RSAKey) } - logrus.Debugf("generated new %s key for role: %s and keyID: %s", algorithm, role, privKey.ID()) - // Store the private key into our keystore - for _, ks := range cs.keyStores { - err = ks.AddKey(trustmanager.KeyInfo{Role: role, Gun: gun}, privKey) - if err == nil { - return data.PublicKeyFromPrivate(privKey), nil - } - } + privKey, err := utils.GenerateKey(algorithm) if err != nil { - return nil, fmt.Errorf("failed to add key to filestore: %v", err) + return nil, fmt.Errorf("failed to generate %s key: %v", algorithm, err) } + logrus.Debugf("generated new %s key for role: %s and keyID: %s", algorithm, role.String(), privKey.ID()) + pubKey := data.PublicKeyFromPrivate(privKey) - return nil, fmt.Errorf("keystores would not accept new private keys for unknown reasons") + return pubKey, cs.AddKey(role, gun, privKey) } // GetPrivateKey returns a private key and role if present by ID. -func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role string, err error) { +func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role data.RoleName, err error) { for _, ks := range cs.keyStores { if k, role, err = ks.GetKey(keyID); err == nil { return @@ -120,14 +95,14 @@ func (cs *CryptoService) RemoveKey(keyID string) (err error) { // AddKey adds a private key to a specified role. 
// The GUN is inferred from the cryptoservice itself for non-root roles -func (cs *CryptoService) AddKey(role, gun string, key data.PrivateKey) (err error) { +func (cs *CryptoService) AddKey(role data.RoleName, gun data.GUN, key data.PrivateKey) (err error) { // First check if this key already exists in any of our keystores for _, ks := range cs.keyStores { if keyInfo, err := ks.GetKeyInfo(key.ID()); err == nil { if keyInfo.Role != role { - return fmt.Errorf("key with same ID already exists for role: %s", keyInfo.Role) + return fmt.Errorf("key with same ID already exists for role: %s", keyInfo.Role.String()) } - logrus.Debugf("key with same ID %s and role %s already exists", key.ID(), keyInfo.Role) + logrus.Debugf("key with same ID %s and role %s already exists", key.ID(), keyInfo.Role.String()) return nil } } @@ -142,7 +117,7 @@ func (cs *CryptoService) AddKey(role, gun string, key data.PrivateKey) (err erro } // ListKeys returns a list of key IDs valid for the given role -func (cs *CryptoService) ListKeys(role string) []string { +func (cs *CryptoService) ListKeys(role data.RoleName) []string { var res []string for _, ks := range cs.keyStores { for k, r := range ks.ListKeys() { @@ -155,8 +130,8 @@ func (cs *CryptoService) ListKeys(role string) []string { } // ListAllKeys returns a map of key IDs to role -func (cs *CryptoService) ListAllKeys() map[string]string { - res := make(map[string]string) +func (cs *CryptoService) ListAllKeys() map[string]data.RoleName { + res := make(map[string]data.RoleName) for _, ks := range cs.keyStores { for k, r := range ks.ListKeys() { res[k] = r.Role // keys are content addressed so don't care about overwrites @@ -173,9 +148,12 @@ func CheckRootKeyIsEncrypted(pemBytes []byte) error { return ErrNoValidPrivateKey } - if !x509.IsEncryptedPEMBlock(block) { - return ErrRootKeyNotEncrypted + if block.Type == "ENCRYPTED PRIVATE KEY" { + return nil + } + if !notary.FIPSEnabled() && x509.IsEncryptedPEMBlock(block) { + return nil } - return 
nil + return ErrRootKeyNotEncrypted } diff --git a/vendor/github.com/docker/notary/fips.go b/vendor/github.com/docker/notary/fips.go new file mode 100644 index 000000000000..01ed2fb57034 --- /dev/null +++ b/vendor/github.com/docker/notary/fips.go @@ -0,0 +1,13 @@ +package notary + +import "os" + +// FIPSEnvVar is the name of the environment variable that is being used to switch +// between FIPS and non-FIPS mode +const FIPSEnvVar = "GOFIPS" + +// FIPSEnabled returns true if environment variable `GOFIPS` has been set to enable +// FIPS mode +func FIPSEnabled() bool { + return os.Getenv(FIPSEnvVar) != "" +} diff --git a/vendor/github.com/docker/notary/notary.go b/vendor/github.com/docker/notary/notary.go index d6f070a47b14..7d6747f5db14 100644 --- a/vendor/github.com/docker/notary/notary.go +++ b/vendor/github.com/docker/notary/notary.go @@ -5,3 +5,8 @@ package notary // confirmation), createNew will be true. Attempts is passed in so that implementers // decide how many chances to give to a human, for example. type PassRetriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error) + +// CtxKey is a wrapper type for use in context.WithValue() to satisfy golint +// https://github.com/golang/go/issues/17293 +// https://github.com/golang/lint/pull/245 +type CtxKey int diff --git a/vendor/github.com/docker/notary/passphrase/passphrase.go b/vendor/github.com/docker/notary/passphrase/passphrase.go index 23c7e6a0b3ae..69b46482f8f2 100644 --- a/vendor/github.com/docker/notary/passphrase/passphrase.go +++ b/vendor/github.com/docker/notary/passphrase/passphrase.go @@ -11,15 +11,13 @@ import ( "path/filepath" "strings" - "github.com/docker/docker/pkg/term" "github.com/docker/notary" + "golang.org/x/crypto/ssh/terminal" ) const ( idBytesToDisplay = 7 tufRootAlias = "root" - tufTargetsAlias = "targets" - tufSnapshotAlias = "snapshot" tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. 
This passphrase will be used to protect the most sensitive key in your signing system. Please choose a long, complex passphrase and be careful to keep the password and the @@ -51,7 +49,7 @@ var ( // Upon successful passphrase retrievals, the passphrase will be cached such that // subsequent prompts will produce the same passphrase. func PromptRetriever() notary.PassRetriever { - if !term.IsTerminal(os.Stdin.Fd()) { + if !terminal.IsTerminal(int(os.Stdin.Fd())) { return func(string, string, bool, int) (string, bool, error) { return "", false, ErrNoInput } @@ -93,17 +91,6 @@ func (br *boundRetriever) requestPassphrase(keyName, alias string, createNew boo displayAlias = val } - // If typing on the terminal, we do not want the terminal to echo the - // password that is typed (so it doesn't display) - if term.IsTerminal(os.Stdin.Fd()) { - state, err := term.SaveState(os.Stdin.Fd()) - if err != nil { - return "", false, err - } - term.DisableEcho(os.Stdin.Fd(), state) - defer term.RestoreTerminal(os.Stdin.Fd(), state) - } - indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator)) if indexOfLastSeparator == -1 { indexOfLastSeparator = 0 @@ -135,7 +122,7 @@ func (br *boundRetriever) requestPassphrase(keyName, alias string, createNew boo } stdin := bufio.NewReader(br.in) - passphrase, err := stdin.ReadBytes('\n') + passphrase, err := GetPassphrase(stdin) fmt.Fprintln(br.out) if err != nil { return "", false, err @@ -162,7 +149,8 @@ func (br *boundRetriever) verifyAndConfirmPassword(stdin *bufio.Reader, retPass, } fmt.Fprintf(br.out, "Repeat passphrase for new %s key%s: ", displayAlias, withID) - confirmation, err := stdin.ReadBytes('\n') + + confirmation, err := GetPassphrase(stdin) fmt.Fprintln(br.out) if err != nil { return err @@ -203,3 +191,20 @@ func ConstantRetriever(constantPassphrase string) notary.PassRetriever { return constantPassphrase, false, nil } } + +// GetPassphrase get the passphrase from bufio.Reader or from terminal. 
+// If typing on the terminal, we disable terminal to echo the passphrase. +func GetPassphrase(in *bufio.Reader) ([]byte, error) { + var ( + passphrase []byte + err error + ) + + if terminal.IsTerminal(int(os.Stdin.Fd())) { + passphrase, err = terminal.ReadPassword(int(os.Stdin.Fd())) + } else { + passphrase, err = in.ReadBytes('\n') + } + + return passphrase, err +} diff --git a/vendor/github.com/docker/notary/storage/filestore.go b/vendor/github.com/docker/notary/storage/filestore.go index 1d50a58be220..d7dc6bfb95ff 100644 --- a/vendor/github.com/docker/notary/storage/filestore.go +++ b/vendor/github.com/docker/notary/storage/filestore.go @@ -1,6 +1,8 @@ package storage import ( + "bytes" + "encoding/pem" "fmt" "io" "io/ioutil" @@ -9,19 +11,13 @@ import ( "strings" "github.com/docker/notary" + "github.com/sirupsen/logrus" ) -// NewFilesystemStore creates a new store in a directory tree -func NewFilesystemStore(baseDir, subDir, extension string) (*FilesystemStore, error) { - baseDir = filepath.Join(baseDir, subDir) - - return NewFileStore(baseDir, extension, notary.PrivKeyPerms) -} - // NewFileStore creates a fully configurable file store -func NewFileStore(baseDir, fileExt string, perms os.FileMode) (*FilesystemStore, error) { +func NewFileStore(baseDir, fileExt string) (*FilesystemStore, error) { baseDir = filepath.Clean(baseDir) - if err := createDirectory(baseDir, perms); err != nil { + if err := createDirectory(baseDir, notary.PrivExecPerms); err != nil { return nil, err } if !strings.HasPrefix(fileExt, ".") { @@ -31,34 +27,95 @@ func NewFileStore(baseDir, fileExt string, perms os.FileMode) (*FilesystemStore, return &FilesystemStore{ baseDir: baseDir, ext: fileExt, - perms: perms, }, nil } -// NewSimpleFileStore is a convenience wrapper to create a world readable, -// owner writeable filestore -func NewSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) { - return NewFileStore(baseDir, fileExt, notary.PubCertPerms) -} - // 
NewPrivateKeyFileStorage initializes a new filestore for private keys, appending // the notary.PrivDir to the baseDir. func NewPrivateKeyFileStorage(baseDir, fileExt string) (*FilesystemStore, error) { baseDir = filepath.Join(baseDir, notary.PrivDir) - return NewFileStore(baseDir, fileExt, notary.PrivKeyPerms) + myStore, err := NewFileStore(baseDir, fileExt) + myStore.migrateTo0Dot4() + return myStore, err } // NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable // _only_ filestore func NewPrivateSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) { - return NewFileStore(baseDir, fileExt, notary.PrivKeyPerms) + return NewFileStore(baseDir, fileExt) } // FilesystemStore is a store in a locally accessible directory type FilesystemStore struct { baseDir string ext string - perms os.FileMode +} + +func (f *FilesystemStore) moveKeyTo0Dot4Location(file string) { + keyID := filepath.Base(file) + fileDir := filepath.Dir(file) + d, _ := f.Get(file) + block, _ := pem.Decode(d) + if block == nil { + logrus.Warn("Key data for", file, "could not be decoded as a valid PEM block. 
The key will not been migrated and may not be available") + return + } + fileDir = strings.TrimPrefix(fileDir, notary.RootKeysSubdir) + fileDir = strings.TrimPrefix(fileDir, notary.NonRootKeysSubdir) + if fileDir != "" { + block.Headers["gun"] = filepath.ToSlash(fileDir[1:]) + } + if strings.Contains(keyID, "_") { + role := strings.Split(keyID, "_")[1] + keyID = strings.TrimSuffix(keyID, "_"+role) + block.Headers["role"] = role + } + var keyPEM bytes.Buffer + // since block came from decoding the PEM bytes in the first place, and all we're doing is adding some headers we ignore the possibility of an error while encoding the block + pem.Encode(&keyPEM, block) + f.Set(keyID, keyPEM.Bytes()) +} + +func (f *FilesystemStore) migrateTo0Dot4() { + rootKeysSubDir := filepath.Clean(filepath.Join(f.Location(), notary.RootKeysSubdir)) + nonRootKeysSubDir := filepath.Clean(filepath.Join(f.Location(), notary.NonRootKeysSubdir)) + if _, err := os.Stat(rootKeysSubDir); !os.IsNotExist(err) && f.Location() != rootKeysSubDir { + if rootKeysSubDir == "" || rootKeysSubDir == "/" { + // making sure we don't remove a user's homedir + logrus.Warn("The directory for root keys is an unsafe value, we are not going to delete the directory. Please delete it manually") + } else { + // root_keys exists, migrate things from it + listOnlyRootKeysDirStore, _ := NewFileStore(rootKeysSubDir, f.ext) + for _, file := range listOnlyRootKeysDirStore.ListFiles() { + f.moveKeyTo0Dot4Location(filepath.Join(notary.RootKeysSubdir, file)) + } + // delete the old directory + os.RemoveAll(rootKeysSubDir) + } + } + + if _, err := os.Stat(nonRootKeysSubDir); !os.IsNotExist(err) && f.Location() != nonRootKeysSubDir { + if nonRootKeysSubDir == "" || nonRootKeysSubDir == "/" { + // making sure we don't remove a user's homedir + logrus.Warn("The directory for non root keys is an unsafe value, we are not going to delete the directory. 
Please delete it manually") + } else { + // tuf_keys exists, migrate things from it + listOnlyNonRootKeysDirStore, _ := NewFileStore(nonRootKeysSubDir, f.ext) + for _, file := range listOnlyNonRootKeysDirStore.ListFiles() { + f.moveKeyTo0Dot4Location(filepath.Join(notary.NonRootKeysSubdir, file)) + } + // delete the old directory + os.RemoveAll(nonRootKeysSubDir) + } + } + + // if we have a trusted_certificates folder, let's delete for a complete migration since it is unused by new clients + certsSubDir := filepath.Join(f.Location(), "trusted_certificates") + if certsSubDir == "" || certsSubDir == "/" { + logrus.Warn("The directory for trusted certificate is an unsafe value, we are not going to delete the directory. Please delete it manually") + } else { + os.RemoveAll(certsSubDir) + } } func (f *FilesystemStore) getPath(name string) (string, error) { @@ -80,7 +137,7 @@ func (f *FilesystemStore) GetSized(name string, size int64) ([]byte, error) { if err != nil { return nil, err } - file, err := os.OpenFile(p, os.O_RDONLY, f.perms) + file, err := os.OpenFile(p, os.O_RDONLY, notary.PrivNoExecPerms) if err != nil { if os.IsNotExist(err) { err = ErrMetaNotFound{Resource: name} @@ -140,7 +197,7 @@ func (f *FilesystemStore) Set(name string, meta []byte) error { } // Ensures the parent directories of the file we are about to write exist - err = os.MkdirAll(filepath.Dir(fp), f.perms) + err = os.MkdirAll(filepath.Dir(fp), notary.PrivExecPerms) if err != nil { return err } @@ -149,7 +206,7 @@ func (f *FilesystemStore) Set(name string, meta []byte) error { os.RemoveAll(fp) // Write the file to disk - if err = ioutil.WriteFile(fp, meta, f.perms); err != nil { + if err = ioutil.WriteFile(fp, meta, notary.PrivNoExecPerms); err != nil { return err } return nil diff --git a/vendor/github.com/docker/notary/storage/httpstore.go b/vendor/github.com/docker/notary/storage/httpstore.go index 1b0f65b6f6e6..d0523fb67640 100644 --- a/vendor/github.com/docker/notary/storage/httpstore.go +++ 
b/vendor/github.com/docker/notary/storage/httpstore.go @@ -22,9 +22,17 @@ import ( "net/url" "path" - "github.com/sirupsen/logrus" "github.com/docker/notary" + "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/validation" + "github.com/sirupsen/logrus" +) + +const ( + // MaxErrorResponseSize is the maximum size for an error message - 1KiB + MaxErrorResponseSize int64 = 1 << 10 + // MaxKeySize is the maximum size for a stored TUF key - 256KiB + MaxKeySize = 256 << 10 ) // ErrServerUnavailable indicates an error from the server. code allows us to @@ -39,6 +47,21 @@ type NetworkError struct { } func (n NetworkError) Error() string { + if _, ok := n.Wrapped.(*url.Error); ok { + // QueryUnescape does the inverse transformation of QueryEscape, + // converting %AB into the byte 0xAB and '+' into ' ' (space). + // It returns an error if any % is not followed by two hexadecimal digits. + // + // If this happens, we log out the QueryUnescape error and return the + // original error to client. + res, err := url.QueryUnescape(n.Wrapped.Error()) + if err != nil { + logrus.Errorf("unescape network error message failed: %s", err) + return n.Wrapped.Error() + } + return res + } + return n.Wrapped.Error() } @@ -88,7 +111,9 @@ type HTTPStore struct { roundTrip http.RoundTripper } -// NewHTTPStore initializes a new store against a URL and a number of configuration options +// NewHTTPStore initializes a new store against a URL and a number of configuration options. +// +// In case of a nil `roundTrip`, a default offline store is used instead. 
func NewHTTPStore(baseURL, metaPrefix, metaExtension, keyExtension string, roundTrip http.RoundTripper) (RemoteStore, error) { base, err := url.Parse(baseURL) if err != nil { @@ -110,7 +135,8 @@ func NewHTTPStore(baseURL, metaPrefix, metaExtension, keyExtension string, round } func tryUnmarshalError(resp *http.Response, defaultError error) error { - bodyBytes, err := ioutil.ReadAll(resp.Body) + b := io.LimitReader(resp.Body, MaxErrorResponseSize) + bodyBytes, err := ioutil.ReadAll(b) if err != nil { return defaultError } @@ -269,8 +295,8 @@ func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) { return s.buildURL(uri) } -func (s HTTPStore) buildKeyURL(name string) (*url.URL, error) { - filename := fmt.Sprintf("%s.%s", name, s.keyExtension) +func (s HTTPStore) buildKeyURL(name data.RoleName) (*url.URL, error) { + filename := fmt.Sprintf("%s.%s", name.String(), s.keyExtension) uri := path.Join(s.metaPrefix, filename) return s.buildURL(uri) } @@ -284,7 +310,7 @@ func (s HTTPStore) buildURL(uri string) (*url.URL, error) { } // GetKey retrieves a public key from the remote server -func (s HTTPStore) GetKey(role string) ([]byte, error) { +func (s HTTPStore) GetKey(role data.RoleName) ([]byte, error) { url, err := s.buildKeyURL(role) if err != nil { return nil, err @@ -298,10 +324,11 @@ func (s HTTPStore) GetKey(role string) ([]byte, error) { return nil, NetworkError{Wrapped: err} } defer resp.Body.Close() - if err := translateStatusToError(resp, role+" key"); err != nil { + if err := translateStatusToError(resp, role.String()+" key"); err != nil { return nil, err } - body, err := ioutil.ReadAll(resp.Body) + b := io.LimitReader(resp.Body, MaxKeySize) + body, err := ioutil.ReadAll(b) if err != nil { return nil, err } @@ -309,7 +336,7 @@ func (s HTTPStore) GetKey(role string) ([]byte, error) { } // RotateKey rotates a private key and returns the public component from the remote server -func (s HTTPStore) RotateKey(role string) ([]byte, error) { +func (s HTTPStore) 
RotateKey(role data.RoleName) ([]byte, error) { url, err := s.buildKeyURL(role) if err != nil { return nil, err @@ -323,10 +350,11 @@ func (s HTTPStore) RotateKey(role string) ([]byte, error) { return nil, NetworkError{Wrapped: err} } defer resp.Body.Close() - if err := translateStatusToError(resp, role+" key"); err != nil { + if err := translateStatusToError(resp, role.String()+" key"); err != nil { return nil, err } - body, err := ioutil.ReadAll(resp.Body) + b := io.LimitReader(resp.Body, MaxKeySize) + body, err := ioutil.ReadAll(b) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/notary/storage/interfaces.go b/vendor/github.com/docker/notary/storage/interfaces.go index 867c9f181aff..c9ac03b602a3 100644 --- a/vendor/github.com/docker/notary/storage/interfaces.go +++ b/vendor/github.com/docker/notary/storage/interfaces.go @@ -1,5 +1,9 @@ package storage +import ( + "github.com/docker/notary/tuf/data" +) + // NoSizeLimit is represented as -1 for arguments to GetMeta const NoSizeLimit int64 = -1 @@ -15,8 +19,8 @@ type MetadataStore interface { // PublicKeyStore must be implemented by a key service type PublicKeyStore interface { - GetKey(role string) ([]byte, error) - RotateKey(role string) ([]byte, error) + GetKey(role data.RoleName) ([]byte, error) + RotateKey(role data.RoleName) ([]byte, error) } // RemoteStore is similar to LocalStore with the added expectation that it should diff --git a/vendor/github.com/docker/notary/storage/memorystore.go b/vendor/github.com/docker/notary/storage/memorystore.go index 8a2ade54da76..b4ae6466923b 100644 --- a/vendor/github.com/docker/notary/storage/memorystore.go +++ b/vendor/github.com/docker/notary/storage/memorystore.go @@ -2,25 +2,29 @@ package storage import ( "crypto/sha256" + "encoding/json" + "fmt" "github.com/docker/notary" + "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/utils" ) // NewMemoryStore returns a MetadataStore that operates entirely in memory. 
// Very useful for testing -func NewMemoryStore(initial map[string][]byte) *MemoryStore { - var consistent = make(map[string][]byte) - if initial == nil { - initial = make(map[string][]byte) - } else { - // add all seed meta to consistent - for name, data := range initial { - checksum := sha256.Sum256(data) - path := utils.ConsistentName(name, checksum[:]) - consistent[path] = data - } +func NewMemoryStore(seed map[data.RoleName][]byte) *MemoryStore { + var ( + consistent = make(map[string][]byte) + initial = make(map[string][]byte) + ) + // add all seed meta to consistent + for name, d := range seed { + checksum := sha256.Sum256(d) + path := utils.ConsistentName(name.String(), checksum[:]) + initial[name.String()] = d + consistent[path] = d } + return &MemoryStore{ data: initial, consistent: consistent, @@ -75,6 +79,15 @@ func (m MemoryStore) Get(name string) ([]byte, error) { func (m *MemoryStore) Set(name string, meta []byte) error { m.data[name] = meta + parsedMeta := &data.SignedMeta{} + err := json.Unmarshal(meta, parsedMeta) + if err == nil { + // no parse error means this is metadata and not a key, so store by version + version := parsedMeta.Signed.Version + versionedName := fmt.Sprintf("%d.%s", version, name) + m.data[versionedName] = meta + } + checksum := sha256.Sum256(meta) path := utils.ConsistentName(name, checksum[:]) m.consistent[path] = meta diff --git a/vendor/github.com/docker/notary/storage/offlinestore.go b/vendor/github.com/docker/notary/storage/offlinestore.go index fd297601e010..9a4faf6d4482 100644 --- a/vendor/github.com/docker/notary/storage/offlinestore.go +++ b/vendor/github.com/docker/notary/storage/offlinestore.go @@ -1,5 +1,9 @@ package storage +import ( + "github.com/docker/notary/tuf/data" +) + // ErrOffline is used to indicate we are operating offline type ErrOffline struct{} @@ -34,12 +38,12 @@ func (es OfflineStore) Remove(name string) error { } // GetKey returns ErrOffline -func (es OfflineStore) GetKey(role string) ([]byte, 
error) { +func (es OfflineStore) GetKey(role data.RoleName) ([]byte, error) { return nil, ErrOffline{} } // RotateKey returns ErrOffline -func (es OfflineStore) RotateKey(role string) ([]byte, error) { +func (es OfflineStore) RotateKey(role data.RoleName) ([]byte, error) { return nil, ErrOffline{} } diff --git a/vendor/github.com/docker/notary/trustmanager/errors.go b/vendor/github.com/docker/notary/trustmanager/errors.go new file mode 100644 index 000000000000..adfcb31b0fef --- /dev/null +++ b/vendor/github.com/docker/notary/trustmanager/errors.go @@ -0,0 +1,31 @@ +package trustmanager + +import "fmt" + +// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key +type ErrAttemptsExceeded struct{} + +// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key +func (err ErrAttemptsExceeded) Error() string { + return "maximum number of passphrase attempts exceeded" +} + +// ErrPasswordInvalid is returned when signing fails. It could also mean the signing +// key file was corrupted, but we have no way to distinguish. +type ErrPasswordInvalid struct{} + +// ErrPasswordInvalid is returned when signing fails. It could also mean the signing +// key file was corrupted, but we have no way to distinguish. +func (err ErrPasswordInvalid) Error() string { + return "password invalid, operation has failed." +} + +// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. +type ErrKeyNotFound struct { + KeyID string +} + +// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. 
+func (err ErrKeyNotFound) Error() string { + return fmt.Sprintf("signing key not found: %s", err.KeyID) +} diff --git a/vendor/github.com/docker/notary/trustmanager/interfaces.go b/vendor/github.com/docker/notary/trustmanager/interfaces.go index 34bc128d260a..5cce58983230 100644 --- a/vendor/github.com/docker/notary/trustmanager/interfaces.go +++ b/vendor/github.com/docker/notary/trustmanager/interfaces.go @@ -1,8 +1,6 @@ package trustmanager import ( - "fmt" - "github.com/docker/notary/tuf/data" ) @@ -34,32 +32,11 @@ type Storage interface { Location() string } -// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key -type ErrAttemptsExceeded struct{} - -// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key -func (err ErrAttemptsExceeded) Error() string { - return "maximum number of passphrase attempts exceeded" -} - -// ErrPasswordInvalid is returned when signing fails. It could also mean the signing -// key file was corrupted, but we have no way to distinguish. -type ErrPasswordInvalid struct{} - -// ErrPasswordInvalid is returned when signing fails. It could also mean the signing -// key file was corrupted, but we have no way to distinguish. -func (err ErrPasswordInvalid) Error() string { - return "password invalid, operation has failed." -} - -// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. -type ErrKeyNotFound struct { - KeyID string -} - -// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. 
-func (err ErrKeyNotFound) Error() string { - return fmt.Sprintf("signing key not found: %s", err.KeyID) +// KeyInfo stores the role and gun for a corresponding private key ID +// It is assumed that each private key ID is unique +type KeyInfo struct { + Gun data.GUN + Role data.RoleName } // KeyStore is a generic interface for private key storage @@ -69,14 +46,9 @@ type KeyStore interface { AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error // Should fail with ErrKeyNotFound if the keystore is operating normally // and knows that it does not store the requested key. - GetKey(keyID string) (data.PrivateKey, string, error) + GetKey(keyID string) (data.PrivateKey, data.RoleName, error) GetKeyInfo(keyID string) (KeyInfo, error) ListKeys() map[string]KeyInfo RemoveKey(keyID string) error Name() string } - -type cachedKey struct { - alias string - key data.PrivateKey -} diff --git a/vendor/github.com/docker/notary/trustmanager/keystore.go b/vendor/github.com/docker/notary/trustmanager/keystore.go index 6fe259127c2a..eb42dede9aa3 100644 --- a/vendor/github.com/docker/notary/trustmanager/keystore.go +++ b/vendor/github.com/docker/notary/trustmanager/keystore.go @@ -1,26 +1,23 @@ package trustmanager import ( - "encoding/pem" "fmt" "path/filepath" "strings" "sync" - "github.com/sirupsen/logrus" "github.com/docker/notary" store "github.com/docker/notary/storage" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" ) type keyInfoMap map[string]KeyInfo -// KeyInfo stores the role, path, and gun for a corresponding private key ID -// It is assumed that each private key ID is unique -type KeyInfo struct { - Gun string - Role string +type cachedKey struct { + role data.RoleName + key data.PrivateKey } // GenericKeyStore is a wrapper for Storage instances that provides @@ -80,40 +77,6 @@ func generateKeyInfoMap(s Storage) map[string]KeyInfo { return keyInfoMap } -// Attempts to infer the keyID, role, and GUN from the specified 
key path. -// Note that non-root roles can only be inferred if this is a legacy style filename: KEYID_ROLE.key -func inferKeyInfoFromKeyPath(keyPath string) (string, string, string) { - var keyID, role, gun string - keyID = filepath.Base(keyPath) - underscoreIndex := strings.LastIndex(keyID, "_") - - // This is the legacy KEYID_ROLE filename - // The keyID is the first part of the keyname - // The keyRole is the second part of the keyname - // in a key named abcde_root, abcde is the keyID and root is the KeyAlias - if underscoreIndex != -1 { - role = keyID[underscoreIndex+1:] - keyID = keyID[:underscoreIndex] - } - - if filepath.HasPrefix(keyPath, notary.RootKeysSubdir+"/") { - return keyID, data.CanonicalRootRole, "" - } - - keyPath = strings.TrimPrefix(keyPath, notary.NonRootKeysSubdir+"/") - gun = getGunFromFullID(keyPath) - return keyID, role, gun -} - -func getGunFromFullID(fullKeyID string) string { - keyGun := filepath.Dir(fullKeyID) - // If the gun is empty, Dir will return . - if keyGun == "." 
{ - keyGun = "" - } - return keyGun -} - func (s *GenericKeyStore) loadKeyInfo() { s.keyInfoMap = generateKeyInfoMap(s.store) } @@ -139,9 +102,9 @@ func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) { keyInfo.Gun = "" } - name := filepath.Join(keyInfo.Gun, privKey.ID()) + keyID := privKey.ID() for attempts := 0; ; attempts++ { - chosenPassphrase, giveup, err = s.PassRetriever(name, keyInfo.Role, true, attempts) + chosenPassphrase, giveup, err = s.PassRetriever(keyID, keyInfo.Role.String(), true, attempts) if err == nil { break } @@ -150,18 +113,14 @@ func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error } } - if chosenPassphrase != "" { - pemPrivKey, err = utils.EncryptPrivateKey(privKey, keyInfo.Role, keyInfo.Gun, chosenPassphrase) - } else { - pemPrivKey, err = utils.KeyToPEM(privKey, keyInfo.Role) - } + pemPrivKey, err = utils.ConvertPrivateKeyToPKCS8(privKey, keyInfo.Role, keyInfo.Gun, chosenPassphrase) if err != nil { return err } - s.cachedKeys[name] = &cachedKey{alias: keyInfo.Role, key: privKey} - err = s.store.Set(filepath.Join(getSubdir(keyInfo.Role), name), pemPrivKey) + s.cachedKeys[keyID] = &cachedKey{role: keyInfo.Role, key: privKey} + err = s.store.Set(keyID, pemPrivKey) if err != nil { return err } @@ -170,15 +129,21 @@ func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error } // GetKey returns the PrivateKey given a KeyID -func (s *GenericKeyStore) GetKey(name string) (data.PrivateKey, string, error) { +func (s *GenericKeyStore) GetKey(keyID string) (data.PrivateKey, data.RoleName, error) { s.Lock() defer s.Unlock() - cachedKeyEntry, ok := s.cachedKeys[name] + + cachedKeyEntry, ok := s.cachedKeys[keyID] if ok { - return cachedKeyEntry.key, cachedKeyEntry.alias, nil + return cachedKeyEntry.key, cachedKeyEntry.role, nil + } + + role, err := getKeyRole(s.store, 
keyID) + if err != nil { + return nil, "", err } - keyBytes, _, keyAlias, err := getKey(s.store, name) + keyBytes, err := s.store.Get(keyID) if err != nil { return nil, "", err } @@ -186,13 +151,13 @@ func (s *GenericKeyStore) GetKey(name string) (data.PrivateKey, string, error) { // See if the key is encrypted. If its encrypted we'll fail to parse the private key privKey, err := utils.ParsePEMPrivateKey(keyBytes, "") if err != nil { - privKey, _, err = GetPasswdDecryptBytes(s.PassRetriever, keyBytes, name, string(keyAlias)) + privKey, _, err = GetPasswdDecryptBytes(s.PassRetriever, keyBytes, keyID, string(role)) if err != nil { return nil, "", err } } - s.cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey} - return privKey, keyAlias, nil + s.cachedKeys[keyID] = &cachedKey{role: role, key: privKey} + return privKey, role, nil } // ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap @@ -204,24 +169,14 @@ func (s *GenericKeyStore) ListKeys() map[string]KeyInfo { func (s *GenericKeyStore) RemoveKey(keyID string) error { s.Lock() defer s.Unlock() - - _, filename, _, err := getKey(s.store, keyID) - switch err.(type) { - case ErrKeyNotFound, nil: - break - default: - return err - } - delete(s.cachedKeys, keyID) - err = s.store.Remove(filename) // removing a file that doesn't exist doesn't fail + err := s.store.Remove(keyID) if err != nil { return err } - // Remove this key from our keyInfo map if we removed from our filesystem - delete(s.keyInfoMap, filepath.Base(keyID)) + delete(s.keyInfoMap, keyID) return nil } @@ -242,55 +197,37 @@ func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo { // KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) { - keyID, role, gun := inferKeyInfoFromKeyPath(filename) - if role == "" { - block, _ := pem.Decode(pemBytes) - if block == nil 
{ - return "", KeyInfo{}, fmt.Errorf("could not decode PEM block for key %s", filename) - } - if keyRole, ok := block.Headers["role"]; ok { - role = keyRole - } + var keyID string + keyID = filepath.Base(filename) + role, gun, err := utils.ExtractPrivateKeyAttributes(pemBytes) + if err != nil { + return "", KeyInfo{}, err } return keyID, KeyInfo{Gun: gun, Role: role}, nil } -// getKey finds the key and role for the given keyID. It attempts to -// look both in the newer format PEM headers, and also in the legacy filename -// format. It returns: the key bytes, the filename it was found under, the role, -// and an error -func getKey(s Storage, keyID string) ([]byte, string, string, error) { +// getKeyRole finds the role for the given keyID. It attempts to look +// both in the newer format PEM headers, and also in the legacy filename +// format. It returns: the role, and an error +func getKeyRole(s Storage, keyID string) (data.RoleName, error) { name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID))) for _, file := range s.ListFiles() { filename := filepath.Base(file) - if strings.HasPrefix(filename, name) { d, err := s.Get(file) if err != nil { - return nil, "", "", err - } - block, _ := pem.Decode(d) - if block != nil { - if role, ok := block.Headers["role"]; ok { - return d, file, role, nil - } + return "", err } - role := strings.TrimPrefix(filename, name+"_") - return d, file, role, nil + role, _, err := utils.ExtractPrivateKeyAttributes(d) + if err != nil { + return "", err + } + return role, nil } } - - return nil, "", "", ErrKeyNotFound{KeyID: keyID} -} - -// Assumes 2 subdirectories, 1 containing root keys and 1 containing TUF keys -func getSubdir(alias string) string { - if alias == data.CanonicalRootRole { - return notary.RootKeysSubdir - } - return notary.NonRootKeysSubdir + return "", ErrKeyNotFound{KeyID: keyID} } // GetPasswdDecryptBytes gets the password to decrypt the given pem bytes. 
diff --git a/vendor/github.com/docker/notary/trustmanager/yubikey/import.go b/vendor/github.com/docker/notary/trustmanager/yubikey/import.go index 08048ef5213e..a51af88bdfcb 100644 --- a/vendor/github.com/docker/notary/trustmanager/yubikey/import.go +++ b/vendor/github.com/docker/notary/trustmanager/yubikey/import.go @@ -5,8 +5,10 @@ package yubikey import ( "encoding/pem" "errors" + "github.com/docker/notary" "github.com/docker/notary/trustmanager" + "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/utils" ) @@ -39,7 +41,7 @@ func (s *YubiImport) Set(name string, bytes []byte) error { } ki := trustmanager.KeyInfo{ // GUN is ignored by YubiStore - Role: role, + Role: data.RoleName(role), } privKey, err := utils.ParsePEMPrivateKey(bytes, "") if err != nil { @@ -47,7 +49,7 @@ func (s *YubiImport) Set(name string, bytes []byte) error { s.passRetriever, bytes, name, - ki.Role, + ki.Role.String(), ) if err != nil { return err diff --git a/vendor/github.com/docker/notary/trustmanager/yubikey/yubikeystore.go b/vendor/github.com/docker/notary/trustmanager/yubikey/yubikeystore.go index 15fff257d976..33715da3503d 100644 --- a/vendor/github.com/docker/notary/trustmanager/yubikey/yubikeystore.go +++ b/vendor/github.com/docker/notary/trustmanager/yubikey/yubikeystore.go @@ -16,13 +16,13 @@ import ( "os" "time" - "github.com/sirupsen/logrus" "github.com/docker/notary" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/utils" "github.com/miekg/pkcs11" + "github.com/sirupsen/logrus" ) const ( @@ -126,7 +126,7 @@ func (err errHSMNotPresent) Error() string { } type yubiSlot struct { - role string + role data.RoleName slotID []byte } @@ -208,7 +208,7 @@ func (y *YubiPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts return sig, nil } } - return nil, errors.New("Failed to generate signature on Yubikey.") + return nil, errors.New("failed to generate signature on 
Yubikey") } // If a byte array is less than the number of bytes specified by @@ -230,7 +230,7 @@ func addECDSAKey( privKey data.PrivateKey, pkcs11KeyID []byte, passRetriever notary.PassRetriever, - role string, + role data.RoleName, ) error { logrus.Debugf("Attempting to add key to yubikey with ID: %s", privKey.ID()) @@ -250,7 +250,7 @@ func addECDSAKey( // Hard-coded policy: the generated certificate expires in 10 years. startTime := time.Now() - template, err := utils.NewCertificate(role, startTime, startTime.AddDate(10, 0, 0)) + template, err := utils.NewCertificate(role.String(), startTime, startTime.AddDate(10, 0, 0)) if err != nil { return fmt.Errorf("failed to create the certificate template: %v", err) } @@ -288,7 +288,7 @@ func addECDSAKey( return nil } -func getECDSAKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte) (*data.ECDSAPublicKey, string, error) { +func getECDSAKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte) (*data.ECDSAPublicKey, data.RoleName, error) { findTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true), pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID), @@ -448,45 +448,19 @@ func yubiRemoveKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []b func yubiListKeys(ctx IPKCS11Ctx, session pkcs11.SessionHandle) (keys map[string]yubiSlot, err error) { keys = make(map[string]yubiSlot) - findTemplate := []*pkcs11.Attribute{ - pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true), - //pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID), - pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE), - } attrTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_ID, []byte{0}), pkcs11.NewAttribute(pkcs11.CKA_VALUE, []byte{0}), } - if err = ctx.FindObjectsInit(session, findTemplate); err != nil { - logrus.Debugf("Failed to init: %s", err.Error()) - return - } - objs, b, err := ctx.FindObjects(session, numSlots) - for err == nil { - var o []pkcs11.ObjectHandle - o, b, err = 
ctx.FindObjects(session, numSlots) - if err != nil { - continue - } - if len(o) == 0 { - break - } - objs = append(objs, o...) - } + objs, err := listObjects(ctx, session) if err != nil { - logrus.Debugf("Failed to find: %s %v", err.Error(), b) - if len(objs) == 0 { - return nil, err - } - } - if err = ctx.FindObjectsFinal(session); err != nil { - logrus.Debugf("Failed to finalize: %s", err.Error()) - return + return nil, err } + if len(objs) == 0 { - return nil, errors.New("No keys found in yubikey.") + return nil, errors.New("no keys found in yubikey") } logrus.Debugf("Found %d objects matching list filters", len(objs)) for _, obj := range objs { @@ -511,7 +485,7 @@ func yubiListKeys(ctx IPKCS11Ctx, session pkcs11.SessionHandle) (keys map[string if err != nil { continue } - if !data.ValidRole(cert.Subject.CommonName) { + if !data.ValidRole(data.RoleName(cert.Subject.CommonName)) { continue } } @@ -538,13 +512,49 @@ func yubiListKeys(ctx IPKCS11Ctx, session pkcs11.SessionHandle) (keys map[string } keys[data.NewECDSAPublicKey(pubBytes).ID()] = yubiSlot{ - role: cert.Subject.CommonName, + role: data.RoleName(cert.Subject.CommonName), slotID: slot, } } return } +func listObjects(ctx IPKCS11Ctx, session pkcs11.SessionHandle) ([]pkcs11.ObjectHandle, error) { + findTemplate := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true), + pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE), + } + + if err := ctx.FindObjectsInit(session, findTemplate); err != nil { + logrus.Debugf("Failed to init: %s", err.Error()) + return nil, err + } + + objs, b, err := ctx.FindObjects(session, numSlots) + for err == nil { + var o []pkcs11.ObjectHandle + o, b, err = ctx.FindObjects(session, numSlots) + if err != nil { + continue + } + if len(o) == 0 { + break + } + objs = append(objs, o...) 
+ } + if err != nil { + logrus.Debugf("Failed to find: %s %v", err.Error(), b) + if len(objs) == 0 { + return nil, err + } + } + if err := ctx.FindObjectsFinal(session); err != nil { + logrus.Debugf("Failed to finalize: %s", err.Error()) + return nil, err + } + return objs, nil +} + func getNextEmptySlot(ctx IPKCS11Ctx, session pkcs11.SessionHandle) ([]byte, error) { findTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true), @@ -611,7 +621,7 @@ func getNextEmptySlot(ctx IPKCS11Ctx, session pkcs11.SessionHandle) ([]byte, err return []byte{byte(loc)}, nil } } - return nil, errors.New("Yubikey has no available slots.") + return nil, errors.New("yubikey has no available slots") } // YubiStore is a KeyStore for private keys inside a Yubikey @@ -687,7 +697,7 @@ func (s *YubiStore) AddKey(keyInfo trustmanager.KeyInfo, privKey data.PrivateKey // Only add if we haven't seen the key already. Return whether the key was // added. -func (s *YubiStore) addKey(keyID, role string, privKey data.PrivateKey) ( +func (s *YubiStore) addKey(keyID string, role data.RoleName, privKey data.PrivateKey) ( bool, error) { // We only allow adding root keys for now @@ -733,7 +743,7 @@ func (s *YubiStore) addKey(keyID, role string, privKey data.PrivateKey) ( // GetKey retrieves a key from the Yubikey only (it does not look inside the // backup store) -func (s *YubiStore) GetKey(keyID string) (data.PrivateKey, string, error) { +func (s *YubiStore) GetKey(keyID string) (data.PrivateKey, data.RoleName, error) { ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader) if err != nil { logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error()) diff --git a/vendor/github.com/docker/notary/trustpinning/certs.go b/vendor/github.com/docker/notary/trustpinning/certs.go index 9175dafc270c..c328a8314bc7 100644 --- a/vendor/github.com/docker/notary/trustpinning/certs.go +++ b/vendor/github.com/docker/notary/trustpinning/certs.go @@ -6,12 +6,14 @@ import ( "fmt" 
"strings" - "github.com/sirupsen/logrus" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" ) +const wildcard = "*" + // ErrValidationFail is returned when there is no valid trusted certificates // being served inside of the roots.json type ErrValidationFail struct { @@ -82,7 +84,7 @@ We shall call this: TOFUS. Validation failure at any step will result in an ErrValidationFailed error. */ -func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trustPinning TrustPinConfig) (*data.SignedRoot, error) { +func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun data.GUN, trustPinning TrustPinConfig) (*data.SignedRoot, error) { logrus.Debugf("entered ValidateRoot with dns: %s", gun) signedRoot, err := data.RootFromSigned(root) if err != nil { @@ -140,7 +142,7 @@ func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trus } // Regardless of having a previous root or not, confirm that the new root validates against the trust pinning - logrus.Debugf("checking root against trust_pinning config", gun) + logrus.Debugf("checking root against trust_pinning config for %s", gun) trustPinCheckFunc, err := NewTrustPinChecker(trustPinning, gun, !havePrevRoot) if err != nil { return nil, &ErrValidationFail{Reason: err.Error()} @@ -175,16 +177,27 @@ func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trus return data.RootFromSigned(root) } +// MatchCNToGun checks that the common name in a cert is valid for the given gun. +// This allows wildcards as suffixes, e.g. 
`namespace/*` +func MatchCNToGun(commonName string, gun data.GUN) bool { + if strings.HasSuffix(commonName, wildcard) { + prefix := strings.TrimRight(commonName, wildcard) + logrus.Debugf("checking gun %s against wildcard prefix %s", gun, prefix) + return strings.HasPrefix(gun.String(), prefix) + } + return commonName == gun.String() +} + // validRootLeafCerts returns a list of possibly (if checkExpiry is true) non-expired, non-sha1 certificates // found in root whose Common-Names match the provided GUN. Note that this // "validity" alone does not imply any measure of trust. -func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string, checkExpiry bool) (map[string]*x509.Certificate, error) { +func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun data.GUN, checkExpiry bool) (map[string]*x509.Certificate, error) { validLeafCerts := make(map[string]*x509.Certificate) // Go through every leaf certificate and check that the CN matches the gun for id, cert := range allLeafCerts { - // Validate that this leaf certificate has a CN that matches the exact gun - if cert.Subject.CommonName != gun { + // Validate that this leaf certificate has a CN that matches the gun + if !MatchCNToGun(cert.Subject.CommonName, gun) { logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s", cert.Subject.CommonName, gun) continue diff --git a/vendor/github.com/docker/notary/trustpinning/trustpin.go b/vendor/github.com/docker/notary/trustpinning/trustpin.go index acfa5658142f..e1777a16de2b 100644 --- a/vendor/github.com/docker/notary/trustpinning/trustpin.go +++ b/vendor/github.com/docker/notary/trustpinning/trustpin.go @@ -3,9 +3,11 @@ package trustpinning import ( "crypto/x509" "fmt" - "github.com/sirupsen/logrus" - "github.com/docker/notary/tuf/utils" "strings" + + "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" ) // TrustPinConfig represents the configuration under the 
trust_pinning section of the config file @@ -17,7 +19,7 @@ type TrustPinConfig struct { } type trustPinChecker struct { - gun string + gun data.GUN config TrustPinConfig pinnedCAPool *x509.CertPool pinnedCertIDs []string @@ -27,14 +29,19 @@ type trustPinChecker struct { type CertChecker func(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool // NewTrustPinChecker returns a new certChecker function from a TrustPinConfig for a GUN -func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string, firstBootstrap bool) (CertChecker, error) { +func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun data.GUN, firstBootstrap bool) (CertChecker, error) { t := trustPinChecker{gun: gun, config: trustPinConfig} // Determine the mode, and if it's even valid - if pinnedCerts, ok := trustPinConfig.Certs[gun]; ok { + if pinnedCerts, ok := trustPinConfig.Certs[gun.String()]; ok { logrus.Debugf("trust-pinning using Cert IDs") t.pinnedCertIDs = pinnedCerts return t.certsCheck, nil } + var ok bool + t.pinnedCertIDs, ok = wildcardMatch(gun, trustPinConfig.Certs) + if ok { + return t.certsCheck, nil + } if caFilepath, err := getPinnedCAFilepathByPrefix(gun, trustPinConfig); err == nil { logrus.Debugf("trust-pinning using root CA bundle at: %s", caFilepath) @@ -103,19 +110,39 @@ func (t trustPinChecker) tofusCheck(leafCert *x509.Certificate, intCerts []*x509 // Will return the CA filepath corresponding to the most specific (longest) entry in the map that is still a prefix // of the provided gun. Returns an error if no entry matches this GUN as a prefix. 
-func getPinnedCAFilepathByPrefix(gun string, t TrustPinConfig) (string, error) { +func getPinnedCAFilepathByPrefix(gun data.GUN, t TrustPinConfig) (string, error) { specificGUN := "" specificCAFilepath := "" foundCA := false for gunPrefix, caFilepath := range t.CA { - if strings.HasPrefix(gun, gunPrefix) && len(gunPrefix) >= len(specificGUN) { + if strings.HasPrefix(gun.String(), gunPrefix) && len(gunPrefix) >= len(specificGUN) { specificGUN = gunPrefix specificCAFilepath = caFilepath foundCA = true } } if !foundCA { - return "", fmt.Errorf("could not find pinned CA for GUN: %s\n", gun) + return "", fmt.Errorf("could not find pinned CA for GUN: %s", gun) } return specificCAFilepath, nil } + +// wildcardMatch will attempt to match the most specific (longest prefix) wildcarded +// trustpinning option for key IDs. Given the simple globbing and the use of maps, +// it is impossible to have two different prefixes of equal length. +// This logic also solves the issue of Go's randomization of map iteration. 
+func wildcardMatch(gun data.GUN, certs map[string][]string) ([]string, bool) { + var ( + longest = "" + ids []string + ) + for gunPrefix, keyIDs := range certs { + if strings.HasSuffix(gunPrefix, "*") { + if strings.HasPrefix(gun.String(), gunPrefix[:len(gunPrefix)-1]) && len(gunPrefix) > len(longest) { + longest = gunPrefix + ids = keyIDs + } + } + } + return ids, ids != nil +} diff --git a/vendor/github.com/docker/notary/tuf/builder.go b/vendor/github.com/docker/notary/tuf/builder.go index 1eaf0498c55f..b868743774f1 100644 --- a/vendor/github.com/docker/notary/tuf/builder.go +++ b/vendor/github.com/docker/notary/tuf/builder.go @@ -28,7 +28,7 @@ func (e ErrInvalidBuilderInput) Error() string { // ConsistentInfo is the consistent name and size of a role, or just the name // of the role and a -1 if no file metadata for the role is known type ConsistentInfo struct { - RoleName string + RoleName data.RoleName fileMeta data.FileMeta } @@ -42,7 +42,7 @@ func (c ConsistentInfo) ChecksumKnown() bool { // ConsistentName returns the consistent name (rolename.sha256) for the role // given this consistent information func (c ConsistentInfo) ConsistentName() string { - return utils.ConsistentName(c.RoleName, c.fileMeta.Hashes[notary.SHA256]) + return utils.ConsistentName(c.RoleName.String(), c.fileMeta.Hashes[notary.SHA256]) } // Length returns the expected length of the role as per this consistent @@ -56,7 +56,8 @@ func (c ConsistentInfo) Length() int64 { // RepoBuilder is an interface for an object which builds a tuf.Repo type RepoBuilder interface { - Load(roleName string, content []byte, minVersion int, allowExpired bool) error + Load(roleName data.RoleName, content []byte, minVersion int, allowExpired bool) error + LoadRootForUpdate(content []byte, minVersion int, isFinal bool) error GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) Finish() (*Repo, *Repo, error) @@ -64,15 +65,18 @@ type 
RepoBuilder interface { BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder // informative functions - IsLoaded(roleName string) bool - GetLoadedVersion(roleName string) int - GetConsistentInfo(roleName string) ConsistentInfo + IsLoaded(roleName data.RoleName) bool + GetLoadedVersion(roleName data.RoleName) int + GetConsistentInfo(roleName data.RoleName) ConsistentInfo } // finishedBuilder refuses any more input or output type finishedBuilder struct{} -func (f finishedBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error { +func (f finishedBuilder) Load(roleName data.RoleName, content []byte, minVersion int, allowExpired bool) error { + return ErrBuildDone +} +func (f finishedBuilder) LoadRootForUpdate(content []byte, minVersion int, isFinal bool) error { return ErrBuildDone } func (f finishedBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) { @@ -86,27 +90,27 @@ func (f finishedBuilder) BootstrapNewBuilder() RepoBuilder { return f } func (f finishedBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder { return f } -func (f finishedBuilder) IsLoaded(roleName string) bool { return false } -func (f finishedBuilder) GetLoadedVersion(roleName string) int { return 0 } -func (f finishedBuilder) GetConsistentInfo(roleName string) ConsistentInfo { +func (f finishedBuilder) IsLoaded(roleName data.RoleName) bool { return false } +func (f finishedBuilder) GetLoadedVersion(roleName data.RoleName) int { return 0 } +func (f finishedBuilder) GetConsistentInfo(roleName data.RoleName) ConsistentInfo { return ConsistentInfo{RoleName: roleName} } // NewRepoBuilder is the only way to get a pre-built RepoBuilder -func NewRepoBuilder(gun string, cs signed.CryptoService, trustpin trustpinning.TrustPinConfig) RepoBuilder { +func NewRepoBuilder(gun data.GUN, cs signed.CryptoService, trustpin trustpinning.TrustPinConfig) RepoBuilder { return 
NewBuilderFromRepo(gun, NewRepo(cs), trustpin) } // NewBuilderFromRepo allows us to bootstrap a builder given existing repo data. // YOU PROBABLY SHOULDN'T BE USING THIS OUTSIDE OF TESTING CODE!!! -func NewBuilderFromRepo(gun string, repo *Repo, trustpin trustpinning.TrustPinConfig) RepoBuilder { +func NewBuilderFromRepo(gun data.GUN, repo *Repo, trustpin trustpinning.TrustPinConfig) RepoBuilder { return &repoBuilderWrapper{ RepoBuilder: &repoBuilder{ repo: repo, invalidRoles: NewRepo(nil), gun: gun, trustpin: trustpin, - loadedNotChecksummed: make(map[string][]byte), + loadedNotChecksummed: make(map[data.RoleName][]byte), }, } } @@ -134,13 +138,13 @@ type repoBuilder struct { invalidRoles *Repo // needed for root trust pininng verification - gun string + gun data.GUN trustpin trustpinning.TrustPinConfig // in case we load root and/or targets before snapshot and timestamp ( // or snapshot and not timestamp), so we know what to verify when the // data with checksums come in - loadedNotChecksummed map[string][]byte + loadedNotChecksummed map[data.RoleName][]byte // bootstrapped values to validate a new root prevRoot *data.SignedRoot @@ -159,7 +163,7 @@ func (rb *repoBuilder) BootstrapNewBuilder() RepoBuilder { repo: NewRepo(rb.repo.cryptoService), invalidRoles: NewRepo(nil), gun: rb.gun, - loadedNotChecksummed: make(map[string][]byte), + loadedNotChecksummed: make(map[data.RoleName][]byte), trustpin: rb.trustpin, prevRoot: rb.repo.Root, @@ -171,7 +175,7 @@ func (rb *repoBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning. return &repoBuilderWrapper{RepoBuilder: &repoBuilder{ repo: NewRepo(rb.repo.cryptoService), gun: rb.gun, - loadedNotChecksummed: make(map[string][]byte), + loadedNotChecksummed: make(map[data.RoleName][]byte), trustpin: trustpin, prevRoot: rb.repo.Root, @@ -180,7 +184,7 @@ func (rb *repoBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning. 
} // IsLoaded returns whether a particular role has already been loaded -func (rb *repoBuilder) IsLoaded(roleName string) bool { +func (rb *repoBuilder) IsLoaded(roleName data.RoleName) bool { switch roleName { case data.CanonicalRootRole: return rb.repo.Root != nil @@ -195,7 +199,7 @@ func (rb *repoBuilder) IsLoaded(roleName string) bool { // GetLoadedVersion returns the metadata version, if it is loaded, or 1 (the // minimum valid version number) otherwise -func (rb *repoBuilder) GetLoadedVersion(roleName string) int { +func (rb *repoBuilder) GetLoadedVersion(roleName data.RoleName) int { switch { case roleName == data.CanonicalRootRole && rb.repo.Root != nil: return rb.repo.Root.Signed.Version @@ -215,7 +219,7 @@ func (rb *repoBuilder) GetLoadedVersion(roleName string) int { // GetConsistentInfo returns the consistent name and size of a role, if it is known, // otherwise just the rolename and a -1 for size (both of which are inside a // ConsistentInfo object) -func (rb *repoBuilder) GetConsistentInfo(roleName string) ConsistentInfo { +func (rb *repoBuilder) GetConsistentInfo(roleName data.RoleName) ConsistentInfo { info := ConsistentInfo{RoleName: roleName} // starts out with unknown filemeta switch roleName { case data.CanonicalTimestampRole: @@ -224,29 +228,45 @@ func (rb *repoBuilder) GetConsistentInfo(roleName string) ConsistentInfo { info.fileMeta.Length = notary.MaxTimestampSize case data.CanonicalSnapshotRole: if rb.repo.Timestamp != nil { - info.fileMeta = rb.repo.Timestamp.Signed.Meta[roleName] + info.fileMeta = rb.repo.Timestamp.Signed.Meta[roleName.String()] } case data.CanonicalRootRole: switch { case rb.bootstrappedRootChecksum != nil: info.fileMeta = *rb.bootstrappedRootChecksum case rb.repo.Snapshot != nil: - info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName] + info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName.String()] } default: if rb.repo.Snapshot != nil { - info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName] + info.fileMeta = 
rb.repo.Snapshot.Signed.Meta[roleName.String()] } } return info } -func (rb *repoBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error { +func (rb *repoBuilder) Load(roleName data.RoleName, content []byte, minVersion int, allowExpired bool) error { + return rb.loadOptions(roleName, content, minVersion, allowExpired, false, false) +} + +// LoadRootForUpdate adds additional flags for updating the root.json file +func (rb *repoBuilder) LoadRootForUpdate(content []byte, minVersion int, isFinal bool) error { + if err := rb.loadOptions(data.CanonicalRootRole, content, minVersion, !isFinal, !isFinal, true); err != nil { + return err + } + if !isFinal { + rb.prevRoot = rb.repo.Root + } + return nil +} + +// loadOptions adds additional flags that should only be used for updating the root.json +func (rb *repoBuilder) loadOptions(roleName data.RoleName, content []byte, minVersion int, allowExpired, skipChecksum, allowLoaded bool) error { if !data.ValidRole(roleName) { return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s is an invalid role", roleName)} } - if rb.IsLoaded(roleName) { + if !allowLoaded && rb.IsLoaded(roleName) { return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s has already been loaded", roleName)} } @@ -255,9 +275,9 @@ func (rb *repoBuilder) Load(roleName string, content []byte, minVersion int, all case data.CanonicalRootRole: break case data.CanonicalTimestampRole, data.CanonicalSnapshotRole, data.CanonicalTargetsRole: - err = rb.checkPrereqsLoaded([]string{data.CanonicalRootRole}) + err = rb.checkPrereqsLoaded([]data.RoleName{data.CanonicalRootRole}) default: // delegations - err = rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalTargetsRole}) + err = rb.checkPrereqsLoaded([]data.RoleName{data.CanonicalRootRole, data.CanonicalTargetsRole}) } if err != nil { return err @@ -265,7 +285,7 @@ func (rb *repoBuilder) Load(roleName string, content []byte, minVersion int, all switch roleName { case 
data.CanonicalRootRole: - return rb.loadRoot(content, minVersion, allowExpired) + return rb.loadRoot(content, minVersion, allowExpired, skipChecksum) case data.CanonicalSnapshotRole: return rb.loadSnapshot(content, minVersion, allowExpired) case data.CanonicalTimestampRole: @@ -277,7 +297,7 @@ func (rb *repoBuilder) Load(roleName string, content []byte, minVersion int, all } } -func (rb *repoBuilder) checkPrereqsLoaded(prereqRoles []string) error { +func (rb *repoBuilder) checkPrereqsLoaded(prereqRoles []data.RoleName) error { for _, req := range prereqRoles { if !rb.IsLoaded(req) { return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s must be loaded first", req)} @@ -301,7 +321,7 @@ func (rb *repoBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot if timestamp has already been loaded"} } - if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole}); err != nil { + if err := rb.checkPrereqsLoaded([]data.RoleName{data.CanonicalRootRole}); err != nil { return nil, 0, err } @@ -310,7 +330,7 @@ func (rb *repoBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, // valid (it has a targets meta), we're good. 
switch prev { case nil: - if err := rb.checkPrereqsLoaded([]string{data.CanonicalTargetsRole}); err != nil { + if err := rb.checkPrereqsLoaded([]data.RoleName{data.CanonicalTargetsRole}); err != nil { return nil, 0, err } @@ -342,7 +362,7 @@ func (rb *repoBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, // the root and targets data (there may not be any) that that have been loaded, // remove all of them from rb.loadedNotChecksummed for tgtName := range rb.repo.Targets { - delete(rb.loadedNotChecksummed, tgtName) + delete(rb.loadedNotChecksummed, data.RoleName(tgtName)) } delete(rb.loadedNotChecksummed, data.CanonicalRootRole) @@ -367,7 +387,7 @@ func (rb *repoBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, in // SignTimestamp always serializes the loaded snapshot and signs in the data, so we must always // have the snapshot loaded first - if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalSnapshotRole}); err != nil { + if err := rb.checkPrereqsLoaded([]data.RoleName{data.CanonicalRootRole, data.CanonicalSnapshotRole}); err != nil { return nil, 0, err } @@ -408,10 +428,10 @@ func (rb *repoBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, in } // loadRoot loads a root if one has not been loaded -func (rb *repoBuilder) loadRoot(content []byte, minVersion int, allowExpired bool) error { +func (rb *repoBuilder) loadRoot(content []byte, minVersion int, allowExpired, skipChecksum bool) error { roleName := data.CanonicalRootRole - signedObj, err := rb.bytesToSigned(content, data.CanonicalRootRole) + signedObj, err := rb.bytesToSigned(content, data.CanonicalRootRole, skipChecksum) if err != nil { return err } @@ -511,7 +531,7 @@ func (rb *repoBuilder) loadSnapshot(content []byte, minVersion int, allowExpired // this snapshot to bootstrap the next builder if needed - and we don't need to do // the 2-value assignment since we've already validated the signedSnapshot, which MUST // have root metadata - 
rootMeta := signedSnapshot.Signed.Meta[data.CanonicalRootRole] + rootMeta := signedSnapshot.Signed.Meta[data.CanonicalRootRole.String()] rb.nextRootChecksum = &rootMeta if err := rb.validateChecksumsFromSnapshot(signedSnapshot); err != nil { @@ -555,14 +575,14 @@ func (rb *repoBuilder) loadTargets(content []byte, minVersion int, allowExpired return nil } -func (rb *repoBuilder) loadDelegation(roleName string, content []byte, minVersion int, allowExpired bool) error { +func (rb *repoBuilder) loadDelegation(roleName data.RoleName, content []byte, minVersion int, allowExpired bool) error { delegationRole, err := rb.repo.GetDelegationRole(roleName) if err != nil { return err } // bytesToSigned checks checksum - signedObj, err := rb.bytesToSigned(content, roleName) + signedObj, err := rb.bytesToSigned(content, roleName, false) if err != nil { return err } @@ -599,8 +619,8 @@ func (rb *repoBuilder) validateChecksumsFromTimestamp(ts *data.SignedTimestamp) sn, ok := rb.loadedNotChecksummed[data.CanonicalSnapshotRole] if ok { // by this point, the SignedTimestamp has been validated so it must have a snapshot hash - snMeta := ts.Signed.Meta[data.CanonicalSnapshotRole].Hashes - if err := data.CheckHashes(sn, data.CanonicalSnapshotRole, snMeta); err != nil { + snMeta := ts.Signed.Meta[data.CanonicalSnapshotRole.String()].Hashes + if err := data.CheckHashes(sn, data.CanonicalSnapshotRole.String(), snMeta); err != nil { return err } delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole) @@ -609,13 +629,13 @@ func (rb *repoBuilder) validateChecksumsFromTimestamp(ts *data.SignedTimestamp) } func (rb *repoBuilder) validateChecksumsFromSnapshot(sn *data.SignedSnapshot) error { - var goodRoles []string + var goodRoles []data.RoleName for roleName, loadedBytes := range rb.loadedNotChecksummed { switch roleName { case data.CanonicalSnapshotRole, data.CanonicalTimestampRole: break default: - if err := data.CheckHashes(loadedBytes, roleName, sn.Signed.Meta[roleName].Hashes); err != 
nil { + if err := data.CheckHashes(loadedBytes, roleName.String(), sn.Signed.Meta[roleName.String()].Hashes); err != nil { return err } goodRoles = append(goodRoles, roleName) @@ -627,10 +647,10 @@ func (rb *repoBuilder) validateChecksumsFromSnapshot(sn *data.SignedSnapshot) er return nil } -func (rb *repoBuilder) validateChecksumFor(content []byte, roleName string) error { +func (rb *repoBuilder) validateChecksumFor(content []byte, roleName data.RoleName) error { // validate the bootstrap checksum for root, if provided if roleName == data.CanonicalRootRole && rb.bootstrappedRootChecksum != nil { - if err := data.CheckHashes(content, roleName, rb.bootstrappedRootChecksum.Hashes); err != nil { + if err := data.CheckHashes(content, roleName.String(), rb.bootstrappedRootChecksum.Hashes); err != nil { return err } } @@ -639,7 +659,7 @@ func (rb *repoBuilder) validateChecksumFor(content []byte, roleName string) erro // loaded it is validated (to make sure everything in the repo is self-consistent) checksums := rb.getChecksumsFor(roleName) if checksums != nil { // as opposed to empty, in which case hash check should fail - if err := data.CheckHashes(content, roleName, *checksums); err != nil { + if err := data.CheckHashes(content, roleName.String(), *checksums); err != nil { return err } } else if roleName != data.CanonicalTimestampRole { @@ -655,9 +675,11 @@ func (rb *repoBuilder) validateChecksumFor(content []byte, roleName string) erro // Checksums the given bytes, and if they validate, convert to a data.Signed object. // If a checksums are nil (as opposed to empty), adds the bytes to the list of roles that // haven't been checksummed (unless it's a timestamp, which has no checksum reference). 
-func (rb *repoBuilder) bytesToSigned(content []byte, roleName string) (*data.Signed, error) { - if err := rb.validateChecksumFor(content, roleName); err != nil { - return nil, err +func (rb *repoBuilder) bytesToSigned(content []byte, roleName data.RoleName, skipChecksum bool) (*data.Signed, error) { + if !skipChecksum { + if err := rb.validateChecksumFor(content, roleName); err != nil { + return nil, err + } } // unmarshal to signed @@ -671,7 +693,7 @@ func (rb *repoBuilder) bytesToSigned(content []byte, roleName string) (*data.Sig func (rb *repoBuilder) bytesToSignedAndValidateSigs(role data.BaseRole, content []byte) (*data.Signed, error) { - signedObj, err := rb.bytesToSigned(content, role.Name) + signedObj, err := rb.bytesToSigned(content, role.Name, false) if err != nil { return nil, err } @@ -690,7 +712,7 @@ func (rb *repoBuilder) bytesToSignedAndValidateSigs(role data.BaseRole, content // available. If the checksum reference *is* loaded, then always returns the // Hashes object for the given role - if it doesn't exist, returns an empty Hash // object (against which any checksum validation would fail). 
-func (rb *repoBuilder) getChecksumsFor(role string) *data.Hashes { +func (rb *repoBuilder) getChecksumsFor(role data.RoleName) *data.Hashes { var hashes data.Hashes switch role { case data.CanonicalTimestampRole: @@ -699,12 +721,12 @@ func (rb *repoBuilder) getChecksumsFor(role string) *data.Hashes { if rb.repo.Timestamp == nil { return nil } - hashes = rb.repo.Timestamp.Signed.Meta[data.CanonicalSnapshotRole].Hashes + hashes = rb.repo.Timestamp.Signed.Meta[data.CanonicalSnapshotRole.String()].Hashes default: if rb.repo.Snapshot == nil { return nil } - hashes = rb.repo.Snapshot.Signed.Meta[role].Hashes + hashes = rb.repo.Snapshot.Signed.Meta[role.String()].Hashes } return &hashes } diff --git a/vendor/github.com/docker/notary/tuf/data/errors.go b/vendor/github.com/docker/notary/tuf/data/errors.go index 5c1397d3e941..32dd25066c66 100644 --- a/vendor/github.com/docker/notary/tuf/data/errors.go +++ b/vendor/github.com/docker/notary/tuf/data/errors.go @@ -4,12 +4,12 @@ import "fmt" // ErrInvalidMetadata is the error to be returned when metadata is invalid type ErrInvalidMetadata struct { - role string + role RoleName msg string } func (e ErrInvalidMetadata) Error() string { - return fmt.Sprintf("%s type metadata invalid: %s", e.role, e.msg) + return fmt.Sprintf("%s type metadata invalid: %s", e.role.String(), e.msg) } // ErrMissingMeta - couldn't find the FileMeta object for the given Role, or diff --git a/vendor/github.com/docker/notary/tuf/data/keys.go b/vendor/github.com/docker/notary/tuf/data/keys.go index dea57b105a4b..c19ea8fa1c51 100644 --- a/vendor/github.com/docker/notary/tuf/data/keys.go +++ b/vendor/github.com/docker/notary/tuf/data/keys.go @@ -12,9 +12,9 @@ import ( "io" "math/big" - "github.com/sirupsen/logrus" "github.com/agl/ed25519" "github.com/docker/go/canonical/json" + "github.com/sirupsen/logrus" ) // PublicKey is the necessary interface for public keys @@ -376,7 +376,7 @@ func NewECDSAPrivateKey(public PublicKey, private []byte) (*ECDSAPrivateKey, 
err switch public.(type) { case *ECDSAPublicKey, *ECDSAx509PublicKey: default: - return nil, errors.New("Invalid public key type provided to NewECDSAPrivateKey") + return nil, errors.New("invalid public key type provided to NewECDSAPrivateKey") } ecdsaPrivKey, err := x509.ParseECPrivateKey(private) if err != nil { @@ -394,7 +394,7 @@ func NewRSAPrivateKey(public PublicKey, private []byte) (*RSAPrivateKey, error) switch public.(type) { case *RSAPublicKey, *RSAx509PublicKey: default: - return nil, errors.New("Invalid public key type provided to NewRSAPrivateKey") + return nil, errors.New("invalid public key type provided to NewRSAPrivateKey") } rsaPrivKey, err := x509.ParsePKCS1PrivateKey(private) if err != nil { @@ -445,7 +445,7 @@ type ecdsaSig struct { func (k ECDSAPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) { ecdsaPrivKey, ok := k.CryptoSigner().(*ecdsa.PrivateKey) if !ok { - return nil, errors.New("Signer was based on the wrong key type") + return nil, errors.New("signer was based on the wrong key type") } hashed := sha256.Sum256(msg) sigASN1, err := ecdsaPrivKey.Sign(rand, hashed[:], opts) @@ -492,7 +492,7 @@ func (k ED25519PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOp // Sign on an UnknownPrivateKey raises an error because the client does not // know how to sign with this key type. 
func (k UnknownPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) { - return nil, errors.New("Unknown key type, cannot sign.") + return nil, errors.New("unknown key type, cannot sign") } // SignatureAlgorithm returns the SigAlgorithm for a ECDSAPrivateKey diff --git a/vendor/github.com/docker/notary/tuf/data/roles.go b/vendor/github.com/docker/notary/tuf/data/roles.go index 354054eecaa8..1a6541ca3cd7 100644 --- a/vendor/github.com/docker/notary/tuf/data/roles.go +++ b/vendor/github.com/docker/notary/tuf/data/roles.go @@ -10,16 +10,16 @@ import ( ) // Canonical base role names -const ( - CanonicalRootRole = "root" - CanonicalTargetsRole = "targets" - CanonicalSnapshotRole = "snapshot" - CanonicalTimestampRole = "timestamp" +var ( + CanonicalRootRole RoleName = "root" + CanonicalTargetsRole RoleName = "targets" + CanonicalSnapshotRole RoleName = "snapshot" + CanonicalTimestampRole RoleName = "timestamp" ) // BaseRoles is an easy to iterate list of the top level // roles. -var BaseRoles = []string{ +var BaseRoles = []RoleName{ CanonicalRootRole, CanonicalTargetsRole, CanonicalSnapshotRole, @@ -31,7 +31,7 @@ var delegationRegexp = regexp.MustCompile("^[-a-z0-9_/]+$") // ErrNoSuchRole indicates the roles doesn't exist type ErrNoSuchRole struct { - Role string + Role RoleName } func (e ErrNoSuchRole) Error() string { @@ -42,7 +42,7 @@ func (e ErrNoSuchRole) Error() string { // something like a role for which sone of the public keys were // not found in the TUF repo. type ErrInvalidRole struct { - Role string + Role RoleName Reason string } @@ -56,7 +56,7 @@ func (e ErrInvalidRole) Error() string { // ValidRole only determines the name is semantically // correct. For target delegated roles, it does NOT check // the the appropriate parent roles exist. 
-func ValidRole(name string) bool { +func ValidRole(name RoleName) bool { if IsDelegation(name) { return true } @@ -70,24 +70,25 @@ func ValidRole(name string) bool { } // IsDelegation checks if the role is a delegation or a root role -func IsDelegation(role string) bool { +func IsDelegation(role RoleName) bool { + strRole := role.String() targetsBase := CanonicalTargetsRole + "/" - whitelistedChars := delegationRegexp.MatchString(role) + whitelistedChars := delegationRegexp.MatchString(strRole) // Limit size of full role string to 255 chars for db column size limit correctLength := len(role) < 256 // Removes ., .., extra slashes, and trailing slash - isClean := path.Clean(role) == role - return strings.HasPrefix(role, targetsBase) && + isClean := path.Clean(strRole) == strRole + return strings.HasPrefix(strRole, targetsBase.String()) && whitelistedChars && correctLength && isClean } // IsBaseRole checks if the role is a base role -func IsBaseRole(role string) bool { +func IsBaseRole(role RoleName) bool { for _, baseRole := range BaseRoles { if role == baseRole { return true @@ -100,11 +101,11 @@ func IsBaseRole(role string) bool { // path, i.e. targets/*, targets/foo/*. // The wildcard may only appear as the final part of the delegation and must // be a whole segment, i.e. targets/foo* is not a valid wildcard delegation. 
-func IsWildDelegation(role string) bool { - if path.Clean(role) != role { +func IsWildDelegation(role RoleName) bool { + if path.Clean(role.String()) != role.String() { return false } - base := path.Dir(role) + base := role.Parent() if !(IsDelegation(base) || base == CanonicalTargetsRole) { return false } @@ -114,12 +115,12 @@ func IsWildDelegation(role string) bool { // BaseRole is an internal representation of a root/targets/snapshot/timestamp role, with its public keys included type BaseRole struct { Keys map[string]PublicKey - Name string + Name RoleName Threshold int } // NewBaseRole creates a new BaseRole object with the provided parameters -func NewBaseRole(name string, threshold int, keys ...PublicKey) BaseRole { +func NewBaseRole(name RoleName, threshold int, keys ...PublicKey) BaseRole { r := BaseRole{ Name: name, Threshold: threshold, @@ -199,7 +200,7 @@ func (d DelegationRole) Restrict(child DelegationRole) (DelegationRole, error) { // determined by delegation name. // Ex: targets/a is a direct parent of targets/a/b, but targets/a is not a direct parent of targets/a/b/c func (d DelegationRole) IsParentOf(child DelegationRole) bool { - return path.Dir(child.Name) == d.Name + return path.Dir(child.Name.String()) == d.Name.String() } // CheckPaths checks if a given path is valid for the role @@ -251,12 +252,12 @@ type RootRole struct { // Eventually should only be used for immediately before and after serialization/deserialization type Role struct { RootRole - Name string `json:"name"` + Name RoleName `json:"name"` Paths []string `json:"paths,omitempty"` } // NewRole creates a new Role object from the given parameters -func NewRole(name string, threshold int, keyIDs, paths []string) (*Role, error) { +func NewRole(name RoleName, threshold int, keyIDs, paths []string) (*Role, error) { if IsDelegation(name) { if len(paths) == 0 { logrus.Debugf("role %s with no Paths will never be able to publish content until one or more are added", name) diff --git 
a/vendor/github.com/docker/notary/tuf/data/root.go b/vendor/github.com/docker/notary/tuf/data/root.go index cfadbdbfb120..9420e87ce455 100644 --- a/vendor/github.com/docker/notary/tuf/data/root.go +++ b/vendor/github.com/docker/notary/tuf/data/root.go @@ -16,9 +16,9 @@ type SignedRoot struct { // Root is the Signed component of a root.json type Root struct { SignedCommon - Keys Keys `json:"keys"` - Roles map[string]*RootRole `json:"roles"` - ConsistentSnapshot bool `json:"consistent_snapshot"` + Keys Keys `json:"keys"` + Roles map[RoleName]*RootRole `json:"roles"` + ConsistentSnapshot bool `json:"consistent_snapshot"` } // isValidRootStructure returns an error, or nil, depending on whether the content of the struct @@ -51,7 +51,7 @@ func isValidRootStructure(r Root) error { return nil } -func isValidRootRoleStructure(metaContainingRole, rootRoleName string, r RootRole, validKeys Keys) error { +func isValidRootRoleStructure(metaContainingRole, rootRoleName RoleName, r RootRole, validKeys Keys) error { if r.Threshold < 1 { return ErrInvalidMetadata{ role: metaContainingRole, @@ -70,7 +70,7 @@ func isValidRootRoleStructure(metaContainingRole, rootRoleName string, r RootRol } // NewRoot initializes a new SignedRoot with a set of keys, roles, and the consistent flag -func NewRoot(keys map[string]PublicKey, roles map[string]*RootRole, consistent bool) (*SignedRoot, error) { +func NewRoot(keys map[string]PublicKey, roles map[RoleName]*RootRole, consistent bool) (*SignedRoot, error) { signedRoot := &SignedRoot{ Signatures: make([]Signature, 0), Signed: Root{ @@ -91,7 +91,7 @@ func NewRoot(keys map[string]PublicKey, roles map[string]*RootRole, consistent b // BuildBaseRole returns a copy of a BaseRole using the information in this SignedRoot for the specified role name. 
// Will error for invalid role name or key metadata within this SignedRoot -func (r SignedRoot) BuildBaseRole(roleName string) (BaseRole, error) { +func (r SignedRoot) BuildBaseRole(roleName RoleName) (BaseRole, error) { roleData, ok := r.Signed.Roles[roleName] if !ok { return BaseRole{}, ErrInvalidRole{Role: roleName, Reason: "role not found in root file"} diff --git a/vendor/github.com/docker/notary/tuf/data/snapshot.go b/vendor/github.com/docker/notary/tuf/data/snapshot.go index 16c4c2ef7580..2341979537ba 100644 --- a/vendor/github.com/docker/notary/tuf/data/snapshot.go +++ b/vendor/github.com/docker/notary/tuf/data/snapshot.go @@ -4,9 +4,9 @@ import ( "bytes" "fmt" - "github.com/sirupsen/logrus" "github.com/docker/go/canonical/json" "github.com/docker/notary" + "github.com/sirupsen/logrus" ) // SignedSnapshot is a fully unpacked snapshot.json @@ -37,22 +37,22 @@ func IsValidSnapshotStructure(s Snapshot) error { role: CanonicalSnapshotRole, msg: "version cannot be less than one"} } - for _, role := range []string{CanonicalRootRole, CanonicalTargetsRole} { + for _, file := range []RoleName{CanonicalRootRole, CanonicalTargetsRole} { // Meta is a map of FileMeta, so if the role isn't in the map it returns // an empty FileMeta, which has an empty map, and you can check on keys // from an empty map. // // For now sha256 is required and sha512 is not. 
- if _, ok := s.Meta[role].Hashes[notary.SHA256]; !ok { + if _, ok := s.Meta[file.String()].Hashes[notary.SHA256]; !ok { return ErrInvalidMetadata{ role: CanonicalSnapshotRole, - msg: fmt.Sprintf("missing %s sha256 checksum information", role), + msg: fmt.Sprintf("missing %s sha256 checksum information", file.String()), } } - if err := CheckValidHashStructures(s.Meta[role].Hashes); err != nil { + if err := CheckValidHashStructures(s.Meta[file.String()].Hashes); err != nil { return ErrInvalidMetadata{ role: CanonicalSnapshotRole, - msg: fmt.Sprintf("invalid %s checksum information, %v", role, err), + msg: fmt.Sprintf("invalid %s checksum information, %v", file.String(), err), } } } @@ -90,8 +90,8 @@ func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) { Expires: DefaultExpires(CanonicalSnapshotRole), }, Meta: Files{ - CanonicalRootRole: rootMeta, - CanonicalTargetsRole: targetsMeta, + CanonicalRootRole.String(): rootMeta, + CanonicalTargetsRole.String(): targetsMeta, }, }, }, nil @@ -117,27 +117,27 @@ func (sp *SignedSnapshot) ToSigned() (*Signed, error) { } // AddMeta updates a role in the snapshot with new meta -func (sp *SignedSnapshot) AddMeta(role string, meta FileMeta) { - sp.Signed.Meta[role] = meta +func (sp *SignedSnapshot) AddMeta(role RoleName, meta FileMeta) { + sp.Signed.Meta[role.String()] = meta sp.Dirty = true } // GetMeta gets the metadata for a particular role, returning an error if it's // not found -func (sp *SignedSnapshot) GetMeta(role string) (*FileMeta, error) { - if meta, ok := sp.Signed.Meta[role]; ok { +func (sp *SignedSnapshot) GetMeta(role RoleName) (*FileMeta, error) { + if meta, ok := sp.Signed.Meta[role.String()]; ok { if _, ok := meta.Hashes["sha256"]; ok { return &meta, nil } } - return nil, ErrMissingMeta{Role: role} + return nil, ErrMissingMeta{Role: role.String()} } // DeleteMeta removes a role from the snapshot. If the role doesn't // exist in the snapshot, it's a noop. 
-func (sp *SignedSnapshot) DeleteMeta(role string) { - if _, ok := sp.Signed.Meta[role]; ok { - delete(sp.Signed.Meta, role) +func (sp *SignedSnapshot) DeleteMeta(role RoleName) { + if _, ok := sp.Signed.Meta[role.String()]; ok { + delete(sp.Signed.Meta, role.String()) sp.Dirty = true } } diff --git a/vendor/github.com/docker/notary/tuf/data/targets.go b/vendor/github.com/docker/notary/tuf/data/targets.go index 04cca121fd17..f01eb5e6adc6 100644 --- a/vendor/github.com/docker/notary/tuf/data/targets.go +++ b/vendor/github.com/docker/notary/tuf/data/targets.go @@ -26,7 +26,7 @@ type Targets struct { // isValidTargetsStructure returns an error, or nil, depending on whether the content of the struct // is valid for targets metadata. This does not check signatures or expiry, just that // the metadata content is valid. -func isValidTargetsStructure(t Targets, roleName string) error { +func isValidTargetsStructure(t Targets, roleName RoleName) error { if roleName != CanonicalTargetsRole && !IsDelegation(roleName) { return ErrInvalidRole{Role: roleName} } @@ -43,7 +43,7 @@ func isValidTargetsStructure(t Targets, roleName string) error { } for _, roleObj := range t.Delegations.Roles { - if !IsDelegation(roleObj.Name) || path.Dir(roleObj.Name) != roleName { + if !IsDelegation(roleObj.Name) || path.Dir(roleObj.Name.String()) != roleName.String() { return ErrInvalidMetadata{ role: roleName, msg: fmt.Sprintf("delegation role %s invalid", roleObj.Name)} } @@ -99,7 +99,7 @@ func (t SignedTargets) GetValidDelegations(parent DelegationRole) []DelegationRo // BuildDelegationRole returns a copy of a DelegationRole using the information in this SignedTargets for the specified role name. // Will error for invalid role name or key metadata within this SignedTargets. Path data is not validated. 
-func (t *SignedTargets) BuildDelegationRole(roleName string) (DelegationRole, error) { +func (t *SignedTargets) BuildDelegationRole(roleName RoleName) (DelegationRole, error) { for _, role := range t.Signed.Delegations.Roles { if role.Name == roleName { pubKeys := make(map[string]PublicKey) @@ -184,7 +184,7 @@ func (t *SignedTargets) MarshalJSON() ([]byte, error) { // TargetsFromSigned fully unpacks a Signed object into a SignedTargets, given // a role name (so it can validate the SignedTargets object) -func TargetsFromSigned(s *Signed, roleName string) (*SignedTargets, error) { +func TargetsFromSigned(s *Signed, roleName RoleName) (*SignedTargets, error) { t := Targets{} if err := defaultSerializer.Unmarshal(*s.Signed, &t); err != nil { return nil, err diff --git a/vendor/github.com/docker/notary/tuf/data/timestamp.go b/vendor/github.com/docker/notary/tuf/data/timestamp.go index 1a642415616e..883641cd732d 100644 --- a/vendor/github.com/docker/notary/tuf/data/timestamp.go +++ b/vendor/github.com/docker/notary/tuf/data/timestamp.go @@ -41,11 +41,11 @@ func IsValidTimestampStructure(t Timestamp) error { // from an empty map. // // For now sha256 is required and sha512 is not. 
- if _, ok := t.Meta[CanonicalSnapshotRole].Hashes[notary.SHA256]; !ok { + if _, ok := t.Meta[CanonicalSnapshotRole.String()].Hashes[notary.SHA256]; !ok { return ErrInvalidMetadata{ role: CanonicalTimestampRole, msg: "missing snapshot sha256 checksum information"} } - if err := CheckValidHashStructures(t.Meta[CanonicalSnapshotRole].Hashes); err != nil { + if err := CheckValidHashStructures(t.Meta[CanonicalSnapshotRole.String()].Hashes); err != nil { return ErrInvalidMetadata{ role: CanonicalTimestampRole, msg: fmt.Sprintf("invalid snapshot checksum information, %v", err)} } @@ -72,7 +72,7 @@ func NewTimestamp(snapshot *Signed) (*SignedTimestamp, error) { Expires: DefaultExpires(CanonicalTimestampRole), }, Meta: Files{ - CanonicalSnapshotRole: snapshotMeta, + CanonicalSnapshotRole.String(): snapshotMeta, }, }, }, nil @@ -101,9 +101,9 @@ func (ts *SignedTimestamp) ToSigned() (*Signed, error) { // GetSnapshot gets the expected snapshot metadata hashes in the timestamp metadata, // or nil if it doesn't exist func (ts *SignedTimestamp) GetSnapshot() (*FileMeta, error) { - snapshotExpected, ok := ts.Signed.Meta[CanonicalSnapshotRole] + snapshotExpected, ok := ts.Signed.Meta[CanonicalSnapshotRole.String()] if !ok { - return nil, ErrMissingMeta{Role: CanonicalSnapshotRole} + return nil, ErrMissingMeta{Role: CanonicalSnapshotRole.String()} } return &snapshotExpected, nil } diff --git a/vendor/github.com/docker/notary/tuf/data/types.go b/vendor/github.com/docker/notary/tuf/data/types.go index 8ed45834f0ae..5c480d483793 100644 --- a/vendor/github.com/docker/notary/tuf/data/types.go +++ b/vendor/github.com/docker/notary/tuf/data/types.go @@ -1,6 +1,7 @@ package data import ( + "bytes" "crypto/sha256" "crypto/sha512" "crypto/subtle" @@ -9,14 +10,61 @@ import ( "hash" "io" "io/ioutil" + "path" "strings" "time" - "github.com/sirupsen/logrus" "github.com/docker/go/canonical/json" "github.com/docker/notary" + "github.com/sirupsen/logrus" ) +// GUN type for specifying gun +type GUN 
string + +func (g GUN) String() string { + return string(g) +} + +// RoleName type for specifying role +type RoleName string + +func (r RoleName) String() string { + return string(r) +} + +// Parent provides the parent path role from the provided child role +func (r RoleName) Parent() RoleName { + return RoleName(path.Dir(r.String())) +} + +// MetadataRoleMapToStringMap generates a map string of bytes from a map RoleName of bytes +func MetadataRoleMapToStringMap(roles map[RoleName][]byte) map[string][]byte { + metadata := make(map[string][]byte) + for k, v := range roles { + metadata[k.String()] = v + } + return metadata +} + +// NewRoleList generates an array of RoleName objects from a slice of strings +func NewRoleList(roles []string) []RoleName { + var roleNames []RoleName + for _, role := range roles { + roleNames = append(roleNames, RoleName(role)) + } + return roleNames +} + +// RolesListToStringList generates an array of string objects from a slice of roles +func RolesListToStringList(roles []RoleName) []string { + var roleNames []string + for _, role := range roles { + roleNames = append(roleNames, role.String()) + } + return roleNames +} + // SigAlgorithm for types of signatures type SigAlgorithm string @@ -26,6 +74,15 @@ func (k SigAlgorithm) String() string { const defaultHashAlgorithm = "sha256" +// NotaryDefaultExpiries is the construct used to configure the default expiry times of +// the various role files. 
+var NotaryDefaultExpiries = map[RoleName]time.Duration{ + CanonicalRootRole: notary.NotaryRootExpiry, + CanonicalTargetsRole: notary.NotaryTargetsExpiry, + CanonicalSnapshotRole: notary.NotarySnapshotExpiry, + CanonicalTimestampRole: notary.NotaryTimestampExpiry, +} + // Signature types const ( EDDSASignature SigAlgorithm = "eddsa" @@ -45,23 +102,15 @@ const ( ) // TUFTypes is the set of metadata types -var TUFTypes = map[string]string{ +var TUFTypes = map[RoleName]string{ CanonicalRootRole: "Root", CanonicalTargetsRole: "Targets", CanonicalSnapshotRole: "Snapshot", CanonicalTimestampRole: "Timestamp", } -// SetTUFTypes allows one to override some or all of the default -// type names in TUF. -func SetTUFTypes(ts map[string]string) { - for k, v := range ts { - TUFTypes[k] = v - } -} - // ValidTUFType checks if the given type is valid for the role -func ValidTUFType(typ, role string) bool { +func ValidTUFType(typ string, role RoleName) bool { if ValidRole(role) { // All targets delegation roles must have // the valid type is for targets. 
@@ -70,7 +119,7 @@ func ValidTUFType(typ, role string) bool { // a type return false } - if strings.HasPrefix(role, CanonicalTargetsRole+"/") { + if strings.HasPrefix(role.String(), CanonicalTargetsRole.String()+"/") { role = CanonicalTargetsRole } } @@ -133,6 +182,34 @@ type FileMeta struct { Custom *json.RawMessage `json:"custom,omitempty"` } +// Equals returns true if the other FileMeta object is equivalent to this one +func (f FileMeta) Equals(o FileMeta) bool { + if o.Length != f.Length || len(f.Hashes) != len(f.Hashes) { + return false + } + if f.Custom == nil && o.Custom != nil || f.Custom != nil && o.Custom == nil { + return false + } + // we don't care if these are valid hashes, just that they are equal + for key, val := range f.Hashes { + if !bytes.Equal(val, o.Hashes[key]) { + return false + } + } + if f.Custom == nil && o.Custom == nil { + return true + } + fBytes, err := f.Custom.MarshalJSON() + if err != nil { + return false + } + oBytes, err := o.Custom.MarshalJSON() + if err != nil { + return false + } + return bytes.Equal(fBytes, oBytes) +} + // CheckHashes verifies all the checksums specified by the "hashes" of the payload. func CheckHashes(payload []byte, name string, hashes Hashes) error { cnt := 0 @@ -269,7 +346,7 @@ func NewDelegations() *Delegations { } // These values are recommended TUF expiry times. -var defaultExpiryTimes = map[string]time.Duration{ +var defaultExpiryTimes = map[RoleName]time.Duration{ CanonicalRootRole: notary.Year, CanonicalTargetsRole: 90 * notary.Day, CanonicalSnapshotRole: 7 * notary.Day, @@ -277,10 +354,10 @@ var defaultExpiryTimes = map[string]time.Duration{ } // SetDefaultExpiryTimes allows one to change the default expiries. 
-func SetDefaultExpiryTimes(times map[string]time.Duration) { +func SetDefaultExpiryTimes(times map[RoleName]time.Duration) { for key, value := range times { if _, ok := defaultExpiryTimes[key]; !ok { - logrus.Errorf("Attempted to set default expiry for an unknown role: %s", key) + logrus.Errorf("Attempted to set default expiry for an unknown role: %s", key.String()) continue } defaultExpiryTimes[key] = value @@ -288,7 +365,7 @@ func SetDefaultExpiryTimes(times map[string]time.Duration) { } // DefaultExpires gets the default expiry time for the given role -func DefaultExpires(role string) time.Time { +func DefaultExpires(role RoleName) time.Time { if d, ok := defaultExpiryTimes[role]; ok { return time.Now().Add(d) } diff --git a/vendor/github.com/docker/notary/tuf/signed/ed25519.go b/vendor/github.com/docker/notary/tuf/signed/ed25519.go index 7a70739e408a..e08daba35553 100644 --- a/vendor/github.com/docker/notary/tuf/signed/ed25519.go +++ b/vendor/github.com/docker/notary/tuf/signed/ed25519.go @@ -10,7 +10,7 @@ import ( ) type edCryptoKey struct { - role string + role data.RoleName privKey data.PrivateKey } @@ -28,13 +28,13 @@ func NewEd25519() *Ed25519 { } // AddKey allows you to add a private key -func (e *Ed25519) AddKey(role, gun string, k data.PrivateKey) error { +func (e *Ed25519) AddKey(role data.RoleName, gun data.GUN, k data.PrivateKey) error { e.addKey(role, k) return nil } // addKey allows you to add a private key -func (e *Ed25519) addKey(role string, k data.PrivateKey) { +func (e *Ed25519) addKey(role data.RoleName, k data.PrivateKey) { e.keys[k.ID()] = edCryptoKey{ role: role, privKey: k, @@ -48,7 +48,7 @@ func (e *Ed25519) RemoveKey(keyID string) error { } // ListKeys returns the list of keys IDs for the role -func (e *Ed25519) ListKeys(role string) []string { +func (e *Ed25519) ListKeys(role data.RoleName) []string { keyIDs := make([]string, 0, len(e.keys)) for id, edCryptoKey := range e.keys { if edCryptoKey.role == role { @@ -59,8 +59,8 @@ func (e 
*Ed25519) ListKeys(role string) []string { } // ListAllKeys returns the map of keys IDs to role -func (e *Ed25519) ListAllKeys() map[string]string { - keys := make(map[string]string) +func (e *Ed25519) ListAllKeys() map[string]data.RoleName { + keys := make(map[string]data.RoleName) for id, edKey := range e.keys { keys[id] = edKey.role } @@ -68,7 +68,7 @@ func (e *Ed25519) ListAllKeys() map[string]string { } // Create generates a new key and returns the public part -func (e *Ed25519) Create(role, gun, algorithm string) (data.PublicKey, error) { +func (e *Ed25519) Create(role data.RoleName, gun data.GUN, algorithm string) (data.PublicKey, error) { if algorithm != data.ED25519Key { return nil, errors.New("only ED25519 supported by this cryptoservice") } @@ -103,7 +103,7 @@ func (e *Ed25519) GetKey(keyID string) data.PublicKey { } // GetPrivateKey returns a single private key and role if present, based on the ID -func (e *Ed25519) GetPrivateKey(keyID string) (data.PrivateKey, string, error) { +func (e *Ed25519) GetPrivateKey(keyID string) (data.PrivateKey, data.RoleName, error) { if k, ok := e.keys[keyID]; ok { return k.privKey, k.role, nil } diff --git a/vendor/github.com/docker/notary/tuf/signed/errors.go b/vendor/github.com/docker/notary/tuf/signed/errors.go index 2a633c86407f..5d4ff04ab70e 100644 --- a/vendor/github.com/docker/notary/tuf/signed/errors.go +++ b/vendor/github.com/docker/notary/tuf/signed/errors.go @@ -3,6 +3,8 @@ package signed import ( "fmt" "strings" + + "github.com/docker/notary/tuf/data" ) // ErrInsufficientSignatures - can not create enough signatures on a piece of @@ -29,12 +31,12 @@ func (e ErrInsufficientSignatures) Error() string { // ErrExpired indicates a piece of metadata has expired type ErrExpired struct { - Role string + Role data.RoleName Expired string } func (e ErrExpired) Error() string { - return fmt.Sprintf("%s expired at %v", e.Role, e.Expired) + return fmt.Sprintf("%s expired at %v", e.Role.String(), e.Expired) } // 
ErrLowVersion indicates the piece of metadata has a version number lower than diff --git a/vendor/github.com/docker/notary/tuf/signed/interface.go b/vendor/github.com/docker/notary/tuf/signed/interface.go index 862b23b8f5cb..03b426f16200 100644 --- a/vendor/github.com/docker/notary/tuf/signed/interface.go +++ b/vendor/github.com/docker/notary/tuf/signed/interface.go @@ -1,8 +1,6 @@ package signed -import ( - "github.com/docker/notary/tuf/data" -) +import "github.com/docker/notary/tuf/data" // KeyService provides management of keys locally. It will never // accept or provide private keys. Communication between the KeyService @@ -10,17 +8,17 @@ import ( type KeyService interface { // Create issues a new key pair and is responsible for loading // the private key into the appropriate signing service. - Create(role, gun, algorithm string) (data.PublicKey, error) + Create(role data.RoleName, gun data.GUN, algorithm string) (data.PublicKey, error) // AddKey adds a private key to the specified role and gun - AddKey(role, gun string, key data.PrivateKey) error + AddKey(role data.RoleName, gun data.GUN, key data.PrivateKey) error // GetKey retrieves the public key if present, otherwise it returns nil GetKey(keyID string) data.PublicKey // GetPrivateKey retrieves the private key and role if present and retrievable, // otherwise it returns nil and an error - GetPrivateKey(keyID string) (data.PrivateKey, string, error) + GetPrivateKey(keyID string) (data.PrivateKey, data.RoleName, error) // RemoveKey deletes the specified key, and returns an error only if the key // removal fails. If the key doesn't exist, no error should be returned. @@ -28,11 +26,11 @@ type KeyService interface { // ListKeys returns a list of key IDs for the role, or an empty list or // nil if there are no keys. - ListKeys(role string) []string + ListKeys(role data.RoleName) []string // ListAllKeys returns a map of all available signing key IDs to role, or // an empty map or nil if there are no keys. 
- ListAllKeys() map[string]string + ListAllKeys() map[string]data.RoleName } // CryptoService is deprecated and all instances of its use should be diff --git a/vendor/github.com/docker/notary/tuf/signed/sign.go b/vendor/github.com/docker/notary/tuf/signed/sign.go index 28e437c2351b..be1410d60173 100644 --- a/vendor/github.com/docker/notary/tuf/signed/sign.go +++ b/vendor/github.com/docker/notary/tuf/signed/sign.go @@ -14,10 +14,10 @@ package signed import ( "crypto/rand" - "github.com/sirupsen/logrus" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" ) // Sign takes a data.Signed and a cryptoservice containing private keys, diff --git a/vendor/github.com/docker/notary/tuf/signed/verifiers.go b/vendor/github.com/docker/notary/tuf/signed/verifiers.go index 7698fecd827a..e32b15378b3c 100644 --- a/vendor/github.com/docker/notary/tuf/signed/verifiers.go +++ b/vendor/github.com/docker/notary/tuf/signed/verifiers.go @@ -9,11 +9,10 @@ import ( "encoding/pem" "fmt" "math/big" - "reflect" - "github.com/sirupsen/logrus" "github.com/agl/ed25519" "github.com/docker/notary/tuf/data" + "github.com/sirupsen/logrus" ) const ( @@ -32,24 +31,6 @@ var Verifiers = map[data.SigAlgorithm]Verifier{ data.EDDSASignature: Ed25519Verifier{}, } -// RegisterVerifier provides a convenience function for init() functions -// to register additional verifiers or replace existing ones. 
-func RegisterVerifier(algorithm data.SigAlgorithm, v Verifier) { - curr, ok := Verifiers[algorithm] - if ok { - typOld := reflect.TypeOf(curr) - typNew := reflect.TypeOf(v) - logrus.Debugf( - "replacing already loaded verifier %s:%s with %s:%s", - typOld.PkgPath(), typOld.Name(), - typNew.PkgPath(), typNew.Name(), - ) - } else { - logrus.Debug("adding verifier for: ", algorithm) - } - Verifiers[algorithm] = v -} - // Ed25519Verifier used to verify Ed25519 signatures type Ed25519Verifier struct{} diff --git a/vendor/github.com/docker/notary/tuf/signed/verify.go b/vendor/github.com/docker/notary/tuf/signed/verify.go index b7bf0eb85924..c3bf1925ea07 100644 --- a/vendor/github.com/docker/notary/tuf/signed/verify.go +++ b/vendor/github.com/docker/notary/tuf/signed/verify.go @@ -6,18 +6,16 @@ import ( "strings" "time" - "github.com/sirupsen/logrus" "github.com/docker/go/canonical/json" "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" ) // Various basic signing errors var ( - ErrMissingKey = errors.New("tuf: missing key") ErrNoSignatures = errors.New("tuf: data has no signatures") ErrInvalid = errors.New("tuf: signature verification failed") - ErrWrongMethod = errors.New("tuf: invalid signature type") - ErrUnknownRole = errors.New("tuf: unknown role") ErrWrongType = errors.New("tuf: meta file has wrong type") ) @@ -27,7 +25,7 @@ func IsExpired(t time.Time) bool { } // VerifyExpiry returns ErrExpired if the metadata is expired -func VerifyExpiry(s *data.SignedCommon, role string) error { +func VerifyExpiry(s *data.SignedCommon, role data.RoleName) error { if IsExpired(s.Expires) { logrus.Errorf("Metadata for %s expired", role) return ErrExpired{Role: role, Expired: s.Expires.Format("Mon Jan 2 15:04:05 MST 2006")} @@ -101,12 +99,25 @@ func VerifySignature(msg []byte, sig *data.Signature, pk data.PublicKey) error { method := sig.Method verifier, ok := Verifiers[method] if !ok { - return fmt.Errorf("signing method is 
not supported: %s\n", sig.Method) + return fmt.Errorf("signing method is not supported: %s", sig.Method) } if err := verifier.Verify(pk, sig.Signature, msg); err != nil { - return fmt.Errorf("signature was invalid\n") + return fmt.Errorf("signature was invalid") } sig.IsValid = true return nil } + +// VerifyPublicKeyMatchesPrivateKey checks if the private key and the public keys forms valid key pairs. +// Supports both x509 certificate PublicKeys and non-certificate PublicKeys +func VerifyPublicKeyMatchesPrivateKey(privKey data.PrivateKey, pubKey data.PublicKey) error { + pubKeyID, err := utils.CanonicalKeyID(pubKey) + if err != nil { + return fmt.Errorf("could not verify key pair: %v", err) + } + if privKey == nil || pubKeyID != privKey.ID() { + return fmt.Errorf("private key is nil or does not match public key") + } + return nil +} diff --git a/vendor/github.com/docker/notary/tuf/tuf.go b/vendor/github.com/docker/notary/tuf/tuf.go index 38be77989da9..2136e52dfeac 100644 --- a/vendor/github.com/docker/notary/tuf/tuf.go +++ b/vendor/github.com/docker/notary/tuf/tuf.go @@ -5,17 +5,14 @@ import ( "bytes" "encoding/json" "fmt" - "path" - "sort" - "strconv" "strings" "time" - "github.com/sirupsen/logrus" "github.com/docker/notary" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/utils" + "github.com/sirupsen/logrus" ) // ErrSigVerifyFail - signature verification failed @@ -43,7 +40,7 @@ func (e ErrLocalRootExpired) Error() string { // the repo. This means specifically that the relevant JSON file has not // been loaded. type ErrNotLoaded struct { - Role string + Role data.RoleName } func (err ErrNotLoaded) Error() string { @@ -60,7 +57,7 @@ type StopWalk struct{} // the Repo instance. 
type Repo struct { Root *data.SignedRoot - Targets map[string]*data.SignedTargets + Targets map[data.RoleName]*data.SignedTargets Snapshot *data.SignedSnapshot Timestamp *data.SignedTimestamp cryptoService signed.CryptoService @@ -78,13 +75,13 @@ type Repo struct { // can be nil. func NewRepo(cryptoService signed.CryptoService) *Repo { return &Repo{ - Targets: make(map[string]*data.SignedTargets), + Targets: make(map[data.RoleName]*data.SignedTargets), cryptoService: cryptoService, } } // AddBaseKeys is used to add keys to the role in root.json -func (tr *Repo) AddBaseKeys(role string, keys ...data.PublicKey) error { +func (tr *Repo) AddBaseKeys(role data.RoleName, keys ...data.PublicKey) error { if tr.Root == nil { return ErrNotLoaded{Role: data.CanonicalRootRole} } @@ -117,7 +114,7 @@ func (tr *Repo) AddBaseKeys(role string, keys ...data.PublicKey) error { } // ReplaceBaseKeys is used to replace all keys for the given role with the new keys -func (tr *Repo) ReplaceBaseKeys(role string, keys ...data.PublicKey) error { +func (tr *Repo) ReplaceBaseKeys(role data.RoleName, keys ...data.PublicKey) error { r, err := tr.GetBaseRole(role) if err != nil { return err @@ -130,7 +127,7 @@ func (tr *Repo) ReplaceBaseKeys(role string, keys ...data.PublicKey) error { } // RemoveBaseKeys is used to remove keys from the roles in root.json -func (tr *Repo) RemoveBaseKeys(role string, keyIDs ...string) error { +func (tr *Repo) RemoveBaseKeys(role data.RoleName, keyIDs ...string) error { if tr.Root == nil { return ErrNotLoaded{Role: data.CanonicalRootRole} } @@ -153,20 +150,7 @@ func (tr *Repo) RemoveBaseKeys(role string, keyIDs ...string) error { // also, whichever role had keys removed needs to be re-signed // root has already been marked dirty. 
- switch role { - case data.CanonicalSnapshotRole: - if tr.Snapshot != nil { - tr.Snapshot.Dirty = true - } - case data.CanonicalTargetsRole: - if target, ok := tr.Targets[data.CanonicalTargetsRole]; ok { - target.Dirty = true - } - case data.CanonicalTimestampRole: - if tr.Timestamp != nil { - tr.Timestamp.Dirty = true - } - } + tr.markRoleDirty(role) // determine which keys are no longer in use by any roles for roleName, r := range tr.Root.Signed.Roles { @@ -193,12 +177,30 @@ func (tr *Repo) RemoveBaseKeys(role string, keyIDs ...string) error { tr.cryptoService.RemoveKey(k) } } + tr.Root.Dirty = true return nil } +func (tr *Repo) markRoleDirty(role data.RoleName) { + switch role { + case data.CanonicalSnapshotRole: + if tr.Snapshot != nil { + tr.Snapshot.Dirty = true + } + case data.CanonicalTargetsRole: + if target, ok := tr.Targets[data.CanonicalTargetsRole]; ok { + target.Dirty = true + } + case data.CanonicalTimestampRole: + if tr.Timestamp != nil { + tr.Timestamp.Dirty = true + } + } +} + // GetBaseRole gets a base role from this repo's metadata -func (tr *Repo) GetBaseRole(name string) (data.BaseRole, error) { +func (tr *Repo) GetBaseRole(name data.RoleName) (data.BaseRole, error) { if !data.ValidRole(name) { return data.BaseRole{}, data.ErrInvalidRole{Role: name, Reason: "invalid base role name"} } @@ -215,7 +217,7 @@ func (tr *Repo) GetBaseRole(name string) (data.BaseRole, error) { } // GetDelegationRole gets a delegation role from this repo's metadata, walking from the targets role down to the delegation itself -func (tr *Repo) GetDelegationRole(name string) (data.DelegationRole, error) { +func (tr *Repo) GetDelegationRole(name data.RoleName) (data.DelegationRole, error) { if !data.IsDelegation(name) { return data.DelegationRole{}, data.ErrInvalidRole{Role: name, Reason: "invalid delegation name"} } @@ -267,7 +269,7 @@ func (tr *Repo) GetDelegationRole(name string) (data.DelegationRole, error) { } // Walk to the parent of this delegation, since that is 
where its role metadata exists - err := tr.WalkTargets("", path.Dir(name), buildDelegationRoleVisitor) + err := tr.WalkTargets("", name.Parent(), buildDelegationRoleVisitor) if err != nil { return data.DelegationRole{}, err } @@ -306,7 +308,7 @@ func (tr *Repo) GetAllLoadedRoles() []*data.Role { // Walk to parent, and either create or update this delegation. We can only create a new delegation if we're given keys // Ensure all updates are valid, by checking against parent ancestor paths and ensuring the keys meet the role threshold. -func delegationUpdateVisitor(roleName string, addKeys data.KeyList, removeKeys, addPaths, removePaths []string, clearAllPaths bool, newThreshold int) walkVisitorFunc { +func delegationUpdateVisitor(roleName data.RoleName, addKeys data.KeyList, removeKeys, addPaths, removePaths []string, clearAllPaths bool, newThreshold int) walkVisitorFunc { return func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} { var err error // Validate the changes underneath this restricted validRole for adding paths, reject invalid path additions @@ -382,11 +384,11 @@ func delegationUpdateVisitor(roleName string, addKeys data.KeyList, removeKeys, // a new delegation or updating an existing one. If keys are // provided, the IDs will be added to the role (if they do not exist // there already), and the keys will be added to the targets file. 
-func (tr *Repo) UpdateDelegationKeys(roleName string, addKeys data.KeyList, removeKeys []string, newThreshold int) error { +func (tr *Repo) UpdateDelegationKeys(roleName data.RoleName, addKeys data.KeyList, removeKeys []string, newThreshold int) error { if !data.IsDelegation(roleName) { return data.ErrInvalidRole{Role: roleName, Reason: "not a valid delegated role"} } - parent := path.Dir(roleName) + parent := roleName.Parent() if err := tr.VerifyCanSign(parent); err != nil { return err @@ -405,13 +407,13 @@ func (tr *Repo) UpdateDelegationKeys(roleName string, addKeys data.KeyList, remo // Walk to the parent of this delegation, since that is where its role metadata exists // We do not have to verify that the walker reached its desired role in this scenario // since we've already done another walk to the parent role in VerifyCanSign, and potentially made a targets file - return tr.WalkTargets("", parent, delegationUpdateVisitor(roleName, addKeys, removeKeys, []string{}, []string{}, false, newThreshold)) + return tr.WalkTargets("", roleName.Parent(), delegationUpdateVisitor(roleName, addKeys, removeKeys, []string{}, []string{}, false, newThreshold)) } // PurgeDelegationKeys removes the provided canonical key IDs from all delegations // present in the subtree rooted at role. The role argument must be provided in a wildcard // format, i.e. 
targets/* would remove the key from all delegations in the repo -func (tr *Repo) PurgeDelegationKeys(role string, removeKeys []string) error { +func (tr *Repo) PurgeDelegationKeys(role data.RoleName, removeKeys []string) error { if !data.IsWildDelegation(role) { return data.ErrInvalidRole{ Role: role, @@ -424,7 +426,7 @@ func (tr *Repo) PurgeDelegationKeys(role string, removeKeys []string) error { removeIDs[id] = struct{}{} } - start := path.Dir(role) + start := role.Parent() tufIDToCanon := make(map[string]string) purgeKeys := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} { @@ -480,11 +482,11 @@ func (tr *Repo) PurgeDelegationKeys(role string, removeKeys []string) error { // UpdateDelegationPaths updates the appropriate delegation's paths. // It is not allowed to create a new delegation. -func (tr *Repo) UpdateDelegationPaths(roleName string, addPaths, removePaths []string, clearPaths bool) error { +func (tr *Repo) UpdateDelegationPaths(roleName data.RoleName, addPaths, removePaths []string, clearPaths bool) error { if !data.IsDelegation(roleName) { return data.ErrInvalidRole{Role: roleName, Reason: "not a valid delegated role"} } - parent := path.Dir(roleName) + parent := roleName.Parent() if err := tr.VerifyCanSign(parent); err != nil { return err @@ -510,12 +512,12 @@ func (tr *Repo) UpdateDelegationPaths(roleName string, addPaths, removePaths []s // DeleteDelegation removes a delegated targets role from its parent // targets object. It also deletes the delegation from the snapshot. // DeleteDelegation will only make use of the role Name field. 
-func (tr *Repo) DeleteDelegation(roleName string) error { +func (tr *Repo) DeleteDelegation(roleName data.RoleName) error { if !data.IsDelegation(roleName) { return data.ErrInvalidRole{Role: roleName, Reason: "not a valid delegated role"} } - parent := path.Dir(roleName) + parent := roleName.Parent() if err := tr.VerifyCanSign(parent); err != nil { return err } @@ -554,7 +556,7 @@ func (tr *Repo) DeleteDelegation(roleName string) error { // InitRoot initializes an empty root file with the 4 core roles passed to the // method, and the consistent flag. func (tr *Repo) InitRoot(root, timestamp, snapshot, targets data.BaseRole, consistent bool) error { - rootRoles := make(map[string]*data.RootRole) + rootRoles := make(map[data.RoleName]*data.RootRole) rootKeys := make(map[string]data.PublicKey) for _, r := range []data.BaseRole{root, timestamp, snapshot, targets} { @@ -576,11 +578,11 @@ func (tr *Repo) InitRoot(root, timestamp, snapshot, targets data.BaseRole, consi } // InitTargets initializes an empty targets, and returns the new empty target -func (tr *Repo) InitTargets(role string) (*data.SignedTargets, error) { +func (tr *Repo) InitTargets(role data.RoleName) (*data.SignedTargets, error) { if !data.IsDelegation(role) && role != data.CanonicalTargetsRole { return nil, data.ErrInvalidRole{ Role: role, - Reason: fmt.Sprintf("role is not a valid targets role name: %s", role), + Reason: fmt.Sprintf("role is not a valid targets role name: %s", role.String()), } } targets := data.NewTargets() @@ -631,7 +633,7 @@ func (tr *Repo) InitTimestamp() error { // TargetMeta returns the FileMeta entry for the given path in the // targets file associated with the given role. This may be nil if // the target isn't found in the targets file. 
-func (tr Repo) TargetMeta(role, path string) *data.FileMeta { +func (tr Repo) TargetMeta(role data.RoleName, path string) *data.FileMeta { if t, ok := tr.Targets[role]; ok { if m, ok := t.Signed.Targets[path]; ok { return &m @@ -642,7 +644,7 @@ func (tr Repo) TargetMeta(role, path string) *data.FileMeta { // TargetDelegations returns a slice of Roles that are valid publishers // for the target path provided. -func (tr Repo) TargetDelegations(role, path string) []*data.Role { +func (tr Repo) TargetDelegations(role data.RoleName, path string) []*data.Role { var roles []*data.Role if t, ok := tr.Targets[role]; ok { for _, r := range t.Signed.Delegations.Roles { @@ -659,7 +661,7 @@ func (tr Repo) TargetDelegations(role, path string) []*data.Role { // enough signing keys to meet the threshold, since we want to support the use // case of multiple signers for a role. It returns an error if the role doesn't // exist or if there are no signing keys. -func (tr *Repo) VerifyCanSign(roleName string) error { +func (tr *Repo) VerifyCanSign(roleName data.RoleName) error { var ( role data.BaseRole err error @@ -702,7 +704,7 @@ type walkVisitorFunc func(*data.SignedTargets, data.DelegationRole) interface{} // WalkTargets will apply the specified visitor function to iteratively walk the targets/delegation metadata tree, // until receiving a StopWalk. The walk starts from the base "targets" role, and searches for the correct targetPath and/or rolePath // to call the visitor function on. 
Any roles passed into skipRoles will be excluded from the walk, as well as roles in those subtrees -func (tr *Repo) WalkTargets(targetPath, rolePath string, visitTargets walkVisitorFunc, skipRoles ...string) error { +func (tr *Repo) WalkTargets(targetPath string, rolePath data.RoleName, visitTargets walkVisitorFunc, skipRoles ...data.RoleName) error { // Start with the base targets role, which implicitly has the "" targets path targetsRole, err := tr.GetBaseRole(data.CanonicalTargetsRole) if err != nil { @@ -728,15 +730,15 @@ func (tr *Repo) WalkTargets(targetPath, rolePath string, visitTargets walkVisito } // We're at a prefix of the desired role subtree, so add its delegation role children and continue walking - if strings.HasPrefix(rolePath, role.Name+"/") { + if strings.HasPrefix(rolePath.String(), role.Name.String()+"/") { roles = append(roles, signedTgt.GetValidDelegations(role)...) continue } // Determine whether to visit this role or not: - // If the paths validate against the specified targetPath and the rolePath is empty or is in the subtree. + // If the paths validate against the specified targetPath and the role is empty or is a path in the subtree. 
// Also check if we are choosing to skip visiting this role on this walk (see ListTargets and GetTargetByName priority) - if isValidPath(targetPath, role) && isAncestorRole(role.Name, rolePath) && !utils.StrSliceContains(skipRoles, role.Name) { + if isValidPath(targetPath, role) && isAncestorRole(role.Name, rolePath) && !utils.RoleNameSliceContains(skipRoles, role.Name) { // If we had matching path or role name, visit this target and determine whether or not to keep walking res := visitTargets(signedTgt, role) switch typedRes := res.(type) { @@ -763,8 +765,8 @@ func (tr *Repo) WalkTargets(targetPath, rolePath string, visitTargets walkVisito // Will return true if given an empty candidateAncestor role name // The HasPrefix check is for determining whether the role name for candidateChild is a child (direct or further down the chain) // of candidateAncestor, for ex: candidateAncestor targets/a and candidateChild targets/a/b/c -func isAncestorRole(candidateChild, candidateAncestor string) bool { - return candidateAncestor == "" || candidateAncestor == candidateChild || strings.HasPrefix(candidateChild, candidateAncestor+"/") +func isAncestorRole(candidateChild data.RoleName, candidateAncestor data.RoleName) bool { + return candidateAncestor.String() == "" || candidateAncestor == candidateChild || strings.HasPrefix(candidateChild.String(), candidateAncestor.String()+"/") } // helper function that returns whether the delegation Role is valid against the given path @@ -776,11 +778,12 @@ func isValidPath(candidatePath string, delgRole data.DelegationRole) bool { // AddTargets will attempt to add the given targets specifically to // the directed role. If the metadata for the role doesn't exist yet, // AddTargets will create one. 
-func (tr *Repo) AddTargets(role string, targets data.Files) (data.Files, error) { - err := tr.VerifyCanSign(role) - if err != nil { - return nil, err +func (tr *Repo) AddTargets(role data.RoleName, targets data.Files) (data.Files, error) { + cantSignErr := tr.VerifyCanSign(role) + if _, ok := cantSignErr.(data.ErrInvalidRole); ok { + return nil, cantSignErr } + var needSign bool // check existence of the role's metadata _, ok := tr.Targets[role] @@ -796,10 +799,18 @@ func (tr *Repo) AddTargets(role string, targets data.Files) (data.Files, error) addTargetVisitor := func(targetPath string, targetMeta data.FileMeta) func(*data.SignedTargets, data.DelegationRole) interface{} { return func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} { // We've already validated the role's target path in our walk, so just modify the metadata - tgt.Signed.Targets[targetPath] = targetMeta - tgt.Dirty = true - // Also add to our new addedTargets map to keep track of every target we've added successfully - addedTargets[targetPath] = targetMeta + if targetMeta.Equals(tgt.Signed.Targets[targetPath]) { + // Also add to our new addedTargets map because this target was "added" successfully + addedTargets[targetPath] = targetMeta + return StopWalk{} + } + needSign = true + if cantSignErr == nil { + tgt.Signed.Targets[targetPath] = targetMeta + tgt.Dirty = true + // Also add to our new addedTargets map to keep track of every target we've added successfully + addedTargets[targetPath] = targetMeta + } return StopWalk{} } } @@ -807,6 +818,9 @@ func (tr *Repo) AddTargets(role string, targets data.Files) (data.Files, error) // Walk the role tree while validating the target paths, and add all of our targets for path, target := range targets { tr.WalkTargets(path, role, addTargetVisitor(path, target)) + if needSign && cantSignErr != nil { + return nil, cantSignErr + } } if len(addedTargets) != len(targets) { return nil, fmt.Errorf("Could not add all targets") @@ -815,18 +829,21 
@@ func (tr *Repo) AddTargets(role string, targets data.Files) (data.Files, error) } // RemoveTargets removes the given target (paths) from the given target role (delegation) -func (tr *Repo) RemoveTargets(role string, targets ...string) error { - if err := tr.VerifyCanSign(role); err != nil { - return err +func (tr *Repo) RemoveTargets(role data.RoleName, targets ...string) error { + cantSignErr := tr.VerifyCanSign(role) + if _, ok := cantSignErr.(data.ErrInvalidRole); ok { + return cantSignErr } - + var needSign bool removeTargetVisitor := func(targetPath string) func(*data.SignedTargets, data.DelegationRole) interface{} { return func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} { // We've already validated the role path in our walk, so just modify the metadata // We don't check against the target path against the valid role paths because it's // possible we got into an invalid state and are trying to fix it - delete(tgt.Signed.Targets, targetPath) - tgt.Dirty = true + if _, needSign = tgt.Signed.Targets[targetPath]; needSign && cantSignErr == nil { + delete(tgt.Signed.Targets, targetPath) + tgt.Dirty = true + } return StopWalk{} } } @@ -836,6 +853,9 @@ func (tr *Repo) RemoveTargets(role string, targets ...string) error { if ok { for _, path := range targets { tr.WalkTargets("", role, removeTargetVisitor(path)) + if needSign && cantSignErr != nil { + return cantSignErr + } } } @@ -843,7 +863,7 @@ func (tr *Repo) RemoveTargets(role string, targets ...string) error { } // UpdateSnapshot updates the FileMeta for the given role based on the Signed object -func (tr *Repo) UpdateSnapshot(role string, s *data.Signed) error { +func (tr *Repo) UpdateSnapshot(role data.RoleName, s *data.Signed) error { jsonData, err := json.Marshal(s) if err != nil { return err @@ -852,7 +872,7 @@ func (tr *Repo) UpdateSnapshot(role string, s *data.Signed) error { if err != nil { return err } - tr.Snapshot.Signed.Meta[role] = meta + 
tr.Snapshot.Signed.Meta[role.String()] = meta tr.Snapshot.Dirty = true return nil } @@ -867,28 +887,18 @@ func (tr *Repo) UpdateTimestamp(s *data.Signed) error { if err != nil { return err } - tr.Timestamp.Signed.Meta[data.CanonicalSnapshotRole] = meta + tr.Timestamp.Signed.Meta[data.CanonicalSnapshotRole.String()] = meta tr.Timestamp.Dirty = true return nil } -type versionedRootRole struct { - data.BaseRole - version int -} - -type versionedRootRoles []versionedRootRole - -func (v versionedRootRoles) Len() int { return len(v) } -func (v versionedRootRoles) Swap(i, j int) { v[i], v[j] = v[j], v[i] } -func (v versionedRootRoles) Less(i, j int) bool { return v[i].version < v[j].version } - // SignRoot signs the root, using all keys from the "root" role (i.e. currently trusted) // as well as available keys used to sign the previous version, if the public part is // carried in tr.Root.Keys and the private key is available (i.e. probably previously // trusted keys, to allow rollover). If there are any errors, attempt to put root // back to the way it was (so version won't be incremented, for instance). 
-func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) { +// Extra signing keys can be added to support older clients +func (tr *Repo) SignRoot(expires time.Time, extraSigningKeys data.KeyList) (*data.Signed, error) { logrus.Debug("signing root...") // duplicate root and attempt to modify it rather than the existing root @@ -906,40 +916,12 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) { return nil, err } - oldRootRoles := tr.getOldRootRoles() - - var latestSavedRole data.BaseRole - rolesToSignWith := make([]data.BaseRole, 0, len(oldRootRoles)) - - if len(oldRootRoles) > 0 { - sort.Sort(oldRootRoles) - for _, vRole := range oldRootRoles { - rolesToSignWith = append(rolesToSignWith, vRole.BaseRole) - } - latest := rolesToSignWith[len(rolesToSignWith)-1] - latestSavedRole = data.BaseRole{ - Name: data.CanonicalRootRole, - Threshold: latest.Threshold, - Keys: latest.Keys, - } - } + var rolesToSignWith []data.BaseRole - // If the root role (root keys or root threshold) has changed, save the - // previous role under the role name "root.", such that the "n" is the - // latest root.json version for which previous root role was valid. - // Also, guard against re-saving the previous role if the latest - // saved role is the same (which should not happen). 
- // n = root.json version of the originalRootRole (previous role) - // n+1 = root.json version of the currRoot (current role) - // n-m = root.json version of latestSavedRole (not necessarily n-1, because the - // last root rotation could have happened several root.json versions ago - if !tr.originalRootRole.Equals(currRoot) && !tr.originalRootRole.Equals(latestSavedRole) { + // If the root role (root keys or root threshold) has changed, sign with the + // previous root role keys + if !tr.originalRootRole.Equals(currRoot) { rolesToSignWith = append(rolesToSignWith, tr.originalRootRole) - latestSavedRole = tr.originalRootRole - - versionName := oldRootVersionName(tempRoot.Signed.Version) - tempRoot.Signed.Roles[versionName] = &data.RootRole{ - KeyIDs: latestSavedRole.ListKeyIDs(), Threshold: latestSavedRole.Threshold} } tempRoot.Signed.Expires = expires @@ -950,7 +932,8 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) { if err != nil { return nil, err } - signed, err = tr.sign(signed, rolesToSignWith, tr.getOptionalRootKeys(rolesToSignWith)) + + signed, err = tr.sign(signed, rolesToSignWith, extraSigningKeys) if err != nil { return nil, err } @@ -961,68 +944,12 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) { return signed, nil } -// get all the saved previous roles < the current root version -func (tr *Repo) getOldRootRoles() versionedRootRoles { - oldRootRoles := make(versionedRootRoles, 0, len(tr.Root.Signed.Roles)) - - // now go through the old roles - for roleName := range tr.Root.Signed.Roles { - // ensure that the rolename matches our format and that the version is - // not too high - if data.ValidRole(roleName) { - continue - } - nameTokens := strings.Split(roleName, ".") - if len(nameTokens) != 2 || nameTokens[0] != data.CanonicalRootRole { - continue - } - version, err := strconv.Atoi(nameTokens[1]) - if err != nil || version >= tr.Root.Signed.Version { - continue - } - - // ignore invalid roles, which shouldn't 
happen - oldRole, err := tr.Root.BuildBaseRole(roleName) - if err != nil { - continue - } - - oldRootRoles = append(oldRootRoles, versionedRootRole{BaseRole: oldRole, version: version}) - } - - return oldRootRoles -} - -// gets any extra optional root keys from the existing root.json signatures -// (because older repositories that have already done root rotation may not -// necessarily have older root roles) -func (tr *Repo) getOptionalRootKeys(signingRoles []data.BaseRole) []data.PublicKey { - oldKeysMap := make(map[string]data.PublicKey) - for _, oldSig := range tr.Root.Signatures { - if k, ok := tr.Root.Signed.Keys[oldSig.KeyID]; ok { - oldKeysMap[k.ID()] = k - } - } - for _, role := range signingRoles { - for keyID := range role.Keys { - delete(oldKeysMap, keyID) - } - } - - oldKeys := make([]data.PublicKey, 0, len(oldKeysMap)) - for _, key := range oldKeysMap { - oldKeys = append(oldKeys, key) - } - - return oldKeys -} - func oldRootVersionName(version int) string { return fmt.Sprintf("%s.%v", data.CanonicalRootRole, version) } // SignTargets signs the targets file for the given top level or delegated targets role -func (tr *Repo) SignTargets(role string, expires time.Time) (*data.Signed, error) { +func (tr *Repo) SignTargets(role data.RoleName, expires time.Time) (*data.Signed, error) { logrus.Debugf("sign targets called for role %s", role) if _, ok := tr.Targets[role]; !ok { return nil, data.ErrInvalidRole{ diff --git a/vendor/github.com/docker/notary/tuf/utils/pkcs8.go b/vendor/github.com/docker/notary/tuf/utils/pkcs8.go new file mode 100644 index 000000000000..cd18c6874212 --- /dev/null +++ b/vendor/github.com/docker/notary/tuf/utils/pkcs8.go @@ -0,0 +1,341 @@ +// Package utils contains tuf related utility functions however this file is hard +// forked from https://github.com/youmark/pkcs8 package. It has been further modified +// based on the requirements of Notary. 
For converting keys into PKCS#8 format, +// original package expected *crypto.PrivateKey interface, which then type inferred +// to either *rsa.PrivateKey or *ecdsa.PrivateKey depending on the need and later +// converted to ASN.1 DER encoded form, this whole process was superfluous here as +// keys are already being kept in ASN.1 DER format wrapped in data.PrivateKey +// structure. With these changes, package has became tightly coupled with notary as +// most of the method signatures have been updated. Moreover support for ED25519 +// keys has been added as well. License for original package is following: +// +// The MIT License (MIT) +// +// Copyright (c) 2014 youmark +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+package utils + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + + "golang.org/x/crypto/pbkdf2" + + "github.com/docker/notary/tuf/data" +) + +// Copy from crypto/x509 +var ( + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + // crypto/x509 doesn't have support for ED25519 + // http://www.oid-info.com/get/1.3.6.1.4.1.11591.15.1 + oidPublicKeyED25519 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11591, 15, 1} +) + +// Copy from crypto/x509 +var ( + oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} +) + +// Copy from crypto/x509 +func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { + switch curve { + case elliptic.P224(): + return oidNamedCurveP224, true + case elliptic.P256(): + return oidNamedCurveP256, true + case elliptic.P384(): + return oidNamedCurveP384, true + case elliptic.P521(): + return oidNamedCurveP521, true + } + + return nil, false +} + +// Unecrypted PKCS8 +var ( + oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} + oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} + oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} +) + +type ecPrivateKey struct { + Version int + PrivateKey []byte + NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` + PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +} + +type privateKeyInfo struct { + Version int + PrivateKeyAlgorithm []asn1.ObjectIdentifier + PrivateKey []byte +} + +// Encrypted PKCS8 +type pbkdf2Params struct { + 
Salt []byte + IterationCount int +} + +type pbkdf2Algorithms struct { + IDPBKDF2 asn1.ObjectIdentifier + PBKDF2Params pbkdf2Params +} + +type pbkdf2Encs struct { + EncryAlgo asn1.ObjectIdentifier + IV []byte +} + +type pbes2Params struct { + KeyDerivationFunc pbkdf2Algorithms + EncryptionScheme pbkdf2Encs +} + +type pbes2Algorithms struct { + IDPBES2 asn1.ObjectIdentifier + PBES2Params pbes2Params +} + +type encryptedPrivateKeyInfo struct { + EncryptionAlgorithm pbes2Algorithms + EncryptedData []byte +} + +// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. +// copied from https://github.com/golang/go/blob/964639cc338db650ccadeafb7424bc8ebb2c0f6c/src/crypto/x509/pkcs8.go#L17 +type pkcs8 struct { + Version int + Algo pkix.AlgorithmIdentifier + PrivateKey []byte +} + +func parsePKCS8ToTufKey(der []byte) (data.PrivateKey, error) { + var key pkcs8 + + if _, err := asn1.Unmarshal(der, &key); err != nil { + if _, ok := err.(asn1.StructuralError); ok { + return nil, errors.New("could not decrypt private key") + } + return nil, err + } + + if key.Algo.Algorithm.Equal(oidPublicKeyED25519) { + tufED25519PrivateKey, err := ED25519ToPrivateKey(key.PrivateKey) + if err != nil { + return nil, fmt.Errorf("could not convert ed25519.PrivateKey to data.PrivateKey: %v", err) + } + + return tufED25519PrivateKey, nil + } + + privKey, err := x509.ParsePKCS8PrivateKey(der) + if err != nil { + return nil, err + } + + switch priv := privKey.(type) { + case *rsa.PrivateKey: + tufRSAPrivateKey, err := RSAToPrivateKey(priv) + if err != nil { + return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err) + } + + return tufRSAPrivateKey, nil + case *ecdsa.PrivateKey: + tufECDSAPrivateKey, err := ECDSAToPrivateKey(priv) + if err != nil { + return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err) + } + + return tufECDSAPrivateKey, nil + } + + return nil, errors.New("unsupported key type") +} + +// ParsePKCS8ToTufKey requires PKCS#8 key in DER 
format and returns data.PrivateKey +// Password should be provided in case of Encrypted PKCS#8 key, else it should be nil. +func ParsePKCS8ToTufKey(der []byte, password []byte) (data.PrivateKey, error) { + if password == nil { + return parsePKCS8ToTufKey(der) + } + + var privKey encryptedPrivateKeyInfo + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + return nil, errors.New("pkcs8: only PKCS #5 v2.0 supported") + } + + if !privKey.EncryptionAlgorithm.IDPBES2.Equal(oidPBES2) { + return nil, errors.New("pkcs8: only PBES2 supported") + } + + if !privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.IDPBKDF2.Equal(oidPKCS5PBKDF2) { + return nil, errors.New("pkcs8: only PBKDF2 supported") + } + + encParam := privKey.EncryptionAlgorithm.PBES2Params.EncryptionScheme + kdfParam := privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.PBKDF2Params + + switch { + case encParam.EncryAlgo.Equal(oidAES256CBC): + iv := encParam.IV + salt := kdfParam.Salt + iter := kdfParam.IterationCount + + encryptedKey := privKey.EncryptedData + symkey := pbkdf2.Key(password, salt, iter, 32, sha1.New) + block, err := aes.NewCipher(symkey) + if err != nil { + return nil, err + } + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(encryptedKey, encryptedKey) + + // no need to explicitly remove padding, as ASN.1 unmarshalling will automatically discard it + key, err := parsePKCS8ToTufKey(encryptedKey) + if err != nil { + return nil, errors.New("pkcs8: incorrect password") + } + + return key, nil + default: + return nil, errors.New("pkcs8: only AES-256-CBC supported") + } + +} + +func convertTUFKeyToPKCS8(priv data.PrivateKey) ([]byte, error) { + var pkey privateKeyInfo + + switch priv.Algorithm() { + case data.RSAKey, data.RSAx509Key: + // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0). 
+ // But openssl set to v1 even publicKey is present + pkey.Version = 0 + pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1) + pkey.PrivateKeyAlgorithm[0] = oidPublicKeyRSA + pkey.PrivateKey = priv.Private() + case data.ECDSAKey, data.ECDSAx509Key: + // To extract Curve value, parsing ECDSA key to *ecdsa.PrivateKey + eckey, err := x509.ParseECPrivateKey(priv.Private()) + if err != nil { + return nil, err + } + + oidNamedCurve, ok := oidFromNamedCurve(eckey.Curve) + if !ok { + return nil, errors.New("pkcs8: unknown elliptic curve") + } + + // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0). + // But openssl set to v1 even publicKey is present + pkey.Version = 1 + pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 2) + pkey.PrivateKeyAlgorithm[0] = oidPublicKeyECDSA + pkey.PrivateKeyAlgorithm[1] = oidNamedCurve + pkey.PrivateKey = priv.Private() + case data.ED25519Key: + pkey.Version = 0 + pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1) + pkey.PrivateKeyAlgorithm[0] = oidPublicKeyED25519 + pkey.PrivateKey = priv.Private() + default: + return nil, fmt.Errorf("algorithm %s not supported", priv.Algorithm()) + } + + return asn1.Marshal(pkey) +} + +func convertTUFKeyToPKCS8Encrypted(priv data.PrivateKey, password []byte) ([]byte, error) { + // Convert private key into PKCS8 format + pkey, err := convertTUFKeyToPKCS8(priv) + if err != nil { + return nil, err + } + + // Calculate key from password based on PKCS5 algorithm + // Use 8 byte salt, 16 byte IV, and 2048 iteration + iter := 2048 + salt := make([]byte, 8) + iv := make([]byte, 16) + _, err = rand.Reader.Read(salt) + if err != nil { + return nil, err + } + + _, err = rand.Reader.Read(iv) + if err != nil { + return nil, err + } + + key := pbkdf2.Key(password, salt, iter, 32, sha1.New) + + // Use AES256-CBC mode, pad plaintext with PKCS5 padding scheme + padding := aes.BlockSize - len(pkey)%aes.BlockSize + if padding > 0 { + n := len(pkey) + 
pkey = append(pkey, make([]byte, padding)...) + for i := 0; i < padding; i++ { + pkey[n+i] = byte(padding) + } + } + + encryptedKey := make([]byte, len(pkey)) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(encryptedKey, pkey) + + pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter}} + pbkdf2encs := pbkdf2Encs{oidAES256CBC, iv} + pbes2algo := pbes2Algorithms{oidPBES2, pbes2Params{pbkdf2algo, pbkdf2encs}} + + encryptedPkey := encryptedPrivateKeyInfo{pbes2algo, encryptedKey} + return asn1.Marshal(encryptedPkey) +} + +// ConvertTUFKeyToPKCS8 converts a private key (data.Private) to PKCS#8 and returns in DER format +// if password is not nil, it would convert the Private Key to Encrypted PKCS#8. +func ConvertTUFKeyToPKCS8(priv data.PrivateKey, password []byte) ([]byte, error) { + if password == nil { + return convertTUFKeyToPKCS8(priv) + } + return convertTUFKeyToPKCS8Encrypted(priv, password) +} diff --git a/vendor/github.com/docker/notary/tuf/utils/utils.go b/vendor/github.com/docker/notary/tuf/utils/utils.go index 8de72b679720..2899a0340d11 100644 --- a/vendor/github.com/docker/notary/tuf/utils/utils.go +++ b/vendor/github.com/docker/notary/tuf/utils/utils.go @@ -3,36 +3,13 @@ package utils import ( "crypto/sha256" "crypto/sha512" - "crypto/tls" "encoding/hex" "fmt" "io" - "net/http" - "net/url" - "os" - "strings" "github.com/docker/notary/tuf/data" ) -// Download does a simple download from a URL -func Download(url url.URL) (*http.Response, error) { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - client := &http.Client{Transport: tr} - return client.Get(url.String()) -} - -// Upload does a simple JSON upload to a URL -func Upload(url string, body io.Reader) (*http.Response, error) { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - client := &http.Client{Transport: tr} - return 
client.Post(url, "application/json", body) -} - // StrSliceContains checks if the given string appears in the slice func StrSliceContains(ss []string, s string) bool { for _, v := range ss { @@ -43,23 +20,9 @@ func StrSliceContains(ss []string, s string) bool { return false } -// StrSliceRemove removes the the given string from the slice, returning a new slice -func StrSliceRemove(ss []string, s string) []string { - res := []string{} - for _, v := range ss { - if v != s { - res = append(res, v) - } - } - return res -} - -// StrSliceContainsI checks if the given string appears in the slice -// in a case insensitive manner -func StrSliceContainsI(ss []string, s string) bool { - s = strings.ToLower(s) +// RoleNameSliceContains checks if the given RoleName appears in the slice +func RoleNameSliceContains(ss []data.RoleName, s data.RoleName) bool { for _, v := range ss { - v = strings.ToLower(v) if v == s { return true } @@ -67,11 +30,15 @@ func StrSliceContainsI(ss []string, s string) bool { return false } -// FileExists returns true if a file (or dir) exists at the given path, -// false otherwise -func FileExists(path string) bool { - _, err := os.Stat(path) - return os.IsNotExist(err) +// RoleNameSliceRemove removes the given RoleName from the slice, returning a new slice +func RoleNameSliceRemove(ss []data.RoleName, s data.RoleName) []data.RoleName { + res := []data.RoleName{} + for _, v := range ss { + if v != s { + res = append(res, v) + } + } + return res } // NoopCloser is a simple Reader wrapper that does nothing when Close is @@ -131,7 +98,7 @@ func RemoveUnusedKeys(t *data.SignedTargets) { // FindRoleIndex returns the index of the role named or -1 if no // matching role is found. 
-func FindRoleIndex(rs []*data.Role, name string) int { +func FindRoleIndex(rs []*data.Role, name data.RoleName) int { for i, r := range rs { if r.Name == name { return i @@ -143,9 +110,9 @@ func FindRoleIndex(rs []*data.Role, name string) int { // ConsistentName generates the appropriate HTTP URL path for the role, // based on whether the repo is marked as consistent. The RemoteStore // is responsible for adding file extensions. -func ConsistentName(role string, hashSha256 []byte) string { - if len(hashSha256) > 0 { - hash := hex.EncodeToString(hashSha256) +func ConsistentName(role string, hashSHA256 []byte) string { + if len(hashSHA256) > 0 { + hash := hex.EncodeToString(hashSHA256) return fmt.Sprintf("%s.%s", role, hash) } return role diff --git a/vendor/github.com/docker/notary/tuf/utils/x509.go b/vendor/github.com/docker/notary/tuf/utils/x509.go index a95e7d2ceec2..2a6cf9777bac 100644 --- a/vendor/github.com/docker/notary/tuf/utils/x509.go +++ b/vendor/github.com/docker/notary/tuf/utils/x509.go @@ -16,16 +16,19 @@ import ( "math/big" "time" - "github.com/sirupsen/logrus" "github.com/agl/ed25519" "github.com/docker/notary" "github.com/docker/notary/tuf/data" + "github.com/sirupsen/logrus" ) // CanonicalKeyID returns the ID of the public bytes version of a TUF key. // On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA // TUF keys, this is the key ID of the public key part of the key in the leaf cert func CanonicalKeyID(k data.PublicKey) (string, error) { + if k == nil { + return "", errors.New("public key is nil") + } switch k.Algorithm() { case data.ECDSAx509Key, data.RSAx509Key: return X509PublicKeyID(k) @@ -82,12 +85,9 @@ func X509PublicKeyID(certPubKey data.PublicKey) (string, error) { return key.ID(), nil } -// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It -// only supports RSA (PKCS#1) and attempts to decrypt using the passphrase, if encrypted. 
-func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("no valid private key found") +func parseLegacyPrivateKey(block *pem.Block, passphrase string) (data.PrivateKey, error) { + if notary.FIPSEnabled() { + return nil, fmt.Errorf("%s not supported in FIPS mode", block.Type) } var privKeyBytes []byte @@ -142,6 +142,28 @@ func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, er } } +// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It +// supports PKCS#8 as well as RSA/ECDSA (PKCS#1) only in non-FIPS mode and +// attempts to decrypt using the passphrase, if encrypted. +func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("no valid private key found") + } + + switch block.Type { + case "RSA PRIVATE KEY", "EC PRIVATE KEY", "ED25519 PRIVATE KEY": + return parseLegacyPrivateKey(block, passphrase) + case "ENCRYPTED PRIVATE KEY", "PRIVATE KEY": + if passphrase == "" { + return ParsePKCS8ToTufKey(block.Bytes, nil) + } + return ParsePKCS8ToTufKey(block.Bytes, []byte(passphrase)) + default: + return nil, fmt.Errorf("unsupported key type %q", block.Type) + } +} + // CertToPEM is a utility function returns a PEM encoded x509 Certificate func CertToPEM(cert *x509.Certificate) []byte { pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) @@ -252,11 +274,31 @@ func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) { return nil, fmt.Errorf("invalid certificate: %v", err) } return CertToKey(cert), nil + case "PUBLIC KEY": + keyType, err := keyTypeForPublicKey(pemBlock.Bytes) + if err != nil { + return nil, err + } + return data.NewPublicKey(keyType, pemBlock.Bytes), nil default: - return nil, fmt.Errorf("unsupported PEM block type %q, expected certificate", 
pemBlock.Type) + return nil, fmt.Errorf("unsupported PEM block type %q, expected CERTIFICATE or PUBLIC KEY", pemBlock.Type) } } +func keyTypeForPublicKey(pubKeyBytes []byte) (string, error) { + pub, err := x509.ParsePKIXPublicKey(pubKeyBytes) + if err != nil { + return "", fmt.Errorf("unable to parse pem encoded public key: %v", err) + } + switch pub.(type) { + case *ecdsa.PublicKey: + return data.ECDSAKey, nil + case *rsa.PublicKey: + return data.RSAKey, nil + } + return "", fmt.Errorf("unknown public key format") +} + // ValidateCertificate returns an error if the certificate is not valid for notary // Currently this is only ensuring the public key has a large enough modulus if RSA, // using a non SHA1 signature algorithm, and an optional time expiry check @@ -293,21 +335,16 @@ func ValidateCertificate(c *x509.Certificate, checkExpiry bool) error { return nil } -// GenerateRSAKey generates an RSA private key and returns a TUF PrivateKey -func GenerateRSAKey(random io.Reader, bits int) (data.PrivateKey, error) { - rsaPrivKey, err := rsa.GenerateKey(random, bits) - if err != nil { - return nil, fmt.Errorf("could not generate private key: %v", err) - } - - tufPrivKey, err := RSAToPrivateKey(rsaPrivKey) - if err != nil { - return nil, err +// GenerateKey returns a new private key using the provided algorithm or an +// error detailing why the key could not be generated +func GenerateKey(algorithm string) (data.PrivateKey, error) { + switch algorithm { + case data.ECDSAKey: + return GenerateECDSAKey(rand.Reader) + case data.ED25519Key: + return GenerateED25519Key(rand.Reader) } - - logrus.Debugf("generated RSA key with keyID: %s", tufPrivKey.ID()) - - return tufPrivKey, nil + return nil, fmt.Errorf("private key type not supported for key generation: %s", algorithm) } // RSAToPrivateKey converts an rsa.Private key to a TUF data.PrivateKey type @@ -394,85 +431,54 @@ func ED25519ToPrivateKey(privKeyBytes []byte) (data.PrivateKey, error) { return 
data.NewED25519PrivateKey(*pubKey, privKeyBytes) } -func blockType(k data.PrivateKey) (string, error) { - switch k.Algorithm() { - case data.RSAKey, data.RSAx509Key: - return "RSA PRIVATE KEY", nil - case data.ECDSAKey, data.ECDSAx509Key: - return "EC PRIVATE KEY", nil - case data.ED25519Key: - return "ED25519 PRIVATE KEY", nil - default: - return "", fmt.Errorf("algorithm %s not supported", k.Algorithm()) - } -} - -// KeyToPEM returns a PEM encoded key from a Private Key -func KeyToPEM(privKey data.PrivateKey, role string) ([]byte, error) { - bt, err := blockType(privKey) - if err != nil { - return nil, err +// ExtractPrivateKeyAttributes extracts role and gun values from private key bytes +func ExtractPrivateKeyAttributes(pemBytes []byte) (data.RoleName, data.GUN, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return "", "", errors.New("PEM block is empty") } - headers := map[string]string{} - if role != "" { - headers = map[string]string{ - "role": role, + switch block.Type { + case "RSA PRIVATE KEY", "EC PRIVATE KEY", "ED25519 PRIVATE KEY": + if notary.FIPSEnabled() { + return "", "", fmt.Errorf("%s not supported in FIPS mode", block.Type) } + case "PRIVATE KEY", "ENCRYPTED PRIVATE KEY": + // do nothing for PKCS#8 keys + default: + return "", "", errors.New("unknown key format") } - - block := &pem.Block{ - Type: bt, - Headers: headers, - Bytes: privKey.Private(), - } - - return pem.EncodeToMemory(block), nil + return data.RoleName(block.Headers["role"]), data.GUN(block.Headers["gun"]), nil } -// EncryptPrivateKey returns an encrypted PEM key given a Privatekey -// and a passphrase -func EncryptPrivateKey(key data.PrivateKey, role, gun, passphrase string) ([]byte, error) { - bt, err := blockType(key) - if err != nil { - return nil, err - } - - password := []byte(passphrase) - cipherType := x509.PEMCipherAES256 +// ConvertPrivateKeyToPKCS8 converts a data.PrivateKey to PKCS#8 Format +func ConvertPrivateKeyToPKCS8(key data.PrivateKey, role 
data.RoleName, gun data.GUN, passphrase string) ([]byte, error) { + var ( + err error + der []byte + blockType = "PRIVATE KEY" + ) - encryptedPEMBlock, err := x509.EncryptPEMBlock(rand.Reader, - bt, - key.Private(), - password, - cipherType) + if passphrase == "" { + der, err = ConvertTUFKeyToPKCS8(key, nil) + } else { + blockType = "ENCRYPTED PRIVATE KEY" + der, err = ConvertTUFKeyToPKCS8(key, []byte(passphrase)) + } if err != nil { - return nil, err + return nil, fmt.Errorf("unable to convert to PKCS8 key") } - if encryptedPEMBlock.Headers == nil { - return nil, fmt.Errorf("unable to encrypt key - invalid PEM file produced") + headers := make(map[string]string) + if role != "" { + headers["role"] = role.String() } - encryptedPEMBlock.Headers["role"] = role if gun != "" { - encryptedPEMBlock.Headers["gun"] = gun + headers["gun"] = gun.String() } - return pem.EncodeToMemory(encryptedPEMBlock), nil -} - -// ReadRoleFromPEM returns the value from the role PEM header, if it exists -func ReadRoleFromPEM(pemBytes []byte) string { - pemBlock, _ := pem.Decode(pemBytes) - if pemBlock == nil || pemBlock.Headers == nil { - return "" - } - role, ok := pemBlock.Headers["role"] - if !ok { - return "" - } - return role + return pem.EncodeToMemory(&pem.Block{Bytes: der, Type: blockType, Headers: headers}), nil } // CertToKey transforms a single input certificate into its corresponding @@ -527,8 +533,8 @@ func CertBundleToKey(leafCert *x509.Certificate, intCerts []*x509.Certificate) ( return newKey, nil } -// NewCertificate returns an X509 Certificate following a template, given a GUN and validity interval. -func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate, error) { +// NewCertificate returns an X509 Certificate following a template, given a Common Name and validity interval. 
+func NewCertificate(commonName string, startTime, endTime time.Time) (*x509.Certificate, error) { serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) @@ -539,7 +545,7 @@ func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate return &x509.Certificate{ SerialNumber: serialNumber, Subject: pkix.Name{ - CommonName: gun, + CommonName: commonName, }, NotBefore: startTime, NotAfter: endTime, diff --git a/vendor/github.com/docker/notary/vendor.conf b/vendor/github.com/docker/notary/vendor.conf new file mode 100644 index 000000000000..d10edba040c2 --- /dev/null +++ b/vendor/github.com/docker/notary/vendor.conf @@ -0,0 +1,59 @@ +github.com/Shopify/logrus-bugsnag 6dbc35f2c30d1e37549f9673dd07912452ab28a5 +github.com/sirupsen/logrus f006c2ac4710855cf0f916dd6b77acf6b048dc6e # v1.0.3 +github.com/agl/ed25519 278e1ec8e8a6e017cd07577924d6766039146ced +github.com/bugsnag/bugsnag-go 13fd6b8acda029830ef9904df6b63be0a83369d0 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb +github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d +github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 +github.com/dvsekhvalnov/jose2go 6387d3c1f5abd8443b223577d5a7e0f4e0e5731f # v1.2 +github.com/go-sql-driver/mysql a0583e0143b1624142adab07e0e97fe106d99561 # v1.3 +github.com/gorilla/mux e444e69cbd2e2e3e0749a2f3c717cec491552bbf +github.com/jinzhu/gorm 5409931a1bb87e484d68d649af9367c207713ea2 +github.com/jinzhu/inflection 1c35d901db3da928c72a72d8458480cc9ade058f +github.com/lib/pq 0dad96c0b94f8dee039aa40467f767467392a0af +github.com/mattn/go-sqlite3 b4142c444a8941d0d92b0b7103a24df9cd815e42 # v1.0.0 +github.com/miekg/pkcs11 
ba39b9c6300b7e0be41b115330145ef8afdff7d6 +github.com/mitchellh/go-homedir df55a15e5ce646808815381b3db47a8c66ea62f4 +github.com/prometheus/client_golang 449ccefff16c8e2b7229f6be1921ba22f62461fe +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 # model-0.0.2-12-gfa8ad6f +github.com/prometheus/procfs b1afdc266f54247f5dc725544f5d351a8661f502 +github.com/prometheus/common 4fdc91a58c9d3696b982e8a680f4997403132d44 +github.com/golang/protobuf c3cefd437628a0b7d31b34fe44b3a7a540e98527 +github.com/spf13/cobra f368244301305f414206f889b1735a54cfc8bde8 +github.com/spf13/viper be5ff3e4840cf692388bde7a057595a474ef379e +golang.org/x/crypto 5bcd134fee4dd1475da17714aac19c0aa0142e2f +golang.org/x/net 6a513affb38dc9788b449d59ffed099b8de18fa0 +golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f +google.golang.org/grpc 708a7f9f3283aa2d4f6132d287d78683babe55c8 # v1.0.5 +github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 + +github.com/spf13/pflag cb88ea77998c3f024757528e3305022ab50b43be +github.com/spf13/cast 4d07383ffe94b5e5a6fa3af9211374a4507a0184 +gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 +gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 +github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a +github.com/spf13/jwalterweatherman 3d60171a64319ef63c78bd45bd60e6eab1e75f8b +github.com/mitchellh/mapstructure 2caf8efc93669b6c43e0441cdc6aed17546c96f3 +github.com/magiconair/properties 624009598839a9432bd97bb75552389422357723 # v1.5.3 +github.com/kr/text 6807e777504f54ad073ecef66747de158294b639 +github.com/kr/pretty bc9499caa0f45ee5edb2f0209fbd61fbf3d9018f # go.weekly.2011-12-22-18-gbc9499c +github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 +github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20 +github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d +github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 + 
+github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 + +gopkg.in/dancannon/gorethink.v3 e324d6ad938205da6c1e8a0179dc97a5b1a92185 https://github.com/docker/gorethink # v3.0.0-logrus +# dependencies of gorethink.v3 +gopkg.in/gorethink/gorethink.v2 ac5be4ae8538d44ae8843b97fc9f90860cb48a85 https://github.com/docker/gorethink # v2.2.2-logrus +github.com/cenk/backoff 32cd0c5b3aef12c76ed64aaf678f6c79736be7dc # v1.0.0 + +# Testing requirements +github.com/stretchr/testify 089c7181b8c728499929ff09b62d3fdd8df8adff +github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a +github.com/google/certificate-transparency 0f6e3d1d1ba4d03fdaab7cd716f36255c2e48341 diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 000000000000..593f6530084f --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. 
+*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +}