Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion

60
vendor/github.com/containers/image/signature/docker.go generated vendored Normal file
View file

@ -0,0 +1,60 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.
package signature
import (
"fmt"
"github.com/containers/image/manifest"
)
// SignDockerManifest returns a signature for manifest as the specified dockerReference,
// using mech and keyIdentity.
func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
manifestDigest, err := manifest.Digest(m)
if err != nil {
return nil, err
}
sig := privateSignature{
Signature{
DockerManifestDigest: manifestDigest,
DockerReference: dockerReference,
},
}
return sig.sign(mech, keyIdentity)
}
// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
// using mech.
func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
if keyIdentity != expectedKeyIdentity {
return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)}
}
return nil
},
validateSignedDockerReference: func(signedDockerReference string) error {
if signedDockerReference != expectedDockerReference {
return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
signedDockerReference, expectedDockerReference)}
}
return nil
},
validateSignedDockerManifestDigest: func(signedDockerManifestDigest string) error {
matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
if err != nil {
return err
}
if !matches {
return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)}
}
return nil
},
})
if err != nil {
return nil, err
}
return sig, nil
}
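Not part of the vendored file, but for orientation: the two entry points above are meant to be used roughly as follows. This is a hedged sketch; the manifest path and image reference are placeholders, the fingerprint is borrowed from this package's test fixtures, and error handling is abbreviated.
// Sketch only: sign a manifest, then verify the resulting signature blob.
package main
import (
	"fmt"
	"io/ioutil"
	"log"
	"github.com/containers/image/signature"
)
func main() {
	// GPG mechanism backed by the default GPG home directory; see mechanism.go in this package.
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		log.Fatal(err)
	}
	manifest, err := ioutil.ReadFile("manifest.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	const ref = "example.com/ns/repo:latest"                       // placeholder reference
	const fingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8" // the fixtures' test key fingerprint
	sig, err := signature.SignDockerManifest(manifest, ref, mech, fingerprint)
	if err != nil {
		log.Fatal(err)
	}
	// Verification fails unless the signature matches the manifest digest, the reference and the key.
	verified, err := signature.VerifyDockerManifestSignature(sig, manifest, ref, mech, fingerprint)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(verified.DockerReference, verified.DockerManifestDigest)
}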

View file

@ -0,0 +1,84 @@
package signature
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSignDockerManifest(t *testing.T) {
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
require.NoError(t, err)
manifest, err := ioutil.ReadFile("fixtures/image.manifest.json")
require.NoError(t, err)
// Successful signing
signature, err := SignDockerManifest(manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
require.NoError(t, err)
verified, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
assert.NoError(t, err)
assert.Equal(t, TestImageSignatureReference, verified.DockerReference)
assert.Equal(t, TestImageManifestDigest, verified.DockerManifestDigest)
// Error computing Docker manifest
invalidManifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
require.NoError(t, err)
_, err = SignDockerManifest(invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint)
assert.Error(t, err)
// Error creating blob to sign
_, err = SignDockerManifest(manifest, "", mech, TestKeyFingerprint)
assert.Error(t, err)
// Error signing
_, err = SignDockerManifest(manifest, TestImageSignatureReference, mech, "this fingerprint doesn't exist")
assert.Error(t, err)
}
func TestVerifyDockerManifestSignature(t *testing.T) {
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
require.NoError(t, err)
manifest, err := ioutil.ReadFile("fixtures/image.manifest.json")
require.NoError(t, err)
signature, err := ioutil.ReadFile("fixtures/image.signature")
require.NoError(t, err)
// Successful verification
sig, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
require.NoError(t, err)
assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)
// For extra paranoia, test that we return nil data on error.
// Error computing Docker manifest
invalidManifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json")
require.NoError(t, err)
sig, err = VerifyDockerManifestSignature(signature, invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint)
assert.Error(t, err)
assert.Nil(t, sig)
// Error verifying signature
corruptSignature, err := ioutil.ReadFile("fixtures/corrupt.signature")
sig, err = VerifyDockerManifestSignature(corruptSignature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint)
assert.Error(t, err)
assert.Nil(t, sig)
// Key fingerprint mismatch
sig, err = VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, "unexpected fingerprint")
assert.Error(t, err)
assert.Nil(t, sig)
// Docker reference mismatch
sig, err = VerifyDockerManifestSignature(signature, manifest, "example.com/doesnt/match", mech, TestKeyFingerprint)
assert.Error(t, err)
assert.Nil(t, sig)
// Docker manifest digest mismatch
sig, err = VerifyDockerManifestSignature(signature, []byte("unexpected manifest"), TestImageSignatureReference, mech, TestKeyFingerprint)
assert.Error(t, err)
assert.Nil(t, sig)
}

View file

@ -0,0 +1,4 @@
/*.gpg~
/.gpg-v21-migrated
/private-keys-v1.d
/random_seed

Binary file not shown.

View file

@ -0,0 +1 @@
../v2s1-invalid-signatures.manifest.json

View file

@ -0,0 +1 @@
../dir-img-valid/signature-1

View file

@ -0,0 +1 @@
../dir-img-valid/manifest.json

View file

@ -0,0 +1 @@
../invalid-blob.signature

View file

@ -0,0 +1 @@
../dir-img-valid/signature-1

View file

@ -0,0 +1,27 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 32654,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 16724,
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 73109,
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
}
],
"extra": "this manifest has been modified"
}

View file

@ -0,0 +1 @@
../dir-img-valid/signature-1

View file

@ -0,0 +1 @@
../dir-img-valid/signature-1

View file

@ -0,0 +1 @@
../dir-img-valid/manifest.json

View file

@ -0,0 +1 @@
../dir-img-valid/manifest.json

View file

@ -0,0 +1 @@
../dir-img-valid/signature-1

Binary file not shown.

View file

@ -0,0 +1 @@
../image.manifest.json

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,26 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 32654,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 16724,
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 73109,
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
}
]
}

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,93 @@
{
"default": [
{
"type": "reject"
}
],
"transports": {
"dir": {
"": [
{
"type": "insecureAcceptAnything"
}
]
},
"docker": {
"example.com/playground": [
{
"type": "insecureAcceptAnything"
}
],
"example.com/production": [
{
"type": "signedBy",
"keyType": "GPGKeys",
"keyPath": "/keys/employee-gpg-keyring"
}
],
"example.com/hardened": [
{
"type": "signedBy",
"keyType": "GPGKeys",
"keyPath": "/keys/employee-gpg-keyring",
"signedIdentity": {
"type": "matchRepository"
}
},
{
"type": "signedBy",
"keyType": "signedByGPGKeys",
"keyPath": "/keys/public-key-signing-gpg-keyring",
"signedIdentity": {
"type": "matchExact"
}
},
{
"type": "signedBaseLayer",
"baseLayerIdentity": {
"type": "exactRepository",
"dockerRepository": "registry.access.redhat.com/rhel7/rhel"
}
}
],
"example.com/hardened-x509": [
{
"type": "signedBy",
"keyType": "X509Certificates",
"keyPath": "/keys/employee-cert-file",
"signedIdentity": {
"type": "matchRepository"
}
},
{
"type": "signedBy",
"keyType": "signedByX509CAs",
"keyPath": "/keys/public-key-signing-ca-file"
}
],
"registry.access.redhat.com": [
{
"type": "signedBy",
"keyType": "signedByGPGKeys",
"keyPath": "/keys/RH-key-signing-key-gpg-keyring"
}
],
"bogus/key-data-example": [
{
"type": "signedBy",
"keyType": "signedByGPGKeys",
"keyData": "bm9uc2Vuc2U="
}
],
"bogus/signed-identity-example": [
{
"type": "signedBaseLayer",
"baseLayerIdentity": {
"type": "exactReference",
"dockerReference": "registry.access.redhat.com/rhel7/rhel:latest"
}
}
]
}
}
}

View file

@ -0,0 +1,19 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mI0EVurzqQEEAL3qkFq4K2URtSWVDYnQUNA9HdM9sqS2eAWfqUFMrkD5f+oN+LBL
tPyaE5GNLA0vXY7nHAM2TeM8ijZ/eMP17Raj64JL8GhCymL3wn2jNvb9XaF0R0s6
H0IaRPPu45A3SnxLwm4Orc/9Z7/UxtYjKSg9xOaTiVPzJgaf5Vm4J4ApABEBAAG0
EnNrb3BlbyB0ZXN0aW5nIGtleYi4BBMBAgAiBQJW6vOpAhsDBgsJCAcDAgYVCAIJ
CgsEFgIDAQIeAQIXgAAKCRDbcvIYi7RsyBbOBACgJFiKDlQ1UyvsNmGqJ7D0OpbS
1OppJlradKgZXyfahFswhFI+7ZREvELLHbinq3dBy5cLXRWzQKdJZNHknSN5Tjf2
0ipVBQuqpcBo+dnKiG4zH6fhTri7yeTZksIDfsqlI6FXDOdKLUSnahagEBn4yU+x
jHPvZk5SuuZv56A45biNBFbq86kBBADIC/9CsAlOmRALuYUmkhcqEjuFwn3wKz2d
IBjzgvro7zcVNNCgxQfMEjcUsvEh5cx13G3QQHcwOKy3M6Bv6VMhfZjd+1P1el4P
0fJS8GFmhWRBknMN8jFsgyohQeouQ798RFFv94KszfStNnr/ae8oao5URmoUXSCa
/MdUxn0YKwARAQABiJ8EGAECAAkFAlbq86kCGwwACgkQ23LyGIu0bMjUywQAq0dn
lUpDNSoLTcpNWuVvHQ7c/qmnE4TyiSLiRiAywdEWA6gMiyhUUucuGsEhMFP1WX1k
UNwArZ6UG7BDOUsvngP7jKGNqyUOQrq1s/r8D+0MrJGOWErGLlfttO2WeoijECkI
5qm8cXzAra3Xf/Z3VjxYTKSnNu37LtZkakdTdYE=
=tJAt
-----END PGP PUBLIC KEY BLOCK-----

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,11 @@
{
"schemaVersion": 1,
"name": "mitr/buxybox",
"tag": "latest",
"architecture": "amd64",
"fsLayers": [
],
"history": [
],
"signatures": 1
}

View file

@ -0,0 +1,10 @@
package signature
const (
// TestImageManifestDigest is the Docker manifest digest of "image.manifest.json"
TestImageManifestDigest = "sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"
// TestImageSignatureReference is the Docker image reference signed in "image.signature"
TestImageSignatureReference = "testing/manifest"
// TestKeyFingerprint is the fingerprint of the private key in this directory.
TestKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
)

107
vendor/github.com/containers/image/signature/json.go generated vendored Normal file
View file

@ -0,0 +1,107 @@
package signature
import (
"bytes"
"encoding/json"
"fmt"
"io"
)
// jsonFormatError is returned when JSON does not match expected format.
type jsonFormatError string
func (err jsonFormatError) Error() string {
return string(err)
}
// validateExactMapKeys returns an error if the keys of m are not exactly expectedKeys, which must be pairwise distinct
func validateExactMapKeys(m map[string]interface{}, expectedKeys ...string) error {
if len(m) != len(expectedKeys) {
return jsonFormatError("Unexpected keys in a JSON object")
}
for _, k := range expectedKeys {
if _, ok := m[k]; !ok {
return jsonFormatError(fmt.Sprintf("Key %s missing in a JSON object", k))
}
}
// Assuming expectedKeys are pairwise distinct, we know m contains len(expectedKeys) different values in expectedKeys.
return nil
}
// mapField returns a member fieldName of m, if it is a JSON map, or an error.
func mapField(m map[string]interface{}, fieldName string) (map[string]interface{}, error) {
untyped, ok := m[fieldName]
if !ok {
return nil, jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
}
v, ok := untyped.(map[string]interface{})
if !ok {
return nil, jsonFormatError(fmt.Sprintf("Field %s is not a JSON object", fieldName))
}
return v, nil
}
// stringField returns a member fieldName of m, if it is a string, or an error.
func stringField(m map[string]interface{}, fieldName string) (string, error) {
untyped, ok := m[fieldName]
if !ok {
return "", jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
}
v, ok := untyped.(string)
if !ok {
return "", jsonFormatError(fmt.Sprintf("Field %s is not a JSON object", fieldName))
}
return v, nil
}
// paranoidUnmarshalJSONObject unmarshals data as a JSON object, failing on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
//
// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
// we could use reflection to automate this. Later?
func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
seenKeys := map[string]struct{}{}
dec := json.NewDecoder(bytes.NewReader(data))
t, err := dec.Token()
if err != nil {
return jsonFormatError(err.Error())
}
if t != json.Delim('{') {
return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
}
for {
t, err := dec.Token()
if err != nil {
return jsonFormatError(err.Error())
}
if t == json.Delim('}') {
break
}
key, ok := t.(string)
if !ok {
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
}
if _, ok := seenKeys[key]; ok {
return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
}
seenKeys[key] = struct{}{}
valuePtr := fieldResolver(key)
if valuePtr == nil {
return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
}
// This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value.
if err := dec.Decode(valuePtr); err != nil {
return jsonFormatError(err.Error())
}
}
if _, err := dec.Token(); err != io.EOF {
return jsonFormatError("Unexpected data after JSON object")
}
return nil
}
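For orientation (not part of the vendored file): within this package, the fieldResolver callback is used along these lines. The struct and JSON keys below are purely illustrative; the package's own tests exercise the same pattern.
// Illustrative sketch: strictly decode {"name": ..., "count": ...} inside the signature package.
type exampleRecord struct {
	Name  string
	Count int
}
func parseExampleRecord(data []byte) (exampleRecord, error) {
	var rec exampleRecord
	err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "name":
			return &rec.Name
		case "count":
			return &rec.Count
		default:
			return nil // returning nil rejects unknown keys; duplicates are rejected by the parser itself
		}
	})
	return rec, err
}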

View file

@ -0,0 +1,149 @@
package signature
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type mSI map[string]interface{} // To minimize typing the long name
// A short-hand way to get a JSON object field value or panic. No error handling done, we know
// what we are working with, a panic in a test is good enough, and fitting test cases on a single line
// is a priority.
func x(m mSI, fields ...string) mSI {
for _, field := range fields {
// Not .(mSI) because type assertion of an unnamed type to a named type always fails (the types
// are not "identical"), but the assignment is fine because they are "assignable".
m = m[field].(map[string]interface{})
}
return m
}
func TestValidateExactMapKeys(t *testing.T) {
// Empty map and keys
err := validateExactMapKeys(mSI{})
assert.NoError(t, err)
// Success
err = validateExactMapKeys(mSI{"a": nil, "b": 1}, "b", "a")
assert.NoError(t, err)
// Extra map keys
err = validateExactMapKeys(mSI{"a": nil, "b": 1}, "a")
assert.Error(t, err)
// Extra expected keys
err = validateExactMapKeys(mSI{"a": 1}, "b", "a")
assert.Error(t, err)
// Unexpected key values
err = validateExactMapKeys(mSI{"a": 1}, "b")
assert.Error(t, err)
}
func TestMapField(t *testing.T) {
// Field not found
_, err := mapField(mSI{"a": mSI{}}, "b")
assert.Error(t, err)
// Field has a wrong type
_, err = mapField(mSI{"a": 1}, "a")
assert.Error(t, err)
// Success
// FIXME? We can't use mSI as the type of child, that type apparently can't be converted to the raw map type.
child := map[string]interface{}{"b": mSI{}}
m, err := mapField(mSI{"a": child, "b": nil}, "a")
require.NoError(t, err)
assert.Equal(t, child, m)
}
func TestStringField(t *testing.T) {
// Field not found
_, err := stringField(mSI{"a": "x"}, "b")
assert.Error(t, err)
// Field has a wrong type
_, err = stringField(mSI{"a": 1}, "a")
assert.Error(t, err)
// Success
s, err := stringField(mSI{"a": "x", "b": nil}, "a")
require.NoError(t, err)
assert.Equal(t, "x", s)
}
// implementsUnmarshalJSON is a minimalistic type used to detect that
// paranoidUnmarshalJSONObject uses the json.Unmarshaler interface of resolved
// pointers.
type implementsUnmarshalJSON bool
// Compile-time check that implementsUnmarshalJSON implements json.Unmarshaler.
var _ json.Unmarshaler = (*implementsUnmarshalJSON)(nil)
func (dest *implementsUnmarshalJSON) UnmarshalJSON(data []byte) error {
_ = data // We don't care, not really.
*dest = true // Mark handler as called
return nil
}
func TestParanoidUnmarshalJSONObject(t *testing.T) {
type testStruct struct {
A string
B int
}
ts := testStruct{}
var unmarshalJSONCalled implementsUnmarshalJSON
tsResolver := func(key string) interface{} {
switch key {
case "a":
return &ts.A
case "b":
return &ts.B
case "implementsUnmarshalJSON":
return &unmarshalJSONCalled
default:
return nil
}
}
// Empty object
ts = testStruct{}
err := paranoidUnmarshalJSONObject([]byte(`{}`), tsResolver)
require.NoError(t, err)
assert.Equal(t, testStruct{}, ts)
// Success
ts = testStruct{}
err = paranoidUnmarshalJSONObject([]byte(`{"a":"x", "b":2}`), tsResolver)
require.NoError(t, err)
assert.Equal(t, testStruct{A: "x", B: 2}, ts)
// json.Unmarshaler is used for decoding values
ts = testStruct{}
unmarshalJSONCalled = implementsUnmarshalJSON(false)
err = paranoidUnmarshalJSONObject([]byte(`{"implementsUnmarshalJSON":true}`), tsResolver)
require.NoError(t, err)
assert.Equal(t, unmarshalJSONCalled, implementsUnmarshalJSON(true))
// Various kinds of invalid input
for _, input := range []string{
``, // Empty input
`&`, // Entirely invalid JSON
`1`, // Not an object
`{&}`, // Invalid key JSON
`{1:1}`, // Key not a string
`{"b":1, "b":1}`, // Duplicate key
`{"thisdoesnotexist":1}`, // Key rejected by resolver
`{"a":&}`, // Invalid value JSON
`{"a":1}`, // Type mismatch
`{"a":"value"}{}`, // Extra data after object
} {
ts = testStruct{}
err := paranoidUnmarshalJSONObject([]byte(input), tsResolver)
assert.Error(t, err, input)
}
}

View file

@ -0,0 +1,121 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.
package signature
import (
"bytes"
"fmt"
"github.com/mtrmac/gpgme"
)
// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
// eliminate ambiguities, support CA signatures and perhaps other key properties)
type SigningMechanism interface {
// ImportKeysFromBytes imports public keys from the supplied blob and returns their identities.
// The blob is assumed to have an appropriate format (the caller is expected to know which one).
// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism).
ImportKeysFromBytes(blob []byte) ([]string, error)
// Sign creates a (non-detached) signature of input using keyIdentity
Sign(input []byte, keyIdentity string) ([]byte, error)
// Verify parses unverifiedSignature and returns the content and the signer's identity
Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
}
// A GPG/OpenPGP signing mechanism.
type gpgSigningMechanism struct {
ctx *gpgme.Context
}
// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism.
func NewGPGSigningMechanism() (SigningMechanism, error) {
return newGPGSigningMechanismInDirectory("")
}
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
ctx, err := gpgme.New()
if err != nil {
return nil, err
}
if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
return nil, err
}
if optionalDir != "" {
err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
if err != nil {
return nil, err
}
}
ctx.SetArmor(false)
ctx.SetTextMode(false)
return gpgSigningMechanism{ctx: ctx}, nil
}
// ImportKeysFromBytes implements SigningMechanism.ImportKeysFromBytes
func (m gpgSigningMechanism) ImportKeysFromBytes(blob []byte) ([]string, error) {
inputData, err := gpgme.NewDataBytes(blob)
if err != nil {
return nil, err
}
res, err := m.ctx.Import(inputData)
if err != nil {
return nil, err
}
keyIdentities := []string{}
for _, i := range res.Imports {
if i.Result == nil {
keyIdentities = append(keyIdentities, i.Fingerprint)
}
}
return keyIdentities, nil
}
// Sign implements SigningMechanism.Sign
func (m gpgSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
key, err := m.ctx.GetKey(keyIdentity, true)
if err != nil {
return nil, err
}
inputData, err := gpgme.NewDataBytes(input)
if err != nil {
return nil, err
}
var sigBuffer bytes.Buffer
sigData, err := gpgme.NewDataWriter(&sigBuffer)
if err != nil {
return nil, err
}
if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
return nil, err
}
return sigBuffer.Bytes(), nil
}
// Verify implements SigningMechanism.Verify
func (m gpgSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
signedBuffer := bytes.Buffer{}
signedData, err := gpgme.NewDataWriter(&signedBuffer)
if err != nil {
return nil, "", err
}
unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
if err != nil {
return nil, "", err
}
_, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
if err != nil {
return nil, "", err
}
if len(sigs) != 1 {
return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
}
sig := sigs[0]
// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
// FIXME: Better error reporting eventually
return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
}
return signedBuffer.Bytes(), sig.Fingerprint, nil
}
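Again not part of the vendored file: a minimal sketch of driving the SigningMechanism interface directly, assuming it lives in a file that imports "fmt" and "github.com/containers/image/signature". The key and signature blobs are placeholders supplied by the caller.
// Sketch: import a public key, then verify a (non-detached) signature blob with it.
func verifyWithImportedKey(keyBlob, sigBlob []byte) error {
	mech, err := signature.NewGPGSigningMechanism() // default GPG home directory
	if err != nil {
		return err
	}
	// NOTE: as documented above, importing keys may modify long-term GPG state.
	identities, err := mech.ImportKeysFromBytes(keyBlob)
	if err != nil {
		return err
	}
	contents, signer, err := mech.Verify(sigBlob)
	if err != nil {
		return err
	}
	fmt.Printf("imported %v; signed by %s: %q\n", identities, signer, contents)
	return nil
}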

View file

@ -0,0 +1,149 @@
package signature
import (
"bytes"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
testGPGHomeDirectory = "./fixtures"
)
func TestNewGPGSigningMechanism(t *testing.T) {
// A dumb test just for code coverage. We test more with newGPGSigningMechanismInDirectory().
_, err := NewGPGSigningMechanism()
assert.NoError(t, err)
}
func TestNewGPGSigningMechanismInDirectory(t *testing.T) {
// A dumb test just for code coverage.
_, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
assert.NoError(t, err)
// The various GPG failure cases are not obviously easy to reach.
}
func TestGPGSigningMechanismImportKeysFromBytes(t *testing.T) {
testDir, err := ioutil.TempDir("", "gpg-import-keys")
require.NoError(t, err)
defer os.RemoveAll(testDir)
mech, err := newGPGSigningMechanismInDirectory(testDir)
require.NoError(t, err)
// Try validating a signature when the key is unknown.
signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
require.NoError(t, err)
content, signingFingerprint, err := mech.Verify(signature)
require.Error(t, err)
// Successful import
keyBlob, err := ioutil.ReadFile("./fixtures/public-key.gpg")
require.NoError(t, err)
keyIdentities, err := mech.ImportKeysFromBytes(keyBlob)
require.NoError(t, err)
assert.Equal(t, []string{TestKeyFingerprint}, keyIdentities)
// After import, the signature should validate.
content, signingFingerprint, err = mech.Verify(signature)
require.NoError(t, err)
assert.Equal(t, []byte("This is not JSON\n"), content)
assert.Equal(t, TestKeyFingerprint, signingFingerprint)
// Two keys: just concatenate the valid input twice.
keyIdentities, err = mech.ImportKeysFromBytes(bytes.Join([][]byte{keyBlob, keyBlob}, nil))
require.NoError(t, err)
assert.Equal(t, []string{TestKeyFingerprint, TestKeyFingerprint}, keyIdentities)
// Invalid input: This is accepted anyway by GPG, just returns no keys.
keyIdentities, err = mech.ImportKeysFromBytes([]byte("This is invalid"))
require.NoError(t, err)
assert.Equal(t, []string{}, keyIdentities)
// The various GPG/GPGME failure cases are not obviously easy to reach.
}
func TestGPGSigningMechanismSign(t *testing.T) {
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
require.NoError(t, err)
// Successful signing
content := []byte("content")
signature, err := mech.Sign(content, TestKeyFingerprint)
require.NoError(t, err)
signedContent, signingFingerprint, err := mech.Verify(signature)
require.NoError(t, err)
assert.EqualValues(t, content, signedContent)
assert.Equal(t, TestKeyFingerprint, signingFingerprint)
// Error signing
_, err = mech.Sign(content, "this fingerprint doesn't exist")
assert.Error(t, err)
// The various GPG/GPGME failure cases are not obviously easy to reach.
}
func assertSigningError(t *testing.T, content []byte, fingerprint string, err error) {
assert.Error(t, err)
assert.Nil(t, content)
assert.Empty(t, fingerprint)
}
func TestGPGSigningMechanismVerify(t *testing.T) {
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
require.NoError(t, err)
// Successful verification
signature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
require.NoError(t, err)
content, signingFingerprint, err := mech.Verify(signature)
require.NoError(t, err)
assert.Equal(t, []byte("This is not JSON\n"), content)
assert.Equal(t, TestKeyFingerprint, signingFingerprint)
// For extra paranoia, test that we return nil data on error.
// Completely invalid signature.
content, signingFingerprint, err = mech.Verify([]byte{})
assertSigningError(t, content, signingFingerprint, err)
content, signingFingerprint, err = mech.Verify([]byte("invalid signature"))
assertSigningError(t, content, signingFingerprint, err)
// Literal packet, not a signature
signature, err = ioutil.ReadFile("./fixtures/unsigned-literal.signature")
require.NoError(t, err)
content, signingFingerprint, err = mech.Verify(signature)
assertSigningError(t, content, signingFingerprint, err)
// Encrypted data, not a signature.
signature, err = ioutil.ReadFile("./fixtures/unsigned-encrypted.signature")
require.NoError(t, err)
content, signingFingerprint, err = mech.Verify(signature)
assertSigningError(t, content, signingFingerprint, err)
// FIXME? Is there a way to create a multi-signature so that gpgme_op_verify returns multiple signatures?
// Expired signature
signature, err = ioutil.ReadFile("./fixtures/expired.signature")
require.NoError(t, err)
content, signingFingerprint, err = mech.Verify(signature)
assertSigningError(t, content, signingFingerprint, err)
// Corrupt signature
signature, err = ioutil.ReadFile("./fixtures/corrupt.signature")
require.NoError(t, err)
content, signingFingerprint, err = mech.Verify(signature)
assertSigningError(t, content, signingFingerprint, err)
// Valid signature with an unknown key
signature, err = ioutil.ReadFile("./fixtures/unknown-key.signature")
require.NoError(t, err)
content, signingFingerprint, err = mech.Verify(signature)
assertSigningError(t, content, signingFingerprint, err)
// The various GPG/GPGME failure cases are not obviously easy to reach.
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View file

@ -0,0 +1,287 @@
// This defines the top-level policy evaluation API.
// To the extent possible, the interface of the functions provided
// here is intended to be completely unambiguous, and stable for users
// to rely on.
package signature
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
)
// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
type PolicyRequirementError string
func (err PolicyRequirementError) Error() string {
return string(err)
}
// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
type signatureAcceptanceResult string
const (
sarAccepted signatureAcceptanceResult = "sarAccepted"
sarRejected signatureAcceptanceResult = "sarRejected"
sarUnknown signatureAcceptanceResult = "sarUnknown"
)
// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
// The type is public, but its definition is private.
type PolicyRequirement interface {
// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
// costly initialization like creating temporary GPG home directories and reading files.
// Setup() (someState, error)
// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
// isSignatureAuthorAccepted, given an image and a signature blob, returns:
// - sarAccepted if the signature has been verified against the appropriate public key
// (where "appropriate public key" may depend on the contents of the signature);
// in that case a parsed Signature should be returned.
// - sarRejected if the signature has not been verified;
// in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// - sarUnknown if this PolicyRequirement does not deal with signatures.
// NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
// Returning sarUnknown and a non-nil error value is invalid.
// WARNING: This makes the signature contents acceptable for further processing,
// but it does not necessarily mean that the contents of the signature are
// consistent with local policy.
// For example:
// - Do not use a true value to determine whether to run
// a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
// signature are authorized to run code as root, or to affect system or cluster configuration.
isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
// isRunningImageAllowed returns true if the requirement allows running an image.
// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
isRunningImageAllowed(image types.UnparsedImage) (bool, error)
}
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
// The type is public, but its implementation is private.
type PolicyReferenceMatch interface {
// matchesDockerReference decides whether a specific image identity is accepted for an image
// (or, usually, for the image's Reference().DockerReference()). Note that
// image.Reference().DockerReference() may be nil.
matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
}
// PolicyContext encapsulates a policy and possible cached state
// for speeding up its evaluation.
type PolicyContext struct {
Policy *Policy
state policyContextState // Internal consistency checking
}
// policyContextState is used internally to verify the users are not misusing a PolicyContext.
type policyContextState string
const (
pcInvalid policyContextState = ""
pcInitializing policyContextState = "Initializing"
pcReady policyContextState = "Ready"
pcInUse policyContextState = "InUse"
pcDestroying policyContextState = "Destroying"
pcDestroyed policyContextState = "Destroyed"
)
// changeState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
if pc.state != expected {
return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
}
pc.state = new
return nil
}
// NewPolicyContext sets up and initializes a context for the specified policy.
// The policy must not be modified while the context exists. FIXME: make a deep copy?
// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
pc := &PolicyContext{Policy: policy, state: pcInitializing}
// FIXME: initialize
if err := pc.changeState(pcInitializing, pcReady); err != nil {
// Huh?! This should never fail, we didn't give the pointer to anybody.
// Just give up and leave unclean state around.
return nil, err
}
return pc, nil
}
// Destroy should be called when the user of the context is done with it.
func (pc *PolicyContext) Destroy() error {
if err := pc.changeState(pcReady, pcDestroying); err != nil {
return err
}
// FIXME: destroy
return pc.changeState(pcDestroying, pcDestroyed)
}
// policyIdentityLogName returns a string description of the image identity for policy purposes.
// ONLY use this for log messages, not for any decisions!
func policyIdentityLogName(ref types.ImageReference) string {
return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
}
// requirementsForImageRef selects the appropriate requirements for ref.
func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
// Do we have a PolicyTransportScopes for this transport?
transportName := ref.Transport().Name()
if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
// Look for a full match.
identity := ref.PolicyConfigurationIdentity()
if req, ok := transportScopes[identity]; ok {
logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
return req
}
// Look for a match of the possible parent namespaces.
for _, name := range ref.PolicyConfigurationNamespaces() {
if req, ok := transportScopes[name]; ok {
logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
return req
}
}
// Look for a default match for the transport.
if req, ok := transportScopes[""]; ok {
logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
return req
}
}
logrus.Debugf(" Using default policy section")
return pc.Policy.Default
}
// GetSignaturesWithAcceptedAuthor returns those signatures from an image
// for which the policy accepts the author (and which have been successfully
// verified).
// NOTE: This may legitimately return an empty list and no error, if the image
// has no signatures or only invalid signatures.
// WARNING: This makes the signature contents acceptable for further processing,
// but it does not necessarily mean that the contents of the signature are
// consistent with local policy.
// For example:
// - Do not use the existence of an accepted signature to determine whether to run
// a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
// signature are authorized to run code as root, or to affect system or cluster configuration.
func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(image types.UnparsedImage) (sigs []*Signature, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return nil, err
}
defer func() {
if err := pc.changeState(pcInUse, pcReady); err != nil {
sigs = nil
finalErr = err
}
}()
logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
reqs := pc.requirementsForImageRef(image.Reference())
// FIXME: rename Signatures to UnverifiedSignatures
unverifiedSignatures, err := image.Signatures()
if err != nil {
return nil, err
}
res := make([]*Signature, 0, len(unverifiedSignatures))
for sigNumber, sig := range unverifiedSignatures {
var acceptedSig *Signature // non-nil if accepted
rejected := false
// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
logrus.Debugf("Evaluating signature %d:", sigNumber)
interpretingReqs:
for reqNumber, req := range reqs {
// FIXME: Log the requirement itself? For now, we use just the number.
// FIXME: supply state
switch res, as, err := req.isSignatureAuthorAccepted(image, sig); res {
case sarAccepted:
if as == nil { // Coverage: this should never happen
logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
rejected = true
break interpretingReqs
}
logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
if acceptedSig == nil {
acceptedSig = as
} else if *as != *acceptedSig { // Coverage: this should never happen
// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
rejected = true
acceptedSig = nil
break interpretingReqs
}
case sarRejected:
logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
rejected = true
break interpretingReqs
case sarUnknown:
if err != nil { // Coverage: this should never happen
logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
rejected = true
break interpretingReqs
}
logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
default: // Coverage: this should never happen
logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
rejected = true
break interpretingReqs
}
}
// This also handles the (invalid) case of empty reqs, by rejecting the signature.
if acceptedSig != nil && !rejected {
logrus.Debugf(" Overall: OK, signature accepted")
res = append(res, acceptedSig)
} else {
logrus.Debugf(" Overall: Signature not accepted")
}
}
return res, nil
}
// IsRunningImageAllowed returns true iff the policy allows running the image.
// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
func (pc *PolicyContext) IsRunningImageAllowed(image types.UnparsedImage) (res bool, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return false, err
}
defer func() {
if err := pc.changeState(pcInUse, pcReady); err != nil {
res = false
finalErr = err
}
}()
logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
reqs := pc.requirementsForImageRef(image.Reference())
if len(reqs) == 0 {
return false, PolicyRequirementError("List of verification policy requirements must not be empty")
}
for reqNumber, req := range reqs {
// FIXME: supply state
allowed, err := req.isRunningImageAllowed(image)
if !allowed {
logrus.Debugf("Requirement %d: denied, done", reqNumber)
return false, err
}
logrus.Debugf(" Requirement %d: allowed", reqNumber)
}
// We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image.
logrus.Debugf("Overall: allowed")
return true, nil
}
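To make the lifecycle above concrete (not part of the vendored file): a caller builds a Policy, wraps it in a PolicyContext, evaluates images, and destroys the context when done. The sketch below uses NewPRInsecureAcceptAnything, which appears in this package's tests; img is assumed to be a types.UnparsedImage obtained elsewhere, and the file is assumed to import the signature and types packages.
// Sketch: allow everything by default and ask whether img may run under that policy.
func mayRun(img types.UnparsedImage) (bool, error) {
	policy := &signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		return false, err
	}
	defer pc.Destroy() // callers should Destroy() the context when done with it
	return pc.IsRunningImageAllowed(img)
}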

View file

@ -0,0 +1,18 @@
// Policy evaluation for prSignedBaseLayer.
package signature
import (
"github.com/Sirupsen/logrus"
"github.com/containers/image/types"
)
func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
return sarUnknown, nil, nil
}
func (pr *prSignedBaseLayer) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
// FIXME? Reject this at policy parsing time already?
logrus.Errorf("signedBaseLayer not implemented yet!")
return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
}

View file

@ -0,0 +1,24 @@
package signature
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestPRSignedBaseLayerIsSignatureAuthorAccepted(t *testing.T) {
pr, err := NewPRSignedBaseLayer(NewPRMMatchRepository())
require.NoError(t, err)
// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
sar, parsedSig, err := pr.isSignatureAuthorAccepted(nil, nil)
assertSARUnknown(t, sar, parsedSig, err)
}
func TestPRSignedBaseLayerIsRunningImageAllowed(t *testing.T) {
// This will obviously need to change after signedBaseLayer is implemented.
pr, err := NewPRSignedBaseLayer(NewPRMMatchRepository())
require.NoError(t, err)
// Pass a nil pointer to, kind of, test that the return value does not depend on the image.
res, err := pr.isRunningImageAllowed(nil)
assertRunningRejectedPolicyRequirement(t, res, err)
}

View file

@ -0,0 +1,137 @@
// Policy evaluation for prSignedBy.
package signature
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
)
func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
switch pr.KeyType {
case SBKeyTypeGPGKeys:
case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
// FIXME? Reject this at policy parsing time already?
return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
default:
// This should never happen, newPRSignedBy ensures KeyType.IsValid()
return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
}
if pr.KeyPath != "" && pr.KeyData != nil {
return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
}
// FIXME: move this to per-context initialization
var data []byte
if pr.KeyData != nil {
data = pr.KeyData
} else {
d, err := ioutil.ReadFile(pr.KeyPath)
if err != nil {
return sarRejected, nil, err
}
data = d
}
// FIXME: move this to per-context initialization
dir, err := ioutil.TempDir("", "skopeo-signedBy-")
if err != nil {
return sarRejected, nil, err
}
defer os.RemoveAll(dir)
mech, err := newGPGSigningMechanismInDirectory(dir)
if err != nil {
return sarRejected, nil, err
}
trustedIdentities, err := mech.ImportKeysFromBytes(data)
if err != nil {
return sarRejected, nil, err
}
if len(trustedIdentities) == 0 {
return sarRejected, nil, PolicyRequirementError("No public keys imported")
}
signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
for _, trustedIdentity := range trustedIdentities {
if keyIdentity == trustedIdentity {
return nil
}
}
// Coverage: We use a private GPG home directory and only import trusted keys, so this should
// not be reachable.
return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
},
validateSignedDockerReference: func(ref string) error {
if !pr.SignedIdentity.matchesDockerReference(image, ref) {
return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
}
return nil
},
validateSignedDockerManifestDigest: func(digest string) error {
m, _, err := image.Manifest()
if err != nil {
return err
}
digestMatches, err := manifest.MatchesDigest(m, digest)
if err != nil {
return err
}
if !digestMatches {
return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
}
return nil
},
})
if err != nil {
return sarRejected, nil, err
}
return sarAccepted, signature, nil
}
func (pr *prSignedBy) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
sigs, err := image.Signatures()
if err != nil {
return false, err
}
var rejections []error
for _, s := range sigs {
var reason error
switch res, _, err := pr.isSignatureAuthorAccepted(image, s); res {
case sarAccepted:
// One accepted signature is enough.
return true, nil
case sarRejected:
reason = err
case sarUnknown:
// Huh?! This should not happen at all; treat it as any other invalid value.
fallthrough
default:
reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
}
rejections = append(rejections, reason)
}
var summary error
switch len(rejections) {
case 0:
summary = PolicyRequirementError("A signature was required, but no signature exists")
case 1:
summary = rejections[0]
default:
var msgs []string
for _, e := range rejections {
msgs = append(msgs, e.Error())
}
summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
strings.Join(msgs, "; ")))
}
return false, summary
}
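The requirement evaluated above is normally built through the package's constructors (the tests below use them too). A rough sketch, assuming a file that imports "github.com/containers/image/signature"; the keyring path is a placeholder borrowed from the policy.json fixture earlier in this commit.
// Sketch: a "signedBy" requirement that trusts keys from a keyring file and
// requires the signed identity to match the image reference exactly.
func newEmployeeSignedRequirement() (signature.PolicyRequirement, error) {
	return signature.NewPRSignedByKeyPath(
		signature.SBKeyTypeGPGKeys,
		"/keys/employee-gpg-keyring", // placeholder keyring path
		signature.NewPRMMatchExact(),
	)
}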

View file

@ -0,0 +1,264 @@
package signature
import (
"io/ioutil"
"os"
"path"
"testing"
"github.com/containers/image/directory"
"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// dirImageMock returns a types.UnparsedImage for a directory, claiming a specified dockerReference.
// The caller must call .Close() on the returned UnparsedImage.
func dirImageMock(t *testing.T, dir, dockerReference string) types.UnparsedImage {
ref, err := reference.ParseNamed(dockerReference)
require.NoError(t, err)
return dirImageMockWithRef(t, dir, refImageReferenceMock{ref})
}
// dirImageMockWithRef returns a types.UnparsedImage for a directory, claiming a specified ref.
// The caller must call .Close() on the returned UnparsedImage.
func dirImageMockWithRef(t *testing.T, dir string, ref types.ImageReference) types.UnparsedImage {
srcRef, err := directory.NewReference(dir)
require.NoError(t, err)
src, err := srcRef.NewImageSource(nil, nil)
require.NoError(t, err)
return image.UnparsedFromSource(&dirImageSourceMock{
ImageSource: src,
ref: ref,
})
}
// dirImageSourceMock inherits dirImageSource, but overrides its Reference method.
type dirImageSourceMock struct {
types.ImageSource
ref types.ImageReference
}
func (d *dirImageSourceMock) Reference() types.ImageReference {
return d.ref
}
func TestPRSignedByIsSignatureAuthorAccepted(t *testing.T) {
ktGPG := SBKeyTypeGPGKeys
prm := NewPRMMatchExact()
testImage := dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
defer testImage.Close()
testImageSig, err := ioutil.ReadFile("fixtures/dir-img-valid/signature-1")
require.NoError(t, err)
// Successful validation, with KeyData and KeyPath
pr, err := NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
sar, parsedSig, err := pr.isSignatureAuthorAccepted(testImage, testImageSig)
assertSARAccepted(t, sar, parsedSig, err, Signature{
DockerManifestDigest: TestImageManifestDigest,
DockerReference: "testing/manifest:latest",
})
keyData, err := ioutil.ReadFile("fixtures/public-key.gpg")
require.NoError(t, err)
pr, err = NewPRSignedByKeyData(ktGPG, keyData, prm)
require.NoError(t, err)
sar, parsedSig, err = pr.isSignatureAuthorAccepted(testImage, testImageSig)
assertSARAccepted(t, sar, parsedSig, err, Signature{
DockerManifestDigest: TestImageManifestDigest,
DockerReference: "testing/manifest:latest",
})
// Unimplemented and invalid KeyType values
for _, keyType := range []sbKeyType{SBKeyTypeSignedByGPGKeys,
SBKeyTypeX509Certificates,
SBKeyTypeSignedByX509CAs,
sbKeyType("This is invalid"),
} {
// Do not use NewPRSignedByKeyData, because it would reject invalid values.
pr := &prSignedBy{
KeyType: keyType,
KeyData: []byte("abc"),
SignedIdentity: prm,
}
// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
sar, parsedSig, err := pr.isSignatureAuthorAccepted(nil, nil)
assertSARRejected(t, sar, parsedSig, err)
}
// Both KeyPath and KeyData set. Do not use NewPRSignedBy*, because it would reject this.
prSB := &prSignedBy{
KeyType: ktGPG,
KeyPath: "/foo/bar",
KeyData: []byte("abc"),
SignedIdentity: prm,
}
// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
sar, parsedSig, err = prSB.isSignatureAuthorAccepted(nil, nil)
assertSARRejected(t, sar, parsedSig, err)
// Invalid KeyPath
pr, err = NewPRSignedByKeyPath(ktGPG, "/this/does/not/exist", prm)
require.NoError(t, err)
// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, nil)
assertSARRejected(t, sar, parsedSig, err)
// Errors initializing the temporary GPG directory and mechanism are not obviously easy to reach.
// KeyData has no public keys.
pr, err = NewPRSignedByKeyData(ktGPG, []byte{}, prm)
require.NoError(t, err)
// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, nil)
assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
// A signature which does not GPG verify
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
// Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, []byte("invalid signature"))
assertSARRejected(t, sar, parsedSig, err)
// A valid signature using an unknown key.
// (This is (currently?) rejected through the "mech.Verify fails" path, not the "!identityFound" path,
// because we use a temporary directory and only import the trusted keys.)
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
sig, err := ioutil.ReadFile("fixtures/unknown-key.signature")
require.NoError(t, err)
// Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, sig)
assertSARRejected(t, sar, parsedSig, err)
// A valid signature of an invalid JSON.
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
sig, err = ioutil.ReadFile("fixtures/invalid-blob.signature")
require.NoError(t, err)
// Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
sar, parsedSig, err = pr.isSignatureAuthorAccepted(nil, sig)
assertSARRejected(t, sar, parsedSig, err)
assert.IsType(t, InvalidSignatureError{}, err)
// A valid signature with a rejected identity.
nonmatchingPRM, err := NewPRMExactReference("this/doesnt:match")
require.NoError(t, err)
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", nonmatchingPRM)
require.NoError(t, err)
sar, parsedSig, err = pr.isSignatureAuthorAccepted(testImage, testImageSig)
assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
// Error reading image manifest
image := dirImageMock(t, "fixtures/dir-img-no-manifest", "testing/manifest:latest")
defer image.Close()
sig, err = ioutil.ReadFile("fixtures/dir-img-no-manifest/signature-1")
require.NoError(t, err)
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
sar, parsedSig, err = pr.isSignatureAuthorAccepted(image, sig)
assertSARRejected(t, sar, parsedSig, err)
// Error computing manifest digest
image = dirImageMock(t, "fixtures/dir-img-manifest-digest-error", "testing/manifest:latest")
defer image.Close()
sig, err = ioutil.ReadFile("fixtures/dir-img-manifest-digest-error/signature-1")
require.NoError(t, err)
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
sar, parsedSig, err = pr.isSignatureAuthorAccepted(image, sig)
assertSARRejected(t, sar, parsedSig, err)
// A valid signature with a non-matching manifest
image = dirImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest")
defer image.Close()
sig, err = ioutil.ReadFile("fixtures/dir-img-modified-manifest/signature-1")
require.NoError(t, err)
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
sar, parsedSig, err = pr.isSignatureAuthorAccepted(image, sig)
assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
}
// createInvalidSigDir creates a directory suitable for dirImageMock, in which image.Signatures()
// fails.
// The caller should eventually call os.RemoveAll on the returned path.
func createInvalidSigDir(t *testing.T) string {
dir, err := ioutil.TempDir("", "skopeo-test-unreadable-signature")
require.NoError(t, err)
err = ioutil.WriteFile(path.Join(dir, "manifest.json"), []byte("{}"), 0644)
require.NoError(t, err)
// Creating a 000-permissions file would work for unprivileged accounts, but root (in particular,
// in the Docker container we use for testing) would still have access. So, create a symlink
// pointing to itself, to cause an ELOOP. (Note that a symlink pointing to a nonexistent file would be treated
// just like a nonexistent signature file, and not an error.)
err = os.Symlink("signature-1", path.Join(dir, "signature-1"))
require.NoError(t, err)
return dir
}
func TestPRSignedByIsRunningImageAllowed(t *testing.T) {
ktGPG := SBKeyTypeGPGKeys
prm := NewPRMMatchExact()
// A simple success case: single valid signature.
image := dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
defer image.Close()
pr, err := NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
allowed, err := pr.isRunningImageAllowed(image)
assertRunningAllowed(t, allowed, err)
// Error reading signatures
invalidSigDir := createInvalidSigDir(t)
defer os.RemoveAll(invalidSigDir)
image = dirImageMock(t, invalidSigDir, "testing/manifest:latest")
defer image.Close()
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
allowed, err = pr.isRunningImageAllowed(image)
assertRunningRejected(t, allowed, err)
// No signatures
image = dirImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest")
defer image.Close()
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
allowed, err = pr.isRunningImageAllowed(image)
assertRunningRejectedPolicyRequirement(t, allowed, err)
// 1 invalid signature: use dir-img-valid, but a non-matching Docker reference
image = dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:notlatest")
defer image.Close()
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
allowed, err = pr.isRunningImageAllowed(image)
assertRunningRejectedPolicyRequirement(t, allowed, err)
// 2 valid signatures
image = dirImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest")
defer image.Close()
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
allowed, err = pr.isRunningImageAllowed(image)
assertRunningAllowed(t, allowed, err)
// One invalid, one valid signature (in this order)
image = dirImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest")
defer image.Close()
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
allowed, err = pr.isRunningImageAllowed(image)
assertRunningAllowed(t, allowed, err)
// 2 invalid signatures: use dir-img-valid-2, but a non-matching Docker reference
image = dirImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:notlatest")
defer image.Close()
pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
require.NoError(t, err)
allowed, err = pr.isRunningImageAllowed(image)
assertRunningRejectedPolicyRequirement(t, allowed, err)
}

View file

@ -0,0 +1,28 @@
// Policy evaluation for the various simple PolicyRequirement types.
package signature
import (
"fmt"
"github.com/containers/image/transports"
"github.com/containers/image/types"
)
func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
// prInsecureAcceptAnything semantics: Every image is allowed to run,
// but this does not consider the signature as verified.
return sarUnknown, nil, nil
}
func (pr *prInsecureAcceptAnything) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
return true, nil
}
func (pr *prReject) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
}
func (pr *prReject) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
}

View file

@ -0,0 +1,74 @@
package signature
import (
"testing"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
)
// nameOnlyImageMock is a mock of types.UnparsedImage which only allows transports.ImageName to work
type nameOnlyImageMock struct {
forbiddenImageMock
}
func (nameOnlyImageMock) Reference() types.ImageReference {
return nameOnlyImageReferenceMock("== StringWithinTransport mock")
}
// nameOnlyImageReferenceMock is a mock of types.ImageReference which only allows transports.ImageName to work, returning self.
type nameOnlyImageReferenceMock string
func (ref nameOnlyImageReferenceMock) Transport() types.ImageTransport {
return nameImageTransportMock("== Transport mock")
}
func (ref nameOnlyImageReferenceMock) StringWithinTransport() string {
return string(ref)
}
func (ref nameOnlyImageReferenceMock) DockerReference() reference.Named {
panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) PolicyConfigurationIdentity() string {
panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) PolicyConfigurationNamespaces() []string {
panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) NewImage(ctx *types.SystemContext) (types.Image, error) {
panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
panic("unexpected call to a mock function")
}
func (ref nameOnlyImageReferenceMock) DeleteImage(ctx *types.SystemContext) error {
panic("unexpected call to a mock function")
}
func TestPRInsecureAcceptAnythingIsSignatureAuthorAccepted(t *testing.T) {
pr := NewPRInsecureAcceptAnything()
// Pass nil signature to, kind of, test that the return value does not depend on it.
sar, parsedSig, err := pr.isSignatureAuthorAccepted(nameOnlyImageMock{}, nil)
assertSARUnknown(t, sar, parsedSig, err)
}
func TestPRInsecureAcceptAnythingIsRunningImageAllowed(t *testing.T) {
pr := NewPRInsecureAcceptAnything()
res, err := pr.isRunningImageAllowed(nameOnlyImageMock{})
assertRunningAllowed(t, res, err)
}
func TestPRRejectIsSignatureAuthorAccepted(t *testing.T) {
pr := NewPRReject()
// Pass nil signature to, kind of, test that the return value does not depend on it.
sar, parsedSig, err := pr.isSignatureAuthorAccepted(nameOnlyImageMock{}, nil)
assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
}
func TestPRRejectIsRunningImageAllowed(t *testing.T) {
pr := NewPRReject()
res, err := pr.isRunningImageAllowed(nameOnlyImageMock{})
assertRunningRejectedPolicyRequirement(t, res, err)
}

View file

@ -0,0 +1,488 @@
package signature
import (
"fmt"
"os"
"testing"
"github.com/containers/image/docker/policyconfiguration"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestPolicyRequirementError(t *testing.T) {
// A stupid test just to keep code coverage
s := "test"
err := PolicyRequirementError(s)
assert.Equal(t, s, err.Error())
}
func TestPolicyContextChangeState(t *testing.T) {
pc, err := NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}})
require.NoError(t, err)
defer pc.Destroy()
require.Equal(t, pcReady, pc.state)
err = pc.changeState(pcReady, pcInUse)
require.NoError(t, err)
err = pc.changeState(pcReady, pcInUse)
require.Error(t, err)
// Return state to pcReady to allow pc.Destroy to clean up.
err = pc.changeState(pcInUse, pcReady)
require.NoError(t, err)
}
func TestPolicyContextNewDestroy(t *testing.T) {
pc, err := NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}})
require.NoError(t, err)
assert.Equal(t, pcReady, pc.state)
err = pc.Destroy()
require.NoError(t, err)
assert.Equal(t, pcDestroyed, pc.state)
// Trying to destroy when not pcReady
pc, err = NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}})
require.NoError(t, err)
err = pc.changeState(pcReady, pcInUse)
require.NoError(t, err)
err = pc.Destroy()
require.Error(t, err)
assert.Equal(t, pcInUse, pc.state) // The state, and hopefully nothing else, has changed.
err = pc.changeState(pcInUse, pcReady)
require.NoError(t, err)
err = pc.Destroy()
assert.NoError(t, err)
}
// pcImageReferenceMock is a mock of types.ImageReference which returns a configured reference.Named in DockerReference
// and handles PolicyConfigurationIdentity and PolicyConfigurationNamespaces consistently with it.
type pcImageReferenceMock struct {
transportName string
ref reference.Named
}
func (ref pcImageReferenceMock) Transport() types.ImageTransport {
return nameImageTransportMock(ref.transportName)
}
func (ref pcImageReferenceMock) StringWithinTransport() string {
// We use this in error messages, so sadly we must return something.
return "== StringWithinTransport mock"
}
func (ref pcImageReferenceMock) DockerReference() reference.Named {
return ref.ref
}
func (ref pcImageReferenceMock) PolicyConfigurationIdentity() string {
res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
if res == "" || err != nil {
panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
}
return res
}
func (ref pcImageReferenceMock) PolicyConfigurationNamespaces() []string {
if ref.ref == nil {
panic("unexpected call to a mock function")
}
return policyconfiguration.DockerReferenceNamespaces(ref.ref)
}
func (ref pcImageReferenceMock) NewImage(ctx *types.SystemContext) (types.Image, error) {
panic("unexpected call to a mock function")
}
func (ref pcImageReferenceMock) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
panic("unexpected call to a mock function")
}
func (ref pcImageReferenceMock) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
panic("unexpected call to a mock function")
}
func (ref pcImageReferenceMock) DeleteImage(ctx *types.SystemContext) error {
panic("unexpected call to a mock function")
}
func TestPolicyContextRequirementsForImageRef(t *testing.T) {
ktGPG := SBKeyTypeGPGKeys
prm := NewPRMMatchExact()
policy := &Policy{
Default: PolicyRequirements{NewPRReject()},
Transports: map[string]PolicyTransportScopes{},
}
// Just put _something_ into the PolicyTransportScopes map for the keys we care about, and make it pairwise
// distinct so that we can compare the values and show them when debugging the tests.
for _, t := range []struct{ transport, scope string }{
{"docker", ""},
{"docker", "unmatched"},
{"docker", "deep.com"},
{"docker", "deep.com/n1"},
{"docker", "deep.com/n1/n2"},
{"docker", "deep.com/n1/n2/n3"},
{"docker", "deep.com/n1/n2/n3/repo"},
{"docker", "deep.com/n1/n2/n3/repo:tag2"},
{"atomic", "unmatched"},
} {
if _, ok := policy.Transports[t.transport]; !ok {
policy.Transports[t.transport] = PolicyTransportScopes{}
}
policy.Transports[t.transport][t.scope] = PolicyRequirements{xNewPRSignedByKeyData(ktGPG, []byte(t.transport+t.scope), prm)}
}
pc, err := NewPolicyContext(policy)
require.NoError(t, err)
for _, c := range []struct{ inputTransport, input, matchedTransport, matched string }{
// Full match
{"docker", "deep.com/n1/n2/n3/repo:tag2", "docker", "deep.com/n1/n2/n3/repo:tag2"},
// Namespace matches
{"docker", "deep.com/n1/n2/n3/repo:nottag2", "docker", "deep.com/n1/n2/n3/repo"},
{"docker", "deep.com/n1/n2/n3/notrepo:tag2", "docker", "deep.com/n1/n2/n3"},
{"docker", "deep.com/n1/n2/notn3/repo:tag2", "docker", "deep.com/n1/n2"},
{"docker", "deep.com/n1/notn2/n3/repo:tag2", "docker", "deep.com/n1"},
// Host name match
{"docker", "deep.com/notn1/n2/n3/repo:tag2", "docker", "deep.com"},
// Default
{"docker", "this.doesnt/match:anything", "docker", ""},
// No match within a matched transport which doesn't have a "" scope
{"atomic", "this.doesnt/match:anything", "", ""},
// No configuration available for this transport at all
{"dir", "what/ever", "", ""}, // "what/ever" is not a valid scope for the real "dir" transport, but we only need it to be a valid reference.Named.
} {
var expected PolicyRequirements
if c.matchedTransport != "" {
e, ok := policy.Transports[c.matchedTransport][c.matched]
require.True(t, ok, fmt.Sprintf("case %s:%s: expected reqs not found", c.inputTransport, c.input))
expected = e
} else {
expected = policy.Default
}
ref, err := reference.ParseNamed(c.input)
require.NoError(t, err)
reqs := pc.requirementsForImageRef(pcImageReferenceMock{c.inputTransport, ref})
comment := fmt.Sprintf("case %s:%s: %#v", c.inputTransport, c.input, reqs[0])
// Do not use assert.Equal, which would do a deep contents comparison; we want to compare
// the pointers. Also, == does not work on slices; so test that the slices start at the
// same element and have the same length.
assert.True(t, &(reqs[0]) == &(expected[0]), comment)
assert.True(t, len(reqs) == len(expected), comment)
}
}
// pcImageMock returns a types.UnparsedImage for a directory, claiming a specified dockerReference and implementing PolicyConfigurationIdentity/PolicyConfigurationNamespaces.
// The caller must call .Close() on the returned UnparsedImage.
func pcImageMock(t *testing.T, dir, dockerReference string) types.UnparsedImage {
ref, err := reference.ParseNamed(dockerReference)
require.NoError(t, err)
return dirImageMockWithRef(t, dir, pcImageReferenceMock{"docker", ref})
}
func TestPolicyContextGetSignaturesWithAcceptedAuthor(t *testing.T) {
expectedSig := &Signature{
DockerManifestDigest: TestImageManifestDigest,
DockerReference: "testing/manifest:latest",
}
pc, err := NewPolicyContext(&Policy{
Default: PolicyRequirements{NewPRReject()},
Transports: map[string]PolicyTransportScopes{
"docker": {
"docker.io/testing/manifest:latest": {
xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchExact()),
},
"docker.io/testing/manifest:twoAccepts": {
xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
},
"docker.io/testing/manifest:acceptReject": {
xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
NewPRReject(),
},
"docker.io/testing/manifest:acceptUnknown": {
xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
xNewPRSignedBaseLayer(NewPRMMatchRepository()),
},
"docker.io/testing/manifest:rejectUnknown": {
NewPRReject(),
xNewPRSignedBaseLayer(NewPRMMatchRepository()),
},
"docker.io/testing/manifest:unknown": {
xNewPRSignedBaseLayer(NewPRMMatchRepository()),
},
"docker.io/testing/manifest:unknown2": {
NewPRInsecureAcceptAnything(),
},
"docker.io/testing/manifest:invalidEmptyRequirements": {},
},
},
})
require.NoError(t, err)
defer pc.Destroy()
// Success
img := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
defer img.Close()
sigs, err := pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Equal(t, []*Signature{expectedSig}, sigs)
// Two signatures
// FIXME? Use really different signatures for this?
img = pcImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Equal(t, []*Signature{expectedSig, expectedSig}, sigs)
// No signatures
img = pcImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Empty(t, sigs)
// Only invalid signatures
img = pcImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Empty(t, sigs)
// 1 invalid, 1 valid signature (in this order)
img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Equal(t, []*Signature{expectedSig}, sigs)
// Two sarAccepted results for one signature
img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:twoAccepts")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Equal(t, []*Signature{expectedSig}, sigs)
// sarAccepted+sarRejected for a signature
img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:acceptReject")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Empty(t, sigs)
// sarAccepted+sarUnknown for a signature
img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:acceptUnknown")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Equal(t, []*Signature{expectedSig}, sigs)
// sarRejected+sarUnknown for a signature
img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:rejectUnknown")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Empty(t, sigs)
// sarUnknown only
img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:unknown")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Empty(t, sigs)
img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:unknown2")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Empty(t, sigs)
// Empty list of requirements (invalid)
img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:invalidEmptyRequirements")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
require.NoError(t, err)
assert.Empty(t, sigs)
// Failures: Make sure we return nil sigs.
// Unexpected state (context already destroyed)
destroyedPC, err := NewPolicyContext(pc.Policy)
require.NoError(t, err)
err = destroyedPC.Destroy()
require.NoError(t, err)
img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
defer img.Close()
sigs, err = destroyedPC.GetSignaturesWithAcceptedAuthor(img)
assert.Error(t, err)
assert.Nil(t, sigs)
// Not testing the pcInUse->pcReady transition, that would require custom PolicyRequirement
// implementations meddling with the state, or threads. This is for catching trivial programmer
// mistakes only, anyway.
// Error reading signatures.
invalidSigDir := createInvalidSigDir(t)
defer os.RemoveAll(invalidSigDir)
img = pcImageMock(t, invalidSigDir, "testing/manifest:latest")
defer img.Close()
sigs, err = pc.GetSignaturesWithAcceptedAuthor(img)
assert.Error(t, err)
assert.Nil(t, sigs)
}
func TestPolicyContextIsRunningImageAllowed(t *testing.T) {
pc, err := NewPolicyContext(&Policy{
Default: PolicyRequirements{NewPRReject()},
Transports: map[string]PolicyTransportScopes{
"docker": {
"docker.io/testing/manifest:latest": {
xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchExact()),
},
"docker.io/testing/manifest:twoAllows": {
xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
},
"docker.io/testing/manifest:allowDeny": {
xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()),
NewPRReject(),
},
"docker.io/testing/manifest:reject": {
NewPRReject(),
},
"docker.io/testing/manifest:acceptAnything": {
NewPRInsecureAcceptAnything(),
},
"docker.io/testing/manifest:invalidEmptyRequirements": {},
},
},
})
require.NoError(t, err)
defer pc.Destroy()
// Success
img := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
defer img.Close()
res, err := pc.IsRunningImageAllowed(img)
assertRunningAllowed(t, res, err)
// Two signatures
// FIXME? Use really different signatures for this?
img = pcImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest")
defer img.Close()
res, err = pc.IsRunningImageAllowed(img)
assertRunningAllowed(t, res, err)
// No signatures
img = pcImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest")
defer img.Close()
res, err = pc.IsRunningImageAllowed(img)
assertRunningRejectedPolicyRequirement(t, res, err)
// Only invalid signatures
img = pcImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest")
defer img.Close()
res, err = pc.IsRunningImageAllowed(img)
assertRunningRejectedPolicyRequirement(t, res, err)
// 1 invalid, 1 valid signature (in this order)
img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest")
defer img.Close()
res, err = pc.IsRunningImageAllowed(img)
assertRunningAllowed(t, res, err)
// Two allowed results
img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:twoAllows")
defer img.Close()
res, err = pc.IsRunningImageAllowed(img)
assertRunningAllowed(t, res, err)
// Allow + deny results
img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:allowDeny")
defer img.Close()
res, err = pc.IsRunningImageAllowed(img)
assertRunningRejectedPolicyRequirement(t, res, err)
// prReject works
img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:reject")
defer img.Close()
res, err = pc.IsRunningImageAllowed(img)
assertRunningRejectedPolicyRequirement(t, res, err)
// prInsecureAcceptAnything works
img = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:acceptAnything")
defer img.Close()
res, err = pc.IsRunningImageAllowed(img)
assertRunningAllowed(t, res, err)
// Empty list of requirements (invalid)
img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:invalidEmptyRequirements")
defer img.Close()
res, err = pc.IsRunningImageAllowed(img)
assertRunningRejectedPolicyRequirement(t, res, err)
// Unexpected state (context already destroyed)
destroyedPC, err := NewPolicyContext(pc.Policy)
require.NoError(t, err)
err = destroyedPC.Destroy()
require.NoError(t, err)
img = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
defer img.Close()
res, err = destroyedPC.IsRunningImageAllowed(img)
assertRunningRejected(t, res, err)
// Not testing the pcInUse->pcReady transition, that would require custom PolicyRequirement
// implementations meddling with the state, or threads. This is for catching trivial programmer
// mistakes only, anyway.
}
// Helpers for validating PolicyRequirement.isSignatureAuthorAccepted results:
// assertSARAccepted verifies that isSignatureAuthorAccepted returns a consistent sarAccepted result
// with the expected signature.
func assertSARAccepted(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error, expectedSig Signature) {
assert.Equal(t, sarAccepted, sar)
assert.Equal(t, &expectedSig, parsedSig)
assert.NoError(t, err)
}
// assertSARRejected verifies that isSignatureAuthorAccepted returns a consistent sarRejected result.
func assertSARRejected(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
assert.Equal(t, sarRejected, sar)
assert.Nil(t, parsedSig)
assert.Error(t, err)
}
// assertSARRejectedPolicyRequirement verifies that isSignatureAuthorAccepted returns a consistent sarRejected result,
// and that the returned error is a PolicyRequirementError.
func assertSARRejectedPolicyRequirement(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
assertSARRejected(t, sar, parsedSig, err)
assert.IsType(t, PolicyRequirementError(""), err)
}
// assertSARUnknown verifies that isSignatureAuthorAccepted returns a consistent sarUnknown result.
func assertSARUnknown(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
assert.Equal(t, sarUnknown, sar)
assert.Nil(t, parsedSig)
assert.NoError(t, err)
}
// Helpers for validating PolicyRequirement.isRunningImageAllowed results:
// assertRunningAllowed verifies that isRunningImageAllowed returns a consistent true result
func assertRunningAllowed(t *testing.T, allowed bool, err error) {
assert.Equal(t, true, allowed)
assert.NoError(t, err)
}
// assertRunningRejected verifies that isRunningImageAllowed returns a consistent false result
func assertRunningRejected(t *testing.T, allowed bool, err error) {
assert.Equal(t, false, allowed)
assert.Error(t, err)
}
// assertRunningRejectedPolicyRequirement verifies that isRunningImageAllowed returns a consistent false result
// and that the returned error is a PolicyRequirementError.
func assertRunningRejectedPolicyRequirement(t *testing.T, allowed bool, err error) {
assertRunningRejected(t, allowed, err)
assert.IsType(t, PolicyRequirementError(""), err)
}

View file

@ -0,0 +1,78 @@
// PolicyReferenceMatch implementations.
package signature
import (
"fmt"
"github.com/containers/image/docker/reference"
"github.com/containers/image/transports"
"github.com/containers/image/types"
)
// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
r1 := image.Reference().DockerReference()
if r1 == nil {
return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
transports.ImageName(image.Reference())))
}
r2, err := reference.ParseNamed(s2)
if err != nil {
return nil, nil, err
}
return r1, r2, nil
}
func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
}
// Do not add default tags: image.Reference().DockerReference() should already contain a tag, and signatureDockerReference should be exact; so, verify that now.
if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
return false
}
return signature.String() == intended.String()
}
func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
}
return signature.Name() == intended.Name()
}
// parseDockerReferences converts two reference strings into parsed entities, failing on any error
func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
r1, err := reference.ParseNamed(s1)
if err != nil {
return nil, nil, err
}
r2, err := reference.ParseNamed(s2)
if err != nil {
return nil, nil, err
}
return r1, r2, nil
}
func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
if err != nil {
return false
}
// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
return false
}
return signature.String() == intended.String()
}
func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
if err != nil {
return false
}
return signature.Name() == intended.Name()
}

View file

@ -0,0 +1,283 @@
package signature
import (
"fmt"
"testing"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
fullRHELRef = "registry.access.redhat.com/rhel7/rhel:7.2.3"
untaggedRHELRef = "registry.access.redhat.com/rhel7/rhel"
)
func TestParseImageAndDockerReference(t *testing.T) {
const (
ok1 = "busybox"
ok2 = fullRHELRef
bad1 = "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES"
bad2 = ""
)
// Success
ref, err := reference.ParseNamed(ok1)
require.NoError(t, err)
r1, r2, err := parseImageAndDockerReference(refImageMock{ref}, ok2)
require.NoError(t, err)
assert.Equal(t, ok1, r1.String())
assert.Equal(t, ok2, r2.String())
// Unidentified images are rejected.
_, _, err = parseImageAndDockerReference(refImageMock{nil}, ok2)
require.Error(t, err)
assert.IsType(t, PolicyRequirementError(""), err)
// Failures
for _, refs := range [][]string{
{bad1, ok2},
{ok1, bad2},
{bad1, bad2},
} {
ref, err := reference.ParseNamed(refs[0])
if err == nil {
_, _, err := parseImageAndDockerReference(refImageMock{ref}, refs[1])
assert.Error(t, err)
}
}
}
// refImageMock is a mock of types.UnparsedImage which returns itself in Reference().DockerReference.
type refImageMock struct{ reference.Named }
func (ref refImageMock) Reference() types.ImageReference {
return refImageReferenceMock{ref.Named}
}
func (ref refImageMock) Close() {
panic("unexpected call to a mock function")
}
func (ref refImageMock) Manifest() ([]byte, string, error) {
panic("unexpected call to a mock function")
}
func (ref refImageMock) Signatures() ([][]byte, error) {
panic("unexpected call to a mock function")
}
// refImageReferenceMock is a mock of types.ImageReference which returns itself in DockerReference.
type refImageReferenceMock struct{ reference.Named }
func (ref refImageReferenceMock) Transport() types.ImageTransport {
// We use this in error messages, so sadly we must return something. But right now we do so only when DockerReference is nil, so restrict to that.
if ref.Named == nil {
return nameImageTransportMock("== Transport mock")
}
panic("unexpected call to a mock function")
}
func (ref refImageReferenceMock) StringWithinTransport() string {
// We use this in error messages, so sadly we must return something. But right now we do so only when DockerReference is nil, so restrict to that.
if ref.Named == nil {
return "== StringWithinTransport for an image with no Docker support"
}
panic("unexpected call to a mock function")
}
func (ref refImageReferenceMock) DockerReference() reference.Named {
return ref.Named
}
func (ref refImageReferenceMock) PolicyConfigurationIdentity() string {
panic("unexpected call to a mock function")
}
func (ref refImageReferenceMock) PolicyConfigurationNamespaces() []string {
panic("unexpected call to a mock function")
}
func (ref refImageReferenceMock) NewImage(ctx *types.SystemContext) (types.Image, error) {
panic("unexpected call to a mock function")
}
func (ref refImageReferenceMock) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
panic("unexpected call to a mock function")
}
func (ref refImageReferenceMock) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
panic("unexpected call to a mock function")
}
func (ref refImageReferenceMock) DeleteImage(ctx *types.SystemContext) error {
panic("unexpected call to a mock function")
}
// nameImageTransportMock is a mock of types.ImageTransport which returns itself in Name.
type nameImageTransportMock string
func (name nameImageTransportMock) Name() string {
return string(name)
}
func (name nameImageTransportMock) ParseReference(reference string) (types.ImageReference, error) {
panic("unexpected call to a mock function")
}
func (name nameImageTransportMock) ValidatePolicyConfigurationScope(scope string) error {
panic("unexpected call to a mock function")
}
type prmTableTest struct {
imageRef, sigRef string
result bool
}
// Test cases for exact reference match
var prmExactMatchTestTable = []prmTableTest{
// Success, simple matches
{"busybox:latest", "busybox:latest", true},
{fullRHELRef, fullRHELRef, true},
// Non-canonical reference format is canonicalized
{"library/busybox:latest", "busybox:latest", true},
{"busybox:latest", "library/busybox:latest", true},
{"docker.io/library/busybox:latest", "busybox:latest", true},
{"busybox:latest", "docker.io/library/busybox:latest", true},
// Mismatch
{"busybox:latest", "busybox:notlatest", false},
{"busybox:latest", "notbusybox:latest", false},
{"busybox:latest", "hostname/library/busybox:notlatest", false},
{"hostname/library/busybox:latest", "busybox:notlatest", false},
{"busybox:latest", fullRHELRef, false},
// Missing tags
{"busybox", "busybox:latest", false},
{"busybox:latest", "busybox", false},
{"busybox", "busybox", false},
// Invalid format
{"UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", "busybox:latest", false},
{"busybox:latest", "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", false},
{"", "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", false},
// Even if they are exactly equal, invalid values are rejected.
{"INVALID", "INVALID", false},
}
// Test cases for repository-only reference match
var prmRepositoryMatchTestTable = []prmTableTest{
// Success, simple matches
{"busybox:latest", "busybox:latest", true},
{fullRHELRef, fullRHELRef, true},
// Non-canonical reference format is canonicalized
{"library/busybox:latest", "busybox:latest", true},
{"busybox:latest", "library/busybox:latest", true},
{"docker.io/library/busybox:latest", "busybox:latest", true},
{"busybox:latest", "docker.io/library/busybox:latest", true},
// The same as above, but with mismatching tags
{"busybox:latest", "busybox:notlatest", true},
{fullRHELRef + "tagsuffix", fullRHELRef, true},
{"library/busybox:latest", "busybox:notlatest", true},
{"busybox:latest", "library/busybox:notlatest", true},
{"docker.io/library/busybox:notlatest", "busybox:latest", true},
{"busybox:notlatest", "docker.io/library/busybox:latest", true},
// The same as above, but with defaulted tags (should not actually happen)
{"busybox", "busybox:notlatest", true},
{fullRHELRef, untaggedRHELRef, true},
{"library/busybox", "busybox", true},
{"busybox", "library/busybox", true},
{"docker.io/library/busybox", "busybox", true},
{"busybox", "docker.io/library/busybox", true},
// Mismatch
{"busybox:latest", "notbusybox:latest", false},
{"hostname/library/busybox:latest", "busybox:notlatest", false},
{"busybox:latest", fullRHELRef, false},
// Invalid format
{"UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", "busybox:latest", false},
{"busybox:latest", "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", false},
{"", "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", false},
// Even if they are exactly equal, invalid values are rejected.
{"INVALID", "INVALID", false},
}
func TestPRMMatchExactMatchesDockerReference(t *testing.T) {
prm := NewPRMMatchExact()
for _, test := range prmExactMatchTestTable {
// This assumes that all ways to obtain a reference.Named perform equivalent validation,
// and therefore values refused by reference.ParseNamed can not happen in practice.
imageRef, err := reference.ParseNamed(test.imageRef)
if err != nil {
continue
}
res := prm.matchesDockerReference(refImageMock{imageRef}, test.sigRef)
assert.Equal(t, test.result, res, fmt.Sprintf("%s vs. %s", test.imageRef, test.sigRef))
}
// Even if they are signed with an empty string as a reference, unidentified images are rejected.
res := prm.matchesDockerReference(refImageMock{nil}, "")
assert.False(t, res, `unidentified vs. ""`)
}
func TestPRMMatchRepositoryMatchesDockerReference(t *testing.T) {
prm := NewPRMMatchRepository()
for _, test := range prmRepositoryMatchTestTable {
// This assumes that all ways to obtain a reference.Named perform equivalent validation,
// and therefore values refused by reference.ParseNamed can not happen in practice.
imageRef, err := reference.ParseNamed(test.imageRef)
if err != nil {
continue
}
res := prm.matchesDockerReference(refImageMock{imageRef}, test.sigRef)
assert.Equal(t, test.result, res, fmt.Sprintf("%s vs. %s", test.imageRef, test.sigRef))
}
// Even if they are signed with an empty string as a reference, unidentified images are rejected.
res := prm.matchesDockerReference(refImageMock{nil}, "")
assert.False(t, res, `unidentified vs. ""`)
}
func TestParseDockerReferences(t *testing.T) {
const (
ok1 = "busybox"
ok2 = fullRHELRef
bad1 = "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES"
bad2 = ""
)
// Success
r1, r2, err := parseDockerReferences(ok1, ok2)
require.NoError(t, err)
assert.Equal(t, ok1, r1.String())
assert.Equal(t, ok2, r2.String())
// Failures
for _, refs := range [][]string{
{bad1, ok2},
{ok1, bad2},
{bad1, bad2},
} {
_, _, err := parseDockerReferences(refs[0], refs[1])
assert.Error(t, err)
}
}
// forbiddenImageMock is a mock of types.UnparsedImage which ensures Reference is not called
type forbiddenImageMock struct{}
func (ref forbiddenImageMock) Reference() types.ImageReference {
panic("unexpected call to a mock function")
}
func (ref forbiddenImageMock) Close() {
panic("unexpected call to a mock function")
}
func (ref forbiddenImageMock) Manifest() ([]byte, string, error) {
panic("unexpected call to a mock function")
}
func (ref forbiddenImageMock) Signatures() ([][]byte, error) {
panic("unexpected call to a mock function")
}
func TestPRMExactReferenceMatchesDockerReference(t *testing.T) {
for _, test := range prmExactMatchTestTable {
// Do not use NewPRMExactReference; we also want to test the case with an invalid DockerReference,
// even though NewPRMExactReference should never let it happen.
prm := prmExactReference{DockerReference: test.imageRef}
res := prm.matchesDockerReference(forbiddenImageMock{}, test.sigRef)
assert.Equal(t, test.result, res, fmt.Sprintf("%s vs. %s", test.imageRef, test.sigRef))
}
}
func TestPRMExactRepositoryMatchesDockerReference(t *testing.T) {
for _, test := range prmRepositoryMatchTestTable {
// Do not use NewPRMExactRepository; we also want to test the case with an invalid DockerReference,
// even though NewPRMExactRepository should never let it happen.
prm := prmExactRepository{DockerRepository: test.imageRef}
res := prm.matchesDockerReference(forbiddenImageMock{}, test.sigRef)
assert.Equal(t, test.result, res, fmt.Sprintf("%s vs. %s", test.imageRef, test.sigRef))
}
}

View file

@ -0,0 +1,145 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.
// This defines types used to represent a signature verification policy in memory.
// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
// built using the constructor functions provided in policy_config.go.
package signature
// NOTE: Keep this in sync with docs/policy.json.md!
// Policy defines requirements for considering a signature, or an image, valid.
type Policy struct {
// Default applies to any image which does not have a matching policy in Transports.
// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
// if the image matches none of the scopes.
Default PolicyRequirements `json:"default"`
Transports map[string]PolicyTransportScopes `json:"transports"`
}
// PolicyTransportScopes defines policies for images for a specific transport,
// for various scopes (the map keys).
// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
// there is one scope precisely matching a single image, and namespace scopes as prefixes
// of the single-image scope (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]]).
// The empty scope, if it exists, is considered a parent namespace of all other scopes.
// The most specific scope wins; duplication is prohibited (a hard failure).
type PolicyTransportScopes map[string]PolicyRequirements
// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
// It must not be empty; frequently it will contain only a single element.
type PolicyRequirements []PolicyRequirement
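// Illustrative sketch only (not part of this file's API): building a Policy that rejects
// everything by default but trusts GPG signatures for one repository, using the public
// constructors provided in policy_config.go. The key path and repository names are placeholders.
//
//	repoReq, err := NewPRSignedByKeyPath(SBKeyTypeGPGKeys, "/path/to/public-key.gpg", NewPRMMatchRepository())
//	if err != nil {
//		// handle the error
//	}
//	policy := &Policy{
//		Default: PolicyRequirements{NewPRReject()},
//		Transports: map[string]PolicyTransportScopes{
//			"docker": {
//				// The most specific matching scope wins: for registry.example.com/team/app:1.0 the
//				// scopes "registry.example.com/team/app:1.0", "registry.example.com/team/app",
//				// "registry.example.com/team", "registry.example.com" and "" are consulted in that order.
//				"registry.example.com/team/app": {repoReq},
//			},
//		},
//	}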
// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
// The type is public, but its definition is private.
// prCommon is the common type field in a JSON encoding of PolicyRequirement.
type prCommon struct {
Type prTypeIdentifier `json:"type"`
}
// prTypeIdentifier is a string designating the kind of a PolicyRequirement.
type prTypeIdentifier string
const (
prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
prTypeReject prTypeIdentifier = "reject"
prTypeSignedBy prTypeIdentifier = "signedBy"
prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer"
)
// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
// every image is allowed to run.
// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
// FIXME? Better name?
type prInsecureAcceptAnything struct {
prCommon
}
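// For illustration: because the requirements in a PolicyRequirements list are ANDed,
// this is normally used as the sole element of the list, e.g. as a permissive default:
//
//	permissive := PolicyRequirements{NewPRInsecureAcceptAnything()}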
// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
type prReject struct {
prCommon
}
// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
type prSignedBy struct {
prCommon
// KeyType specifies what kind of key reference KeyPath/KeyData is.
// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X509Certificates” | “signedByX509CAs”
// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
KeyType sbKeyType `json:"keyType"`
// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
KeyPath string `json:"keyPath,omitempty"`
// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
KeyData []byte `json:"keyData,omitempty"`
// SignedIdentity specifies what image identity the signature must be claiming about the image.
// Defaults to "matchExact" if not specified.
SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
}
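// A plausible JSON form of prSignedBy, derived from the field tags above (an illustrative
// sketch only; see docs/policy.json.md for the authoritative format; the key path is a placeholder):
//
//	{
//		"type": "signedBy",
//		"keyType": "GPGKeys",
//		"keyPath": "/path/to/public-key.gpg",
//		"signedIdentity": {"type": "matchRepository"}
//	}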
// sbKeyType are the allowed values for prSignedBy.KeyType
type sbKeyType string
const (
// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
// FIXME: PEM, DER?
SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
// FIXME: PEM, DER?
SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
)
// prSignedBaseLayer is a PolicyRequirement with type = prTypeSignedBaseLayer: the image has a specified, correctly signed, base image.
type prSignedBaseLayer struct {
prCommon
// BaseLayerIdentity specifies the base image to look for. "matchExact" is rejected, "matchRepository" is unlikely to be useful.
BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
}
// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
// The type is public, but its implementation is private.
// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
type prmCommon struct {
Type prmTypeIdentifier `json:"type"`
}
// prmTypeIdentifier is a string designating the kind of a PolicyReferenceMatch.
type prmTypeIdentifier string
const (
prmTypeMatchExact prmTypeIdentifier = "matchExact"
prmTypeMatchRepository prmTypeIdentifier = "matchRepository"
prmTypeExactReference prmTypeIdentifier = "exactReference"
prmTypeExactRepository prmTypeIdentifier = "exactRepository"
)
// prmMatchExact is a PolicyReferenceMatch with type = prmTypeMatchExact: the two references must match exactly.
type prmMatchExact struct {
prmCommon
}
// prmMatchRepository is a PolicyReferenceMatch with type = prmTypeMatchRepository: the two references must use the same repository, but may differ in the tag.
type prmMatchRepository struct {
prmCommon
}
// prmExactReference is a PolicyReferenceMatch with type = prmTypeExactReference: matches a specified reference exactly.
type prmExactReference struct {
prmCommon
DockerReference string `json:"dockerReference"`
}
// prmExactRepository is a PolicyReferenceMatch with type = prmTypeExactRepository: matches a specified repository, with any tag.
type prmExactRepository struct {
prmCommon
DockerRepository string `json:"dockerRepository"`
}
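// Plausible JSON forms of the PolicyReferenceMatch variants, derived from the type
// identifiers and field tags above (an illustrative sketch only; the references are placeholders):
//
//	{"type": "matchExact"}
//	{"type": "matchRepository"}
//	{"type": "exactReference", "dockerReference": "registry.example.com/team/app:1.0"}
//	{"type": "exactRepository", "dockerRepository": "registry.example.com/team/app"}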

View file

@ -0,0 +1,191 @@
// Note: Consider the API unstable until the code supports at least three different image formats or transports.
package signature
import (
"encoding/json"
"errors"
"fmt"
"time"
"github.com/containers/image/version"
)
const (
signatureType = "atomic container signature"
)
// InvalidSignatureError is returned when parsing an invalid signature.
type InvalidSignatureError struct {
msg string
}
func (err InvalidSignatureError) Error() string {
return err.msg
}
// Signature is the parsed content of a signature.
type Signature struct {
DockerManifestDigest string // FIXME: more precise type?
DockerReference string // FIXME: more precise type?
}
// privateSignature wraps Signature to add some methods which we don't want to make public.
type privateSignature struct {
Signature
}
// Compile-time check that privateSignature implements json.Marshaler
var _ json.Marshaler = (*privateSignature)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s privateSignature) MarshalJSON() ([]byte, error) {
return s.marshalJSONWithVariables(time.Now().UTC().Unix(), "atomic "+version.Version)
}
// marshalJSONWithVariables implements MarshalJSON with caller-chosen values of the variable items, to help testing.
func (s privateSignature) marshalJSONWithVariables(timestamp int64, creatorID string) ([]byte, error) {
if s.DockerManifestDigest == "" || s.DockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]interface{}{
"type": signatureType,
"image": map[string]string{"docker-manifest-digest": s.DockerManifestDigest},
"identity": map[string]string{"docker-reference": s.DockerReference},
}
optional := map[string]interface{}{
"creator": creatorID,
"timestamp": timestamp,
}
signature := map[string]interface{}{
"critical": critical,
"optional": optional,
}
return json.Marshal(signature)
}
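// For illustration, the serialized signature has this shape (compare TestMarshalJSON later
// in this commit; the digest, reference, creator and timestamp values are placeholders):
//
//	{
//		"critical": {
//			"identity": {"docker-reference": "example.com/repo:tag"},
//			"image": {"docker-manifest-digest": "sha256:<digest>"},
//			"type": "atomic container signature"
//		},
//		"optional": {"creator": "atomic <version>", "timestamp": 1480000000}
//	}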
// Compile-time check that privateSignature implements json.Unmarshaler
var _ json.Unmarshaler = (*privateSignature)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface
func (s *privateSignature) UnmarshalJSON(data []byte) error {
err := s.strictUnmarshalJSON(data)
if err != nil {
if _, ok := err.(jsonFormatError); ok {
err = InvalidSignatureError{msg: err.Error()}
}
}
return err
}
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError conversion in a single place, the caller.
func (s *privateSignature) strictUnmarshalJSON(data []byte) error {
var untyped interface{}
if err := json.Unmarshal(data, &untyped); err != nil {
return err
}
o, ok := untyped.(map[string]interface{})
if !ok {
return InvalidSignatureError{msg: "Invalid signature format"}
}
if err := validateExactMapKeys(o, "critical", "optional"); err != nil {
return err
}
c, err := mapField(o, "critical")
if err != nil {
return err
}
if err := validateExactMapKeys(c, "type", "image", "identity"); err != nil {
return err
}
optional, err := mapField(o, "optional")
if err != nil {
return err
}
_ = optional // We don't use anything from here for now.
t, err := stringField(c, "type")
if err != nil {
return err
}
if t != signatureType {
return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
}
image, err := mapField(c, "image")
if err != nil {
return err
}
if err := validateExactMapKeys(image, "docker-manifest-digest"); err != nil {
return err
}
digest, err := stringField(image, "docker-manifest-digest")
if err != nil {
return err
}
s.DockerManifestDigest = digest
identity, err := mapField(c, "identity")
if err != nil {
return err
}
if err := validateExactMapKeys(identity, "docker-reference"); err != nil {
return err
}
reference, err := stringField(identity, "docker-reference")
if err != nil {
return err
}
s.DockerReference = reference
return nil
}
// sign formats the signature and returns a blob signed using mech and keyIdentity.
func (s privateSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
json, err := json.Marshal(s)
if err != nil {
return nil, err
}
return mech.Sign(json, keyIdentity)
}
// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
// because all of the functions have the same type, so there is a risk of exchanging the functions;
// named members of this struct are more explicit.
type signatureAcceptanceRules struct {
validateKeyIdentity func(string) error
validateSignedDockerReference func(string) error
validateSignedDockerManifestDigest func(string) error
}
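// A minimal sketch of supplying rules (for illustration only; these callbacks accept
// everything unconditionally, whereas real callers reject mismatches, as the tests later in this commit do):
//
//	rules := signatureAcceptanceRules{
//		validateKeyIdentity:                func(keyIdentity string) error { return nil },
//		validateSignedDockerReference:      func(ref string) error { return nil },
//		validateSignedDockerManifestDigest: func(digest string) error { return nil },
//	}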
// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
// match expected values, both as specified by rules, and returns the parsed signature.
func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
signed, keyIdentity, err := mech.Verify(unverifiedSignature)
if err != nil {
return nil, err
}
if err := rules.validateKeyIdentity(keyIdentity); err != nil {
return nil, err
}
var unmatchedSignature privateSignature
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
return nil, InvalidSignatureError{msg: err.Error()}
}
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.DockerManifestDigest); err != nil {
return nil, err
}
if err := rules.validateSignedDockerReference(unmatchedSignature.DockerReference); err != nil {
return nil, err
}
signature := unmatchedSignature.Signature // Policy OK.
return &signature, nil
}

View file

@ -0,0 +1,288 @@
package signature
import (
"encoding/json"
"fmt"
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestInvalidSignatureError(t *testing.T) {
// A stupid test just to keep code coverage
s := "test"
err := InvalidSignatureError{msg: s}
assert.Equal(t, s, err.Error())
}
func TestMarshalJSON(t *testing.T) {
// Empty string values
s := privateSignature{Signature{DockerManifestDigest: "", DockerReference: "_"}}
_, err := s.MarshalJSON()
assert.Error(t, err)
s = privateSignature{Signature{DockerManifestDigest: "_", DockerReference: ""}}
_, err = s.MarshalJSON()
assert.Error(t, err)
// Success
s = privateSignature{Signature{DockerManifestDigest: "digest!@#", DockerReference: "reference#@!"}}
marshaled, err := s.marshalJSONWithVariables(0, "CREATOR")
require.NoError(t, err)
assert.Equal(t, []byte("{\"critical\":{\"identity\":{\"docker-reference\":\"reference#@!\"},\"image\":{\"docker-manifest-digest\":\"digest!@#\"},\"type\":\"atomic container signature\"},\"optional\":{\"creator\":\"CREATOR\",\"timestamp\":0}}"),
marshaled)
// We can't test MarshalJSON directly because the timestamp will keep changing, so just test that
// it doesn't fail. And call it through the JSON package for good measure.
_, err = json.Marshal(s)
assert.NoError(t, err)
}
// Return the result of modifying validJSON with fn and unmarshaling it into *sig
func tryUnmarshalModifiedSignature(t *testing.T, sig *privateSignature, validJSON []byte, modifyFn func(mSI)) error {
var tmp mSI
err := json.Unmarshal(validJSON, &tmp)
require.NoError(t, err)
modifyFn(tmp)
testJSON, err := json.Marshal(tmp)
require.NoError(t, err)
*sig = privateSignature{}
return json.Unmarshal(testJSON, sig)
}
func TestUnmarshalJSON(t *testing.T) {
var s privateSignature
// Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our
// UnmarshalJSON implementation; so test that first, then test our error handling for completeness.
err := json.Unmarshal([]byte("&"), &s)
assert.Error(t, err)
err = s.UnmarshalJSON([]byte("&"))
assert.Error(t, err)
// Not an object
err = json.Unmarshal([]byte("1"), &s)
assert.Error(t, err)
// Start with a valid JSON.
validSig := privateSignature{
Signature{
DockerManifestDigest: "digest!@#",
DockerReference: "reference#@!",
},
}
validJSON, err := validSig.MarshalJSON()
require.NoError(t, err)
// Success
s = privateSignature{}
err = json.Unmarshal(validJSON, &s)
require.NoError(t, err)
assert.Equal(t, validSig, s)
// Various ways to corrupt the JSON
breakFns := []func(mSI){
// A top-level field is missing
func(v mSI) { delete(v, "critical") },
func(v mSI) { delete(v, "optional") },
// Extra top-level sub-object
func(v mSI) { v["unexpected"] = 1 },
// "critical" not an object
func(v mSI) { v["critical"] = 1 },
// "optional" not an object
func(v mSI) { v["optional"] = 1 },
// A field of "critical" is missing
func(v mSI) { delete(x(v, "critical"), "type") },
func(v mSI) { delete(x(v, "critical"), "image") },
func(v mSI) { delete(x(v, "critical"), "identity") },
// Extra field of "critical"
func(v mSI) { x(v, "critical")["unexpected"] = 1 },
// Invalid "type"
func(v mSI) { x(v, "critical")["type"] = 1 },
func(v mSI) { x(v, "critical")["type"] = "unexpected" },
// Invalid "image" object
func(v mSI) { x(v, "critical")["image"] = 1 },
func(v mSI) { delete(x(v, "critical", "image"), "docker-manifest-digest") },
func(v mSI) { x(v, "critical", "image")["unexpected"] = 1 },
// Invalid "docker-manifest-digest"
func(v mSI) { x(v, "critical", "image")["docker-manifest-digest"] = 1 },
// Invalid "identity" object
func(v mSI) { x(v, "critical")["identity"] = 1 },
func(v mSI) { delete(x(v, "critical", "identity"), "docker-reference") },
func(v mSI) { x(v, "critical", "identity")["unexpected"] = 1 },
// Invalid "docker-reference"
func(v mSI) { x(v, "critical", "identity")["docker-reference"] = 1 },
}
for _, fn := range breakFns {
err = tryUnmarshalModifiedSignature(t, &s, validJSON, fn)
assert.Error(t, err)
}
// Modifications to "optional" are allowed and ignored
allowedModificationFns := []func(mSI){
// Add an optional field
func(v mSI) { x(v, "optional")["unexpected"] = 1 },
// Delete an optional field
func(v mSI) { delete(x(v, "optional"), "creator") },
}
for _, fn := range allowedModificationFns {
err = tryUnmarshalModifiedSignature(t, &s, validJSON, fn)
require.NoError(t, err)
assert.Equal(t, validSig, s)
}
}
func TestSign(t *testing.T) {
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
require.NoError(t, err)
sig := privateSignature{
Signature{
DockerManifestDigest: "digest!@#",
DockerReference: "reference#@!",
},
}
// Successful signing
signature, err := sig.sign(mech, TestKeyFingerprint)
require.NoError(t, err)
verified, err := verifyAndExtractSignature(mech, signature, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
if keyIdentity != TestKeyFingerprint {
return fmt.Errorf("Unexpected keyIdentity")
}
return nil
},
validateSignedDockerReference: func(signedDockerReference string) error {
if signedDockerReference != sig.DockerReference {
return fmt.Errorf("Unexpected signedDockerReference")
}
return nil
},
validateSignedDockerManifestDigest: func(signedDockerManifestDigest string) error {
if signedDockerManifestDigest != sig.DockerManifestDigest {
return fmt.Errorf("Unexpected signedDockerManifestDigest")
}
return nil
},
})
require.NoError(t, err)
assert.Equal(t, sig.Signature, *verified)
// Error creating blob to sign
_, err = privateSignature{}.sign(mech, TestKeyFingerprint)
assert.Error(t, err)
// Error signing
_, err = sig.sign(mech, "this fingerprint doesn't exist")
assert.Error(t, err)
}
func TestVerifyAndExtractSignature(t *testing.T) {
mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory)
require.NoError(t, err)
type triple struct{ keyIdentity, signedDockerReference, signedDockerManifestDigest string }
var wanted, recorded triple
// recordingRules is a plausible signatureAcceptanceRules implementation, but equally
// importantly it records that we are passing the correct values to the rule callbacks.
recordingRules := signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
recorded.keyIdentity = keyIdentity
if keyIdentity != wanted.keyIdentity {
return fmt.Errorf("keyIdentity mismatch")
}
return nil
},
validateSignedDockerReference: func(signedDockerReference string) error {
recorded.signedDockerReference = signedDockerReference
if signedDockerReference != wanted.signedDockerReference {
return fmt.Errorf("signedDockerReference mismatch")
}
return nil
},
validateSignedDockerManifestDigest: func(signedDockerManifestDigest string) error {
recorded.signedDockerManifestDigest = signedDockerManifestDigest
if signedDockerManifestDigest != wanted.signedDockerManifestDigest {
return fmt.Errorf("signedDockerManifestDigest mismatch")
}
return nil
},
}
signature, err := ioutil.ReadFile("./fixtures/image.signature")
require.NoError(t, err)
signatureData := triple{
keyIdentity: TestKeyFingerprint,
signedDockerReference: TestImageSignatureReference,
signedDockerManifestDigest: TestImageManifestDigest,
}
// Successful verification
wanted = signatureData
recorded = triple{}
sig, err := verifyAndExtractSignature(mech, signature, recordingRules)
require.NoError(t, err)
assert.Equal(t, TestImageSignatureReference, sig.DockerReference)
assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest)
assert.Equal(t, signatureData, recorded)
// For extra paranoia, test that we return a nil signature object on error.
// Completely invalid signature.
recorded = triple{}
sig, err = verifyAndExtractSignature(mech, []byte{}, recordingRules)
assert.Error(t, err)
assert.Nil(t, sig)
assert.Equal(t, triple{}, recorded)
recorded = triple{}
sig, err = verifyAndExtractSignature(mech, []byte("invalid signature"), recordingRules)
assert.Error(t, err)
assert.Nil(t, sig)
assert.Equal(t, triple{}, recorded)
// Valid signature of non-JSON: asked for keyIdentity, only
invalidBlobSignature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature")
require.NoError(t, err)
recorded = triple{}
sig, err = verifyAndExtractSignature(mech, invalidBlobSignature, recordingRules)
assert.Error(t, err)
assert.Nil(t, sig)
assert.Equal(t, triple{keyIdentity: signatureData.keyIdentity}, recorded)
// Valid signature with a wrong key: asked for keyIdentity, only
wanted = signatureData
wanted.keyIdentity = "unexpected fingerprint"
recorded = triple{}
sig, err = verifyAndExtractSignature(mech, signature, recordingRules)
assert.Error(t, err)
assert.Nil(t, sig)
assert.Equal(t, triple{keyIdentity: signatureData.keyIdentity}, recorded)
// Valid signature with a wrong manifest digest: asked for keyIdentity and signedDockerManifestDigest
wanted = signatureData
wanted.signedDockerManifestDigest = "invalid digest"
recorded = triple{}
sig, err = verifyAndExtractSignature(mech, signature, recordingRules)
assert.Error(t, err)
assert.Nil(t, sig)
assert.Equal(t, triple{
keyIdentity: signatureData.keyIdentity,
signedDockerManifestDigest: signatureData.signedDockerManifestDigest,
}, recorded)
// Valid signature with a wrong image reference
wanted = signatureData
wanted.signedDockerReference = "unexpected docker reference"
recorded = triple{}
sig, err = verifyAndExtractSignature(mech, signature, recordingRules)
assert.Error(t, err)
assert.Nil(t, sig)
assert.Equal(t, signatureData, recorded)
}