diff --git a/api/v3/clairpb/clair.pb.go b/api/v3/clairpb/clair.pb.go index 9796a51f..c537ab23 100644 --- a/api/v3/clairpb/clair.pb.go +++ b/api/v3/clairpb/clair.pb.go @@ -297,8 +297,10 @@ func (m *GetAncestryResponse) GetStatus() *ClairStatus { } type GetAncestryResponse_AncestryLayer struct { - Layer *Layer `protobuf:"bytes,1,opt,name=layer" json:"layer,omitempty"` - DetectedFeatures []*Feature `protobuf:"bytes,2,rep,name=detectedFeatures" json:"detectedFeatures,omitempty"` + // The layer's information. + Layer *Layer `protobuf:"bytes,1,opt,name=layer" json:"layer,omitempty"` + // The features detected in this layer. + DetectedFeatures []*Feature `protobuf:"bytes,2,rep,name=detected_features,json=detectedFeatures" json:"detected_features,omitempty"` } func (m *GetAncestryResponse_AncestryLayer) Reset() { *m = GetAncestryResponse_AncestryLayer{} } @@ -373,7 +375,7 @@ type PostAncestryRequest struct { // The format of the image being uploaded. Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` // The layers to be scanned for this Ancestry, ordered in the way that i th - // layer is the i + 1 th layer's parent. + // layer is the parent of i + 1 th layer. Layers []*PostAncestryRequest_PostLayer `protobuf:"bytes,3,rep,name=layers" json:"layers,omitempty"` } @@ -1023,85 +1025,85 @@ var _StatusService_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("api/v3/clairpb/clair.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1269 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4b, 0x6f, 0x1b, 0xd5, - 0x17, 0xd7, 0xd8, 0x71, 0x6c, 0x1f, 0xdb, 0x49, 0x7a, 0xed, 0xa6, 0x93, 0x49, 0x1f, 0xc9, 0xfc, - 0xff, 0x55, 0x4b, 0x8b, 0x6c, 0xd5, 0x65, 0x51, 0xca, 0x02, 0xa5, 0x8f, 0x84, 0x4a, 0xa5, 0xaa, - 0xa6, 0x90, 0x05, 0x08, 0x59, 0xd7, 0x33, 0xc7, 0xc9, 0x28, 0xe3, 0x19, 0x33, 0xf7, 0xda, 0xa9, - 0x55, 0x75, 0xc3, 0x96, 0x15, 0xb0, 0xe0, 0x33, 0xb0, 0xe1, 0x1b, 0xb0, 0x62, 0x8b, 0x84, 0x04, - 0x5b, 0xd8, 0xb1, 0xe0, 0x0b, 0xb0, 0x47, 0xf7, 0x31, 0x93, 0x19, 0xc7, 0x79, 0xb4, 0xac, 0x3c, - 0xe7, 0xfd, 0xfa, 0xdd, 0x73, 0x12, 0xb0, 0xe8, 0xc8, 0xef, 0x4c, 0xee, 0x76, 0xdc, 0x80, 0xfa, - 0xf1, 0xa8, 0xaf, 0x7e, 0xdb, 0xa3, 0x38, 0xe2, 0x11, 0xa9, 0xbb, 0x51, 0x8c, 0x11, 0x6b, 0x4b, - 0x9e, 0x75, 0x6d, 0x2f, 0x8a, 0xf6, 0x02, 0xec, 0x48, 0x59, 0x7f, 0x3c, 0xe8, 0x70, 0x7f, 0x88, - 0x8c, 0xd3, 0xe1, 0x48, 0xa9, 0x5b, 0x97, 0xb5, 0x82, 0xf0, 0x48, 0xc3, 0x30, 0xe2, 0x94, 0xfb, - 0x51, 0xc8, 0x94, 0xd4, 0xfe, 0xbe, 0x00, 0x8d, 0xdd, 0x71, 0x10, 0x62, 0x4c, 0xfb, 0x7e, 0xe0, - 0xf3, 0x29, 0x21, 0xb0, 0x10, 0xd2, 0x21, 0x9a, 0xc6, 0x86, 0x71, 0xb3, 0xea, 0xc8, 0x6f, 0x72, - 0x1d, 0x96, 0xc4, 0x2f, 0x1b, 0x51, 0x17, 0x7b, 0x52, 0x5a, 0x90, 0xd2, 0x46, 0xca, 0x7d, 0x26, - 0xd4, 0x36, 0xa0, 0xe6, 0x21, 0x73, 0x63, 0x7f, 0x24, 0x42, 0x98, 0x45, 0xa9, 0x93, 0x65, 0x09, - 0xe7, 0x81, 0x1f, 0x1e, 0x98, 0x0b, 0xca, 0xb9, 0xf8, 0x26, 0x16, 0x54, 0x18, 0x4e, 0x30, 0xf6, - 0xf9, 0xd4, 0x2c, 0x49, 0x7e, 0x4a, 0x0b, 0xd9, 0x10, 0x39, 0xf5, 0x28, 0xa7, 0xe6, 0xa2, 0x92, - 0x25, 0x34, 0x59, 0x83, 0xca, 0xc0, 0x7f, 0x89, 0x5e, 0xaf, 0x3f, 0x35, 0xcb, 0x52, 0x56, 0x96, - 0xf4, 0x83, 0x29, 0x79, 0x00, 0x17, 0xe8, 0x60, 0x80, 0x2e, 0x47, 0xaf, 0x37, 0xc1, 0x98, 0x89, - 0x82, 0xcd, 0xca, 0x46, 0xf1, 0x66, 0xad, 0x7b, 0xb1, 0x9d, 0x6d, 0x5f, 0x7b, 0x1b, 0x29, 0x1f, - 0xc7, 0xe8, 0xac, 0x24, 0xfa, 0xbb, 0x5a, 0xdd, 0xfe, 0xc5, 0x80, 0xb2, 0x96, 0xfe, 0x97, 0x9e, - 0x98, 0x50, 0xd6, 0x19, 0xe8, 0x7e, 0x24, 0xa4, 0x70, 
0xa0, 0x3f, 0x7b, 0x83, 0x28, 0x1e, 0x52, - 0xae, 0xbb, 0xd2, 0xd0, 0xdc, 0x6d, 0xc9, 0x24, 0x8f, 0x61, 0x79, 0x92, 0x19, 0x90, 0x8f, 0xcc, - 0x2c, 0xc9, 0x4a, 0xd6, 0xf3, 0x95, 0xe4, 0xa6, 0xe8, 0xcc, 0xda, 0xd8, 0xeb, 0x50, 0x7a, 0x4a, - 0xa7, 0x18, 0x8b, 0x5a, 0xf6, 0x29, 0xdb, 0x4f, 0x6a, 0x11, 0xdf, 0xf6, 0xd7, 0x06, 0xd4, 0x1e, - 0x0a, 0x2f, 0x2f, 0x38, 0xe5, 0x63, 0x26, 0x92, 0x0e, 0x7c, 0xc6, 0x31, 0x66, 0xa6, 0xb1, 0x51, - 0x14, 0x49, 0x6b, 0x92, 0x5c, 0x86, 0xaa, 0x87, 0x1c, 0x5d, 0x1e, 0xc5, 0xcc, 0x2c, 0x48, 0xd9, - 0x11, 0x83, 0x3c, 0x82, 0x95, 0x80, 0x32, 0xde, 0x1b, 0x8f, 0x3c, 0xca, 0xb1, 0x27, 0xa0, 0x28, - 0xab, 0xae, 0x75, 0xad, 0xb6, 0x82, 0x61, 0x3b, 0xc1, 0x69, 0xfb, 0x93, 0x04, 0xa7, 0xce, 0x92, - 0xb0, 0xf9, 0x54, 0x9a, 0x08, 0xa6, 0xfd, 0x8d, 0x01, 0x64, 0x07, 0xf9, 0x56, 0xe8, 0x22, 0xe3, - 0xf1, 0xd4, 0xc1, 0x2f, 0xc7, 0xc8, 0x38, 0xf9, 0x1f, 0x34, 0xa8, 0x66, 0xf5, 0x32, 0xd3, 0xa8, - 0x27, 0x4c, 0xd9, 0xee, 0x3b, 0xd0, 0x3a, 0xf4, 0xf9, 0x7e, 0x6f, 0xb6, 0x65, 0x62, 0x36, 0x15, - 0xa7, 0x29, 0x64, 0xbb, 0x79, 0x91, 0xf0, 0x2b, 0x4d, 0x06, 0x6a, 0xd8, 0x4c, 0x66, 0x5c, 0x71, - 0xea, 0x82, 0xa9, 0x01, 0xc0, 0xec, 0x5f, 0x8b, 0xd0, 0xcc, 0xe5, 0xc4, 0x46, 0x51, 0xc8, 0x90, - 0x6c, 0x43, 0x25, 0x89, 0x2f, 0xf3, 0xa9, 0x75, 0x6f, 0xe5, 0xc7, 0x32, 0xc7, 0xa8, 0x9d, 0x32, - 0x52, 0x5b, 0x72, 0x07, 0x16, 0x99, 0xec, 0xbd, 0xcc, 0xb4, 0xd6, 0x5d, 0xcb, 0x7b, 0xc9, 0x0c, - 0xc7, 0xd1, 0x8a, 0xd6, 0x6b, 0x68, 0x24, 0x8e, 0xd4, 0x64, 0xdf, 0x81, 0x52, 0x20, 0x3e, 0x74, - 0x22, 0xcd, 0xbc, 0x0b, 0xa9, 0xe3, 0x28, 0x0d, 0xb2, 0x05, 0x2b, 0x6a, 0x6a, 0xe8, 0x25, 0x25, - 0xca, 0x69, 0x9e, 0xfc, 0x3e, 0x66, 0xd5, 0xad, 0x9f, 0x0c, 0xa8, 0x24, 0xf1, 0xe7, 0x3e, 0x90, - 0x1b, 0xb0, 0xcc, 0x5c, 0x1a, 0x86, 0xe8, 0xf5, 0x12, 0x30, 0x2d, 0x48, 0xc0, 0x2c, 0x69, 0xf6, - 0x53, 0x8d, 0xa9, 0xdb, 0x70, 0x21, 0x51, 0x3c, 0xc2, 0x56, 0x49, 0xaa, 0xae, 0x68, 0xc1, 0xa3, - 0x14, 0x62, 0x3b, 0xb0, 0x28, 0x4b, 0x60, 0xe6, 0xa2, 0xcc, 0xb7, 0x73, 0xfe, 0x76, 0xab, 0x0e, - 0x68, 0x73, 0xfb, 0xcf, 0x02, 0x34, 0x9f, 0x47, 0xec, 0xed, 0x60, 0xb6, 0x0a, 0x8b, 0xfa, 0xcd, - 0xaa, 0x47, 0xaf, 0x29, 0xf2, 0x30, 0xcd, 0xae, 0x28, 0xb3, 0xbb, 0x9d, 0xcf, 0x6e, 0x4e, 0x3c, - 0xc9, 0xcb, 0x65, 0x66, 0xfd, 0x6c, 0x40, 0x35, 0xe5, 0xce, 0x7b, 0xaf, 0x82, 0x37, 0xa2, 0x7c, - 0x5f, 0x07, 0x97, 0xdf, 0xc4, 0x81, 0xf2, 0x3e, 0x52, 0xef, 0x28, 0xf6, 0xbd, 0x37, 0x88, 0xdd, - 0xfe, 0x48, 0x99, 0x3e, 0x0e, 0x85, 0x34, 0x71, 0x64, 0xdd, 0x87, 0x7a, 0x56, 0x40, 0x56, 0xa0, - 0x78, 0x80, 0x53, 0x9d, 0x8a, 0xf8, 0x24, 0x2d, 0x28, 0x4d, 0x68, 0x30, 0x4e, 0x96, 0x9f, 0x22, - 0xee, 0x17, 0xee, 0x19, 0xf6, 0x13, 0x68, 0xe5, 0x43, 0xea, 0x17, 0x73, 0x84, 0x74, 0xe3, 0x9c, - 0x48, 0xb7, 0x7f, 0x34, 0x60, 0x75, 0x07, 0xf9, 0xb3, 0x88, 0xfb, 0x03, 0xdf, 0x95, 0xf7, 0x2b, - 0x99, 0xd6, 0x7b, 0xb0, 0x1a, 0x05, 0x5e, 0xee, 0xb9, 0x4f, 0x7b, 0x23, 0xba, 0x97, 0x8c, 0xad, - 0x15, 0x05, 0x5e, 0x6e, 0x33, 0x3e, 0xa7, 0x7b, 0x28, 0xac, 0x42, 0x3c, 0x9c, 0x67, 0xa5, 0xca, - 0x68, 0x85, 0x78, 0x78, 0xdc, 0xaa, 0x05, 0xa5, 0xc0, 0x1f, 0xfa, 0x5c, 0x2e, 0x88, 0x92, 0xa3, - 0x88, 0x14, 0xfa, 0x0b, 0x47, 0xd0, 0xb7, 0xff, 0x28, 0xc0, 0xa5, 0x63, 0x09, 0xeb, 0xfa, 0x77, - 0xa1, 0x1e, 0x66, 0xf8, 0xba, 0x0b, 0xdd, 0x63, 0x30, 0x9e, 0x67, 0xdc, 0xce, 0x31, 0x73, 0x7e, - 0xac, 0xbf, 0x0d, 0xa8, 0x67, 0xc5, 0x73, 0xdf, 0xa4, 0x09, 0x65, 0x37, 0x46, 0xca, 0xd1, 0xd3, - 0x95, 0x26, 0xa4, 0xb8, 0xb4, 0xca, 0x1d, 0x7a, 0xfa, 0x50, 0xa5, 0xb4, 0xb0, 0xf2, 0x30, 0x40, - 0x61, 0xa5, 0xaa, 0x4c, 0x48, 0xf2, 0x3e, 0x14, 0xa3, 0xc0, 0x93, 0x67, 0xbb, 
0xd6, 0xbd, 0x31, - 0x03, 0x38, 0xba, 0x87, 0x69, 0xef, 0x03, 0xd4, 0x40, 0xf0, 0x91, 0x39, 0xc2, 0x46, 0x98, 0x86, - 0x78, 0x28, 0xaf, 0xfa, 0x9b, 0x98, 0x86, 0x78, 0x68, 0xff, 0x56, 0x80, 0xb5, 0x13, 0x55, 0xc8, - 0x26, 0xd4, 0xdd, 0x71, 0x1c, 0x63, 0xc8, 0xb3, 0x40, 0xa8, 0x69, 0x9e, 0x9c, 0xe4, 0x3a, 0x54, - 0x43, 0x7c, 0xc9, 0xb3, 0x23, 0xaf, 0x08, 0xc6, 0x29, 0x63, 0xde, 0x82, 0x46, 0x0e, 0x2e, 0xb2, - 0x13, 0x67, 0x1c, 0xe1, 0xbc, 0x05, 0xf9, 0x1c, 0x80, 0xa6, 0x69, 0xea, 0x23, 0xfe, 0xc1, 0x39, - 0x0b, 0x6f, 0x3f, 0x09, 0x3d, 0x7c, 0x89, 0xde, 0x56, 0x66, 0x0b, 0x39, 0x19, 0x77, 0xd6, 0x87, - 0xd0, 0x9c, 0xa3, 0x22, 0x8a, 0xf1, 0x05, 0x5b, 0x76, 0xa1, 0xe4, 0x28, 0x22, 0x85, 0x46, 0x21, - 0x83, 0xd9, 0xbb, 0x70, 0xe5, 0x63, 0x1a, 0x1f, 0x64, 0x21, 0xb4, 0xc5, 0x1c, 0xa4, 0x5e, 0xf2, - 0xd4, 0xe6, 0xe0, 0xc9, 0xde, 0x80, 0xab, 0x27, 0x19, 0x29, 0xc4, 0xda, 0x04, 0x56, 0x76, 0x90, - 0xeb, 0x07, 0xad, 0x3c, 0xd9, 0xdb, 0x70, 0x21, 0xc3, 0x7b, 0xeb, 0xbd, 0xd0, 0xfd, 0xc7, 0x80, - 0xe5, 0xa4, 0xda, 0x17, 0x18, 0x4f, 0x7c, 0x17, 0xc9, 0x18, 0x6a, 0x99, 0x1b, 0x40, 0x36, 0x4e, - 0x39, 0x0f, 0x32, 0x19, 0x6b, 0xf3, 0xcc, 0x03, 0x62, 0x6f, 0x7e, 0xf5, 0xfb, 0x5f, 0xdf, 0x15, - 0xd6, 0xc9, 0x5a, 0x27, 0x39, 0x02, 0x9d, 0x57, 0xb9, 0x1b, 0xf1, 0x9a, 0x1c, 0x40, 0x3d, 0xbb, - 0xed, 0xc8, 0xe6, 0x99, 0xcb, 0xd7, 0xb2, 0x4f, 0x53, 0xd1, 0x91, 0x5b, 0x32, 0xf2, 0x92, 0x5d, - 0x4d, 0x23, 0xdf, 0x37, 0x6e, 0x75, 0x7f, 0x28, 0x40, 0x33, 0xdb, 0xf2, 0xa4, 0xf6, 0xd7, 0xb0, - 0x3c, 0xb3, 0x38, 0xc8, 0xff, 0xcf, 0xd8, 0x2b, 0x2a, 0x95, 0xeb, 0xe7, 0xda, 0x3e, 0xf6, 0x15, - 0x99, 0xcd, 0x25, 0x72, 0xb1, 0x93, 0xdd, 0x3c, 0xac, 0xf3, 0x4a, 0xf5, 0xe0, 0x5b, 0x03, 0x56, - 0xe7, 0xa3, 0x81, 0xcc, 0xdc, 0xc1, 0x53, 0x81, 0x66, 0xbd, 0x7b, 0x3e, 0xe5, 0x7c, 0x52, 0xb7, - 0xe6, 0x27, 0xd5, 0x0d, 0xa1, 0xa1, 0x50, 0x93, 0x34, 0xe9, 0x0b, 0xa8, 0xa6, 0xe0, 0x23, 0x57, - 0x8f, 0x15, 0x9e, 0x43, 0xaa, 0x75, 0xed, 0x44, 0xb9, 0x8e, 0xbe, 0x2c, 0xa3, 0x57, 0x49, 0xb9, - 0xa3, 0x30, 0xf9, 0xe0, 0x2a, 0x34, 0xdd, 0x68, 0x98, 0x37, 0x1b, 0xf5, 0x3f, 0x2b, 0xeb, 0xff, - 0xe4, 0xfa, 0x8b, 0xf2, 0x0f, 0xe0, 0xbb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x0c, 0x7d, - 0x93, 0xe2, 0x0d, 0x00, 0x00, + // 1268 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4b, 0x6f, 0x1b, 0x55, + 0x14, 0xd6, 0x38, 0x71, 0x6c, 0x1f, 0xdb, 0x49, 0x7a, 0xed, 0xa6, 0x93, 0x49, 0x1f, 0xc9, 0x40, + 0xd5, 0xd2, 0x22, 0x5b, 0x75, 0x59, 0x94, 0xb2, 0x40, 0xe9, 0x23, 0xa1, 0x52, 0xa9, 0xaa, 0x29, + 0x64, 0x01, 0x42, 0xd6, 0xcd, 0xcc, 0x71, 0x32, 0xca, 0x78, 0xc6, 0xcc, 0xbd, 0x4e, 0x6a, 0x55, + 0x65, 0xc1, 0x96, 0x15, 0xb0, 0xe0, 0x37, 0xb0, 0xe1, 0x1f, 0xb0, 0x62, 0xcb, 0x02, 0xc1, 0x16, + 0x76, 0x2c, 0xf8, 0x03, 0xec, 0xd1, 0x7d, 0x4d, 0x66, 0x12, 0x37, 0x49, 0xcb, 0xca, 0x73, 0xde, + 0xaf, 0xef, 0x9e, 0x93, 0x80, 0x43, 0x47, 0x61, 0x77, 0xff, 0x76, 0xd7, 0x8f, 0x68, 0x98, 0x8e, + 0xb6, 0xd5, 0x6f, 0x67, 0x94, 0x26, 0x3c, 0x21, 0x0d, 0x3f, 0x49, 0x31, 0x61, 0x1d, 0xc9, 0x73, + 0xae, 0xec, 0x24, 0xc9, 0x4e, 0x84, 0x5d, 0x29, 0xdb, 0x1e, 0x0f, 0xba, 0x3c, 0x1c, 0x22, 0xe3, + 0x74, 0x38, 0x52, 0xea, 0xce, 0x45, 0xad, 0x20, 0x3c, 0xd2, 0x38, 0x4e, 0x38, 0xe5, 0x61, 0x12, + 0x33, 0x25, 0x75, 0x7f, 0x28, 0x41, 0x73, 0x6b, 0x1c, 0xc5, 0x98, 0xd2, 0xed, 0x30, 0x0a, 0xf9, + 0x84, 0x10, 0x98, 0x8d, 0xe9, 0x10, 0x6d, 0x6b, 0xd5, 0xba, 0x5e, 0xf3, 0xe4, 0x37, 0xb9, 0x0a, + 0xf3, 0xe2, 0x97, 0x8d, 0xa8, 0x8f, 0x7d, 0x29, 0x2d, 0x49, 0x69, 0x33, 0xe3, 0x3e, 0x11, 0x6a, + 0xab, 0x50, 0x0f, 0x90, 
0xf9, 0x69, 0x38, 0x12, 0x21, 0xec, 0x19, 0xa9, 0x93, 0x67, 0x09, 0xe7, + 0x51, 0x18, 0xef, 0xd9, 0xb3, 0xca, 0xb9, 0xf8, 0x26, 0x0e, 0x54, 0x19, 0xee, 0x63, 0x1a, 0xf2, + 0x89, 0x5d, 0x96, 0xfc, 0x8c, 0x16, 0xb2, 0x21, 0x72, 0x1a, 0x50, 0x4e, 0xed, 0x39, 0x25, 0x33, + 0x34, 0x59, 0x86, 0xea, 0x20, 0x7c, 0x8e, 0x41, 0x7f, 0x7b, 0x62, 0x57, 0xa4, 0xac, 0x22, 0xe9, + 0x7b, 0x13, 0x72, 0x0f, 0xce, 0xd1, 0xc1, 0x00, 0x7d, 0x8e, 0x41, 0x7f, 0x1f, 0x53, 0x26, 0x0a, + 0xb6, 0xab, 0xab, 0x33, 0xd7, 0xeb, 0xbd, 0xf3, 0x9d, 0x7c, 0xfb, 0x3a, 0x1b, 0x48, 0xf9, 0x38, + 0x45, 0x6f, 0xd1, 0xe8, 0x6f, 0x69, 0x75, 0xf7, 0x57, 0x0b, 0x2a, 0x5a, 0xfa, 0x7f, 0x7a, 0x62, + 0x43, 0x45, 0x67, 0xa0, 0xfb, 0x61, 0x48, 0xe1, 0x40, 0x7f, 0xf6, 0x07, 0x49, 0x3a, 0xa4, 0x5c, + 0x77, 0xa5, 0xa9, 0xb9, 0x1b, 0x92, 0x49, 0x1e, 0xc2, 0xc2, 0x7e, 0x6e, 0x40, 0x21, 0x32, 0xbb, + 0x2c, 0x2b, 0x59, 0x29, 0x56, 0x52, 0x98, 0xa2, 0x77, 0xd4, 0xc6, 0x5d, 0x81, 0xf2, 0x63, 0x3a, + 0xc1, 0x54, 0xd4, 0xb2, 0x4b, 0xd9, 0xae, 0xa9, 0x45, 0x7c, 0xbb, 0xdf, 0x58, 0x50, 0xbf, 0x2f, + 0xbc, 0x3c, 0xe3, 0x94, 0x8f, 0x99, 0x48, 0x3a, 0x0a, 0x19, 0xc7, 0x94, 0xd9, 0xd6, 0xea, 0x8c, + 0x48, 0x5a, 0x93, 0xe4, 0x22, 0xd4, 0x02, 0xe4, 0xe8, 0xf3, 0x24, 0x65, 0x76, 0x49, 0xca, 0x0e, + 0x19, 0xe4, 0x01, 0x2c, 0x46, 0x94, 0xf1, 0xfe, 0x78, 0x14, 0x50, 0x8e, 0x7d, 0x01, 0x45, 0x59, + 0x75, 0xbd, 0xe7, 0x74, 0x14, 0x0c, 0x3b, 0x06, 0xa7, 0x9d, 0x4f, 0x0c, 0x4e, 0xbd, 0x79, 0x61, + 0xf3, 0xa9, 0x34, 0x11, 0x4c, 0xf7, 0x5b, 0x0b, 0xc8, 0x26, 0xf2, 0xf5, 0xd8, 0x47, 0xc6, 0xd3, + 0x89, 0x87, 0x5f, 0x8e, 0x91, 0x71, 0xf2, 0x16, 0x34, 0xa9, 0x66, 0xf5, 0x73, 0xd3, 0x68, 0x18, + 0xa6, 0x6c, 0xf7, 0x2d, 0x68, 0x1f, 0x84, 0x7c, 0xb7, 0x7f, 0xb4, 0x65, 0x62, 0x36, 0x55, 0xaf, + 0x25, 0x64, 0x5b, 0x45, 0x91, 0xf0, 0x2b, 0x4d, 0x06, 0x6a, 0xd8, 0x4c, 0x66, 0x5c, 0xf5, 0x1a, + 0x82, 0xa9, 0x01, 0xc0, 0xdc, 0xdf, 0x66, 0xa0, 0x55, 0xc8, 0x89, 0x8d, 0x92, 0x98, 0x21, 0xd9, + 0x80, 0xaa, 0x89, 0x2f, 0xf3, 0xa9, 0xf7, 0x6e, 0x14, 0xc7, 0x32, 0xc5, 0xa8, 0x93, 0x31, 0x32, + 0x5b, 0x72, 0x0b, 0xe6, 0x98, 0xec, 0xbd, 0xcc, 0xb4, 0xde, 0x5b, 0x2e, 0x7a, 0xc9, 0x0d, 0xc7, + 0xd3, 0x8a, 0xce, 0x57, 0xd0, 0x34, 0x8e, 0xd4, 0x64, 0xdf, 0x81, 0x72, 0x24, 0x3e, 0x74, 0x22, + 0xad, 0xa2, 0x0b, 0xa9, 0xe3, 0x29, 0x0d, 0xf1, 0x40, 0xd4, 0xd4, 0x30, 0x38, 0xac, 0xbb, 0x74, + 0xe2, 0x03, 0x31, 0xfa, 0xa6, 0x25, 0xce, 0xcf, 0x16, 0x54, 0x4d, 0x02, 0x53, 0x5f, 0xc8, 0x35, + 0x58, 0x60, 0x3e, 0x8d, 0x63, 0x0c, 0xfa, 0x06, 0x4d, 0xb3, 0x12, 0x31, 0xf3, 0x9a, 0xfd, 0x58, + 0x83, 0xea, 0x26, 0x9c, 0x33, 0x8a, 0x87, 0xe0, 0x2a, 0x4b, 0xd5, 0x45, 0x2d, 0x78, 0x90, 0x61, + 0x6c, 0x13, 0xe6, 0x64, 0x0d, 0xcc, 0x9e, 0x93, 0xf9, 0x76, 0xcf, 0xde, 0x6f, 0xd5, 0x02, 0x6d, + 0xee, 0xfe, 0x55, 0x82, 0xd6, 0xd3, 0x84, 0xbd, 0x19, 0xce, 0x96, 0x60, 0x4e, 0x3f, 0x5a, 0xf5, + 0xea, 0x35, 0x45, 0xee, 0x67, 0xd9, 0xcd, 0xc8, 0xec, 0x6e, 0x16, 0xb3, 0x9b, 0x12, 0x4f, 0xf2, + 0x0a, 0x99, 0x39, 0xbf, 0x58, 0x50, 0xcb, 0xb8, 0xd3, 0x1e, 0xac, 0xe0, 0x8d, 0x28, 0xdf, 0xd5, + 0xc1, 0xe5, 0x37, 0xf1, 0xa0, 0xb2, 0x8b, 0x34, 0x38, 0x8c, 0x7d, 0xe7, 0x35, 0x62, 0x77, 0x3e, + 0x52, 0xa6, 0x0f, 0x63, 0x21, 0x35, 0x8e, 0x9c, 0xbb, 0xd0, 0xc8, 0x0b, 0xc8, 0x22, 0xcc, 0xec, + 0xe1, 0x44, 0xa7, 0x22, 0x3e, 0x49, 0x1b, 0xca, 0xfb, 0x34, 0x1a, 0x9b, 0xed, 0xa7, 0x88, 0xbb, + 0xa5, 0x3b, 0x96, 0xfb, 0x08, 0xda, 0xc5, 0x90, 0xfa, 0xc9, 0x1c, 0x42, 0xdd, 0x3a, 0x23, 0xd4, + 0xdd, 0x9f, 0x2c, 0x58, 0xda, 0x44, 0xfe, 0x24, 0xe1, 0xe1, 0x20, 0xf4, 0xe5, 0x01, 0x33, 0xd3, + 0x7a, 0x0f, 0x96, 0x92, 0x28, 0x28, 0xbc, 0xf7, 
0x49, 0x7f, 0x44, 0x77, 0xcc, 0xd8, 0xda, 0x49, + 0x14, 0x14, 0x56, 0xe3, 0x53, 0xba, 0x83, 0xc2, 0x2a, 0xc6, 0x83, 0x69, 0x56, 0xaa, 0x8c, 0x76, + 0x8c, 0x07, 0xc7, 0xad, 0xda, 0x50, 0x8e, 0xc2, 0x61, 0xc8, 0xe5, 0x86, 0x28, 0x7b, 0x8a, 0xc8, + 0xa0, 0x3f, 0x7b, 0x08, 0x7d, 0xf7, 0xcf, 0x12, 0x5c, 0x38, 0x96, 0xb0, 0xae, 0x7f, 0x0b, 0x1a, + 0x71, 0x8e, 0xaf, 0xbb, 0xd0, 0x3b, 0x06, 0xe3, 0x69, 0xc6, 0x9d, 0x02, 0xb3, 0xe0, 0xc7, 0xf9, + 0xc7, 0x82, 0x46, 0x5e, 0x3c, 0xf5, 0x4d, 0xda, 0x50, 0xf1, 0x53, 0xa4, 0x1c, 0x03, 0x5d, 0xa9, + 0x21, 0xc5, 0xa9, 0x55, 0xee, 0x30, 0xd0, 0x97, 0x2a, 0xa3, 0x85, 0x55, 0x80, 0x11, 0x0a, 0x2b, + 0x55, 0xa5, 0x21, 0xc9, 0xfb, 0x30, 0x93, 0x44, 0x81, 0xbc, 0xdb, 0xf5, 0xde, 0xb5, 0x23, 0x80, + 0xa3, 0x3b, 0x98, 0xf5, 0x3e, 0x42, 0x0d, 0x84, 0x10, 0x99, 0x27, 0x6c, 0x84, 0x69, 0x8c, 0x07, + 0xf2, 0xac, 0xbf, 0x8e, 0x69, 0x8c, 0x07, 0xee, 0xef, 0x25, 0x58, 0x7e, 0xa5, 0x0a, 0x59, 0x83, + 0x86, 0x3f, 0x4e, 0x53, 0x8c, 0x79, 0x1e, 0x08, 0x75, 0xcd, 0x93, 0x93, 0x5c, 0x81, 0x5a, 0x8c, + 0xcf, 0x79, 0x7e, 0xe4, 0x55, 0xc1, 0x38, 0x61, 0xcc, 0xeb, 0xd0, 0x2c, 0xc0, 0x45, 0x76, 0xe2, + 0x94, 0x2b, 0x5c, 0xb4, 0x20, 0x9f, 0x03, 0xd0, 0x2c, 0x4d, 0x7d, 0xc5, 0x3f, 0x38, 0x63, 0xe1, + 0x9d, 0x47, 0x71, 0x80, 0xcf, 0x31, 0x58, 0xcf, 0x6d, 0x21, 0x2f, 0xe7, 0xce, 0xf9, 0x10, 0x5a, + 0x53, 0x54, 0x44, 0x31, 0xa1, 0x60, 0xcb, 0x2e, 0x94, 0x3d, 0x45, 0x64, 0xd0, 0x28, 0xe5, 0x30, + 0x7b, 0x1b, 0x2e, 0x7d, 0x4c, 0xd3, 0xbd, 0x3c, 0x84, 0xd6, 0x99, 0x87, 0x34, 0x30, 0x4f, 0x6d, + 0x0a, 0x9e, 0xdc, 0x55, 0xb8, 0xfc, 0x2a, 0x23, 0x85, 0x58, 0x97, 0xc0, 0xe2, 0x26, 0x72, 0xfd, + 0xa0, 0x95, 0x27, 0x77, 0x03, 0xce, 0xe5, 0x78, 0x6f, 0xbc, 0x17, 0x7a, 0xff, 0x5a, 0xb0, 0x60, + 0xaa, 0x7d, 0x86, 0xe9, 0x7e, 0xe8, 0x23, 0x19, 0x43, 0x3d, 0x77, 0x03, 0xc8, 0xea, 0x09, 0xe7, + 0x41, 0x26, 0xe3, 0xac, 0x9d, 0x7a, 0x40, 0xdc, 0xb5, 0xaf, 0xff, 0xf8, 0xfb, 0xfb, 0xd2, 0x0a, + 0x59, 0xee, 0x9a, 0x23, 0xd0, 0x7d, 0x51, 0xb8, 0x11, 0x2f, 0xc9, 0x1e, 0x34, 0xf2, 0xdb, 0x8e, + 0xac, 0x9d, 0xba, 0x7c, 0x1d, 0xf7, 0x24, 0x15, 0x1d, 0xb9, 0x2d, 0x23, 0xcf, 0xbb, 0xb5, 0x2c, + 0xf2, 0x5d, 0xeb, 0x46, 0xef, 0xc7, 0x12, 0xb4, 0xf2, 0x2d, 0x37, 0xb5, 0xbf, 0x84, 0x85, 0x23, + 0x8b, 0x83, 0xbc, 0x7d, 0xca, 0x5e, 0x51, 0xa9, 0x5c, 0x3d, 0xd3, 0xf6, 0x71, 0x2f, 0xc9, 0x6c, + 0x2e, 0x90, 0xf3, 0xdd, 0xfc, 0xe6, 0x61, 0xdd, 0x17, 0xaa, 0x07, 0xdf, 0x59, 0xb0, 0x34, 0x1d, + 0x0d, 0xe4, 0xc8, 0x1d, 0x3c, 0x11, 0x68, 0xce, 0xbb, 0x67, 0x53, 0x2e, 0x26, 0x75, 0x63, 0x7a, + 0x52, 0xbd, 0x18, 0x9a, 0x0a, 0x35, 0xa6, 0x49, 0x5f, 0x40, 0x2d, 0x03, 0x1f, 0xb9, 0x7c, 0xac, + 0xf0, 0x02, 0x52, 0x9d, 0x2b, 0xaf, 0x94, 0xeb, 0xe8, 0x0b, 0x32, 0x7a, 0x8d, 0x54, 0xba, 0x0a, + 0x93, 0xf7, 0x2e, 0x43, 0xcb, 0x4f, 0x86, 0x45, 0xb3, 0xd1, 0xf6, 0x67, 0x15, 0xfd, 0xaf, 0xdc, + 0xf6, 0x9c, 0xfc, 0x0b, 0xf8, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc7, 0xaf, 0x17, 0x4a, + 0xe3, 0x0d, 0x00, 0x00, } diff --git a/api/v3/clairpb/clair.proto b/api/v3/clairpb/clair.proto index 0ddfcee0..aad4e658 100644 --- a/api/v3/clairpb/clair.proto +++ b/api/v3/clairpb/clair.proto @@ -99,9 +99,8 @@ message GetAncestryResponse { // The layer's information. Layer layer = 1; // The features detected in this layer. - repeated Feature detectedFeatures = 2; + repeated Feature detected_features = 2; } - message Ancestry { // The name of the desired ancestry. 
string name = 1; diff --git a/api/v3/clairpb/clair.swagger.json b/api/v3/clairpb/clair.swagger.json index b249c4e7..9bc7d0ee 100644 --- a/api/v3/clairpb/clair.swagger.json +++ b/api/v3/clairpb/clair.swagger.json @@ -206,13 +206,15 @@ "type": "object", "properties": { "layer": { - "$ref": "#/definitions/clairLayer" + "$ref": "#/definitions/clairLayer", + "description": "The layer's information." }, - "detectedFeatures": { + "detected_features": { "type": "array", "items": { "$ref": "#/definitions/clairFeature" - } + }, + "description": "The features detected in this layer." } } }, @@ -419,7 +421,7 @@ "items": { "$ref": "#/definitions/PostAncestryRequestPostLayer" }, - "description": "The layers to be scanned for this Ancestry, ordered in the way that i th\nlayer is the i + 1 th layer's parent." + "description": "The layers to be scanned for this Ancestry, ordered in the way that i th\nlayer is the parent of i + 1 th layer." } } }, diff --git a/api/v3/clairpb/convert.go b/api/v3/clairpb/convert.go index 98b31e94..364781e2 100644 --- a/api/v3/clairpb/convert.go +++ b/api/v3/clairpb/convert.go @@ -124,11 +124,17 @@ func VulnerabilityWithFixedInFromDatabaseModel(dbVuln database.VulnerabilityWith // AncestryFromDatabaseModel converts database ancestry to api ancestry. func AncestryFromDatabaseModel(dbAncestry database.Ancestry) *GetAncestryResponse_Ancestry { - ancestry := &GetAncestryResponse_Ancestry{Name: dbAncestry.Name} + ancestry := &GetAncestryResponse_Ancestry{ + Name: dbAncestry.Name, + ScannedDetectors: dbAncestry.ProcessedBy.Detectors, + ScannedListers: dbAncestry.ProcessedBy.Listers, + } + for _, layer := range dbAncestry.Layers { - ancestryLayer := &GetAncestryResponse_AncestryLayer{} - ancestryLayer.Layer = LayerFromDatabaseModel(layer) - ancestry.Layers = append(ancestry.Layers, ancestryLayer) + ancestry.Layers = append(ancestry.Layers, + &GetAncestryResponse_AncestryLayer{ + Layer: LayerFromDatabaseModel(layer), + }) } return ancestry diff --git a/api/v3/rpc.go b/api/v3/rpc.go index 50060802..9c893bf2 100644 --- a/api/v3/rpc.go +++ b/api/v3/rpc.go @@ -130,13 +130,18 @@ func (s *AncestryServer) GetAncestry(ctx context.Context, req *pb.GetAncestryReq return nil, status.Error(codes.NotFound, fmt.Sprintf("requested ancestry '%s' is not found", req.GetAncestryName())) } - respAncestry = &pb.GetAncestryResponse_Ancestry{Name: name} - respAncestry.ScannedDetectors = ancestry.ProcessedBy.Detectors - respAncestry.ScannedListers = ancestry.ProcessedBy.Listers - respAncestry.Layers = []*pb.GetAncestryResponse_AncestryLayer{} + respAncestry = &pb.GetAncestryResponse_Ancestry{ + Name: name, + ScannedDetectors: ancestry.ProcessedBy.Detectors, + ScannedListers: ancestry.ProcessedBy.Listers, + } for _, layer := range ancestry.Layers { - ancestryLayer := &pb.GetAncestryResponse_AncestryLayer{} + ancestryLayer := &pb.GetAncestryResponse_AncestryLayer{ + Layer: &pb.Layer{ + Hash: layer.Hash, + }, + } if req.GetWithVulnerabilities() { featureVulnerabilities, err := tx.FindAffectedNamespacedFeatures(layer.DetectedFeatures) diff --git a/database/database.go b/database/database.go index 16925bb1..5c427494 100644 --- a/database/database.go +++ b/database/database.go @@ -91,18 +91,18 @@ type Session interface { // UpsertAncestry inserts or replaces an ancestry and its namespaced // features and processors used to scan the ancestry. 
- UpsertAncestry(ancestry Ancestry, features []NamespacedFeature, processedBy Processors) error + UpsertAncestry(AncestryWithContent) error // FindAncestry retrieves an ancestry with processors used to scan the // ancestry. If the ancestry is not found, return false. // // The ancestry's processors are returned to short cut processing ancestry // if it has been processed by all processors in the current Clair instance. - FindAncestry(name string) (ancestry Ancestry, processedBy Processors, found bool, err error) + FindAncestry(name string) (ancestry Ancestry, found bool, err error) - // FindAncestryFeatures retrieves an ancestry with all detected namespaced - // features. If the ancestry is not found, return false. - FindAncestryFeatures(name string) (ancestry AncestryWithFeatures, found bool, err error) + // FindAncestryWithContent retrieves an ancestry with all detected + // namespaced features. If the ancestry is not found, return false. + FindAncestryWithContent(name string) (ancestry AncestryWithContent, found bool, err error) // PersistFeatures inserts a set of features if not in the database. PersistFeatures(features []Feature) error @@ -125,8 +125,8 @@ type Session interface { // PersistNamespaces inserts a set of namespaces if not in the database. PersistNamespaces([]Namespace) error - // PersistLayer inserts a layer if not in the datastore. - PersistLayer(Layer) error + // PersistLayer creates a layer using the blob Sum hash. + PersistLayer(hash string) error // PersistLayerContent persists a layer's content in the database. The given // namespaces and features can be partial content of this layer. @@ -135,8 +135,8 @@ type Session interface { // in the database. PersistLayerContent(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error - // FindLayer retrieves a layer and the processors scanned the layer. - FindLayer(hash string) (layer Layer, processedBy Processors, found bool, err error) + // FindLayer retrieves the metadata of a layer. + FindLayer(hash string) (layer Layer, found bool, err error) // FindLayerWithContent returns a layer with all detected features and // namespaces. 
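For reference, here is a minimal illustrative sketch (not part of the patch) of how a caller might exercise the reworked Session methods above: PersistLayer now takes a blob hash, UpsertAncestry takes an AncestryWithContent, and FindAncestryWithContent returns features grouped by layer. The function name upsertAndReadBack, the layer hash, and the ancestry name are hypothetical; the struct shapes mirror the ones used in database/pgsql/ancestry_test.go further down, and the sketch assumes tx is an open database.Session.

package example

import "github.com/coreos/clair/database"

// upsertAndReadBack is a hypothetical helper showing the intended call order:
// persist the layer and its namespaced features first, then upsert the
// ancestry that references them, then read the ancestry back with content.
func upsertAndReadBack(tx database.Session) error {
	// Layers must already exist; ancestry layers reference them by hash.
	if err := tx.PersistLayer("layer-hash-example"); err != nil { // hypothetical hash
		return err
	}

	feature := database.NamespacedFeature{
		Namespace: database.Namespace{Name: "debian:8", VersionFormat: "dpkg"},
		Feature:   database.Feature{Name: "openssl", Version: "1.0", VersionFormat: "dpkg"},
	}

	// UpsertAncestry expects the referenced namespaced features to be present.
	if err := tx.PersistNamespaces([]database.Namespace{feature.Namespace}); err != nil {
		return err
	}
	if err := tx.PersistFeatures([]database.Feature{feature.Feature}); err != nil {
		return err
	}
	if err := tx.PersistNamespacedFeatures([]database.NamespacedFeature{feature}); err != nil {
		return err
	}

	ancestry := database.AncestryWithContent{
		Ancestry: database.Ancestry{
			Name:        "example-ancestry", // hypothetical name
			Layers:      []database.Layer{{Hash: "layer-hash-example"}},
			ProcessedBy: database.Processors{Listers: []string{"dpkg"}, Detectors: []string{"os-release"}},
		},
		// One AncestryLayer per Ancestry.Layers entry, in the same order,
		// carrying the features introduced by that layer.
		Layers: []database.AncestryLayer{
			{
				Layer:            database.Layer{Hash: "layer-hash-example"},
				DetectedFeatures: []database.NamespacedFeature{feature},
			},
		},
	}

	if err := tx.UpsertAncestry(ancestry); err != nil {
		return err
	}

	// Read the ancestry back together with the features each layer introduced.
	stored, found, err := tx.FindAncestryWithContent("example-ancestry")
	if err != nil || !found {
		return err
	}
	_ = stored.Layers[0].DetectedFeatures
	return nil
}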
diff --git a/database/mock.go b/database/mock.go index 966e9c88..ed6e8f16 100644 --- a/database/mock.go +++ b/database/mock.go @@ -21,17 +21,17 @@ import "time" type MockSession struct { FctCommit func() error FctRollback func() error - FctUpsertAncestry func(Ancestry, []NamespacedFeature, Processors) error - FctFindAncestry func(name string) (Ancestry, Processors, bool, error) - FctFindAncestryFeatures func(name string) (AncestryWithFeatures, bool, error) + FctUpsertAncestry func(AncestryWithContent) error + FctFindAncestry func(name string) (Ancestry, bool, error) + FctFindAncestryWithContent func(name string) (AncestryWithContent, bool, error) FctFindAffectedNamespacedFeatures func(features []NamespacedFeature) ([]NullableAffectedNamespacedFeature, error) FctPersistNamespaces func([]Namespace) error FctPersistFeatures func([]Feature) error FctPersistNamespacedFeatures func([]NamespacedFeature) error FctCacheAffectedNamespacedFeatures func([]NamespacedFeature) error - FctPersistLayer func(Layer) error + FctPersistLayer func(hash string) error FctPersistLayerContent func(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error - FctFindLayer func(name string) (Layer, Processors, bool, error) + FctFindLayer func(name string) (Layer, bool, error) FctFindLayerWithContent func(name string) (LayerWithContent, bool, error) FctInsertVulnerabilities func([]VulnerabilityWithAffected) error FctFindVulnerabilities func([]VulnerabilityID) ([]NullableVulnerability, error) @@ -63,23 +63,23 @@ func (ms *MockSession) Rollback() error { panic("required mock function not implemented") } -func (ms *MockSession) UpsertAncestry(ancestry Ancestry, features []NamespacedFeature, processedBy Processors) error { +func (ms *MockSession) UpsertAncestry(ancestry AncestryWithContent) error { if ms.FctUpsertAncestry != nil { - return ms.FctUpsertAncestry(ancestry, features, processedBy) + return ms.FctUpsertAncestry(ancestry) } panic("required mock function not implemented") } -func (ms *MockSession) FindAncestry(name string) (Ancestry, Processors, bool, error) { +func (ms *MockSession) FindAncestry(name string) (Ancestry, bool, error) { if ms.FctFindAncestry != nil { return ms.FctFindAncestry(name) } panic("required mock function not implemented") } -func (ms *MockSession) FindAncestryFeatures(name string) (AncestryWithFeatures, bool, error) { - if ms.FctFindAncestryFeatures != nil { - return ms.FctFindAncestryFeatures(name) +func (ms *MockSession) FindAncestryWithContent(name string) (AncestryWithContent, bool, error) { + if ms.FctFindAncestryWithContent != nil { + return ms.FctFindAncestryWithContent(name) } panic("required mock function not implemented") } @@ -119,7 +119,7 @@ func (ms *MockSession) CacheAffectedNamespacedFeatures(namespacedFeatures []Name panic("required mock function not implemented") } -func (ms *MockSession) PersistLayer(layer Layer) error { +func (ms *MockSession) PersistLayer(layer string) error { if ms.FctPersistLayer != nil { return ms.FctPersistLayer(layer) } @@ -133,7 +133,7 @@ func (ms *MockSession) PersistLayerContent(hash string, namespaces []Namespace, panic("required mock function not implemented") } -func (ms *MockSession) FindLayer(name string) (Layer, Processors, bool, error) { +func (ms *MockSession) FindLayer(name string) (Layer, bool, error) { if ms.FctFindLayer != nil { return ms.FctFindLayer(name) } diff --git a/database/models.go b/database/models.go index 43d41da9..e5a82358 100644 --- a/database/models.go +++ b/database/models.go @@ -20,7 +20,7 
@@ import ( "time" ) -// Processors are extentions to scan layer's content. +// Processors are extentions to scan a layer's content. type Processors struct { Listers []string Detectors []string @@ -29,24 +29,39 @@ type Processors struct { // Ancestry is a manifest that keeps all layers in an image in order. type Ancestry struct { Name string + // ProcessedBy contains the processors that are used when computing the + // content of this ancestry. + ProcessedBy Processors // Layers should be ordered and i_th layer is the parent of i+1_th layer in // the slice. Layers []Layer } -// AncestryWithFeatures is an ancestry with namespaced features detected in the -// ancestry, which is processed by `ProcessedBy`. -type AncestryWithFeatures struct { +// AncestryWithContent has the ancestry's name and the Ancestry Layers +// associated with it. +type AncestryWithContent struct { Ancestry - ProcessedBy Processors - Features []NamespacedFeature + // TODO(sidchen) deduplicate the Layers here and the Layers in + // Ancestry.Layers. + // AncestryLayers should have the same order as Ancestry.Layers. + Layers []AncestryLayer +} + +// AncestryLayer is a layer with all detected namespaced features. +type AncestryLayer struct { + Layer + + // DetectedFeatures are the features introduced by this layer. + DetectedFeatures []NamespacedFeature } -// Layer corresponds to a layer in an image processed by `ProcessedBy`. +// Layer contains the metadata of a layer. type Layer struct { // Hash is content hash of the layer. Hash string + // ProcessedBy contains the processors that processed this layer. + ProcessedBy Processors } // LayerWithContent is a layer with its detected namespaces and features by @@ -54,9 +69,8 @@ type Layer struct { type LayerWithContent struct { Layer - ProcessedBy Processors - Namespaces []Namespace - Features []Feature + Namespaces []Namespace + Features []Feature } // Namespace is the contextual information around features. @@ -198,6 +212,7 @@ type VulnerabilityNotificationWithVulnerable struct { // PageNumber is used to do pagination. type PageNumber string +// MetadataMap is for storing the metadata returned by vulnerability database. 
type MetadataMap map[string]interface{} // NullableAffectedNamespacedFeature is an affectednamespacedfeature with diff --git a/database/pgsql/ancestry.go b/database/pgsql/ancestry.go index 17033144..b5678bc5 100644 --- a/database/pgsql/ancestry.go +++ b/database/pgsql/ancestry.go @@ -3,48 +3,37 @@ package pgsql import ( "database/sql" "errors" - "fmt" - "strings" - "github.com/lib/pq" log "github.com/sirupsen/logrus" "github.com/coreos/clair/database" "github.com/coreos/clair/pkg/commonerr" ) -func (tx *pgSession) UpsertAncestry(ancestry database.Ancestry, features []database.NamespacedFeature, processedBy database.Processors) error { +func (tx *pgSession) UpsertAncestry(ancestry database.AncestryWithContent) error { if ancestry.Name == "" { - log.Warning("Empty ancestry name is not allowed") + log.Error("Empty ancestry name is not allowed") return commonerr.NewBadRequestError("could not insert an ancestry with empty name") } if len(ancestry.Layers) == 0 { - log.Warning("Empty ancestry is not allowed") + log.Error("Empty ancestry is not allowed") return commonerr.NewBadRequestError("could not insert an ancestry with 0 layers") } - err := tx.deleteAncestry(ancestry.Name) - if err != nil { + if err := tx.deleteAncestry(ancestry.Name); err != nil { return err } var ancestryID int64 - err = tx.QueryRow(insertAncestry, ancestry.Name).Scan(&ancestryID) - if err != nil { + if err := tx.QueryRow(insertAncestry, ancestry.Name).Scan(&ancestryID); err != nil { if isErrUniqueViolation(err) { - return handleError("insertAncestry", errors.New("Other Go-routine is processing this ancestry (skip).")) + return handleError("insertAncestry", errors.New("other Go-routine is processing this ancestry (skip)")) } return handleError("insertAncestry", err) } - err = tx.insertAncestryLayers(ancestryID, ancestry.Layers) - if err != nil { - return err - } - - err = tx.insertAncestryFeatures(ancestryID, features) - if err != nil { + if err := tx.insertAncestryLayers(ancestryID, ancestry.Layers); err != nil { return err } @@ -52,71 +41,82 @@ func (tx *pgSession) UpsertAncestry(ancestry database.Ancestry, features []datab "persistAncestryLister", persistAncestryDetector, "persistAncestryDetector", - ancestryID, processedBy) + ancestryID, ancestry.ProcessedBy) } -func (tx *pgSession) FindAncestry(name string) (database.Ancestry, database.Processors, bool, error) { - ancestry := database.Ancestry{Name: name} - processed := database.Processors{} +func (tx *pgSession) FindAncestry(name string) (database.Ancestry, bool, error) { + var ( + ancestryID int64 + ancestry = database.Ancestry{Name: name} + err error + ) - var ancestryID int64 - err := tx.QueryRow(searchAncestry, name).Scan(&ancestryID) - if err != nil { + if err = tx.QueryRow(searchAncestry, name).Scan(&ancestryID); err != nil { if err == sql.ErrNoRows { - return ancestry, processed, false, nil + return ancestry, false, nil } - return ancestry, processed, false, handleError("searchAncestry", err) + return ancestry, false, handleError("searchAncestry", err) } - ancestry.Layers, err = tx.findAncestryLayers(ancestryID) - if err != nil { - return ancestry, processed, false, err + if ancestry.Layers, err = tx.findAncestryLayers(ancestryID); err != nil { + return ancestry, false, err } - processed.Detectors, err = tx.findProcessors(searchAncestryDetectors, "searchAncestryDetectors", "detector", ancestryID) - if err != nil { - return ancestry, processed, false, err + if ancestry.ProcessedBy.Detectors, err = tx.findProcessors(searchAncestryDetectors, 
"searchAncestryDetectors", "detector", ancestryID); err != nil { + return ancestry, false, err } - processed.Listers, err = tx.findProcessors(searchAncestryListers, "searchAncestryListers", "lister", ancestryID) - if err != nil { - return ancestry, processed, false, err + if ancestry.ProcessedBy.Listers, err = tx.findProcessors(searchAncestryListers, "searchAncestryListers", "lister", ancestryID); err != nil { + return ancestry, false, err } - return ancestry, processed, true, nil + return ancestry, true, nil } -func (tx *pgSession) FindAncestryFeatures(name string) (database.AncestryWithFeatures, bool, error) { +func (tx *pgSession) FindAncestryWithContent(name string) (database.AncestryWithContent, bool, error) { var ( - awf database.AncestryWithFeatures - ok bool - err error + ancestryContent database.AncestryWithContent + isValid bool + err error ) - awf.Ancestry, awf.ProcessedBy, ok, err = tx.FindAncestry(name) - if err != nil { - return awf, false, err - } - if !ok { - return awf, false, nil + if ancestryContent.Ancestry, isValid, err = tx.FindAncestry(name); err != nil || !isValid { + return ancestryContent, isValid, err } rows, err := tx.Query(searchAncestryFeatures, name) if err != nil { - return awf, false, handleError("searchAncestryFeatures", err) + return ancestryContent, false, handleError("searchAncestryFeatures", err) } + features := map[int][]database.NamespacedFeature{} for rows.Next() { - nf := database.NamespacedFeature{} - err := rows.Scan(&nf.Namespace.Name, &nf.Namespace.VersionFormat, &nf.Feature.Name, &nf.Feature.Version) - if err != nil { - return awf, false, handleError("searchAncestryFeatures", err) + var ( + feature database.NamespacedFeature + // layerIndex is used to determine which layer the namespaced feature belongs to. + layerIndex sql.NullInt64 + ) + + if err := rows.Scan(&feature.Namespace.Name, + &feature.Namespace.VersionFormat, + &feature.Feature.Name, &feature.Feature.Version, + &layerIndex); err != nil { + return ancestryContent, false, handleError("searchAncestryFeatures", err) } - nf.Feature.VersionFormat = nf.Namespace.VersionFormat - awf.Features = append(awf.Features, nf) + + feature.Feature.VersionFormat = feature.Namespace.VersionFormat // This looks strange. + features[int(layerIndex.Int64)] = append(features[int(layerIndex.Int64)], feature) + } + + // By the assumption of Ancestry Layer Index, we have the ancestry's layer + // index corresponding to the index in the array. 
+ for index, layer := range ancestryContent.Ancestry.Layers { + ancestryLayer := database.AncestryLayer{Layer: layer} + ancestryLayer.DetectedFeatures, _ = features[index] + ancestryContent.Layers = append(ancestryContent.Layers, ancestryLayer) } - return awf, true, nil + return ancestryContent, true, nil } func (tx *pgSession) deleteAncestry(name string) error { @@ -164,97 +164,62 @@ func (tx *pgSession) findAncestryLayers(ancestryID int64) ([]database.Layer, err if err != nil { return nil, handleError("searchAncestryLayer", err) } + layers := []database.Layer{} for rows.Next() { var layer database.Layer - err := rows.Scan(&layer.Hash) - if err != nil { + if err := rows.Scan(&layer.Hash); err != nil { return nil, handleError("searchAncestryLayer", err) } + layers = append(layers, layer) } + return layers, nil } -func (tx *pgSession) insertAncestryLayers(ancestryID int64, layers []database.Layer) error { - layerIDs := map[string]sql.NullInt64{} - for _, l := range layers { - layerIDs[l.Hash] = sql.NullInt64{} - } - - layerHashes := []string{} - for hash := range layerIDs { - layerHashes = append(layerHashes, hash) - } - - rows, err := tx.Query(searchLayerIDs, pq.Array(layerHashes)) - if err != nil { - return handleError("searchLayerIDs", err) - } - - for rows.Next() { - var ( - layerID sql.NullInt64 - layerName string - ) - err := rows.Scan(&layerID, &layerName) - if err != nil { - return handleError("searchLayerIDs", err) - } - layerIDs[layerName] = layerID - } - - notFound := []string{} - for hash, id := range layerIDs { - if !id.Valid { - notFound = append(notFound, hash) - } - } - - if len(notFound) > 0 { - return handleError("searchLayerIDs", fmt.Errorf("Layer %s is not found in database", strings.Join(notFound, ","))) - } - +// insertAncestryLayers inserts the ancestry layers along with its content into +// the database. The layers are 0 based indexed in the original order. +func (tx *pgSession) insertAncestryLayers(ancestryID int64, layers []database.AncestryLayer) error { //TODO(Sida): use bulk insert. stmt, err := tx.Prepare(insertAncestryLayer) if err != nil { return handleError("insertAncestryLayer", err) } - defer stmt.Close() + ancestryLayerIDs := []sql.NullInt64{} for index, layer := range layers { - _, err := stmt.Exec(ancestryID, index, layerIDs[layer.Hash].Int64) - if err != nil { + var ancestryLayerID sql.NullInt64 + if err := stmt.QueryRow(ancestryID, index, layer.Hash).Scan(&ancestryLayerID); err != nil { return handleError("insertAncestryLayer", commonerr.CombineErrors(err, stmt.Close())) } - } - - return nil -} -func (tx *pgSession) insertAncestryFeatures(ancestryID int64, features []database.NamespacedFeature) error { - featureIDs, err := tx.findNamespacedFeatureIDs(features) - if err != nil { - return err + ancestryLayerIDs = append(ancestryLayerIDs, ancestryLayerID) } - //TODO(Sida): use bulk insert. 
- stmtFeatures, err := tx.Prepare(insertAncestryFeature) - if err != nil { - return handleError("insertAncestryFeature", err) + if err := stmt.Close(); err != nil { + return handleError("Failed to close insertAncestryLayer statement", err) } - defer stmtFeatures.Close() + stmt, err = tx.Prepare(insertAncestryLayerFeature) + defer stmt.Close() - for _, id := range featureIDs { - if !id.Valid { - return errors.New("requested namespaced feature is not in database") + for i, layer := range layers { + var ( + nsFeatureIDs []sql.NullInt64 + layerID = ancestryLayerIDs[i] + ) + + if nsFeatureIDs, err = tx.findNamespacedFeatureIDs(layer.DetectedFeatures); err != nil { + return err } - _, err := stmtFeatures.Exec(ancestryID, id) - if err != nil { - return handleError("insertAncestryFeature", err) + for _, id := range nsFeatureIDs { + if _, err := stmt.Exec(layerID, id); err != nil { + return handleError("insertAncestryLayerFeature", commonerr.CombineErrors(err, stmt.Close())) + } } + } return nil diff --git a/database/pgsql/ancestry_test.go b/database/pgsql/ancestry_test.go index 7851163c..2453f28d 100644 --- a/database/pgsql/ancestry_test.go +++ b/database/pgsql/ancestry_test.go @@ -26,26 +26,53 @@ import ( func TestUpsertAncestry(t *testing.T) { store, tx := openSessionForTest(t, "UpsertAncestry", true) defer closeTest(t, store, tx) - a1 := database.Ancestry{ - Name: "a1", - Layers: []database.Layer{ - {Hash: "layer-N"}, + a1 := database.AncestryWithContent{ + Ancestry: database.Ancestry{ + Name: "a1", + Layers: []database.Layer{ + {Hash: "layer-N"}, + }, + }, + Layers: []database.AncestryLayer{ + { + Layer: database.Layer{ + Hash: "layer-N", + }, + }, }, } - a2 := database.Ancestry{} + a2 := database.AncestryWithContent{} - a3 := database.Ancestry{ - Name: "a", - Layers: []database.Layer{ - {Hash: "layer-0"}, + a3 := database.AncestryWithContent{ + Ancestry: database.Ancestry{ + Name: "a", + Layers: []database.Layer{ + {Hash: "layer-0"}, + }, + }, + Layers: []database.AncestryLayer{ + { + Layer: database.Layer{ + Hash: "layer-0", + }, + }, }, } - a4 := database.Ancestry{ - Name: "a", - Layers: []database.Layer{ - {Hash: "layer-1"}, + a4 := database.AncestryWithContent{ + Ancestry: database.Ancestry{ + Name: "a", + Layers: []database.Layer{ + {Hash: "layer-1"}, + }, + }, + Layers: []database.AncestryLayer{ + { + Layer: database.Layer{ + Hash: "layer-1", + }, + }, }, } @@ -83,17 +110,20 @@ func TestUpsertAncestry(t *testing.T) { Feature: f2, } + a4.ProcessedBy = p // invalid case - assert.NotNil(t, tx.UpsertAncestry(a1, nil, database.Processors{})) - assert.NotNil(t, tx.UpsertAncestry(a2, nil, database.Processors{})) + assert.NotNil(t, tx.UpsertAncestry(a1)) + assert.NotNil(t, tx.UpsertAncestry(a2)) // valid case - assert.Nil(t, tx.UpsertAncestry(a3, nil, database.Processors{})) + assert.Nil(t, tx.UpsertAncestry(a3)) + a4.Layers[0].DetectedFeatures = []database.NamespacedFeature{nsf1, nsf2} // replace invalid case - assert.NotNil(t, tx.UpsertAncestry(a4, []database.NamespacedFeature{nsf1, nsf2}, p)) + assert.NotNil(t, tx.UpsertAncestry(a4)) + a4.Layers[0].DetectedFeatures = []database.NamespacedFeature{nsf1} // replace valid case - assert.Nil(t, tx.UpsertAncestry(a4, []database.NamespacedFeature{nsf1}, p)) + assert.Nil(t, tx.UpsertAncestry(a4)) // validate - ancestry, ok, err := tx.FindAncestryFeatures("a") + ancestry, ok, err := tx.FindAncestryWithContent("a") assert.Nil(t, err) assert.True(t, ok) assert.Equal(t, a4, ancestry.Ancestry) @@ -111,8 +141,7 @@ func TestFindAncestry(t *testing.T) { 
store, tx := openSessionForTest(t, "FindAncestry", true) defer closeTest(t, store, tx) - // not found - _, _, ok, err := tx.FindAncestry("ancestry-non") + _, ok, err := tx.FindAncestry("ancestry-non") assert.Nil(t, err) assert.False(t, ok) @@ -124,41 +153,52 @@ func TestFindAncestry(t *testing.T) { {Hash: "layer-2"}, {Hash: "layer-3a"}, }, + ProcessedBy: database.Processors{ + Detectors: []string{"os-release"}, + Listers: []string{"dpkg"}, + }, } - expectedProcessors := database.Processors{ - Detectors: []string{"os-release"}, - Listers: []string{"dpkg"}, - } - - // found - a, p, ok2, err := tx.FindAncestry("ancestry-1") + a, ok2, err := tx.FindAncestry("ancestry-1") if assert.Nil(t, err) && assert.True(t, ok2) { assertAncestryEqual(t, expected, a) - assertProcessorsEqual(t, expectedProcessors, p) } } -func assertAncestryWithFeatureEqual(t *testing.T, expected database.AncestryWithFeatures, actual database.AncestryWithFeatures) bool { - return assertAncestryEqual(t, expected.Ancestry, actual.Ancestry) && - assertNamespacedFeatureEqual(t, expected.Features, actual.Features) && - assertProcessorsEqual(t, expected.ProcessedBy, actual.ProcessedBy) +func assertAncestryWithFeatureEqual(t *testing.T, expected database.AncestryWithContent, actual database.AncestryWithContent) bool { + if assertAncestryEqual(t, expected.Ancestry, actual.Ancestry) && assert.Equal(t, len(expected.Layers), len(actual.Layers)) { + for index, layer := range expected.Layers { + if !assertAncestryLayerEqual(t, layer, actual.Layers[index]) { + return false + } + } + return true + } + return false } + +func assertAncestryLayerEqual(t *testing.T, expected database.AncestryLayer, actual database.AncestryLayer) bool { + return assertLayerEqual(t, expected.Layer, actual.Layer) && + assertNamespacedFeatureEqual(t, expected.DetectedFeatures, actual.DetectedFeatures) +} + func assertAncestryEqual(t *testing.T, expected database.Ancestry, actual database.Ancestry) bool { - return assert.Equal(t, expected.Name, actual.Name) && assert.Equal(t, expected.Layers, actual.Layers) + return assert.Equal(t, expected.Name, actual.Name) && + assert.Equal(t, expected.Layers, actual.Layers) && + assertProcessorsEqual(t, expected.ProcessedBy, actual.ProcessedBy) } -func TestFindAncestryFeatures(t *testing.T) { - store, tx := openSessionForTest(t, "FindAncestryFeatures", true) +func TestFindAncestryWithContent(t *testing.T) { + store, tx := openSessionForTest(t, "FindAncestryWithContent", true) defer closeTest(t, store, tx) // invalid - _, ok, err := tx.FindAncestryFeatures("ancestry-non") + _, ok, err := tx.FindAncestryWithContent("ancestry-non") if assert.Nil(t, err) { assert.False(t, ok) } - expected := database.AncestryWithFeatures{ + expected := database.AncestryWithContent{ Ancestry: database.Ancestry{ Name: "ancestry-2", Layers: []database.Layer{ @@ -167,41 +207,62 @@ func TestFindAncestryFeatures(t *testing.T) { {Hash: "layer-2"}, {Hash: "layer-3b"}, }, + ProcessedBy: database.Processors{ + Detectors: []string{"os-release"}, + Listers: []string{"dpkg"}, + }, }, - ProcessedBy: database.Processors{ - Detectors: []string{"os-release"}, - Listers: []string{"dpkg"}, - }, - Features: []database.NamespacedFeature{ + + Layers: []database.AncestryLayer{ { - Namespace: database.Namespace{ - Name: "debian:7", - VersionFormat: "dpkg", + Layer: database.Layer{ + Hash: "layer-0", + }, + DetectedFeatures: []database.NamespacedFeature{ + { + Namespace: database.Namespace{ + Name: "debian:7", + VersionFormat: "dpkg", + }, + Feature: database.Feature{ + 
Name: "wechat", + Version: "0.5", + VersionFormat: "dpkg", + }, + }, + { + Namespace: database.Namespace{ + Name: "debian:8", + VersionFormat: "dpkg", + }, + Feature: database.Feature{ + Name: "openssl", + Version: "1.0", + VersionFormat: "dpkg", + }, + }, }, - Feature: database.Feature{ - Name: "wechat", - Version: "0.5", - VersionFormat: "dpkg", + }, + { + Layer: database.Layer{ + Hash: "layer-1", }, }, { - Namespace: database.Namespace{ - Name: "debian:8", - VersionFormat: "dpkg", + Layer: database.Layer{ + Hash: "layer-2", }, - Feature: database.Feature{ - Name: "openssl", - Version: "1.0", - VersionFormat: "dpkg", + }, + { + Layer: database.Layer{ + Hash: "layer-3b", }, }, }, } // valid - ancestry, ok, err := tx.FindAncestryFeatures("ancestry-2") + ancestry, ok, err := tx.FindAncestryWithContent("ancestry-2") if assert.Nil(t, err) && assert.True(t, ok) { - assertAncestryEqual(t, expected.Ancestry, ancestry.Ancestry) - assertNamespacedFeatureEqual(t, expected.Features, ancestry.Features) - assertProcessorsEqual(t, expected.ProcessedBy, ancestry.ProcessedBy) + assertAncestryWithFeatureEqual(t, expected, ancestry) } } diff --git a/database/pgsql/layer.go b/database/pgsql/layer.go index c7cd5ce2..daa6a704 100644 --- a/database/pgsql/layer.go +++ b/database/pgsql/layer.go @@ -22,9 +22,9 @@ import ( "github.com/coreos/clair/pkg/commonerr" ) -func (tx *pgSession) FindLayer(hash string) (database.Layer, database.Processors, bool, error) { - l, p, _, ok, err := tx.findLayer(hash) - return l, p, ok, err +func (tx *pgSession) FindLayer(hash string) (database.Layer, bool, error) { + layer, _, ok, err := tx.findLayer(hash) + return layer, ok, err } func (tx *pgSession) FindLayerWithContent(hash string) (database.LayerWithContent, bool, error) { @@ -35,7 +35,7 @@ func (tx *pgSession) FindLayerWithContent(hash string) (database.LayerWithConten err error ) - layer.Layer, layer.ProcessedBy, layerID, ok, err = tx.findLayer(hash) + layer.Layer, layerID, ok, err = tx.findLayer(hash) if err != nil { return layer, false, err } @@ -49,12 +49,12 @@ func (tx *pgSession) FindLayerWithContent(hash string) (database.LayerWithConten return layer, true, nil } -func (tx *pgSession) PersistLayer(layer database.Layer) error { - if layer.Hash == "" { +func (tx *pgSession) PersistLayer(hash string) error { + if hash == "" { return commonerr.NewBadRequestError("Empty Layer Hash is not allowed") } - _, err := tx.Exec(queryPersistLayer(1), layer.Hash) + _, err := tx.Exec(queryPersistLayer(1), hash) if err != nil { return handleError("queryPersistLayer", err) } @@ -275,34 +275,33 @@ func (tx *pgSession) findLayerFeatures(layerID int64) ([]database.Feature, error return features, nil } -func (tx *pgSession) findLayer(hash string) (database.Layer, database.Processors, int64, bool, error) { +func (tx *pgSession) findLayer(hash string) (database.Layer, int64, bool, error) { var ( - layerID int64 - layer = database.Layer{Hash: hash} - processors database.Processors + layerID int64 + layer = database.Layer{Hash: hash, ProcessedBy: database.Processors{}} ) if hash == "" { - return layer, processors, layerID, false, commonerr.NewBadRequestError("Empty Layer Hash is not allowed") + return layer, layerID, false, commonerr.NewBadRequestError("Empty Layer Hash is not allowed") } err := tx.QueryRow(searchLayer, hash).Scan(&layerID) if err != nil { if err == sql.ErrNoRows { - return layer, processors, layerID, false, nil + return layer, layerID, false, nil } - return layer, processors, layerID, false, err + return layer, layerID, false, 
err } - processors.Detectors, err = tx.findProcessors(searchLayerDetectors, "searchLayerDetectors", "detector", layerID) + layer.ProcessedBy.Detectors, err = tx.findProcessors(searchLayerDetectors, "searchLayerDetectors", "detector", layerID) if err != nil { - return layer, processors, layerID, false, err + return layer, layerID, false, err } - processors.Listers, err = tx.findProcessors(searchLayerListers, "searchLayerListers", "lister", layerID) + layer.ProcessedBy.Listers, err = tx.findProcessors(searchLayerListers, "searchLayerListers", "lister", layerID) if err != nil { - return layer, processors, layerID, false, err + return layer, layerID, false, err } - return layer, processors, layerID, true, nil + return layer, layerID, true, nil } diff --git a/database/pgsql/layer_test.go b/database/pgsql/layer_test.go index e823a048..678fd508 100644 --- a/database/pgsql/layer_test.go +++ b/database/pgsql/layer_test.go @@ -26,8 +26,8 @@ func TestPersistLayer(t *testing.T) { datastore, tx := openSessionForTest(t, "PersistLayer", false) defer closeTest(t, datastore, tx) - l1 := database.Layer{} - l2 := database.Layer{Hash: "HESOYAM"} + l1 := "" + l2 := "HESOYAM" // invalid assert.NotNil(t, tx.PersistLayer(l1)) @@ -51,24 +51,25 @@ func TestFindLayer(t *testing.T) { datastore, tx := openSessionForTest(t, "FindLayer", true) defer closeTest(t, datastore, tx) - expected := database.Layer{Hash: "layer-4"} - expectedProcessors := database.Processors{ - Detectors: []string{"os-release", "apt-sources"}, - Listers: []string{"dpkg", "rpm"}, + expected := database.Layer{ + Hash: "layer-4", + ProcessedBy: database.Processors{ + Detectors: []string{"os-release", "apt-sources"}, + Listers: []string{"dpkg", "rpm"}, + }, } // invalid - _, _, _, err := tx.FindLayer("") + _, _, err := tx.FindLayer("") assert.NotNil(t, err) - _, _, ok, err := tx.FindLayer("layer-non") + _, ok, err := tx.FindLayer("layer-non") assert.Nil(t, err) assert.False(t, ok) // valid - layer, processors, ok2, err := tx.FindLayer("layer-4") + layer, ok2, err := tx.FindLayer("layer-4") if assert.Nil(t, err) && assert.True(t, ok2) { - assert.Equal(t, expected, layer) - assertProcessorsEqual(t, expectedProcessors, processors) + assertLayerEqual(t, expected, layer) } } @@ -85,6 +86,10 @@ func TestFindLayerWithContent(t *testing.T) { expectedL := database.LayerWithContent{ Layer: database.Layer{ Hash: "layer-4", + ProcessedBy: database.Processors{ + Detectors: []string{"os-release", "apt-sources"}, + Listers: []string{"dpkg", "rpm"}, + }, }, Features: []database.Feature{ {Name: "fake", Version: "2.0", VersionFormat: "rpm"}, @@ -94,10 +99,6 @@ func TestFindLayerWithContent(t *testing.T) { {Name: "debian:7", VersionFormat: "dpkg"}, {Name: "fake:1.0", VersionFormat: "rpm"}, }, - ProcessedBy: database.Processors{ - Detectors: []string{"os-release", "apt-sources"}, - Listers: []string{"dpkg", "rpm"}, - }, } layer, ok2, err := tx.FindLayerWithContent("layer-4") @@ -107,8 +108,12 @@ func TestFindLayerWithContent(t *testing.T) { } func assertLayerWithContentEqual(t *testing.T, expected database.LayerWithContent, actual database.LayerWithContent) bool { - return assert.Equal(t, expected.Layer, actual.Layer) && + return assertLayerEqual(t, expected.Layer, actual.Layer) && assertFeaturesEqual(t, expected.Features, actual.Features) && - assertProcessorsEqual(t, expected.ProcessedBy, actual.ProcessedBy) && assertNamespacesEqual(t, expected.Namespaces, actual.Namespaces) } + +func assertLayerEqual(t *testing.T, expected database.Layer, actual database.Layer) bool 
{ + return assertProcessorsEqual(t, expected.ProcessedBy, actual.ProcessedBy) && + assert.Equal(t, expected.Hash, actual.Hash) +} diff --git a/database/pgsql/migrations/00001_initial_schema.go b/database/pgsql/migrations/00001_initial_schema.go index 14fff7d4..e51037ee 100644 --- a/database/pgsql/migrations/00001_initial_schema.go +++ b/database/pgsql/migrations/00001_initial_schema.go @@ -89,11 +89,11 @@ func init() { UNIQUE (ancestry_id, ancestry_index));`, `CREATE INDEX ON ancestry_layer(ancestry_id);`, - `CREATE TABLE IF NOT EXISTS ancestry_feature ( - id SERIAL PRIMARY KEY, - ancestry_id INT REFERENCES ancestry ON DELETE CASCADE, - namespaced_feature_id INT REFERENCES namespaced_feature ON DELETE CASCADE, - UNIQUE (ancestry_id, namespaced_feature_id));`, + `CREATE TABLE IF NOT EXISTS ancestry_feature( + id SERIAL PRIMARY KEY, + ancestry_layer_id INT REFERENCES ancestry_layer ON DELETE CASCADE, + namespaced_feature_id INT REFERENCES namespaced_feature ON DELETE CASCADE, + UNIQUE (ancestry_layer_id, namespaced_feature_id));`, `CREATE TABLE IF NOT EXISTS ancestry_lister ( id SERIAL PRIMARY KEY, @@ -168,9 +168,9 @@ func init() { `DROP TABLE IF EXISTS ancestry, ancestry_layer, - ancestry_feature, ancestry_detector, ancestry_lister, + ancestry_feature, feature, namespaced_feature, keyvalue, diff --git a/database/pgsql/queries.go b/database/pgsql/queries.go index c7bd689b..dc91f10b 100644 --- a/database/pgsql/queries.go +++ b/database/pgsql/queries.go @@ -196,10 +196,10 @@ const ( SELECT DISTINCT ON (a.id) a.id, a.name FROM vulnerability_affected_namespaced_feature AS vanf, - ancestry AS a, ancestry_feature AS af + ancestry_layer AS al, ancestry_feature AS af WHERE vanf.vulnerability_id = $1 - AND a.id >= $2 - AND a.id = af.ancestry_id + AND al.ancestry_id >= $2 + AND al.id = af.ancestry_layer_id AND af.namespaced_feature_id = vanf.namespaced_feature_id ORDER BY a.id ASC LIMIT $3;` @@ -211,9 +211,9 @@ const ( WHERE NOT EXISTS (SELECT id FROM ancestry_lister WHERE ancestry_id = $1 AND lister = $2) ON CONFLICT DO NOTHING` persistAncestryDetector = ` - INSERT INTO ancestry_detector (ancestry_id, detector) - SELECT CAST ($1 AS INTEGER), CAST ($2 AS TEXT) - WHERE NOT EXISTS (SELECT id FROM ancestry_detector WHERE ancestry_id = $1 AND detector = $2) ON CONFLICT DO NOTHING` + INSERT INTO ancestry_detector (ancestry_id, detector) + SELECT CAST ($1 AS INTEGER), CAST ($2 AS TEXT) + WHERE NOT EXISTS (SELECT id FROM ancestry_detector WHERE ancestry_id = $1 AND detector = $2) ON CONFLICT DO NOTHING` insertAncestry = `INSERT INTO ancestry (name) VALUES ($1) RETURNING id` @@ -225,20 +225,21 @@ const ( ORDER BY ancestry_layer.ancestry_index ASC` searchAncestryFeatures = ` - SELECT namespace.name, namespace.version_format, feature.name, feature.version - FROM namespace, feature, ancestry, namespaced_feature, ancestry_feature - WHERE ancestry.name = $1 - AND ancestry.id = ancestry_feature.ancestry_id - AND ancestry_feature.namespaced_feature_id = namespaced_feature.id - AND namespaced_feature.feature_id = feature.id - AND namespaced_feature.namespace_id = namespace.id` - - searchAncestry = `SELECT id FROM ancestry WHERE name = $1` - searchAncestryDetectors = `SELECT detector FROM ancestry_detector WHERE ancestry_id = $1` - searchAncestryListers = `SELECT lister FROM ancestry_lister WHERE ancestry_id = $1` - removeAncestry = `DELETE FROM ancestry WHERE name = $1` - insertAncestryLayer = `INSERT INTO ancestry_layer(ancestry_id, ancestry_index, layer_id) VALUES($1,$2,$3)` - insertAncestryFeature = `INSERT INTO 
ancestry_feature(ancestry_id, namespaced_feature_id) VALUES ($1, $2)` + SELECT namespace.name, namespace.version_format, feature.name, feature.version, ancestry_layer.ancestry_index + FROM namespace, feature, ancestry, namespaced_feature, ancestry_layer, ancestry_feature + WHERE ancestry.name = $1 + AND ancestry.id = ancestry_layer.ancestry_id + AND ancestry_feature.ancestry_layer_id = ancestry_layer.id + AND ancestry_feature.namespaced_feature_id = namespaced_feature.id + AND namespaced_feature.feature_id = feature.id + AND namespaced_feature.namespace_id = namespace.id` + + searchAncestry = `SELECT id FROM ancestry WHERE name = $1` + searchAncestryDetectors = `SELECT detector FROM ancestry_detector WHERE ancestry_id = $1` + searchAncestryListers = `SELECT lister FROM ancestry_lister WHERE ancestry_id = $1` + removeAncestry = `DELETE FROM ancestry WHERE name = $1` + insertAncestryLayer = `INSERT INTO ancestry_layer(ancestry_id, ancestry_index, layer_id) VALUES($1,$2, (SELECT layer.id FROM layer WHERE hash = $3 LIMIT 1)) RETURNING id` + insertAncestryLayerFeature = `INSERT INTO ancestry_feature(ancestry_layer_id, namespaced_feature_id) VALUES ($1, $2)` ) // NOTE(Sida): Every search query can only have count less than postgres set diff --git a/database/pgsql/testdata/data.sql b/database/pgsql/testdata/data.sql index a4ccd31c..9d8e0323 100644 --- a/database/pgsql/testdata/data.sql +++ b/database/pgsql/testdata/data.sql @@ -56,8 +56,8 @@ INSERT INTO layer_detector(id, layer_id, detector) VALUES INSERT INTO ancestry (id, name) VALUES (1, 'ancestry-1'), -- layer-0, layer-1, layer-2, layer-3a (2, 'ancestry-2'), -- layer-0, layer-1, layer-2, layer-3b - (3, 'ancestry-3'), -- empty; just for testing the vulnerable ancestry - (4, 'ancestry-4'); -- empty; just for testing the vulnerable ancestry + (3, 'ancestry-3'), -- layer-0 + (4, 'ancestry-4'); -- layer-0 INSERT INTO ancestry_lister (id, ancestry_id, lister) VALUES (1, 1, 'dpkg'), @@ -69,7 +69,9 @@ INSERT INTO ancestry_detector (id, ancestry_id, detector) VALUES INSERT INTO ancestry_layer (id, ancestry_id, layer_id, ancestry_index) VALUES (1, 1, 1, 0),(2, 1, 2, 1),(3, 1, 3, 2),(4, 1, 4, 3), - (5, 2, 1, 0),(6, 2, 2, 1),(7, 2, 3, 2),(8, 2, 5, 3); + (5, 2, 1, 0),(6, 2, 2, 1),(7, 2, 3, 2),(8, 2, 5, 3), + (9, 3, 1, 0), + (10, 4, 1, 0); INSERT INTO namespaced_feature(id, feature_id, namespace_id) VALUES (1, 1, 1), -- wechat 0.5, debian:7 @@ -77,10 +79,12 @@ INSERT INTO namespaced_feature(id, feature_id, namespace_id) VALUES (3, 2, 2), -- openssl 1.0, debian:8 (4, 3, 1); -- openssl 2.0, debian:7 -INSERT INTO ancestry_feature (id, ancestry_id, namespaced_feature_id) VALUES - (1, 1, 1), (2, 1, 4), - (3, 2, 1), (4, 2, 3), - (5, 3, 2), (6, 4, 2); -- assume that ancestry-3 and ancestry-4 are vulnerable. +-- assume that ancestry-3 and ancestry-4 are vulnerable. 
+INSERT INTO ancestry_feature (id, ancestry_layer_id, namespaced_feature_id) VALUES + (1, 1, 1), (2, 1, 4), -- ancestry-1, layer 0 introduces 1, 4 + (3, 5, 1), (4, 5, 3), -- ancestry-2, layer 0 introduces 1, 3 + (5, 9, 2), -- ancestry-3, layer 0 introduces 2 + (6, 10, 2); -- ancestry-4, layer 0 introduces 2 INSERT INTO vulnerability (id, namespace_id, name, description, link, severity) VALUES (1, 1, 'CVE-OPENSSL-1-DEB7', 'A vulnerability affecting OpenSSL < 2.0 on Debian 7.0', 'http://google.com/#q=CVE-OPENSSL-1-DEB7', 'High'), diff --git a/worker.go b/worker.go index a9c82762..66a7f7fc 100644 --- a/worker.go +++ b/worker.go @@ -160,8 +160,7 @@ func getLayer(datastore database.Datastore, req LayerRequest) (layer database.La } if !ok { - l := database.Layer{Hash: req.Hash} - err = tx.PersistLayer(l) + err = tx.PersistLayer(req.Hash) if err != nil { return } @@ -170,7 +169,9 @@ func getLayer(datastore database.Datastore, req LayerRequest) (layer database.La return } - layer = database.LayerWithContent{Layer: l} + layer = database.LayerWithContent{} + layer.Hash = req.Hash + preq = &processRequest{ request: req, notProcessedBy: Processors, @@ -313,11 +314,11 @@ func combineLayers(layer database.LayerWithContent, partial partialLayer) databa layer.ProcessedBy.Listers = append(layer.ProcessedBy.Listers, strutil.CompareStringLists(partial.processedBy.Listers, layer.ProcessedBy.Listers)...) return database.LayerWithContent{ Layer: database.Layer{ - Hash: layer.Hash, + Hash: layer.Hash, + ProcessedBy: layer.ProcessedBy, }, - ProcessedBy: layer.ProcessedBy, - Features: features, - Namespaces: namespaces, + Features: features, + Namespaces: namespaces, } } @@ -327,7 +328,7 @@ func isAncestryProcessed(datastore database.Datastore, name string) (bool, error return false, err } defer tx.Rollback() - _, processed, ok, err := tx.FindAncestry(name) + ancestry, ok, err := tx.FindAncestry(name) if err != nil { return false, err } @@ -335,14 +336,20 @@ func isAncestryProcessed(datastore database.Datastore, name string) (bool, error return false, nil } - notProcessed := getNotProcessedBy(processed) + notProcessed := getNotProcessedBy(ancestry.ProcessedBy) return len(notProcessed.Detectors) == 0 && len(notProcessed.Listers) == 0, nil } // ProcessAncestry downloads and scans an ancestry if it's not scanned by all // enabled processors in this instance of Clair. 
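// Illustrative only, not part of this patch: with the session interface reworked as
// above, a layer is persisted by hash alone and the processors are read straight off
// the ancestry. A minimal sketch, assuming a database.Datastore value named store
// and hypothetical hash/ancestry names:
//
//	tx, err := store.Begin()
//	if err != nil {
//		return err
//	}
//	defer tx.Rollback()
//	if err := tx.PersistLayer("sha256:deadbeef"); err != nil { // hash only, no database.Layer value
//		return err
//	}
//	if ancestry, ok, err := tx.FindAncestry("my-ancestry"); err == nil && ok {
//		_ = ancestry.ProcessedBy // processors now travel with the ancestry
//	}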
func ProcessAncestry(datastore database.Datastore, imageFormat, name string, layerRequest []LayerRequest) error { - var err error + var ( + err error + ok bool + layers []database.LayerWithContent + commonProcessors database.Processors + ) + if name == "" { return commonerr.NewBadRequestError("could not process a layer which does not have a name") } @@ -351,43 +358,53 @@ func ProcessAncestry(datastore database.Datastore, imageFormat, name string, lay return commonerr.NewBadRequestError("could not process a layer which does not have a format") } - if ok, err := isAncestryProcessed(datastore, name); ok && err == nil { + if ok, err = isAncestryProcessed(datastore, name); err != nil { + return err + } else if ok { log.WithField("ancestry", name).Debug("Ancestry is processed") return nil - } else if err != nil { - return err } - layers, err := processLayers(datastore, imageFormat, layerRequest) - if err != nil { + if layers, err = processLayers(datastore, imageFormat, layerRequest); err != nil { return err } - if !validateProcessors(layers) { - // This error might be triggered because of multiple workers are - // processing the same instance with different processors. - return errors.New("ancestry layers are scanned with different listers and detectors") + if commonProcessors, err = getProcessors(layers); err != nil { + return err } - return processAncestry(datastore, name, layers) + return processAncestry(datastore, name, layers, commonProcessors) } -func processAncestry(datastore database.Datastore, name string, layers []database.LayerWithContent) error { - ancestryFeatures, err := computeAncestryFeatures(layers) - if err != nil { - return err +// getNamespacedFeatures extracts the namespaced features introduced in each +// layer into one array. +func getNamespacedFeatures(layers []database.AncestryLayer) []database.NamespacedFeature { + features := []database.NamespacedFeature{} + for _, layer := range layers { + features = append(features, layer.DetectedFeatures...) } + return features +} - ancestryLayers := make([]database.Layer, 0, len(layers)) - for _, layer := range layers { - ancestryLayers = append(ancestryLayers, layer.Layer) +func processAncestry(datastore database.Datastore, name string, layers []database.LayerWithContent, commonProcessors database.Processors) error { + var ( + ancestry database.AncestryWithContent + err error + ) + + ancestry.Name = name + ancestry.ProcessedBy = commonProcessors + ancestry.Layers, err = computeAncestryLayers(layers, commonProcessors) + if err != nil { + return err } + ancestryFeatures := getNamespacedFeatures(ancestry.Layers) log.WithFields(log.Fields{ "ancestry": name, "number of features": len(ancestryFeatures), "processed by": Processors, - "number of layers": len(ancestryLayers), + "number of layers": len(ancestry.Layers), }).Debug("compute ancestry features") if err := persistNamespacedFeatures(datastore, ancestryFeatures); err != nil { @@ -399,7 +416,7 @@ func processAncestry(datastore database.Datastore, name string, layers []databas return err } - err = tx.UpsertAncestry(database.Ancestry{Name: name, Layers: ancestryLayers}, ancestryFeatures, Processors) + err = tx.UpsertAncestry(ancestry) if err != nil { tx.Rollback() return err @@ -440,44 +457,71 @@ func persistNamespacedFeatures(datastore database.Datastore, features []database return tx.Commit() } -// validateProcessors checks if the layers processed by same set of processors. 
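// Illustrative only, not part of this patch: getProcessors below replaces the boolean
// validateProcessors check and returns the processors shared by every layer, failing
// when the layers were scanned by different Clair configurations. A sketch using
// example processor names:
//
//	layer0 := database.LayerWithContent{}
//	layer0.ProcessedBy = database.Processors{Detectors: []string{"os-release"}, Listers: []string{"dpkg"}}
//	layer1 := layer0
//
//	common, err := getProcessors([]database.LayerWithContent{layer0, layer1})
//	// common == Processors{Detectors: ["os-release"], Listers: ["dpkg"]}, err == nil
//
//	layer1.ProcessedBy.Listers = nil
//	_, err = getProcessors([]database.LayerWithContent{layer0, layer1})
//	// err != nil: the two layers were not processed by the same set of listers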
-func validateProcessors(layers []database.LayerWithContent) bool { +// getProcessors retrieves the common subset of the processors of each layer. +func getProcessors(layers []database.LayerWithContent) (database.Processors, error) { if len(layers) == 0 { - return true + return database.Processors{}, nil } + detectors := layers[0].ProcessedBy.Detectors listers := layers[0].ProcessedBy.Listers + detectorsLen := len(detectors) + listersLen := len(listers) + for _, l := range layers[1:] { - if len(strutil.CompareStringLists(detectors, l.ProcessedBy.Detectors)) != 0 || - len(strutil.CompareStringLists(listers, l.ProcessedBy.Listers)) != 0 { - return false + detectors := strutil.CompareStringListsInBoth(detectors, l.ProcessedBy.Detectors) + listers := strutil.CompareStringListsInBoth(listers, l.ProcessedBy.Listers) + + if len(detectors) != detectorsLen || len(listers) != listersLen { + // This error might be triggered because multiple workers are + // processing the same instance with different processors. + // TODO(sidchen): Once the features can be associated with + // Detectors/Listers, we can support dynamically generating ancestry's + // detector/lister based on the layers. + return database.Processors{}, errors.New("processing layers with different Clair instances is currently unsupported") } } - return true + return database.Processors{ + Detectors: detectors, + Listers: listers, + }, nil +} + +type introducedFeature struct { + feature database.NamespacedFeature + layerIndex int } -// computeAncestryFeatures computes the features in an ancestry based on all -// layers. -func computeAncestryFeatures(ancestryLayers []database.LayerWithContent) ([]database.NamespacedFeature, error) { +// computeAncestryLayers computes the ancestry's layers along with the features +// introduced in each layer. +func computeAncestryLayers(layers []database.LayerWithContent, commonProcessors database.Processors) ([]database.AncestryLayer, error) { + // TODO(sidchen): Once the features are linked to a specific processor, we + // will use commonProcessors to filter out the features for this ancestry. + // version format -> namespace namespaces := map[string]database.Namespace{} // version format -> feature ID -> feature - features := map[string]map[string]database.NamespacedFeature{} - for _, layer := range ancestryLayers { - // At start of the loop, namespaces and features always contain the - // previous layer's result. + features := map[string]map[string]introducedFeature{} + ancestryLayers := []database.AncestryLayer{} + for index, layer := range layers { + // Initialize the ancestry layer. + initializedLayer := database.AncestryLayer{Layer: layer.Layer, DetectedFeatures: []database.NamespacedFeature{}} + ancestryLayers = append(ancestryLayers, initializedLayer) + + // Precondition: namespaces and features contain the result from union + // of all parents.
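// Illustrative only, not part of this patch: a namespaced feature is credited to the
// first layer whose scan reports it; later layers that merely keep shipping the same
// feature inherit it instead of re-introducing it. For example, if layers 0, 1 and 2
// all report dpkg 1.18 and only layer 2 adds openssl 2.0, then
//
//	ancestryLayers, _ := computeAncestryLayers(layers, commonProcessors)
//	// ancestryLayers[0].DetectedFeatures -> [dpkg 1.18]
//	// ancestryLayers[1].DetectedFeatures -> []
//	// ancestryLayers[2].DetectedFeatures -> [openssl 2.0]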
for _, ns := range layer.Namespaces { namespaces[ns.VersionFormat] = ns } // version format -> feature ID -> feature - currentFeatures := map[string]map[string]database.NamespacedFeature{} + currentFeatures := map[string]map[string]introducedFeature{} for _, f := range layer.Features { if ns, ok := namespaces[f.VersionFormat]; ok { - var currentMap map[string]database.NamespacedFeature + var currentMap map[string]introducedFeature if currentMap, ok = currentFeatures[f.VersionFormat]; !ok { - currentFeatures[f.VersionFormat] = make(map[string]database.NamespacedFeature) + currentFeatures[f.VersionFormat] = make(map[string]introducedFeature) currentMap = currentFeatures[f.VersionFormat] } @@ -490,9 +534,12 @@ func computeAncestryFeatures(ancestryLayers []database.LayerWithContent) ([]data } if !inherited { - currentMap[f.Name+":"+f.Version] = database.NamespacedFeature{ - Feature: f, - Namespace: ns, + currentMap[f.Name+":"+f.Version] = introducedFeature{ + feature: database.NamespacedFeature{ + Feature: f, + Namespace: ns, + }, + layerIndex: index, } } @@ -513,13 +560,16 @@ func computeAncestryFeatures(ancestryLayers []database.LayerWithContent) ([]data } } - ancestryFeatures := []database.NamespacedFeature{} for _, featureMap := range features { for _, feature := range featureMap { - ancestryFeatures = append(ancestryFeatures, feature) + ancestryLayers[feature.layerIndex].DetectedFeatures = append( + ancestryLayers[feature.layerIndex].DetectedFeatures, + feature.feature, + ) } } - return ancestryFeatures, nil + + return ancestryLayers, nil } // getNotProcessedBy returns a processors, which contains the detectors and diff --git a/worker_test.go b/worker_test.go index 5f6e0ff4..e78430a6 100644 --- a/worker_test.go +++ b/worker_test.go @@ -41,7 +41,7 @@ type mockDatastore struct { database.MockDatastore layers map[string]database.LayerWithContent - ancestry map[string]database.AncestryWithFeatures + ancestry map[string]database.AncestryWithContent namespaces map[string]database.Namespace features map[string]database.Feature namespacedFeatures map[string]database.NamespacedFeature @@ -65,32 +65,52 @@ func copyDatastore(md *mockDatastore) mockDatastore { layers[k] = database.LayerWithContent{ Layer: database.Layer{ Hash: l.Hash, - }, - ProcessedBy: database.Processors{ - Listers: listers, - Detectors: detectors, + ProcessedBy: database.Processors{ + Listers: listers, + Detectors: detectors, + }, }, Features: features, Namespaces: namespaces, } } - ancestry := map[string]database.AncestryWithFeatures{} + ancestry := map[string]database.AncestryWithContent{} for k, a := range md.ancestry { - nf := append([]database.NamespacedFeature(nil), a.Features...) - l := append([]database.Layer(nil), a.Layers...) - listers := append([]string(nil), a.ProcessedBy.Listers...) - detectors := append([]string(nil), a.ProcessedBy.Detectors...) 
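// Illustrative only, not part of this patch: the tests below no longer read a flat
// Features slice off a stored ancestry; they rebuild it from the per-layer
// DetectedFeatures. A sketch, assuming a *mockDatastore value md holding an ancestry
// named "Mock":
//
//	a := md.ancestry["Mock"]
//	features := getNamespacedFeatures(a.Layers) // concatenates every layer's DetectedFeatures
//	_ = len(features)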
- ancestry[k] = database.AncestryWithFeatures{ + ancestryLayers := []database.AncestryLayer{} + layers := []database.Layer{} + + for _, layer := range a.Layers { + layers = append(layers, database.Layer{ + Hash: layer.Hash, + ProcessedBy: database.Processors{ + Detectors: append([]string(nil), layer.Layer.ProcessedBy.Detectors...), + Listers: append([]string(nil), layer.Layer.ProcessedBy.Listers...), + }, + }) + + ancestryLayers = append(ancestryLayers, database.AncestryLayer{ + Layer: database.Layer{ + Hash: layer.Hash, + ProcessedBy: database.Processors{ + Detectors: append([]string(nil), layer.Layer.ProcessedBy.Detectors...), + Listers: append([]string(nil), layer.Layer.ProcessedBy.Listers...), + }, + }, + DetectedFeatures: append([]database.NamespacedFeature(nil), layer.DetectedFeatures...), + }) + } + + ancestry[k] = database.AncestryWithContent{ Ancestry: database.Ancestry{ Name: a.Name, - Layers: l, - }, - ProcessedBy: database.Processors{ - Detectors: detectors, - Listers: listers, + Layers: layers, + ProcessedBy: database.Processors{ + Detectors: append([]string(nil), a.ProcessedBy.Detectors...), + Listers: append([]string(nil), a.ProcessedBy.Listers...), + }, }, - Features: nf, + Layers: ancestryLayers, } } @@ -121,7 +141,7 @@ func newMockDatastore() *mockDatastore { errSessionDone := errors.New("Session Done") md := &mockDatastore{ layers: make(map[string]database.LayerWithContent), - ancestry: make(map[string]database.AncestryWithFeatures), + ancestry: make(map[string]database.AncestryWithContent), namespaces: make(map[string]database.Namespace), features: make(map[string]database.Feature), namespacedFeatures: make(map[string]database.NamespacedFeature), @@ -156,22 +176,20 @@ func newMockDatastore() *mockDatastore { return nil } - session.FctFindAncestry = func(name string) (database.Ancestry, database.Processors, bool, error) { - processors := database.Processors{} + session.FctFindAncestry = func(name string) (database.Ancestry, bool, error) { if session.terminated { - return database.Ancestry{}, processors, false, errSessionDone + return database.Ancestry{}, false, errSessionDone } ancestry, ok := session.copy.ancestry[name] - return ancestry.Ancestry, ancestry.ProcessedBy, ok, nil + return ancestry.Ancestry, ok, nil } - session.FctFindLayer = func(name string) (database.Layer, database.Processors, bool, error) { - processors := database.Processors{} + session.FctFindLayer = func(name string) (database.Layer, bool, error) { if session.terminated { - return database.Layer{}, processors, false, errSessionDone + return database.Layer{}, false, errSessionDone } layer, ok := session.copy.layers[name] - return layer.Layer, layer.ProcessedBy, ok, nil + return layer.Layer, ok, nil } session.FctFindLayerWithContent = func(name string) (database.LayerWithContent, bool, error) { @@ -182,12 +200,12 @@ func newMockDatastore() *mockDatastore { return layer, ok, nil } - session.FctPersistLayer = func(layer database.Layer) error { + session.FctPersistLayer = func(hash string) error { if session.terminated { return errSessionDone } - if _, ok := session.copy.layers[layer.Hash]; !ok { - session.copy.layers[layer.Hash] = database.LayerWithContent{Layer: layer} + if _, ok := session.copy.layers[hash]; !ok { + session.copy.layers[hash] = database.LayerWithContent{Layer: database.Layer{Hash: hash}} } return nil } @@ -267,25 +285,20 @@ func newMockDatastore() *mockDatastore { return nil } - session.FctUpsertAncestry = func(ancestry database.Ancestry, features []database.NamespacedFeature, 
processors database.Processors) error { + session.FctUpsertAncestry = func(ancestry database.AncestryWithContent) error { if session.terminated { return errSessionDone } + features := getNamespacedFeatures(ancestry.Layers) // ensure features are in the database for _, f := range features { if _, ok := session.copy.namespacedFeatures[NamespacedFeatureKey(&f)]; !ok { - return errors.New("namepsaced feature not in db") + return errors.New("namespaced feature not in db") } } - ancestryWFeature := database.AncestryWithFeatures{ - Ancestry: ancestry, - Features: features, - ProcessedBy: processors, - } - - session.copy.ancestry[ancestry.Name] = ancestryWFeature + session.copy.ancestry[ancestry.Name] = ancestry return nil } @@ -359,9 +372,11 @@ func TestProcessAncestryWithDistUpgrade(t *testing.T) { } assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock", layers)) + // check the ancestry features - assert.Len(t, datastore.ancestry["Mock"].Features, 74) - for _, f := range datastore.ancestry["Mock"].Features { + features := getNamespacedFeatures(datastore.ancestry["Mock"].Layers) + assert.Len(t, features, 74) + for _, f := range features { if _, ok := nonUpgradedMap[f.Feature]; ok { assert.Equal(t, "debian:7", f.Namespace.Name) } else { @@ -388,20 +403,20 @@ func TestProcessLayers(t *testing.T) { {Hash: "jessie", Path: testDataPath + "jessie.tar.gz"}, } - processedLayers, err := processLayers(datastore, "Docker", layers) + LayerWithContents, err := processLayers(datastore, "Docker", layers) assert.Nil(t, err) - assert.Len(t, processedLayers, 3) + assert.Len(t, LayerWithContents, 3) // ensure resubmit won't break the stuff - processedLayers, err = processLayers(datastore, "Docker", layers) + LayerWithContents, err = processLayers(datastore, "Docker", layers) assert.Nil(t, err) - assert.Len(t, processedLayers, 3) + assert.Len(t, LayerWithContents, 3) // Ensure each processed layer is correct - assert.Len(t, processedLayers[0].Namespaces, 0) - assert.Len(t, processedLayers[1].Namespaces, 1) - assert.Len(t, processedLayers[2].Namespaces, 1) - assert.Len(t, processedLayers[0].Features, 0) - assert.Len(t, processedLayers[1].Features, 52) - assert.Len(t, processedLayers[2].Features, 74) + assert.Len(t, LayerWithContents[0].Namespaces, 0) + assert.Len(t, LayerWithContents[1].Namespaces, 1) + assert.Len(t, LayerWithContents[2].Namespaces, 1) + assert.Len(t, LayerWithContents[0].Features, 0) + assert.Len(t, LayerWithContents[1].Features, 52) + assert.Len(t, LayerWithContents[2].Features, 74) // Ensure each layer has expected namespaces and features detected if blank, ok := datastore.layers["blank"]; ok { @@ -462,10 +477,10 @@ func TestClairUpgrade(t *testing.T) { } assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock", layers)) - assert.Len(t, datastore.ancestry["Mock"].Features, 0) + assert.Len(t, getNamespacedFeatures(datastore.ancestry["Mock"].Layers), 0) assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock2", layers2)) - assert.Len(t, datastore.ancestry["Mock2"].Features, 0) + assert.Len(t, getNamespacedFeatures(datastore.ancestry["Mock2"].Layers), 0) // Clair is upgraded to use a new namespace detector. The expected // behavior is that all layers will be rescanned with "apt-sources" and @@ -478,7 +493,7 @@ func TestClairUpgrade(t *testing.T) { // Even though Clair processors are upgraded, the ancestry's features should // not be upgraded without posting the ancestry to Clair again. 
assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock", layers)) - assert.Len(t, datastore.ancestry["Mock"].Features, 0) + assert.Len(t, getNamespacedFeatures(datastore.ancestry["Mock"].Layers), 0) // Clair is upgraded to use a new feature lister. The expected behavior is // that all layers will be rescanned with "dpkg" and the ancestry's features @@ -489,18 +504,18 @@ func TestClairUpgrade(t *testing.T) { } assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock", layers)) - assert.Len(t, datastore.ancestry["Mock"].Features, 74) + assert.Len(t, getNamespacedFeatures(datastore.ancestry["Mock"].Layers), 74) assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock2", layers2)) - assert.Len(t, datastore.ancestry["Mock2"].Features, 52) + assert.Len(t, getNamespacedFeatures(datastore.ancestry["Mock2"].Layers), 52) // check the namespaces are correct - for _, f := range datastore.ancestry["Mock"].Features { + for _, f := range getNamespacedFeatures(datastore.ancestry["Mock"].Layers) { if !assert.NotEqual(t, database.Namespace{}, f.Namespace) { assert.Fail(t, "Every feature should have a namespace attached") } } - for _, f := range datastore.ancestry["Mock2"].Features { + for _, f := range getNamespacedFeatures(datastore.ancestry["Mock2"].Layers) { if !assert.NotEqual(t, database.Namespace{}, f.Namespace) { assert.Fail(t, "Every feature should have a namespace attached") } @@ -624,8 +639,9 @@ func TestComputeAncestryFeatures(t *testing.T) { }: false, } - features, err := computeAncestryFeatures(layers) + ancestryLayers, err := computeAncestryLayers(layers, database.Processors{}) assert.Nil(t, err) + features := getNamespacedFeatures(ancestryLayers) for _, f := range features { if assert.Contains(t, expected, f) { if assert.False(t, expected[f]) {