Merge pull request #620 from KeyboardNerd/feature/detector
Internally version all detected content by extension
commit 3c72fa29a6
@@ -9,6 +9,8 @@ It is generated from these files:
 
 It has these top-level messages:
 	Vulnerability
+	Detector
+	Namespace
 	Feature
 	Layer
 	ClairStatus
@@ -48,6 +50,30 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 
+type Detector_DType int32
+
+const (
+	Detector_DETECTOR_D_TYPE_INVALID   Detector_DType = 0
+	Detector_DETECTOR_D_TYPE_NAMESPACE Detector_DType = 1
+	Detector_DETECTOR_D_TYPE_FEATURE   Detector_DType = 2
+)
+
+var Detector_DType_name = map[int32]string{
+	0: "DETECTOR_D_TYPE_INVALID",
+	1: "DETECTOR_D_TYPE_NAMESPACE",
+	2: "DETECTOR_D_TYPE_FEATURE",
+}
+var Detector_DType_value = map[string]int32{
+	"DETECTOR_D_TYPE_INVALID":   0,
+	"DETECTOR_D_TYPE_NAMESPACE": 1,
+	"DETECTOR_D_TYPE_FEATURE":   2,
+}
+
+func (x Detector_DType) String() string {
+	return proto.EnumName(Detector_DType_name, int32(x))
+}
+func (Detector_DType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
+
 type Vulnerability struct {
 	// The name of the vulnerability.
 	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
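The generated enum ships with lookup tables in both directions (Detector_DType_name and Detector_DType_value). A minimal sketch (not part of this commit) of round-tripping a value through them, assuming the code lives alongside the generated clairpb package:

// Sketch: round-tripping a detector type through the generated lookup tables.
func exampleDTypeRoundTrip() Detector_DType {
	d := Detector_DETECTOR_D_TYPE_NAMESPACE
	name := d.String()                  // "DETECTOR_D_TYPE_NAMESPACE", via Detector_DType_name
	value := Detector_DType_value[name] // 1
	return Detector_DType(value)        // == Detector_DETECTOR_D_TYPE_NAMESPACE
}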
@@ -130,23 +156,88 @@ func (m *Vulnerability) GetAffectedVersions() []*Feature {
 	return nil
 }
 
+type Detector struct {
+	// The name of the detector.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The version of the detector.
+	Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	// The type of the detector.
+	Dtype Detector_DType `protobuf:"varint,3,opt,name=dtype,enum=coreos.clair.Detector_DType" json:"dtype,omitempty"`
+}
+
+func (m *Detector) Reset()                    { *m = Detector{} }
+func (m *Detector) String() string            { return proto.CompactTextString(m) }
+func (*Detector) ProtoMessage()               {}
+func (*Detector) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Detector) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Detector) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *Detector) GetDtype() Detector_DType {
+	if m != nil {
+		return m.Dtype
+	}
+	return Detector_DETECTOR_D_TYPE_INVALID
+}
+
+type Namespace struct {
+	// The name of the namespace.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The detector used to detect the namespace. This only exists when present in
+	// an Ancestry Feature.
+	Detector *Detector `protobuf:"bytes,2,opt,name=detector" json:"detector,omitempty"`
+}
+
+func (m *Namespace) Reset()                    { *m = Namespace{} }
+func (m *Namespace) String() string            { return proto.CompactTextString(m) }
+func (*Namespace) ProtoMessage()               {}
+func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *Namespace) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Namespace) GetDetector() *Detector {
+	if m != nil {
+		return m.Detector
+	}
+	return nil
+}
+
 type Feature struct {
 	// The name of the feature.
 	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// The name of the namespace in which the feature is detected.
-	NamespaceName string `protobuf:"bytes,2,opt,name=namespace_name,json=namespaceName" json:"namespace_name,omitempty"`
+	// The namespace in which the feature is detected.
+	Namespace *Namespace `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"`
 	// The specific version of this feature.
 	Version string `protobuf:"bytes,3,opt,name=version" json:"version,omitempty"`
 	// The format used to parse version numbers for the feature.
 	VersionFormat string `protobuf:"bytes,4,opt,name=version_format,json=versionFormat" json:"version_format,omitempty"`
+	// The detector used to detect this feature. This only exists when present in
+	// an Ancestry.
+	Detector *Detector `protobuf:"bytes,5,opt,name=detector" json:"detector,omitempty"`
 	// The list of vulnerabilities that affect the feature.
-	Vulnerabilities []*Vulnerability `protobuf:"bytes,5,rep,name=vulnerabilities" json:"vulnerabilities,omitempty"`
+	Vulnerabilities []*Vulnerability `protobuf:"bytes,6,rep,name=vulnerabilities" json:"vulnerabilities,omitempty"`
 }
 
 func (m *Feature) Reset()                    { *m = Feature{} }
 func (m *Feature) String() string            { return proto.CompactTextString(m) }
 func (*Feature) ProtoMessage()               {}
-func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
 
 func (m *Feature) GetName() string {
 	if m != nil {
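With the new messages, a Feature embeds its Namespace, and both carry the Detector that produced them. A hypothetical construction for illustration; the detector and namespace values ("os-release", "dpkg", "debian:9", the versions) are invented examples, not taken from this commit:

// Sketch: building the new nested Feature shape.
func exampleFeature() *Feature {
	return &Feature{
		Name:          "openssl",
		Version:       "1.1.0f-3",
		VersionFormat: "dpkg",
		Namespace: &Namespace{
			Name: "debian:9",
			Detector: &Detector{
				Name:    "os-release",
				Version: "1.0",
				Dtype:   Detector_DETECTOR_D_TYPE_NAMESPACE,
			},
		},
		Detector: &Detector{
			Name:    "dpkg",
			Version: "1.0",
			Dtype:   Detector_DETECTOR_D_TYPE_FEATURE,
		},
		// Vulnerabilities is now field 6; it is populated by the server and omitted here.
	}
}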
@@ -155,11 +246,11 @@ func (m *Feature) GetName() string {
 	return ""
 }
 
-func (m *Feature) GetNamespaceName() string {
+func (m *Feature) GetNamespace() *Namespace {
 	if m != nil {
-		return m.NamespaceName
+		return m.Namespace
 	}
-	return ""
+	return nil
 }
 
 func (m *Feature) GetVersion() string {
@@ -176,6 +267,13 @@ func (m *Feature) GetVersionFormat() string {
 	return ""
 }
 
+func (m *Feature) GetDetector() *Detector {
+	if m != nil {
+		return m.Detector
+	}
+	return nil
+}
+
 func (m *Feature) GetVulnerabilities() []*Vulnerability {
 	if m != nil {
 		return m.Vulnerabilities
@@ -191,7 +289,7 @@ type Layer struct {
 func (m *Layer) Reset()                    { *m = Layer{} }
 func (m *Layer) String() string            { return proto.CompactTextString(m) }
 func (*Layer) ProtoMessage()               {}
-func (*Layer) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (*Layer) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
 
 func (m *Layer) GetHash() string {
 	if m != nil {
@@ -201,27 +299,18 @@ func (m *Layer) GetHash() string {
 }
 
 type ClairStatus struct {
-	// The configured list of feature listers used to scan an ancestry.
-	Listers []string `protobuf:"bytes,1,rep,name=listers" json:"listers,omitempty"`
-	// The configured list of namespace detectors used to scan an ancestry.
-	Detectors []string `protobuf:"bytes,2,rep,name=detectors" json:"detectors,omitempty"`
+	// The implemented detectors in this Clair instance
+	Detectors []*Detector `protobuf:"bytes,1,rep,name=detectors" json:"detectors,omitempty"`
 	// The time at which the updater last ran.
-	LastUpdateTime *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime" json:"last_update_time,omitempty"`
+	LastUpdateTime *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=last_update_time,json=lastUpdateTime" json:"last_update_time,omitempty"`
 }
 
 func (m *ClairStatus) Reset()                    { *m = ClairStatus{} }
 func (m *ClairStatus) String() string            { return proto.CompactTextString(m) }
 func (*ClairStatus) ProtoMessage()               {}
-func (*ClairStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*ClairStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
 
-func (m *ClairStatus) GetListers() []string {
-	if m != nil {
-		return m.Listers
-	}
-	return nil
-}
-
-func (m *ClairStatus) GetDetectors() []string {
+func (m *ClairStatus) GetDetectors() []*Detector {
 	if m != nil {
 		return m.Detectors
 	}
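ClairStatus now exposes one typed detector list in place of the former parallel Listers and Detectors string slices. A sketch of a client printing it, assuming a *ClairStatus obtained from the status endpoint and "fmt" imported:

// Sketch: printing the typed detector list from a ClairStatus.
func printDetectors(status *ClairStatus) {
	for _, d := range status.GetDetectors() {
		// Detector_DType implements fmt.Stringer, so %s prints the enum name.
		fmt.Printf("%s %s (%s)\n", d.GetName(), d.GetVersion(), d.GetDtype())
	}
}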
@@ -243,7 +332,7 @@ type GetAncestryRequest struct {
 func (m *GetAncestryRequest) Reset()         { *m = GetAncestryRequest{} }
 func (m *GetAncestryRequest) String() string { return proto.CompactTextString(m) }
 func (*GetAncestryRequest) ProtoMessage()    {}
-func (*GetAncestryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*GetAncestryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
 
 func (m *GetAncestryRequest) GetAncestryName() string {
 	if m != nil {
@@ -262,7 +351,7 @@ type GetAncestryResponse struct {
 func (m *GetAncestryResponse) Reset()         { *m = GetAncestryResponse{} }
 func (m *GetAncestryResponse) String() string { return proto.CompactTextString(m) }
 func (*GetAncestryResponse) ProtoMessage()    {}
-func (*GetAncestryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*GetAncestryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
 
 func (m *GetAncestryResponse) GetAncestry() *GetAncestryResponse_Ancestry {
 	if m != nil {
@@ -289,7 +378,7 @@ func (m *GetAncestryResponse_AncestryLayer) Reset() { *m = GetAncestryRe
 func (m *GetAncestryResponse_AncestryLayer) String() string { return proto.CompactTextString(m) }
 func (*GetAncestryResponse_AncestryLayer) ProtoMessage()    {}
 func (*GetAncestryResponse_AncestryLayer) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{5, 0}
+	return fileDescriptor0, []int{7, 0}
 }
 
 func (m *GetAncestryResponse_AncestryLayer) GetLayer() *Layer {
@@ -309,18 +398,17 @@ func (m *GetAncestryResponse_AncestryLayer) GetDetectedFeatures() []*Feature {
 type GetAncestryResponse_Ancestry struct {
 	// The name of the desired ancestry.
 	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// The configured list of feature listers used to scan this ancestry.
-	ScannedListers []string `protobuf:"bytes,4,rep,name=scanned_listers,json=scannedListers" json:"scanned_listers,omitempty"`
-	// The configured list of namespace detectors used to scan an ancestry.
-	ScannedDetectors []string `protobuf:"bytes,5,rep,name=scanned_detectors,json=scannedDetectors" json:"scanned_detectors,omitempty"`
+	// The detectors used to scan this Ancestry. It may not be the current set
+	// of detectors in clair status.
+	Detectors []*Detector `protobuf:"bytes,2,rep,name=detectors" json:"detectors,omitempty"`
 	// The list of layers along with detected features in each.
-	Layers []*GetAncestryResponse_AncestryLayer `protobuf:"bytes,6,rep,name=layers" json:"layers,omitempty"`
+	Layers []*GetAncestryResponse_AncestryLayer `protobuf:"bytes,3,rep,name=layers" json:"layers,omitempty"`
 }
 
 func (m *GetAncestryResponse_Ancestry) Reset()         { *m = GetAncestryResponse_Ancestry{} }
 func (m *GetAncestryResponse_Ancestry) String() string { return proto.CompactTextString(m) }
 func (*GetAncestryResponse_Ancestry) ProtoMessage()    {}
-func (*GetAncestryResponse_Ancestry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5, 1} }
+func (*GetAncestryResponse_Ancestry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 1} }
 
 func (m *GetAncestryResponse_Ancestry) GetName() string {
 	if m != nil {
@@ -329,16 +417,9 @@ func (m *GetAncestryResponse_Ancestry) GetName() string {
 	return ""
 }
 
-func (m *GetAncestryResponse_Ancestry) GetScannedListers() []string {
+func (m *GetAncestryResponse_Ancestry) GetDetectors() []*Detector {
 	if m != nil {
-		return m.ScannedListers
-	}
-	return nil
-}
-
-func (m *GetAncestryResponse_Ancestry) GetScannedDetectors() []string {
-	if m != nil {
-		return m.ScannedDetectors
+		return m.Detectors
 	}
 	return nil
 }
@@ -364,7 +445,7 @@ type PostAncestryRequest struct {
 func (m *PostAncestryRequest) Reset()         { *m = PostAncestryRequest{} }
 func (m *PostAncestryRequest) String() string { return proto.CompactTextString(m) }
 func (*PostAncestryRequest) ProtoMessage()    {}
-func (*PostAncestryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*PostAncestryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
 
 func (m *PostAncestryRequest) GetAncestryName() string {
 	if m != nil {
@@ -400,7 +481,7 @@ func (m *PostAncestryRequest_PostLayer) Reset() { *m = PostAncestryReque
 func (m *PostAncestryRequest_PostLayer) String() string { return proto.CompactTextString(m) }
 func (*PostAncestryRequest_PostLayer) ProtoMessage()    {}
 func (*PostAncestryRequest_PostLayer) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{6, 0}
+	return fileDescriptor0, []int{8, 0}
 }
 
 func (m *PostAncestryRequest_PostLayer) GetHash() string {
@@ -432,7 +513,7 @@ type PostAncestryResponse struct {
 func (m *PostAncestryResponse) Reset()         { *m = PostAncestryResponse{} }
 func (m *PostAncestryResponse) String() string { return proto.CompactTextString(m) }
 func (*PostAncestryResponse) ProtoMessage()    {}
-func (*PostAncestryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*PostAncestryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
 
 func (m *PostAncestryResponse) GetStatus() *ClairStatus {
 	if m != nil {
@@ -457,7 +538,7 @@ type GetNotificationRequest struct {
 func (m *GetNotificationRequest) Reset()         { *m = GetNotificationRequest{} }
 func (m *GetNotificationRequest) String() string { return proto.CompactTextString(m) }
 func (*GetNotificationRequest) ProtoMessage()    {}
-func (*GetNotificationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*GetNotificationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
 
 func (m *GetNotificationRequest) GetOldVulnerabilityPage() string {
 	if m != nil {
@@ -495,7 +576,7 @@ type GetNotificationResponse struct {
 func (m *GetNotificationResponse) Reset()         { *m = GetNotificationResponse{} }
 func (m *GetNotificationResponse) String() string { return proto.CompactTextString(m) }
 func (*GetNotificationResponse) ProtoMessage()    {}
-func (*GetNotificationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (*GetNotificationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
 
 func (m *GetNotificationResponse) GetNotification() *GetNotificationResponse_Notification {
 	if m != nil {
@@ -523,7 +604,7 @@ func (m *GetNotificationResponse_Notification) Reset() { *m = GetNotific
 func (m *GetNotificationResponse_Notification) String() string { return proto.CompactTextString(m) }
 func (*GetNotificationResponse_Notification) ProtoMessage()    {}
 func (*GetNotificationResponse_Notification) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{9, 0}
+	return fileDescriptor0, []int{11, 0}
 }
 
 func (m *GetNotificationResponse_Notification) GetName() string {
@@ -585,7 +666,7 @@ type PagedVulnerableAncestries struct {
 func (m *PagedVulnerableAncestries) Reset()         { *m = PagedVulnerableAncestries{} }
 func (m *PagedVulnerableAncestries) String() string { return proto.CompactTextString(m) }
 func (*PagedVulnerableAncestries) ProtoMessage()    {}
-func (*PagedVulnerableAncestries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (*PagedVulnerableAncestries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
 
 func (m *PagedVulnerableAncestries) GetCurrentPage() string {
 	if m != nil {
@@ -638,7 +719,7 @@ func (m *PagedVulnerableAncestries_IndexedAncestryName) String() string {
 }
 
 func (*PagedVulnerableAncestries_IndexedAncestryName) ProtoMessage() {}
 func (*PagedVulnerableAncestries_IndexedAncestryName) Descriptor() ([]byte, []int) {
-	return fileDescriptor0, []int{10, 0}
+	return fileDescriptor0, []int{12, 0}
 }
 
 func (m *PagedVulnerableAncestries_IndexedAncestryName) GetIndex() int32 {
@@ -663,7 +744,7 @@ type MarkNotificationAsReadRequest struct {
 func (m *MarkNotificationAsReadRequest) Reset()         { *m = MarkNotificationAsReadRequest{} }
 func (m *MarkNotificationAsReadRequest) String() string { return proto.CompactTextString(m) }
 func (*MarkNotificationAsReadRequest) ProtoMessage()    {}
-func (*MarkNotificationAsReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (*MarkNotificationAsReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
 
 func (m *MarkNotificationAsReadRequest) GetName() string {
 	if m != nil {
@@ -678,7 +759,7 @@ type MarkNotificationAsReadResponse struct {
 func (m *MarkNotificationAsReadResponse) Reset()         { *m = MarkNotificationAsReadResponse{} }
 func (m *MarkNotificationAsReadResponse) String() string { return proto.CompactTextString(m) }
 func (*MarkNotificationAsReadResponse) ProtoMessage()    {}
-func (*MarkNotificationAsReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (*MarkNotificationAsReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
 
 type GetStatusRequest struct {
 }
@@ -686,7 +767,7 @@ type GetStatusRequest struct {
 func (m *GetStatusRequest) Reset()         { *m = GetStatusRequest{} }
 func (m *GetStatusRequest) String() string { return proto.CompactTextString(m) }
 func (*GetStatusRequest) ProtoMessage()    {}
-func (*GetStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (*GetStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
 
 type GetStatusResponse struct {
 	// The status of the current Clair instance.
@@ -696,7 +777,7 @@ type GetStatusResponse struct {
 func (m *GetStatusResponse) Reset()         { *m = GetStatusResponse{} }
 func (m *GetStatusResponse) String() string { return proto.CompactTextString(m) }
 func (*GetStatusResponse) ProtoMessage()    {}
-func (*GetStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*GetStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
 
 func (m *GetStatusResponse) GetStatus() *ClairStatus {
 	if m != nil {
@@ -707,6 +788,8 @@ func (m *GetStatusResponse) GetStatus() *ClairStatus {
 
 func init() {
 	proto.RegisterType((*Vulnerability)(nil), "coreos.clair.Vulnerability")
+	proto.RegisterType((*Detector)(nil), "coreos.clair.Detector")
+	proto.RegisterType((*Namespace)(nil), "coreos.clair.Namespace")
 	proto.RegisterType((*Feature)(nil), "coreos.clair.Feature")
 	proto.RegisterType((*Layer)(nil), "coreos.clair.Layer")
 	proto.RegisterType((*ClairStatus)(nil), "coreos.clair.ClairStatus")
@@ -726,6 +809,7 @@ func init() {
 	proto.RegisterType((*MarkNotificationAsReadResponse)(nil), "coreos.clair.MarkNotificationAsReadResponse")
 	proto.RegisterType((*GetStatusRequest)(nil), "coreos.clair.GetStatusRequest")
 	proto.RegisterType((*GetStatusResponse)(nil), "coreos.clair.GetStatusResponse")
+	proto.RegisterEnum("coreos.clair.Detector_DType", Detector_DType_name, Detector_DType_value)
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -1007,83 +1091,89 @@ var _StatusService_serviceDesc = grpc.ServiceDesc{
 func init() { proto.RegisterFile("api/v3/clairpb/clair.proto", fileDescriptor0) }
 
 var fileDescriptor0 = []byte{
-	// 1237 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4b, 0x6f, 0x1b, 0xd5,
-	// (remaining old gzipped descriptor bytes: opaque generated data)
+	// 1336 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x4b, 0x6f, 0x1b, 0x55,
+	// (remaining new gzipped descriptor bytes: opaque generated data)
 }
@@ -44,17 +44,42 @@ message Vulnerability {
   repeated Feature affected_versions = 8;
 }
 
+message Detector {
+  enum DType {
+    DETECTOR_D_TYPE_INVALID = 0;
+    DETECTOR_D_TYPE_NAMESPACE = 1;
+    DETECTOR_D_TYPE_FEATURE = 2;
+  }
+  // The name of the detector.
+  string name = 1;
+  // The version of the detector.
+  string version = 2;
+  // The type of the detector.
+  DType dtype = 3;
+}
+
+message Namespace {
+  // The name of the namespace.
+  string name = 1;
+  // The detector used to detect the namespace. This only exists when present in
+  // an Ancestry Feature.
+  Detector detector = 2;
+}
+
 message Feature {
   // The name of the feature.
   string name = 1;
-  // The name of the namespace in which the feature is detected.
-  string namespace_name = 2;
+  // The namespace in which the feature is detected.
+  Namespace namespace = 2;
   // The specific version of this feature.
   string version = 3;
   // The format used to parse version numbers for the feature.
   string version_format = 4;
+  // The detector used to detect this feature. This only exists when present in
+  // an Ancestry.
+  Detector detector = 5;
   // The list of vulnerabilities that affect the feature.
-  repeated Vulnerability vulnerabilities = 5;
+  repeated Vulnerability vulnerabilities = 6;
 }
 
 message Layer {
@@ -77,12 +102,10 @@ service AncestryService {
 }
 
 message ClairStatus {
-  // The configured list of feature listers used to scan an ancestry.
-  repeated string listers = 1;
-  // The configured list of namespace detectors used to scan an ancestry.
-  repeated string detectors = 2;
+  // The implemented detectors in this Clair instance
+  repeated Detector detectors = 1;
   // The time at which the updater last ran.
-  google.protobuf.Timestamp last_update_time = 3;
+  google.protobuf.Timestamp last_update_time = 2;
 }
 
 message GetAncestryRequest {
@@ -100,12 +123,11 @@ message GetAncestryResponse {
   message Ancestry {
     // The name of the desired ancestry.
     string name = 1;
-    // The configured list of feature listers used to scan this ancestry.
-    repeated string scanned_listers = 4;
-    // The configured list of namespace detectors used to scan an ancestry.
-    repeated string scanned_detectors = 5;
+    // The detectors used to scan this Ancestry. It may not be the current set
+    // of detectors in clair status.
+    repeated Detector detectors = 2;
     // The list of layers along with detected features in each.
-    repeated AncestryLayer layers = 6;
+    repeated AncestryLayer layers = 3;
   }
   // The ancestry requested.
   Ancestry ancestry = 1;
|
@ -156,6 +156,15 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"definitions": {
|
"definitions": {
|
||||||
|
"DetectorDType": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": [
|
||||||
|
"DETECTOR_D_TYPE_INVALID",
|
||||||
|
"DETECTOR_D_TYPE_NAMESPACE",
|
||||||
|
"DETECTOR_D_TYPE_FEATURE"
|
||||||
|
],
|
||||||
|
"default": "DETECTOR_D_TYPE_INVALID"
|
||||||
|
},
|
||||||
"GetAncestryResponseAncestry": {
|
"GetAncestryResponseAncestry": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
@@ -163,19 +172,12 @@
           "type": "string",
           "description": "The name of the desired ancestry."
         },
-        "scanned_listers": {
+        "detectors": {
           "type": "array",
           "items": {
-            "type": "string"
+            "$ref": "#/definitions/clairDetector"
           },
-          "description": "The configured list of feature listers used to scan this ancestry."
-        },
-        "scanned_detectors": {
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "description": "The configured list of namespace detectors used to scan an ancestry."
+          "description": "The detectors used to scan this Ancestry. It may not be the current set\nof detectors in clair status."
         },
         "layers": {
           "type": "array",
@@ -268,19 +270,12 @@
     "clairClairStatus": {
       "type": "object",
      "properties": {
-        "listers": {
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "description": "The configured list of feature listers used to scan an ancestry."
-        },
         "detectors": {
           "type": "array",
           "items": {
-            "type": "string"
+            "$ref": "#/definitions/clairDetector"
           },
-          "description": "The configured list of namespace detectors used to scan an ancestry."
+          "title": "The implemented detectors in this Clair instance"
         },
         "last_update_time": {
           "type": "string",
@@ -289,6 +284,23 @@
         }
       }
     },
+    "clairDetector": {
+      "type": "object",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the detector."
+        },
+        "version": {
+          "type": "string",
+          "description": "The version of the detector."
+        },
+        "dtype": {
+          "$ref": "#/definitions/DetectorDType",
+          "description": "The type of the detector."
+        }
+      }
+    },
     "clairFeature": {
       "type": "object",
       "properties": {
@@ -296,9 +308,9 @@
           "type": "string",
           "description": "The name of the feature."
         },
-        "namespace_name": {
-          "type": "string",
-          "description": "The name of the namespace in which the feature is detected."
+        "namespace": {
+          "$ref": "#/definitions/clairNamespace",
+          "description": "The namespace in which the feature is detected."
         },
         "version": {
           "type": "string",
@@ -308,6 +320,10 @@
           "type": "string",
           "description": "The format used to parse version numbers for the feature."
         },
+        "detector": {
+          "$ref": "#/definitions/clairDetector",
+          "description": "The detector used to detect this feature. This only exists when present in\nan Ancestry."
+        },
         "vulnerabilities": {
           "type": "array",
           "items": {
@@ -360,6 +376,19 @@
     "clairMarkNotificationAsReadResponse": {
       "type": "object"
     },
+    "clairNamespace": {
+      "type": "object",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the namespace."
+        },
+        "detector": {
+          "$ref": "#/definitions/clairDetector",
+          "description": "The detector used to detect the namespace. This only exists when present in\nan Ancestry Feature."
+        }
+      }
+    },
     "clairPagedVulnerableAncestries": {
       "type": "object",
       "properties": {
|
@ -22,6 +22,13 @@ import (
|
|||||||
"github.com/coreos/clair/ext/versionfmt"
|
"github.com/coreos/clair/ext/versionfmt"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// DatabaseDetectorTypeMapping maps the database detector type to the integer
|
||||||
|
// enum proto.
|
||||||
|
var DatabaseDetectorTypeMapping = map[database.DetectorType]Detector_DType{
|
||||||
|
database.NamespaceDetectorType: Detector_DType(1),
|
||||||
|
database.FeatureDetectorType: Detector_DType(2),
|
||||||
|
}
|
||||||
|
|
||||||
// PagedVulnerableAncestriesFromDatabaseModel converts database
|
// PagedVulnerableAncestriesFromDatabaseModel converts database
|
||||||
// PagedVulnerableAncestries to api PagedVulnerableAncestries and assigns
|
// PagedVulnerableAncestries to api PagedVulnerableAncestries and assigns
|
||||||
// indexes to ancestries.
|
// indexes to ancestries.
|
||||||
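The raw conversions Detector_DType(1) and Detector_DType(2) above are exactly the generated constants Detector_DETECTOR_D_TYPE_NAMESPACE and Detector_DETECTOR_D_TYPE_FEATURE, so the mapping can be spelled equivalently with the named constants:

// Equivalent form of the mapping, using the named enum constants.
var DatabaseDetectorTypeMapping = map[database.DetectorType]Detector_DType{
	database.NamespaceDetectorType: Detector_DETECTOR_D_TYPE_NAMESPACE, // Detector_DType(1)
	database.FeatureDetectorType:   Detector_DETECTOR_D_TYPE_FEATURE,   // Detector_DType(2)
}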
@ -122,23 +129,38 @@ func VulnerabilityWithFixedInFromDatabaseModel(dbVuln database.VulnerabilityWith
|
|||||||
return vuln, nil
|
return vuln, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// LayerFromDatabaseModel converts database layer to api layer.
|
|
||||||
func LayerFromDatabaseModel(dbLayer database.LayerMetadata) *Layer {
|
|
||||||
layer := Layer{Hash: dbLayer.Hash}
|
|
||||||
return &layer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamespacedFeatureFromDatabaseModel converts database namespacedFeature to api Feature.
|
// NamespacedFeatureFromDatabaseModel converts database namespacedFeature to api Feature.
|
||||||
func NamespacedFeatureFromDatabaseModel(feature database.NamespacedFeature) *Feature {
|
func NamespacedFeatureFromDatabaseModel(feature database.AncestryFeature) *Feature {
|
||||||
version := feature.Feature.Version
|
version := feature.Feature.Version
|
||||||
if version == versionfmt.MaxVersion {
|
if version == versionfmt.MaxVersion {
|
||||||
version = "None"
|
version = "None"
|
||||||
}
|
}
|
||||||
|
|
||||||
return &Feature{
|
return &Feature{
|
||||||
Name: feature.Feature.Name,
|
Name: feature.Feature.Name,
|
||||||
NamespaceName: feature.Namespace.Name,
|
Namespace: &Namespace{
|
||||||
|
Name: feature.Namespace.Name,
|
||||||
|
Detector: DetectorFromDatabaseModel(feature.NamespaceBy),
|
||||||
|
},
|
||||||
VersionFormat: feature.Namespace.VersionFormat,
|
VersionFormat: feature.Namespace.VersionFormat,
|
||||||
Version: version,
|
Version: version,
|
||||||
|
Detector: DetectorFromDatabaseModel(feature.FeatureBy),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func DetectorFromDatabaseModel(detector database.Detector) *Detector {
|
||||||
|
return &Detector{
|
||||||
|
Name: detector.Name,
|
||||||
|
Version: detector.Version,
|
||||||
|
Dtype: DatabaseDetectorTypeMapping[detector.DType],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func DetectorsFromDatabaseModel(dbDetectors []database.Detector) []*Detector {
|
||||||
|
detectors := make([]*Detector, 0, len(dbDetectors))
|
||||||
|
for _, d := range dbDetectors {
|
||||||
|
detectors = append(detectors, DetectorFromDatabaseModel(d))
|
||||||
|
}
|
||||||
|
|
||||||
|
return detectors
|
||||||
|
}
|
||||||
|
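For illustration, here is a minimal usage sketch of the new conversion helpers (the detector values are hypothetical, not part of the commit):

	// Converting a database detector into its protobuf counterpart.
	dbDetector := database.Detector{
	    Name:    "os-release",
	    Version: "1.0",
	    DType:   database.NamespaceDetectorType,
	}
	pbDetector := DetectorFromDatabaseModel(dbDetector)
	// pbDetector.Dtype is Detector_DETECTOR_D_TYPE_NAMESPACE, because
	// DatabaseDetectorTypeMapping maps database.NamespaceDetectorType to Detector_DType(1).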
@@ -129,9 +129,8 @@ func (s *AncestryServer) GetAncestry(ctx context.Context, req *pb.GetAncestryReq
 	}
 
 	pbAncestry := &pb.GetAncestryResponse_Ancestry{
 		Name: ancestry.Name,
-		ScannedDetectors: ancestry.ProcessedBy.Detectors,
-		ScannedListers:   ancestry.ProcessedBy.Listers,
+		Detectors: pb.DetectorsFromDatabaseModel(ancestry.By),
 	}
 
 	for _, layer := range ancestry.Layers {
@@ -13,8 +13,7 @@ import (
 // protobuf struct.
 func GetClairStatus(store database.Datastore) (*pb.ClairStatus, error) {
 	status := &pb.ClairStatus{
-		Listers:   clair.Processors.Listers,
-		Detectors: clair.Processors.Detectors,
+		Detectors: pb.DetectorsFromDatabaseModel(clair.EnabledDetectors),
 	}
 
 	t, firstUpdate, err := clair.GetLastUpdateTime(store)
@@ -34,41 +33,49 @@ func GetClairStatus(store database.Datastore) (*pb.ClairStatus, error) {
 
 // GetPbAncestryLayer retrieves an ancestry layer with vulnerabilities and
 // features in an ancestry based on the provided database layer.
-func GetPbAncestryLayer(session database.Session, layer database.AncestryLayer) (*pb.GetAncestryResponse_AncestryLayer, error) {
+func GetPbAncestryLayer(tx database.Session, layer database.AncestryLayer) (*pb.GetAncestryResponse_AncestryLayer, error) {
 	pbLayer := &pb.GetAncestryResponse_AncestryLayer{
 		Layer: &pb.Layer{
 			Hash: layer.Hash,
 		},
 	}
 
-	var (
-		features []database.NullableAffectedNamespacedFeature
-		err      error
-	)
-
-	if features, err = session.FindAffectedNamespacedFeatures(layer.DetectedFeatures); err != nil {
+	features := layer.GetFeatures()
+	affectedFeatures, err := tx.FindAffectedNamespacedFeatures(features)
+	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	for _, feature := range features {
+	// NOTE(sidac): It's quite inefficient, but the easiest way to implement
+	// this feature for now, we should refactor the implementation if there's
+	// any performance issue. It's expected that the number of features is less
+	// than 1000.
+	for _, feature := range affectedFeatures {
 		if !feature.Valid {
 			return nil, status.Error(codes.Internal, "ancestry feature is not found")
 		}
 
-		var (
-			pbFeature = pb.NamespacedFeatureFromDatabaseModel(feature.NamespacedFeature)
-			pbVuln    *pb.Vulnerability
-			err       error
-		)
-		for _, vuln := range feature.AffectedBy {
-			if pbVuln, err = pb.VulnerabilityWithFixedInFromDatabaseModel(vuln); err != nil {
-				return nil, status.Error(codes.Internal, err.Error())
-			}
-
-			pbFeature.Vulnerabilities = append(pbFeature.Vulnerabilities, pbVuln)
-		}
-
-		pbLayer.DetectedFeatures = append(pbLayer.DetectedFeatures, pbFeature)
+		for _, detectedFeature := range layer.Features {
+			if detectedFeature.NamespacedFeature != feature.NamespacedFeature {
+				continue
+			}
+
+			var (
+				pbFeature = pb.NamespacedFeatureFromDatabaseModel(detectedFeature)
+				pbVuln    *pb.Vulnerability
+				err       error
+			)
+
+			for _, vuln := range feature.AffectedBy {
+				if pbVuln, err = pb.VulnerabilityWithFixedInFromDatabaseModel(vuln); err != nil {
+					return nil, status.Error(codes.Internal, err.Error())
+				}
+
+				pbFeature.Vulnerabilities = append(pbFeature.Vulnerabilities, pbVuln)
+			}
+
+			pbLayer.DetectedFeatures = append(pbLayer.DetectedFeatures, pbFeature)
+		}
 	}
 
 	return pbLayer, nil
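The NOTE in the hunk above concedes a quadratic scan; a sketch of the shape of that matching, assuming the types introduced by this commit:

	// For each affected feature reported by the database, every feature
	// detected in the layer is scanned to recover its detectors. This is
	// O(len(affectedFeatures) * len(layer.Features)), which the author deems
	// acceptable while both counts stay under roughly 1000.
	for _, affected := range affectedFeatures {
	    for _, detected := range layer.Features {
	        if detected.NamespacedFeature != affected.NamespacedFeature {
	            continue
	        }
	        // convert detected to a pb.Feature and attach affected.AffectedBy here
	    }
	}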
@@ -26,8 +26,6 @@ import (
 	"github.com/coreos/clair"
 	"github.com/coreos/clair/api"
 	"github.com/coreos/clair/database"
-	"github.com/coreos/clair/ext/featurefmt"
-	"github.com/coreos/clair/ext/featurens"
 	"github.com/coreos/clair/ext/notification"
 	"github.com/coreos/clair/ext/vulnsrc"
 	"github.com/coreos/clair/pkg/pagination"
@@ -47,7 +45,6 @@ type File struct {
 type Config struct {
 	Database database.RegistrableComponentConfig
 	Updater  *clair.UpdaterConfig
-	Worker   *clair.WorkerConfig
 	Notifier *notification.Config
 	API      *api.Config
 }
@@ -62,10 +59,6 @@ func DefaultConfig() Config {
 			EnabledUpdaters: vulnsrc.ListUpdaters(),
 			Interval:        1 * time.Hour,
 		},
-		Worker: &clair.WorkerConfig{
-			EnabledDetectors: featurens.ListDetectors(),
-			EnabledListers:   featurefmt.ListListers(),
-		},
 		API: &api.Config{
 			HealthAddr: "0.0.0.0:6061",
 			Addr:       "0.0.0.0:6060",
@@ -102,40 +102,13 @@ func stopCPUProfiling(f *os.File) {
 }
 
 func configClairVersion(config *Config) {
-	listers := featurefmt.ListListers()
-	detectors := featurens.ListDetectors()
-	updaters := vulnsrc.ListUpdaters()
-
-	log.WithFields(log.Fields{
-		"Listers":   strings.Join(listers, ","),
-		"Detectors": strings.Join(detectors, ","),
-		"Updaters":  strings.Join(updaters, ","),
-	}).Info("Clair registered components")
-
-	unregDetectors := strutil.CompareStringLists(config.Worker.EnabledDetectors, detectors)
-	unregListers := strutil.CompareStringLists(config.Worker.EnabledListers, listers)
-	unregUpdaters := strutil.CompareStringLists(config.Updater.EnabledUpdaters, updaters)
-	if len(unregDetectors) != 0 || len(unregListers) != 0 || len(unregUpdaters) != 0 {
-		log.WithFields(log.Fields{
-			"Unknown Detectors":   strings.Join(unregDetectors, ","),
-			"Unknown Listers":     strings.Join(unregListers, ","),
-			"Unknown Updaters":    strings.Join(unregUpdaters, ","),
-			"Available Listers":   strings.Join(featurefmt.ListListers(), ","),
-			"Available Detectors": strings.Join(featurens.ListDetectors(), ","),
-			"Available Updaters":  strings.Join(vulnsrc.ListUpdaters(), ","),
-		}).Fatal("Unknown or unregistered components are configured")
-	}
-
-	// verify the user specified detectors/listers/updaters are implemented. If
-	// some are not registered, it logs warning and won't use the unregistered
-	// extensions.
-
-	clair.Processors = database.Processors{
-		Detectors: strutil.CompareStringListsInBoth(config.Worker.EnabledDetectors, detectors),
-		Listers:   strutil.CompareStringListsInBoth(config.Worker.EnabledListers, listers),
-	}
-
-	clair.EnabledUpdaters = strutil.CompareStringListsInBoth(config.Updater.EnabledUpdaters, updaters)
+	clair.EnabledDetectors = append(featurefmt.ListListers(), featurens.ListDetectors()...)
+	clair.EnabledUpdaters = strutil.Intersect(config.Updater.EnabledUpdaters, vulnsrc.ListUpdaters())
+
+	log.WithFields(log.Fields{
+		"Detectors": database.SerializeDetectors(clair.EnabledDetectors),
+		"Updaters":  clair.EnabledUpdaters,
+	}).Info("enabled Clair extensions")
 }
 
 // Boot starts Clair instance with the provided config.
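A hedged sketch of what the simplified configClairVersion now does with the updater list (the configured names are hypothetical):

	configured := []string{"debian", "ubuntu", "no-such-updater"}
	registered := vulnsrc.ListUpdaters()
	// strutil.Intersect keeps only the configured updaters that are actually
	// registered, silently dropping "no-such-updater".
	clair.EnabledUpdaters = strutil.Intersect(configured, registered)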
@@ -160,6 +133,7 @@ func Boot(config *Config) {
 
 	defer db.Close()
 
+	clair.InitWorker(db)
 	// Start notifier
 	st.Begin()
 	go clair.RunNotifier(config.Notifier, db, st)
@@ -180,6 +154,18 @@ func Boot(config *Config) {
 	st.Stop()
 }
 
+// Initialize logging system
+func configureLogger(flagLogLevel *string) {
+	logLevel, err := log.ParseLevel(strings.ToUpper(*flagLogLevel))
+	if err != nil {
+		log.WithError(err).Error("failed to set logger parser level")
+	}
+
+	log.SetLevel(logLevel)
+	log.SetOutput(os.Stdout)
+	log.SetFormatter(&formatter.JSONExtendedFormatter{ShowLn: true})
+}
+
 func main() {
 	// Parse command-line arguments
 	flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
@@ -189,6 +175,7 @@ func main() {
 	flagInsecureTLS := flag.Bool("insecure-tls", false, "Disable TLS server's certificate chain and hostname verification when pulling layers.")
 	flag.Parse()
 
+	configureLogger(flagLogLevel)
 	// Check for dependencies.
 	for _, bin := range BinaryDependencies {
 		_, err := exec.LookPath(bin)
@@ -197,12 +184,6 @@ func main() {
 		}
 	}
 
-	// Initialize logging system
-	logLevel, err := log.ParseLevel(strings.ToUpper(*flagLogLevel))
-	log.SetLevel(logLevel)
-	log.SetOutput(os.Stdout)
-	log.SetFormatter(&formatter.JSONExtendedFormatter{ShowLn: true})
-
 	config, err := LoadConfig(*flagConfigPath)
 	if err != nil {
 		log.WithError(err).Fatal("failed to load configuration")
@@ -51,19 +51,6 @@ clair:
     keyfile:
     certfile:
 
-  worker:
-    namespace_detectors:
-      - os-release
-      - lsb-release
-      - apt-sources
-      - alpine-release
-      - redhat-release
-
-    feature_listers:
-      - apk
-      - dpkg
-      - rpm
-
   updater:
     # Frequency the database will be updated with vulnerabilities from the default data sources
     # The value 0 disables the updater entirely.
@@ -33,6 +33,14 @@ var (
 	// fails (i.e. when an entity which is supposed to be unique is detected
 	// twice)
 	ErrInconsistent = errors.New("database: inconsistent database")
+
+	// ErrInvalidParameters is an error that occurs when the parameters are not valid.
+	ErrInvalidParameters = errors.New("database: parameters are not valid")
+
+	// ErrMissingEntities is an error that occurs when an associated immutable
+	// entity doesn't exist in the database. This error can indicate a wrong
+	// implementation or corrupted database.
+	ErrMissingEntities = errors.New("database: associated immutable entities are missing in the database")
 )
 
 // RegistrableComponentConfig is a configuration block that can be used to
@@ -99,6 +107,9 @@ type Session interface {
 	// namespaced features. If the ancestry is not found, return false.
 	FindAncestry(name string) (ancestry Ancestry, found bool, err error)
 
+	// PersistDetectors inserts a slice of detectors if not in the database.
+	PersistDetectors(detectors []Detector) error
+
 	// PersistFeatures inserts a set of features if not in the database.
 	PersistFeatures(features []Feature) error
 
@@ -120,12 +131,10 @@ type Session interface {
 	// PersistNamespaces inserts a set of namespaces if not in the database.
 	PersistNamespaces([]Namespace) error
 
-	// PersistLayer persists a layer's content in the database. The given
-	// namespaces and features can be partial content of this layer.
+	// PersistLayer appends a layer's content in the database.
 	//
-	// The layer, namespaces and features are expected to be already existing
-	// in the database.
-	PersistLayer(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
+	// If any feature, namespace, or detector is not in the database, it returns a not-found error.
+	PersistLayer(hash string, features []LayerFeature, namespaces []LayerNamespace, detectedBy []Detector) error
 
 	// FindLayer returns a layer with all detected features and
 	// namespaces.
|
|||||||
// affected ancestries affected by old or new vulnerability.
|
// affected ancestries affected by old or new vulnerability.
|
||||||
//
|
//
|
||||||
// Because the number of affected ancestries maybe large, they are paginated
|
// Because the number of affected ancestries maybe large, they are paginated
|
||||||
// and their pages are specified by the paination token, which, if empty, are
|
// and their pages are specified by the pagination token, which should be
|
||||||
// always considered first page.
|
// considered first page when it's empty.
|
||||||
FindVulnerabilityNotification(name string, limit int, oldVulnerabilityPage pagination.Token, newVulnerabilityPage pagination.Token) (noti VulnerabilityNotificationWithVulnerable, found bool, err error)
|
FindVulnerabilityNotification(name string, limit int, oldVulnerabilityPage pagination.Token, newVulnerabilityPage pagination.Token) (noti VulnerabilityNotificationWithVulnerable, found bool, err error)
|
||||||
|
|
||||||
// MarkNotificationAsRead marks a Notification as notified now, assuming
|
// MarkNotificationAsRead marks a Notification as notified now, assuming
|
||||||
|
database/dbutil.go (new file, 287 lines)
@@ -0,0 +1,287 @@
+// Copyright 2018 clair authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package database
+
+import (
+	"github.com/deckarep/golang-set"
+)
+
+// DeduplicateNamespaces deduplicates a list of namespaces.
+func DeduplicateNamespaces(namespaces ...Namespace) []Namespace {
+	nsSet := mapset.NewSet()
+	for _, ns := range namespaces {
+		nsSet.Add(ns)
+	}
+
+	uniqueNamespaces := make([]Namespace, 0, nsSet.Cardinality())
+	for ns := range nsSet.Iter() {
+		uniqueNamespaces = append(uniqueNamespaces, ns.(Namespace))
+	}
+
+	return uniqueNamespaces
+}
+
+// DeduplicateFeatures deduplicates a list of features.
+func DeduplicateFeatures(features ...Feature) []Feature {
+	fSet := mapset.NewSet()
+	for _, f := range features {
+		fSet.Add(f)
+	}
+
+	uniqueFeatures := make([]Feature, 0, fSet.Cardinality())
+	for f := range fSet.Iter() {
+		uniqueFeatures = append(uniqueFeatures, f.(Feature))
+	}
+
+	return uniqueFeatures
+}
+
+// PersistPartialLayerAndCommit wraps session PersistLayer function with begin and
+// commit.
+func PersistPartialLayerAndCommit(datastore Datastore, layer *Layer) error {
+	tx, err := datastore.Begin()
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	if err := tx.PersistLayer(layer.Hash, layer.Features, layer.Namespaces, layer.By); err != nil {
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// PersistFeaturesAndCommit wraps session PersistFeatures function with begin and commit.
+func PersistFeaturesAndCommit(datastore Datastore, features []Feature) error {
+	tx, err := datastore.Begin()
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	if err := tx.PersistFeatures(features); err != nil {
+		return err
+	}
+	return tx.Commit()
+}
+
+// PersistNamespacesAndCommit wraps session PersistNamespaces function with
+// begin and commit.
+func PersistNamespacesAndCommit(datastore Datastore, namespaces []Namespace) error {
+	tx, err := datastore.Begin()
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	if err := tx.PersistNamespaces(namespaces); err != nil {
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// FindAncestryAndRollback wraps session FindAncestry function with begin and
+// rollback.
+func FindAncestryAndRollback(datastore Datastore, name string) (Ancestry, bool, error) {
+	tx, err := datastore.Begin()
+	defer tx.Rollback()
+
+	if err != nil {
+		return Ancestry{}, false, err
+	}
+
+	return tx.FindAncestry(name)
+}
+
+// FindLayerAndRollback wraps session FindLayer function with begin and rollback.
+func FindLayerAndRollback(datastore Datastore, hash string) (layer Layer, ok bool, err error) {
+	var tx Session
+	if tx, err = datastore.Begin(); err != nil {
+		return
+	}
+
+	defer tx.Rollback()
+	layer, ok, err = tx.FindLayer(hash)
+	return
+}
+
+// DeduplicateNamespacedFeatures returns a copy of all unique features in the
+// input.
+func DeduplicateNamespacedFeatures(features []NamespacedFeature) []NamespacedFeature {
+	nsSet := mapset.NewSet()
+	for _, ns := range features {
+		nsSet.Add(ns)
+	}
+
+	uniqueFeatures := make([]NamespacedFeature, 0, nsSet.Cardinality())
+	for ns := range nsSet.Iter() {
+		uniqueFeatures = append(uniqueFeatures, ns.(NamespacedFeature))
+	}
+
+	return uniqueFeatures
+}
+
+// GetAncestryFeatures returns a list of unique namespaced features in the
+// ancestry.
+func GetAncestryFeatures(ancestry Ancestry) []NamespacedFeature {
+	features := []NamespacedFeature{}
+	for _, layer := range ancestry.Layers {
+		features = append(features, layer.GetFeatures()...)
+	}
+
+	return DeduplicateNamespacedFeatures(features)
+}
+
+// UpsertAncestryAndCommit wraps session UpsertAncestry function with begin and commit.
+func UpsertAncestryAndCommit(datastore Datastore, ancestry Ancestry) error {
+	tx, err := datastore.Begin()
+	if err != nil {
+		return err
+	}
+
+	if err = tx.UpsertAncestry(ancestry); err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	if err = tx.Commit(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// PersistNamespacedFeaturesAndCommit wraps session PersistNamespacedFeatures function
+// with begin and commit.
+func PersistNamespacedFeaturesAndCommit(datastore Datastore, features []NamespacedFeature) error {
+	tx, err := datastore.Begin()
+	if err != nil {
+		return err
+	}
+
+	if err := tx.PersistNamespacedFeatures(features); err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	if err := tx.Commit(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// CacheRelatedVulnerabilityAndCommit wraps session CacheAffectedNamespacedFeatures
+// function with begin and commit.
+func CacheRelatedVulnerabilityAndCommit(datastore Datastore, features []NamespacedFeature) error {
+	tx, err := datastore.Begin()
+	if err != nil {
+		return err
+	}
+
+	if err := tx.CacheAffectedNamespacedFeatures(features); err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// IntersectDetectors returns the detectors in both d1 and d2.
+func IntersectDetectors(d1 []Detector, d2 []Detector) []Detector {
+	d1Set := mapset.NewSet()
+	for _, d := range d1 {
+		d1Set.Add(d)
+	}
+
+	d2Set := mapset.NewSet()
+	for _, d := range d2 {
+		d2Set.Add(d)
+	}
+
+	inter := d1Set.Intersect(d2Set)
+	detectors := make([]Detector, 0, inter.Cardinality())
+	for d := range inter.Iter() {
+		detectors = append(detectors, d.(Detector))
+	}
+
+	return detectors
+}
+
+// DiffDetectors returns the detectors that belong to d1 but not to d2.
+func DiffDetectors(d1 []Detector, d2 []Detector) []Detector {
+	d1Set := mapset.NewSet()
+	for _, d := range d1 {
+		d1Set.Add(d)
+	}
+
+	d2Set := mapset.NewSet()
+	for _, d := range d2 {
+		d2Set.Add(d)
+	}
+
+	diff := d1Set.Difference(d2Set)
+	detectors := make([]Detector, 0, diff.Cardinality())
+	for d := range diff.Iter() {
+		detectors = append(detectors, d.(Detector))
+	}
+
+	return detectors
+}
+
+// MergeLayers merges all content in new layer to l, where the content is
+// updated.
+func MergeLayers(l *Layer, new *Layer) *Layer {
+	featureSet := mapset.NewSet()
+	namespaceSet := mapset.NewSet()
+	bySet := mapset.NewSet()
+
+	for _, f := range l.Features {
+		featureSet.Add(f)
+	}
+
+	for _, ns := range l.Namespaces {
+		namespaceSet.Add(ns)
+	}
+
+	for _, d := range l.By {
+		bySet.Add(d)
+	}
+
+	for _, feature := range new.Features {
+		if !featureSet.Contains(feature) {
+			l.Features = append(l.Features, feature)
+			featureSet.Add(feature)
+		}
+	}
+
+	for _, namespace := range new.Namespaces {
+		if !namespaceSet.Contains(namespace) {
+			l.Namespaces = append(l.Namespaces, namespace)
+			namespaceSet.Add(namespace)
+		}
+	}
+
+	for _, detector := range new.By {
+		if !bySet.Contains(detector) {
+			l.By = append(l.By, detector)
+			bySet.Add(detector)
+		}
+	}
+
+	return l
+}
database/detector.go (new file, 144 lines)
@@ -0,0 +1,144 @@
+// Copyright 2018 clair authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package database
+
+import (
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"strings"
+)
+
+const (
+	// NamespaceDetectorType is a type of detector that extracts the namespaces.
+	NamespaceDetectorType DetectorType = "namespace"
+	// FeatureDetectorType is a type of detector that extracts the features.
+	FeatureDetectorType DetectorType = "feature"
+)
+
+// DetectorTypes contains all detector types.
+var (
+	DetectorTypes = []DetectorType{
+		NamespaceDetectorType,
+		FeatureDetectorType,
+	}
+	// ErrFailedToParseDetectorType is the error returned when a detector type could
+	// not be parsed from a string.
+	ErrFailedToParseDetectorType = errors.New("failed to parse DetectorType from input")
+	// ErrInvalidDetector is the error returned when a detector from database has
+	// invalid name or version or type.
+	ErrInvalidDetector = errors.New("the detector has invalid metadata")
+)
+
+// DetectorType is the type of a detector.
+type DetectorType string
+
+// Value implements the database/sql/driver.Valuer interface.
+func (s DetectorType) Value() (driver.Value, error) {
+	return string(s), nil
+}
+
+// Scan implements the database/sql.Scanner interface.
+func (s *DetectorType) Scan(value interface{}) error {
+	val, ok := value.([]byte)
+	if !ok {
+		return errors.New("could not scan a DetectorType from a non-string input")
+	}
+
+	var err error
+	*s, err = NewDetectorType(string(val))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// NewDetectorType attempts to parse a string into a standard DetectorType
+// value.
+func NewDetectorType(s string) (DetectorType, error) {
+	for _, ss := range DetectorTypes {
+		if strings.EqualFold(s, string(ss)) {
+			return ss, nil
+		}
+	}
+
+	return "", ErrFailedToParseDetectorType
+}
+
+// Valid checks if a detector type is defined.
+func (s DetectorType) Valid() bool {
+	for _, t := range DetectorTypes {
+		if s == t {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Detector is a versioned Clair extension.
+type Detector struct {
+	// Name of an extension should be non-empty and uniquely identifies the
+	// extension.
+	Name string
+	// Version of an extension should be non-empty.
+	Version string
+	// DType is the type of the extension and should be one of the types in
+	// DetectorTypes.
+	DType DetectorType
+}
+
+// Valid checks if all fields in the detector satisfy the spec.
+func (d Detector) Valid() bool {
+	if d.Name == "" || d.Version == "" || !d.DType.Valid() {
+		return false
+	}
+
+	return true
+}
+
+// String returns a unique string representation of the detector.
+func (d Detector) String() string {
+	return fmt.Sprintf("%s:%s", d.Name, d.Version)
+}
+
+// NewNamespaceDetector returns a new namespace detector.
+func NewNamespaceDetector(name, version string) Detector {
+	return Detector{
+		Name:    name,
+		Version: version,
+		DType:   NamespaceDetectorType,
+	}
+}
+
+// NewFeatureDetector returns a new feature detector.
+func NewFeatureDetector(name, version string) Detector {
+	return Detector{
+		Name:    name,
+		Version: version,
+		DType:   FeatureDetectorType,
+	}
+}
+
+// SerializeDetectors returns the string representation of given detectors.
+func SerializeDetectors(detectors []Detector) []string {
+	strDetectors := []string{}
+	for _, d := range detectors {
+		strDetectors = append(strDetectors, d.String())
+	}
+
+	return strDetectors
+}
@@ -30,9 +30,10 @@ type MockSession struct {
 	FctFindAffectedNamespacedFeatures  func(features []NamespacedFeature) ([]NullableAffectedNamespacedFeature, error)
 	FctPersistNamespaces               func([]Namespace) error
 	FctPersistFeatures                 func([]Feature) error
+	FctPersistDetectors                func(detectors []Detector) error
 	FctPersistNamespacedFeatures       func([]NamespacedFeature) error
 	FctCacheAffectedNamespacedFeatures func([]NamespacedFeature) error
-	FctPersistLayer                    func(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
+	FctPersistLayer                    func(hash string, features []LayerFeature, namespaces []LayerNamespace, by []Detector) error
 	FctFindLayer                       func(name string) (Layer, bool, error)
 	FctInsertVulnerabilities           func([]VulnerabilityWithAffected) error
 	FctFindVulnerabilities             func([]VulnerabilityID) ([]NullableVulnerability, error)
@@ -85,6 +86,13 @@ func (ms *MockSession) FindAffectedNamespacedFeatures(features []NamespacedFeatu
 	panic("required mock function not implemented")
 }
 
+func (ms *MockSession) PersistDetectors(detectors []Detector) error {
+	if ms.FctPersistDetectors != nil {
+		return ms.FctPersistDetectors(detectors)
+	}
+	panic("required mock function not implemented")
+}
+
 func (ms *MockSession) PersistNamespaces(namespaces []Namespace) error {
 	if ms.FctPersistNamespaces != nil {
 		return ms.FctPersistNamespaces(namespaces)
@@ -113,9 +121,9 @@ func (ms *MockSession) CacheAffectedNamespacedFeatures(namespacedFeatures []Name
 	panic("required mock function not implemented")
 }
 
-func (ms *MockSession) PersistLayer(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error {
+func (ms *MockSession) PersistLayer(hash string, features []LayerFeature, namespaces []LayerNamespace, detectors []Detector) error {
 	if ms.FctPersistLayer != nil {
-		return ms.FctPersistLayer(hash, namespaces, features, processedBy)
+		return ms.FctPersistLayer(hash, features, namespaces, detectors)
 	}
 	panic("required mock function not implemented")
 }
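Hypothetical test wiring for the new mock hook (within package database, where these identifiers live):

	ms := &MockSession{
	    FctPersistDetectors: func(detectors []Detector) error {
	        // assertions on the persisted detectors would go here
	        return nil
	    },
	}
	_ = ms.PersistDetectors([]Detector{NewFeatureDetector("dpkg", "1.0")})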
@@ -22,47 +22,129 @@ import (
 	"github.com/coreos/clair/pkg/pagination"
 )
 
-// Processors are extentions to scan a layer's content.
-type Processors struct {
-	Listers   []string
-	Detectors []string
-}
-
 // Ancestry is a manifest that keeps all layers in an image in order.
 type Ancestry struct {
+	// Name is a globally unique value for a set of layers. This is often the
+	// sha256 digest of an OCI/Docker manifest.
 	Name string
-	// ProcessedBy contains the processors that are used when computing the
+	// By contains the processors that are used when computing the
 	// content of this ancestry.
-	ProcessedBy Processors
+	By []Detector
 	// Layers should be ordered and i_th layer is the parent of i+1_th layer in
 	// the slice.
 	Layers []AncestryLayer
 }
 
+// Valid checks if the ancestry is compliant to spec.
+func (a *Ancestry) Valid() bool {
+	if a == nil {
+		return false
+	}
+
+	if a.Name == "" {
+		return false
+	}
+
+	for _, d := range a.By {
+		if !d.Valid() {
+			return false
+		}
+	}
+
+	for _, l := range a.Layers {
+		if !l.Valid() {
+			return false
+		}
+	}
+
+	return true
+}
+
 // AncestryLayer is a layer with all detected namespaced features.
 type AncestryLayer struct {
-	LayerMetadata
-
-	// DetectedFeatures are the features introduced by this layer when it was
-	// processed.
-	DetectedFeatures []NamespacedFeature
-}
-
-// LayerMetadata contains the metadata of a layer.
-type LayerMetadata struct {
-	// Hash is content hash of the layer.
+	// Hash is the sha-256 tarsum on the layer's blob content.
 	Hash string
-	// ProcessedBy contains the processors that processed this layer.
-	ProcessedBy Processors
+	// Features are the features introduced by this layer when it was
+	// processed.
+	Features []AncestryFeature
 }
 
-// Layer is a layer with its detected namespaces and features by
-// ProcessedBy.
-type Layer struct {
-	LayerMetadata
-
-	Namespaces []Namespace
-	Features   []Feature
+// Valid checks if the Ancestry Layer is compliant to the spec.
+func (l *AncestryLayer) Valid() bool {
+	if l == nil {
+		return false
+	}
+
+	if l.Hash == "" {
+		return false
+	}
+
+	return true
+}
+
+// GetFeatures returns the Ancestry's features.
+func (l *AncestryLayer) GetFeatures() []NamespacedFeature {
+	nsf := make([]NamespacedFeature, 0, len(l.Features))
+	for _, f := range l.Features {
+		nsf = append(nsf, f.NamespacedFeature)
+	}
+
+	return nsf
+}
+
+// AncestryFeature is a namespaced feature with the detectors used to
+// find this feature.
+type AncestryFeature struct {
+	NamespacedFeature
+
+	// FeatureBy is the detector that detected the feature.
+	FeatureBy Detector
+	// NamespaceBy is the detector that detected the namespace.
+	NamespaceBy Detector
+}
+
+// Layer is a layer with all the detected features and namespaces.
+type Layer struct {
+	// Hash is the sha-256 tarsum on the layer's blob content.
+	Hash string
+	// By contains a list of detectors that scanned this Layer.
+	By []Detector
+
+	Namespaces []LayerNamespace
+	Features   []LayerFeature
+}
+
+func (l *Layer) GetFeatures() []Feature {
+	features := make([]Feature, 0, len(l.Features))
+	for _, f := range l.Features {
+		features = append(features, f.Feature)
+	}
+
+	return features
+}
+
+func (l *Layer) GetNamespaces() []Namespace {
+	namespaces := make([]Namespace, 0, len(l.Namespaces))
+	for _, ns := range l.Namespaces {
+		namespaces = append(namespaces, ns.Namespace)
+	}
+
+	return namespaces
+}
+
+// LayerNamespace is a namespace with detection information.
+type LayerNamespace struct {
+	Namespace
+
+	// By is the detector that found the namespace.
+	By Detector
+}
+
+// LayerFeature is a feature with detection information.
+type LayerFeature struct {
+	Feature
+
+	// By is the detector that found the feature.
+	By Detector
 }
 
 // Namespace is the contextual information around features.
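An illustrative construction of the reworked layer model (field values are hypothetical): each namespace and feature now records the detector that found it, and the layer records every detector that scanned it.

	det := database.NewFeatureDetector("dpkg", "1.0")
	layer := database.Layer{
	    Hash: "sha256:bbb",
	    By:   []database.Detector{det},
	    Features: []database.LayerFeature{
	        {Feature: database.Feature{Name: "openssl"}, By: det},
	    },
	}
	_ = layer.GetFeatures() // unwraps back to []database.Feature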
@ -10,75 +10,38 @@ import (
|
|||||||
"github.com/coreos/clair/pkg/commonerr"
|
"github.com/coreos/clair/pkg/commonerr"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ancestryLayerWithID struct {
|
const (
|
||||||
database.AncestryLayer
|
insertAncestry = `
|
||||||
|
INSERT INTO ancestry (name) VALUES ($1) RETURNING id`
|
||||||
|
|
||||||
layerID int64
|
findAncestryLayerHashes = `
|
||||||
}
|
SELECT layer.hash, ancestry_layer.ancestry_index
|
||||||
|
FROM layer, ancestry_layer
|
||||||
|
WHERE ancestry_layer.ancestry_id = $1
|
||||||
|
AND ancestry_layer.layer_id = layer.id
|
||||||
|
ORDER BY ancestry_layer.ancestry_index ASC`
|
||||||
|
|
||||||
func (tx *pgSession) UpsertAncestry(ancestry database.Ancestry) error {
|
findAncestryFeatures = `
|
||||||
if ancestry.Name == "" {
|
SELECT namespace.name, namespace.version_format, feature.name,
|
||||||
log.Error("Empty ancestry name is not allowed")
|
feature.version, feature.version_format, ancestry_layer.ancestry_index,
|
||||||
return commonerr.NewBadRequestError("could not insert an ancestry with empty name")
|
ancestry_feature.feature_detector_id, ancestry_feature.namespace_detector_id
|
||||||
}
|
FROM namespace, feature, namespaced_feature, ancestry_layer, ancestry_feature
|
||||||
|
WHERE ancestry_layer.ancestry_id = $1
|
||||||
|
AND ancestry_feature.ancestry_layer_id = ancestry_layer.id
|
||||||
|
AND ancestry_feature.namespaced_feature_id = namespaced_feature.id
|
||||||
|
AND namespaced_feature.feature_id = feature.id
|
||||||
|
AND namespaced_feature.namespace_id = namespace.id`
|
||||||
|
|
||||||
if len(ancestry.Layers) == 0 {
|
findAncestryID = `SELECT id FROM ancestry WHERE name = $1`
|
||||||
log.Error("Empty ancestry is not allowed")
|
removeAncestry = `DELETE FROM ancestry WHERE name = $1`
|
||||||
return commonerr.NewBadRequestError("could not insert an ancestry with 0 layers")
|
insertAncestryLayers = `
|
||||||
}
|
INSERT INTO ancestry_layer (ancestry_id, ancestry_index, layer_id) VALUES ($1, $2, $3)
|
||||||
|
RETURNING id`
|
||||||
if err := tx.deleteAncestry(ancestry.Name); err != nil {
|
insertAncestryFeatures = `
|
||||||
return err
|
INSERT INTO ancestry_feature
|
||||||
}
|
(ancestry_layer_id, namespaced_feature_id, feature_detector_id, namespace_detector_id) VALUES
|
||||||
|
($1, $2, $3, $4)`
|
||||||
var ancestryID int64
|
)
|
||||||
if err := tx.QueryRow(insertAncestry, ancestry.Name).Scan(&ancestryID); err != nil {
|
|
||||||
if isErrUniqueViolation(err) {
|
|
||||||
return handleError("insertAncestry", errors.New("other Go-routine is processing this ancestry (skip)"))
|
|
||||||
}
|
|
||||||
return handleError("insertAncestry", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.insertAncestryLayers(ancestryID, ancestry.Layers); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return tx.persistProcessors(persistAncestryLister,
|
|
||||||
"persistAncestryLister",
|
|
||||||
persistAncestryDetector,
|
|
||||||
"persistAncestryDetector",
|
|
||||||
ancestryID, ancestry.ProcessedBy)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tx *pgSession) findAncestryID(name string) (int64, bool, error) {
|
|
||||||
var id sql.NullInt64
|
|
||||||
if err := tx.QueryRow(searchAncestry, name).Scan(&id); err != nil {
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
return 0, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0, false, handleError("searchAncestry", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return id.Int64, true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tx *pgSession) findAncestryProcessors(id int64) (database.Processors, error) {
|
|
||||||
var (
|
|
||||||
processors database.Processors
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
if processors.Detectors, err = tx.findProcessors(searchAncestryDetectors, id); err != nil {
|
|
||||||
return processors, handleError("searchAncestryDetectors", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if processors.Listers, err = tx.findProcessors(searchAncestryListers, id); err != nil {
|
|
||||||
return processors, handleError("searchAncestryListers", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return processors, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tx *pgSession) FindAncestry(name string) (database.Ancestry, bool, error) {
|
func (tx *pgSession) FindAncestry(name string) (database.Ancestry, bool, error) {
|
||||||
var (
|
var (
|
||||||
@ -91,7 +54,7 @@ func (tx *pgSession) FindAncestry(name string) (database.Ancestry, bool, error)
|
|||||||
return ancestry, ok, err
|
return ancestry, ok, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if ancestry.ProcessedBy, err = tx.findAncestryProcessors(id); err != nil {
|
if ancestry.By, err = tx.findAncestryDetectors(id); err != nil {
|
||||||
return ancestry, false, err
|
return ancestry, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -102,99 +65,187 @@ func (tx *pgSession) FindAncestry(name string) (database.Ancestry, bool, error)
|
|||||||
return ancestry, true, nil
|
return ancestry, true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tx *pgSession) deleteAncestry(name string) error {
|
func (tx *pgSession) UpsertAncestry(ancestry database.Ancestry) error {
|
||||||
result, err := tx.Exec(removeAncestry, name)
|
if !ancestry.Valid() {
|
||||||
if err != nil {
|
return database.ErrInvalidParameters
|
||||||
return handleError("removeAncestry", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = result.RowsAffected()
|
if err := tx.removeAncestry(ancestry.Name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
id, err := tx.insertAncestry(ancestry.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return handleError("removeAncestry", err)
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
detectorIDs, err := tx.findDetectorIDs(ancestry.By)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// insert ancestry metadata
|
||||||
|
if err := tx.insertAncestryDetectors(id, detectorIDs); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
layers := make([]string, 0, len(ancestry.Layers))
|
||||||
|
for _, layer := range ancestry.Layers {
|
||||||
|
layers = append(layers, layer.Hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
layerIDs, ok, err := tx.findLayerIDs(layers)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
log.Error("layer cannot be found, this indicates that the internal logic of calling UpsertAncestry is wrong or the database is corrupted.")
|
||||||
|
return database.ErrMissingEntities
|
||||||
|
}
|
||||||
|
|
||||||
|
ancestryLayerIDs, err := tx.insertAncestryLayers(id, layerIDs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, id := range ancestryLayerIDs {
|
||||||
|
if err := tx.insertAncestryFeatures(id, ancestry.Layers[i]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tx *pgSession) findProcessors(query string, id int64) ([]string, error) {
|
func (tx *pgSession) insertAncestry(name string) (int64, error) {
|
||||||
var (
|
var id int64
|
||||||
processors []string
|
err := tx.QueryRow(insertAncestry, name).Scan(&id)
|
||||||
processor string
|
|
||||||
)
|
|
||||||
|
|
||||||
rows, err := tx.Query(query, id)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if isErrUniqueViolation(err) {
|
||||||
|
return 0, handleError("insertAncestry", errors.New("other Go-routine is processing this ancestry (skip)"))
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, handleError("insertAncestry", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.WithFields(log.Fields{"ancestry": name, "id": id}).Debug("database: inserted ancestry")
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tx *pgSession) findAncestryID(name string) (int64, bool, error) {
|
||||||
|
var id sql.NullInt64
|
||||||
|
if err := tx.QueryRow(findAncestryID, name).Scan(&id); err != nil {
|
||||||
if err == sql.ErrNoRows {
|
if err == sql.ErrNoRows {
|
||||||
return nil, nil
|
return 0, false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, err
|
return 0, false, handleError("findAncestryID", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for rows.Next() {
|
return id.Int64, true, nil
|
||||||
if err := rows.Scan(&processor); err != nil {
|
}
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
processors = append(processors, processor)
|
func (tx *pgSession) removeAncestry(name string) error {
|
||||||
|
result, err := tx.Exec(removeAncestry, name)
|
||||||
|
if err != nil {
|
||||||
|
return handleError("removeAncestry", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return processors, nil
|
affected, err := result.RowsAffected()
|
||||||
|
if err != nil {
|
||||||
|
return handleError("removeAncestry", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if affected != 0 {
|
||||||
|
log.WithField("ancestry", name).Debug("removed ancestry")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tx *pgSession) findAncestryLayers(id int64) ([]database.AncestryLayer, error) {
|
func (tx *pgSession) findAncestryLayers(id int64) ([]database.AncestryLayer, error) {
|
||||||
var (
|
detectors, err := tx.findAllDetectors()
|
||||||
err error
|
if err != nil {
|
||||||
rows *sql.Rows
|
return nil, err
|
||||||
// layer index -> Ancestry Layer + Layer ID
|
|
||||||
layers = map[int64]ancestryLayerWithID{}
|
|
||||||
// layer index -> layer-wise features
|
|
||||||
features = map[int64][]database.NamespacedFeature{}
|
|
||||||
ancestryLayers []database.AncestryLayer
|
|
||||||
)
|
|
||||||
|
|
||||||
// retrieve ancestry layer metadata
|
|
||||||
if rows, err = tx.Query(searchAncestryLayer, id); err != nil {
|
|
||||||
return nil, handleError("searchAncestryLayer", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
layerMap, err := tx.findAncestryLayerHashes(id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.WithField("map", layerMap).Debug("found layer hashes")
|
||||||
|
featureMap, err := tx.findAncestryFeatures(id, detectors)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
layers := make([]database.AncestryLayer, len(layerMap))
|
||||||
|
for index, layer := range layerMap {
|
||||||
|
// index MUST match the ancestry layer slice index.
|
||||||
|
if layers[index].Hash == "" && len(layers[index].Features) == 0 {
|
||||||
|
layers[index] = database.AncestryLayer{
|
||||||
|
Hash: layer,
|
||||||
|
Features: featureMap[index],
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.WithFields(log.Fields{
|
||||||
|
"ancestry ID": id,
|
||||||
|
"duplicated ancestry index": index,
|
||||||
|
}).WithError(database.ErrInconsistent).Error("ancestry layers with same ancestry_index is not allowed")
|
||||||
|
return nil, database.ErrInconsistent
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return layers, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tx *pgSession) findAncestryLayerHashes(ancestryID int64) (map[int64]string, error) {
|
||||||
|
// retrieve layer indexes and hashes
|
||||||
|
rows, err := tx.Query(findAncestryLayerHashes, ancestryID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, handleError("findAncestryLayerHashes", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
layerHashes := map[int64]string{}
|
||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
var (
|
var (
|
||||||
layer database.AncestryLayer
|
hash string
|
||||||
index sql.NullInt64
|
index int64
|
||||||
id sql.NullInt64
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if err = rows.Scan(&layer.Hash, &id, &index); err != nil {
|
if err = rows.Scan(&hash, &index); err != nil {
|
||||||
return nil, handleError("searchAncestryLayer", err)
|
return nil, handleError("findAncestryLayerHashes", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !index.Valid || !id.Valid {
|
if _, ok := layerHashes[index]; ok {
|
||||||
panic("null ancestry ID or ancestry index violates database constraints")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := layers[index.Int64]; ok {
|
|
||||||
// one ancestry index should correspond to only one layer
|
// one ancestry index should correspond to only one layer
|
||||||
return nil, database.ErrInconsistent
|
return nil, database.ErrInconsistent
|
||||||
}
|
}
|
||||||
|
|
||||||
layers[index.Int64] = ancestryLayerWithID{layer, id.Int64}
|
layerHashes[index] = hash
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, layer := range layers {
|
return layerHashes, nil
|
||||||
-	if layer.ProcessedBy, err = tx.findLayerProcessors(layer.layerID); err != nil {
-		return nil, err
-	}
-}
+}
+
+func (tx *pgSession) findAncestryFeatures(ancestryID int64, detectors detectorMap) (map[int64][]database.AncestryFeature, error) {
+	// ancestry_index -> ancestry features
+	featureMap := make(map[int64][]database.AncestryFeature)
 	// retrieve ancestry layer's namespaced features
-	if rows, err = tx.Query(searchAncestryFeatures, id); err != nil {
-		return nil, handleError("searchAncestryFeatures", err)
+	rows, err := tx.Query(findAncestryFeatures, ancestryID)
+	if err != nil {
+		return nil, handleError("findAncestryFeatures", err)
 	}

+	defer rows.Close()
+
 	for rows.Next() {
 		var (
-			feature database.NamespacedFeature
+			featureDetectorID   int64
+			namespaceDetectorID int64
+			feature             database.NamespacedFeature
 			// index is used to determine which layer the feature belongs to.
 			index sql.NullInt64
 		)
@@ -206,8 +257,10 @@ func (tx *pgSession) findAncestryLayers(id int64) ([]database.AncestryLayer, err
 			&feature.Feature.Version,
 			&feature.Feature.VersionFormat,
 			&index,
+			&featureDetectorID,
+			&namespaceDetectorID,
 		); err != nil {
-			return nil, handleError("searchAncestryFeatures", err)
+			return nil, handleError("findAncestryFeatures", err)
 		}

 		if feature.Feature.VersionFormat != feature.Namespace.VersionFormat {
@@ -216,59 +269,88 @@ func (tx *pgSession) findAncestryLayers(id int64) ([]database.AncestryLayer, err
 			return nil, database.ErrInconsistent
 		}

-		features[index.Int64] = append(features[index.Int64], feature)
+		fDetector, ok := detectors.byID[featureDetectorID]
+		if !ok {
+			return nil, database.ErrInconsistent
+		}
+
+		nsDetector, ok := detectors.byID[namespaceDetectorID]
+		if !ok {
+			return nil, database.ErrInconsistent
+		}
+
+		featureMap[index.Int64] = append(featureMap[index.Int64], database.AncestryFeature{
+			NamespacedFeature: feature,
+			FeatureBy:         fDetector,
+			NamespaceBy:       nsDetector,
+		})
 	}

-	for index, layer := range layers {
-		layer.DetectedFeatures = features[index]
-		ancestryLayers = append(ancestryLayers, layer.AncestryLayer)
-	}
-
-	return ancestryLayers, nil
+	return featureMap, nil
 }

 // insertAncestryLayers inserts the ancestry layers along with its content into
 // the database. The layers are 0 based indexed in the original order.
-func (tx *pgSession) insertAncestryLayers(ancestryID int64, layers []database.AncestryLayer) error {
-	//TODO(Sida): use bulk insert.
-	stmt, err := tx.Prepare(insertAncestryLayer)
+func (tx *pgSession) insertAncestryLayers(ancestryID int64, layers []int64) ([]int64, error) {
+	stmt, err := tx.Prepare(insertAncestryLayers)
 	if err != nil {
-		return handleError("insertAncestryLayer", err)
+		return nil, handleError("insertAncestryLayers", err)
 	}

-	ancestryLayerIDs := []sql.NullInt64{}
-	for index, layer := range layers {
+	ancestryLayerIDs := []int64{}
+	for index, layerID := range layers {
 		var ancestryLayerID sql.NullInt64
-		if err := stmt.QueryRow(ancestryID, index, layer.Hash).Scan(&ancestryLayerID); err != nil {
-			return handleError("insertAncestryLayer", commonerr.CombineErrors(err, stmt.Close()))
+		if err := stmt.QueryRow(ancestryID, index, layerID).Scan(&ancestryLayerID); err != nil {
+			return nil, handleError("insertAncestryLayers", commonerr.CombineErrors(err, stmt.Close()))
 		}

-		ancestryLayerIDs = append(ancestryLayerIDs, ancestryLayerID)
+		if !ancestryLayerID.Valid {
+			return nil, database.ErrInconsistent
+		}
+
+		ancestryLayerIDs = append(ancestryLayerIDs, ancestryLayerID.Int64)
 	}

 	if err := stmt.Close(); err != nil {
-		return handleError("Failed to close insertAncestryLayer statement", err)
+		return nil, handleError("insertAncestryLayers", err)
 	}

+	return ancestryLayerIDs, nil
+}
+
+func (tx *pgSession) insertAncestryFeatures(ancestryLayerID int64, layer database.AncestryLayer) error {
+	detectors, err := tx.findAllDetectors()
+	if err != nil {
+		return err
+	}
+
+	nsFeatureIDs, err := tx.findNamespacedFeatureIDs(layer.GetFeatures())
+	if err != nil {
+		return err
+	}
+
+	// find the detectors for each feature
+	stmt, err := tx.Prepare(insertAncestryFeatures)
+	if err != nil {
+		return handleError("insertAncestryFeatures", err)
 	}

-	stmt, err = tx.Prepare(insertAncestryLayerFeature)
 	defer stmt.Close()

-	for i, layer := range layers {
-		var (
-			nsFeatureIDs []sql.NullInt64
-			layerID      = ancestryLayerIDs[i]
-		)
-
-		if nsFeatureIDs, err = tx.findNamespacedFeatureIDs(layer.DetectedFeatures); err != nil {
-			return err
-		}
-
-		for _, id := range nsFeatureIDs {
-			if _, err := stmt.Exec(layerID, id); err != nil {
-				return handleError("insertAncestryLayerFeature", commonerr.CombineErrors(err, stmt.Close()))
-			}
-		}
+	for index, id := range nsFeatureIDs {
+		namespaceDetectorID, ok := detectors.byValue[layer.Features[index].NamespaceBy]
+		if !ok {
+			return database.ErrMissingEntities
+		}
+
+		featureDetectorID, ok := detectors.byValue[layer.Features[index].FeatureBy]
+		if !ok {
+			return database.ErrMissingEntities
+		}
+
+		if _, err := stmt.Exec(ancestryLayerID, id, featureDetectorID, namespaceDetectorID); err != nil {
+			return handleError("insertAncestryFeatures", commonerr.CombineErrors(err, stmt.Close()))
+		}
 	}

 	return nil
 }
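[Editor's note] A minimal sketch, not code from this commit, of how the reworked helpers are meant to compose inside the ancestry write path: insertAncestryLayers now only persists the (ancestry_id, index, layer_id) rows and hands back the generated ancestry_layer IDs, and each layer's features are then attached under one of those IDs. The caller name and surrounding flow below are assumptions; the sketch is assumed to live in the same pgsql package.

func persistAncestryContentSketch(tx *pgSession, ancestryID int64, layerIDs []int64, layers []database.AncestryLayer) error {
	// Step 1: one ancestry_layer row per layer, in original order.
	ancestryLayerIDs, err := tx.insertAncestryLayers(ancestryID, layerIDs)
	if err != nil {
		return err
	}

	// Step 2: attach each layer's detected features to its ancestry_layer row.
	for i, id := range ancestryLayerIDs {
		if err := tx.insertAncestryFeatures(id, layers[i]); err != nil {
			return err
		}
	}

	return nil
}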
@@ -15,7 +15,6 @@
 package pgsql

 import (
-	"sort"
 	"testing"

 	"github.com/stretchr/testify/assert"
@@ -23,190 +22,117 @@ import (
 	"github.com/coreos/clair/database"
 )

+var upsertAncestryTests = []struct {
+	in    *database.Ancestry
+	err   string
+	title string
+}{
+	{
+		title: "ancestry with invalid layer",
+		in: &database.Ancestry{
+			Name: "a1",
+			Layers: []database.AncestryLayer{
+				{
+					Hash: "layer-non-existing",
+				},
+			},
+		},
+		err: database.ErrMissingEntities.Error(),
+	},
+	{
+		title: "ancestry with invalid name",
+		in:    &database.Ancestry{},
+		err:   database.ErrInvalidParameters.Error(),
+	},
+	{
+		title: "new valid ancestry",
+		in: &database.Ancestry{
+			Name:   "a",
+			Layers: []database.AncestryLayer{{Hash: "layer-0"}},
+		},
+	},
+	{
+		title: "ancestry with invalid feature",
+		in: &database.Ancestry{
+			Name: "a",
+			By:   []database.Detector{realDetectors[1], realDetectors[2]},
+			Layers: []database.AncestryLayer{{Hash: "layer-1", Features: []database.AncestryFeature{
+				{fakeNamespacedFeatures[1], fakeDetector[1], fakeDetector[2]},
+			}}},
+		},
+		err: database.ErrMissingEntities.Error(),
+	},
+	{
+		title: "replace old ancestry",
+		in: &database.Ancestry{
+			Name: "a",
+			By:   []database.Detector{realDetectors[1], realDetectors[2]},
+			Layers: []database.AncestryLayer{
+				{"layer-1", []database.AncestryFeature{{realNamespacedFeatures[1], realDetectors[2], realDetectors[1]}}},
+			},
+		},
+	},
+}

 func TestUpsertAncestry(t *testing.T) {
 	store, tx := openSessionForTest(t, "UpsertAncestry", true)
 	defer closeTest(t, store, tx)
-	a1 := database.Ancestry{
-		Name: "a1",
-		Layers: []database.AncestryLayer{
-			{
-				LayerMetadata: database.LayerMetadata{
-					Hash: "layer-N",
-				},
-			},
-		},
-	}
-
-	a2 := database.Ancestry{}
-
-	a3 := database.Ancestry{
-		Name: "a",
-		Layers: []database.AncestryLayer{
-			{
-				LayerMetadata: database.LayerMetadata{
-					Hash: "layer-0",
-				},
-			},
-		},
-	}
-
-	a4 := database.Ancestry{
-		Name: "a",
-		Layers: []database.AncestryLayer{
-			{
-				LayerMetadata: database.LayerMetadata{
-					Hash: "layer-1",
-				},
-			},
-		},
-	}
-
-	f1 := database.Feature{
-		Name:          "wechat",
-		Version:       "0.5",
-		VersionFormat: "dpkg",
-	}
-
-	// not in database
-	f2 := database.Feature{
-		Name:          "wechat",
-		Version:       "0.6",
-		VersionFormat: "dpkg",
-	}
-
-	n1 := database.Namespace{
-		Name:          "debian:7",
-		VersionFormat: "dpkg",
-	}
-
-	p := database.Processors{
-		Listers:   []string{"dpkg", "non-existing"},
-		Detectors: []string{"os-release", "non-existing"},
-	}
-
-	nsf1 := database.NamespacedFeature{
-		Namespace: n1,
-		Feature:   f1,
-	}
-
-	// not in database
-	nsf2 := database.NamespacedFeature{
-		Namespace: n1,
-		Feature:   f2,
-	}
-
-	a4.ProcessedBy = p
-	// invalid case
-	assert.NotNil(t, tx.UpsertAncestry(a1))
-	assert.NotNil(t, tx.UpsertAncestry(a2))
-	// valid case
-	assert.Nil(t, tx.UpsertAncestry(a3))
-	a4.Layers[0].DetectedFeatures = []database.NamespacedFeature{nsf1, nsf2}
-	// replace invalid case
-	assert.NotNil(t, tx.UpsertAncestry(a4))
-	a4.Layers[0].DetectedFeatures = []database.NamespacedFeature{nsf1}
-	// replace valid case
-	assert.Nil(t, tx.UpsertAncestry(a4))
-	// validate
-	ancestry, ok, err := tx.FindAncestry("a")
-	assert.Nil(t, err)
-	assert.True(t, ok)
-	assertAncestryEqual(t, a4, ancestry)
-}
-
-func assertProcessorsEqual(t *testing.T, expected database.Processors, actual database.Processors) bool {
-	sort.Strings(expected.Detectors)
-	sort.Strings(actual.Detectors)
-	sort.Strings(expected.Listers)
-	sort.Strings(actual.Listers)
-	return assert.Equal(t, expected.Detectors, actual.Detectors) && assert.Equal(t, expected.Listers, actual.Listers)
-}
-
-func assertAncestryEqual(t *testing.T, expected database.Ancestry, actual database.Ancestry) bool {
-	assert.Equal(t, expected.Name, actual.Name)
-	assertProcessorsEqual(t, expected.ProcessedBy, actual.ProcessedBy)
-	if assert.Equal(t, len(expected.Layers), len(actual.Layers)) {
-		for index, layer := range expected.Layers {
-			if !assertAncestryLayerEqual(t, layer, actual.Layers[index]) {
-				return false
-			}
-		}
-		return true
-	}
-	return false
-}
-
-func assertAncestryLayerEqual(t *testing.T, expected database.AncestryLayer, actual database.AncestryLayer) bool {
-	return assertLayerEqual(t, expected.LayerMetadata, actual.LayerMetadata) &&
-		assertNamespacedFeatureEqual(t, expected.DetectedFeatures, actual.DetectedFeatures)
-}
+	for _, test := range upsertAncestryTests {
+		t.Run(test.title, func(t *testing.T) {
+			err := tx.UpsertAncestry(*test.in)
+			if test.err != "" {
+				assert.EqualError(t, err, test.err, "unexpected error")
+				return
+			}
+
+			assert.Nil(t, err)
+			actual, ok, err := tx.FindAncestry(test.in.Name)
+			assert.Nil(t, err)
+			assert.True(t, ok)
+			database.AssertAncestryEqual(t, test.in, &actual)
+		})
+	}
+}
+
+var findAncestryTests = []struct {
+	title string
+	in    string
+
+	ancestry *database.Ancestry
+	err      string
+	ok       bool
+}{
+	{
+		title:    "missing ancestry",
+		in:       "ancestry-non",
+		err:      "",
+		ancestry: nil,
+		ok:       false,
+	},
+	{
+		title:    "valid ancestry",
+		in:       "ancestry-2",
+		err:      "",
+		ok:       true,
+		ancestry: takeAncestryPointerFromMap(realAncestries, 2),
+	},
+}

 func TestFindAncestry(t *testing.T) {
 	store, tx := openSessionForTest(t, "FindAncestry", true)
 	defer closeTest(t, store, tx)
-
-	// invalid
-	_, ok, err := tx.FindAncestry("ancestry-non")
-	if assert.Nil(t, err) {
-		assert.False(t, ok)
-	}
-
-	expected := database.Ancestry{
-		Name: "ancestry-2",
-		ProcessedBy: database.Processors{
-			Detectors: []string{"os-release"},
-			Listers:   []string{"dpkg"},
-		},
-		Layers: []database.AncestryLayer{
-			{
-				LayerMetadata: database.LayerMetadata{
-					Hash: "layer-0",
-				},
-				DetectedFeatures: []database.NamespacedFeature{
-					{
-						Namespace: database.Namespace{
-							Name:          "debian:7",
-							VersionFormat: "dpkg",
-						},
-						Feature: database.Feature{
-							Name:          "wechat",
-							Version:       "0.5",
-							VersionFormat: "dpkg",
-						},
-					},
-					{
-						Namespace: database.Namespace{
-							Name:          "debian:8",
-							VersionFormat: "dpkg",
-						},
-						Feature: database.Feature{
-							Name:          "openssl",
-							Version:       "1.0",
-							VersionFormat: "dpkg",
-						},
-					},
-				},
-			},
-			{
-				LayerMetadata: database.LayerMetadata{
-					Hash: "layer-1",
-				},
-			},
-			{
-				LayerMetadata: database.LayerMetadata{
-					Hash: "layer-2",
-				},
-			},
-			{
-				LayerMetadata: database.LayerMetadata{
-					Hash: "layer-3b",
-				},
-			},
-		},
-	}
-
-	// valid
-	ancestry, ok, err := tx.FindAncestry("ancestry-2")
-	if assert.Nil(t, err) && assert.True(t, ok) {
-		assertAncestryEqual(t, expected, ancestry)
-	}
+	for _, test := range findAncestryTests {
+		t.Run(test.title, func(t *testing.T) {
+			ancestry, ok, err := tx.FindAncestry(test.in)
+			if test.err != "" {
+				assert.EqualError(t, err, test.err, "unexpected error")
+				return
+			}
+
+			assert.Nil(t, err)
+			assert.Equal(t, test.ok, ok)
+			if test.ok {
+				database.AssertAncestryEqual(t, test.ancestry, &ancestry)
+			}
+		})
+	}
 }
@@ -220,7 +220,7 @@ func TestCaching(t *testing.T) {
 			actualAffectedNames = append(actualAffectedNames, s.Name)
 		}

-		assert.Len(t, strutil.CompareStringLists(expectedAffectedNames, actualAffectedNames), 0)
-		assert.Len(t, strutil.CompareStringLists(actualAffectedNames, expectedAffectedNames), 0)
+		assert.Len(t, strutil.Difference(expectedAffectedNames, actualAffectedNames), 0)
+		assert.Len(t, strutil.Difference(actualAffectedNames, expectedAffectedNames), 0)
 	}
 }
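[Editor's note] The test rewrites in this commit all share the same table-driven shape: each case carries a title and runs as its own subtest via t.Run, so a failure names the fixture that broke. A generic, self-contained sketch of that pattern; the package name, fixture values, and the function under test are invented for illustration:

package pgsql_sketch

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// validateHash is a hypothetical stand-in for the function under test.
func validateHash(in string) error {
	if in == "" {
		return errors.New("expected non-empty layer hash")
	}
	return nil
}

var validateHashTests = []struct {
	title string
	in    string
	err   string
}{
	{title: "empty hash", in: "", err: "expected non-empty layer hash"},
	{title: "valid hash", in: "layer-0"},
}

func TestValidateHash(t *testing.T) {
	for _, test := range validateHashTests {
		t.Run(test.title, func(t *testing.T) {
			err := validateHash(test.in)
			if test.err != "" {
				assert.EqualError(t, err, test.err)
				return
			}
			assert.Nil(t, err)
		})
	}
}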
198  database/pgsql/detector.go  Normal file
@@ -0,0 +1,198 @@
+// Copyright 2018 clair authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pgsql
+
+import (
+	"database/sql"
+
+	"github.com/deckarep/golang-set"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/coreos/clair/database"
+)
+
+const (
+	soiDetector = `
+		INSERT INTO detector (name, version, dtype)
+		SELECT CAST ($1 AS TEXT), CAST ($2 AS TEXT), CAST ($3 AS detector_type)
+		WHERE NOT EXISTS (SELECT id FROM detector WHERE name = $1 AND version = $2 AND dtype = $3);`
+
+	selectAncestryDetectors = `
+		SELECT d.name, d.version, d.dtype
+		FROM ancestry_detector, detector AS d
+		WHERE ancestry_detector.detector_id = d.id AND ancestry_detector.ancestry_id = $1;`
+
+	selectLayerDetectors = `
+		SELECT d.name, d.version, d.dtype
+		FROM layer_detector, detector AS d
+		WHERE layer_detector.detector_id = d.id AND layer_detector.layer_id = $1;`
+
+	insertAncestryDetectors = `
+		INSERT INTO ancestry_detector (ancestry_id, detector_id)
+		SELECT $1, $2
+		WHERE NOT EXISTS (SELECT id FROM ancestry_detector WHERE ancestry_id = $1 AND detector_id = $2)`
+
+	persistLayerDetector = `
+		INSERT INTO layer_detector (layer_id, detector_id)
+		SELECT $1, $2
+		WHERE NOT EXISTS (SELECT id FROM layer_detector WHERE layer_id = $1 AND detector_id = $2)`
+
+	findDetectorID   = `SELECT id FROM detector WHERE name = $1 AND version = $2 AND dtype = $3`
+	findAllDetectors = `SELECT id, name, version, dtype FROM detector`
+)
+
+type detectorMap struct {
+	byID    map[int64]database.Detector
+	byValue map[database.Detector]int64
+}
+
+func (tx *pgSession) PersistDetectors(detectors []database.Detector) error {
+	for _, d := range detectors {
+		if !d.Valid() {
+			log.WithField("detector", d).Debug("Invalid Detector")
+			return database.ErrInvalidParameters
+		}
+
+		r, err := tx.Exec(soiDetector, d.Name, d.Version, d.DType)
+		if err != nil {
+			return handleError("soiDetector", err)
+		}
+
+		count, err := r.RowsAffected()
+		if err != nil {
+			return handleError("soiDetector", err)
+		}
+
+		if count == 0 {
+			log.Debug("detector already exists: ", d)
+		}
+	}
+
+	return nil
+}
+
+func (tx *pgSession) persistLayerDetector(layerID int64, detectorID int64) error {
+	if _, err := tx.Exec(persistLayerDetector, layerID, detectorID); err != nil {
+		return handleError("persistLayerDetector", err)
+	}
+
+	return nil
+}
+
+func (tx *pgSession) persistLayerDetectors(layerID int64, detectorIDs []int64) error {
+	alreadySaved := mapset.NewSet()
+	for _, id := range detectorIDs {
+		if alreadySaved.Contains(id) {
+			continue
+		}
+
+		alreadySaved.Add(id)
+		if err := tx.persistLayerDetector(layerID, id); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (tx *pgSession) insertAncestryDetectors(ancestryID int64, detectorIDs []int64) error {
+	for _, detectorID := range detectorIDs {
+		if _, err := tx.Exec(insertAncestryDetectors, ancestryID, detectorID); err != nil {
+			return handleError("insertAncestryDetectors", err)
+		}
+	}
+
+	return nil
+}
+
+func (tx *pgSession) findAncestryDetectors(id int64) ([]database.Detector, error) {
+	detectors, err := tx.getDetectors(selectAncestryDetectors, id)
+	log.WithField("detectors", detectors).Debug("found ancestry detectors")
+	return detectors, err
+}
+
+func (tx *pgSession) findLayerDetectors(id int64) ([]database.Detector, error) {
+	detectors, err := tx.getDetectors(selectLayerDetectors, id)
+	log.WithField("detectors", detectors).Debug("found layer detectors")
+	return detectors, err
+}
+
+// findDetectorIDs retrieves the IDs of the given detectors from the database;
+// if any detector is not found, it returns an error.
+func (tx *pgSession) findDetectorIDs(detectors []database.Detector) ([]int64, error) {
+	ids := []int64{}
+	for _, d := range detectors {
+		id := sql.NullInt64{}
+		err := tx.QueryRow(findDetectorID, d.Name, d.Version, d.DType).Scan(&id)
+		if err != nil {
+			return nil, handleError("findDetectorID", err)
+		}
+
+		if !id.Valid {
+			return nil, database.ErrInconsistent
+		}
+
+		ids = append(ids, id.Int64)
+	}
+
+	return ids, nil
+}
+
+func (tx *pgSession) getDetectors(query string, id int64) ([]database.Detector, error) {
+	rows, err := tx.Query(query, id)
+	if err != nil {
+		return nil, handleError("getDetectors", err)
+	}
+
+	detectors := []database.Detector{}
+	for rows.Next() {
+		d := database.Detector{}
+		err := rows.Scan(&d.Name, &d.Version, &d.DType)
+		if err != nil {
+			return nil, handleError("getDetectors", err)
+		}
+
+		if !d.Valid() {
+			return nil, database.ErrInvalidDetector
+		}
+
+		detectors = append(detectors, d)
+	}
+
+	return detectors, nil
+}
+
+func (tx *pgSession) findAllDetectors() (detectorMap, error) {
+	rows, err := tx.Query(findAllDetectors)
+	if err != nil {
+		return detectorMap{}, handleError("searchAllDetectors", err)
+	}
+
+	detectors := detectorMap{byID: make(map[int64]database.Detector), byValue: make(map[database.Detector]int64)}
+	for rows.Next() {
+		var (
+			id int64
+			d  database.Detector
+		)
+		if err := rows.Scan(&id, &d.Name, &d.Version, &d.DType); err != nil {
+			return detectorMap{}, handleError("searchAllDetectors", err)
+		}
+
+		detectors.byID[id] = d
+		detectors.byValue[d] = id
+	}
+
+	return detectors, nil
+}
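[Editor's note] Two idioms in this new file are worth calling out. soiDetector is an idempotent "insert if missing" statement, so PersistDetectors can be replayed safely, and findAllDetectors builds a bidirectional cache so later code can translate detectors to row IDs (byValue) and back (byID) without extra queries. A sketch of the intended lookup pattern; the wrapper function below is illustrative, not part of the commit, and is assumed to sit in the same pgsql package:

// resolveDetector shows the intended two-way use of detectorMap: byValue for
// writes (value -> ID) and byID for reads (ID -> value).
func resolveDetector(tx *pgSession, d database.Detector) (int64, error) {
	detectors, err := tx.findAllDetectors()
	if err != nil {
		return 0, err
	}

	id, ok := detectors.byValue[d]
	if !ok {
		// The detector was never persisted; callers treat this as a
		// missing-entity error rather than inserting on the fly.
		return 0, database.ErrMissingEntities
	}

	return id, nil
}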
119  database/pgsql/detector_test.go  Normal file
@@ -0,0 +1,119 @@
+// Copyright 2018 clair authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pgsql
+
+import (
+	"testing"
+
+	"github.com/deckarep/golang-set"
+	"github.com/stretchr/testify/require"
+
+	"github.com/coreos/clair/database"
+)
+
+func testGetAllDetectors(tx *pgSession) []database.Detector {
+	query := `SELECT name, version, dtype FROM detector`
+	rows, err := tx.Query(query)
+	if err != nil {
+		panic(err)
+	}
+
+	detectors := []database.Detector{}
+	for rows.Next() {
+		d := database.Detector{}
+		if err := rows.Scan(&d.Name, &d.Version, &d.DType); err != nil {
+			panic(err)
+		}
+
+		detectors = append(detectors, d)
+	}
+
+	return detectors
+}
+
+var persistDetectorTests = []struct {
+	title string
+	in    []database.Detector
+	err   string
+}{
+	{
+		title: "invalid detector",
+		in: []database.Detector{
+			{},
+			database.NewFeatureDetector("name", "2.0"),
+		},
+		err: database.ErrInvalidParameters.Error(),
+	},
+	{
+		title: "invalid detector 2",
+		in: []database.Detector{
+			database.NewFeatureDetector("name", "2.0"),
+			{"name", "1.0", "random not valid dtype"},
+		},
+		err: database.ErrInvalidParameters.Error(),
+	},
+	{
+		title: "detectors with some different fields",
+		in: []database.Detector{
+			database.NewFeatureDetector("name", "2.0"),
+			database.NewFeatureDetector("name", "1.0"),
+			database.NewNamespaceDetector("name", "1.0"),
+		},
+	},
+	{
+		title: "duplicated detectors (parameter level)",
+		in: []database.Detector{
+			database.NewFeatureDetector("name", "1.0"),
+			database.NewFeatureDetector("name", "1.0"),
+		},
+	},
+	{
+		title: "duplicated detectors (db level)",
+		in: []database.Detector{
+			database.NewNamespaceDetector("os-release", "1.0"),
+			database.NewNamespaceDetector("os-release", "1.0"),
+			database.NewFeatureDetector("dpkg", "1.0"),
+		},
+	},
+}
+
+func TestPersistDetector(t *testing.T) {
+	datastore, tx := openSessionForTest(t, "PersistDetector", true)
+	defer closeTest(t, datastore, tx)
+
+	for _, test := range persistDetectorTests {
+		t.Run(test.title, func(t *testing.T) {
+			err := tx.PersistDetectors(test.in)
+			if test.err != "" {
+				require.EqualError(t, err, test.err)
+				return
+			}
+
+			detectors := testGetAllDetectors(tx)
+
+			// ensure no duplicated detectors
+			detectorSet := mapset.NewSet()
+			for _, d := range detectors {
+				require.False(t, detectorSet.Contains(d), "duplicated: %v", d)
+				detectorSet.Add(d)
+			}
+
+			// ensure all persisted detectors are actually saved
+			for _, d := range test.in {
+				require.True(t, detectorSet.Contains(d), "detector: %v, detectors: %v", d, detectorSet)
+			}
+		})
+	}
+}
@@ -16,7 +16,6 @@ package pgsql

 import (
 	"database/sql"
-	"errors"
 	"sort"

 	"github.com/lib/pq"
@@ -27,14 +26,42 @@ import (
 	"github.com/coreos/clair/pkg/commonerr"
 )

-var (
-	errFeatureNotFound = errors.New("Feature not found")
-)
+const (
+	soiNamespacedFeature = `
+		WITH new_feature_ns AS (
+			INSERT INTO namespaced_feature(feature_id, namespace_id)
+			SELECT CAST ($1 AS INTEGER), CAST ($2 AS INTEGER)
+			WHERE NOT EXISTS ( SELECT id FROM namespaced_feature WHERE namespaced_feature.feature_id = $1 AND namespaced_feature.namespace_id = $2)
+			RETURNING id
+		)
+		SELECT id FROM namespaced_feature WHERE namespaced_feature.feature_id = $1 AND namespaced_feature.namespace_id = $2
+		UNION
+		SELECT id FROM new_feature_ns`

-type vulnerabilityAffecting struct {
-	vulnerabilityID int64
-	addedByID       int64
-}
+	searchPotentialAffectingVulneraibilities = `
+		SELECT nf.id, v.id, vaf.affected_version, vaf.id
+		FROM vulnerability_affected_feature AS vaf, vulnerability AS v,
+			namespaced_feature AS nf, feature AS f
+		WHERE nf.id = ANY($1)
+			AND nf.feature_id = f.id
+			AND nf.namespace_id = v.namespace_id
+			AND vaf.feature_name = f.name
+			AND vaf.vulnerability_id = v.id
+			AND v.deleted_at IS NULL`
+
+	searchNamespacedFeaturesVulnerabilities = `
+		SELECT vanf.namespaced_feature_id, v.name, v.description, v.link,
+			v.severity, v.metadata, vaf.fixedin, n.name, n.version_format
+		FROM vulnerability_affected_namespaced_feature AS vanf,
+			Vulnerability AS v,
+			vulnerability_affected_feature AS vaf,
+			namespace AS n
+		WHERE vanf.namespaced_feature_id = ANY($1)
+			AND vaf.id = vanf.added_by
+			AND v.id = vanf.vulnerability_id
+			AND n.id = v.namespace_id
+			AND v.deleted_at IS NULL`
+)

 func (tx *pgSession) PersistFeatures(features []database.Feature) error {
 	if len(features) == 0 {
@@ -88,7 +115,7 @@ func (tx *pgSession) searchAffectingVulnerabilities(features []database.Namespac
 	fMap := map[int64]database.NamespacedFeature{}
 	for i, f := range features {
 		if !ids[i].Valid {
-			return nil, errFeatureNotFound
+			return nil, database.ErrMissingEntities
 		}
 		fMap[ids[i].Int64] = f
 	}
@@ -180,7 +207,7 @@ func (tx *pgSession) PersistNamespacedFeatures(features []database.NamespacedFea
 	if ids, err := tx.findFeatureIDs(fToFind); err == nil {
 		for i, id := range ids {
 			if !id.Valid {
-				return errFeatureNotFound
+				return database.ErrMissingEntities
 			}
 			fIDs[fToFind[i]] = id
 		}
@@ -196,7 +223,7 @@ func (tx *pgSession) PersistNamespacedFeatures(features []database.NamespacedFea
 	if ids, err := tx.findNamespaceIDs(nsToFind); err == nil {
 		for i, id := range ids {
 			if !id.Valid {
-				return errNamespaceNotFound
+				return database.ErrMissingEntities
 			}
 			nsIDs[nsToFind[i]] = id
 		}
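[Editor's note] soiNamespacedFeature is the same "select or insert" trick as soiDetector, but expressed as a CTE: the inner INSERT fires only when the (feature_id, namespace_id) pair is absent, and the UNION of the existing-row SELECT with the CTE's RETURNING clause yields exactly one id either way. A sketch of how such a query would be exercised; this wrapper is an assumption, not code from the commit:

// soiNamespacedFeatureSketch returns the id of the namespaced feature,
// inserting the row first when it does not exist yet.
func soiNamespacedFeatureSketch(tx *pgSession, featureID, namespaceID int64) (int64, error) {
	var id int64
	if err := tx.QueryRow(soiNamespacedFeature, featureID, namespaceID).Scan(&id); err != nil {
		return 0, handleError("soiNamespacedFeature", err)
	}

	return id, nil
}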
@@ -52,7 +52,7 @@ func TestPersistNamespacedFeatures(t *testing.T) {

 	// existing features
 	f1 := database.Feature{
-		Name:          "wechat",
+		Name:          "ourchat",
 		Version:       "0.5",
 		VersionFormat: "dpkg",
 	}
@@ -213,27 +213,6 @@ func listFeatures(t *testing.T, tx *pgSession) []database.Feature {
 	return fs
 }

-func assertFeaturesEqual(t *testing.T, expected []database.Feature, actual []database.Feature) bool {
-	if assert.Len(t, actual, len(expected)) {
-		has := map[database.Feature]bool{}
-		for _, nf := range expected {
-			has[nf] = false
-		}
-
-		for _, nf := range actual {
-			has[nf] = true
-		}
-
-		for nf, visited := range has {
-			if !assert.True(t, visited, nf.Name+" is expected") {
-				return false
-			}
-			return true
-		}
-	}
-	return false
-}
-
 func assertNamespacedFeatureEqual(t *testing.T, expected []database.NamespacedFeature, actual []database.NamespacedFeature) bool {
 	if assert.Len(t, actual, len(expected)) {
 		has := map[database.NamespacedFeature]bool{}
@@ -23,6 +23,15 @@ import (
 	"github.com/coreos/clair/pkg/commonerr"
 )

+const (
+	searchKeyValue = `SELECT value FROM KeyValue WHERE key = $1`
+	upsertKeyValue = `
+		INSERT INTO KeyValue(key, value)
+		VALUES ($1, $2)
+		ON CONFLICT ON CONSTRAINT keyvalue_key_key
+		DO UPDATE SET key=$1, value=$2`
+)
+
 func (tx *pgSession) UpdateKeyValue(key, value string) (err error) {
 	if key == "" || value == "" {
 		log.Warning("could not insert a flag which has an empty name or value")
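[Editor's note] upsertKeyValue leans on PostgreSQL's ON CONFLICT clause against the keyvalue_key_key unique constraint, turning the key/value update into a single round trip: insert the key, or overwrite its value on conflict. A sketch of the call it enables; this wrapper is illustrative, not the commit's UpdateKeyValue body:

func upsertKeyValueSketch(tx *pgSession, key, value string) error {
	// One statement: insert the key, or overwrite its value on conflict.
	if _, err := tx.Exec(upsertKeyValue, key, value); err != nil {
		return handleError("upsertKeyValue", err)
	}

	return nil
}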
@@ -18,300 +18,349 @@ import (
 	"database/sql"
 	"sort"

+	"github.com/deckarep/golang-set"
+
 	"github.com/coreos/clair/database"
 	"github.com/coreos/clair/pkg/commonerr"
 )

-func (tx *pgSession) FindLayer(hash string) (database.Layer, bool, error) {
-	var (
-		layer   database.Layer
-		layerID int64
-		ok      bool
-		err     error
-	)
-
-	layer.LayerMetadata, layerID, ok, err = tx.findLayer(hash)
+const (
+	soiLayer = `
+		WITH new_layer AS (
+			INSERT INTO layer (hash)
+			SELECT CAST ($1 AS VARCHAR)
+			WHERE NOT EXISTS (SELECT id FROM layer WHERE hash = $1)
+			RETURNING id
+		)
+		SELECT id FROM new_Layer
+		UNION
+		SELECT id FROM layer WHERE hash = $1`
+
+	findLayerFeatures = `
+		SELECT f.name, f.version, f.version_format, lf.detector_id
+		FROM layer_feature AS lf, feature AS f
+		WHERE lf.feature_id = f.id
+			AND lf.layer_id = $1`
+
+	findLayerNamespaces = `
+		SELECT ns.name, ns.version_format, ln.detector_id
+		FROM layer_namespace AS ln, namespace AS ns
+		WHERE ln.namespace_id = ns.id
+			AND ln.layer_id = $1`
+
+	findLayerID = `SELECT id FROM layer WHERE hash = $1`
+)
+
+// dbLayerNamespace represents the layer_namespace table.
+type dbLayerNamespace struct {
+	layerID     int64
+	namespaceID int64
+	detectorID  int64
+}
+
+// dbLayerFeature represents the layer_feature table.
+type dbLayerFeature struct {
+	layerID    int64
+	featureID  int64
+	detectorID int64
+}
+
+func (tx *pgSession) FindLayer(hash string) (database.Layer, bool, error) {
+	layer := database.Layer{Hash: hash}
+	if hash == "" {
+		return layer, false, commonerr.NewBadRequestError("non empty layer hash is expected.")
+	}
+
+	layerID, ok, err := tx.findLayerID(hash)
+	if err != nil || !ok {
+		return layer, ok, err
+	}
+
+	detectorMap, err := tx.findAllDetectors()
 	if err != nil {
 		return layer, false, err
 	}

-	if !ok {
-		return layer, false, nil
+	if layer.By, err = tx.findLayerDetectors(layerID); err != nil {
+		return layer, false, err
+	}
+
+	if layer.Features, err = tx.findLayerFeatures(layerID, detectorMap); err != nil {
+		return layer, false, err
+	}
+
+	if layer.Namespaces, err = tx.findLayerNamespaces(layerID, detectorMap); err != nil {
+		return layer, false, err
 	}

-	layer.Features, err = tx.findLayerFeatures(layerID)
-	layer.Namespaces, err = tx.findLayerNamespaces(layerID)
 	return layer, true, nil
 }

-func (tx *pgSession) persistLayer(hash string) (int64, error) {
+func sanitizePersistLayerInput(hash string, features []database.LayerFeature, namespaces []database.LayerNamespace, detectedBy []database.Detector) error {
 	if hash == "" {
-		return -1, commonerr.NewBadRequestError("Empty Layer Hash is not allowed")
+		return commonerr.NewBadRequestError("expected non-empty layer hash")
 	}

-	id := sql.NullInt64{}
-	if err := tx.QueryRow(soiLayer, hash).Scan(&id); err != nil {
-		return -1, handleError("queryPersistLayer", err)
+	detectedBySet := mapset.NewSet()
+	for _, d := range detectedBy {
+		detectedBySet.Add(d)
 	}

-	if !id.Valid {
-		panic("null layer.id violates database constraint")
+	for _, f := range features {
+		if !detectedBySet.Contains(f.By) {
+			return database.ErrInvalidParameters
+		}
 	}

-	return id.Int64, nil
+	for _, n := range namespaces {
+		if !detectedBySet.Contains(n.By) {
+			return database.ErrInvalidParameters
+		}
+	}
+
+	return nil
 }

-// PersistLayer relates layer identified by hash with namespaces,
-// features and processors provided. If the layer, namespaces, features are not
-// in database, the function returns an error.
-func (tx *pgSession) PersistLayer(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
-	if hash == "" {
-		return commonerr.NewBadRequestError("Empty layer hash is not allowed")
-	}
-
+// PersistLayer saves the content of a layer to the database.
+func (tx *pgSession) PersistLayer(hash string, features []database.LayerFeature, namespaces []database.LayerNamespace, detectedBy []database.Detector) error {
 	var (
-		err error
-		id  int64
+		err         error
+		id          int64
+		detectorIDs []int64
 	)

-	if id, err = tx.persistLayer(hash); err != nil {
+	if err = sanitizePersistLayerInput(hash, features, namespaces, detectedBy); err != nil {
 		return err
 	}

-	if err = tx.persistLayerNamespace(id, namespaces); err != nil {
+	if id, err = tx.soiLayer(hash); err != nil {
 		return err
 	}

-	if err = tx.persistLayerFeatures(id, features); err != nil {
+	if detectorIDs, err = tx.findDetectorIDs(detectedBy); err != nil {
+		if err == commonerr.ErrNotFound {
+			return database.ErrMissingEntities
+		}
+
 		return err
 	}

-	if err = tx.persistLayerDetectors(id, processedBy.Detectors); err != nil {
+	if err = tx.persistLayerDetectors(id, detectorIDs); err != nil {
 		return err
 	}

-	if err = tx.persistLayerListers(id, processedBy.Listers); err != nil {
+	if err = tx.persistAllLayerFeatures(id, features); err != nil {
+		return err
+	}
+
+	if err = tx.persistAllLayerNamespaces(id, namespaces); err != nil {
 		return err
 	}

 	return nil
 }

-func (tx *pgSession) persistLayerDetectors(id int64, detectors []string) error {
-	if len(detectors) == 0 {
-		return nil
-	}
-
-	// Sorting is needed before inserting into database to prevent deadlock.
-	sort.Strings(detectors)
-	keys := make([]interface{}, len(detectors)*2)
-	for i, d := range detectors {
-		keys[i*2] = id
-		keys[i*2+1] = d
-	}
-	_, err := tx.Exec(queryPersistLayerDetectors(len(detectors)), keys...)
-	if err != nil {
-		return handleError("queryPersistLayerDetectors", err)
-	}
-	return nil
-}
-
-func (tx *pgSession) persistLayerListers(id int64, listers []string) error {
-	if len(listers) == 0 {
-		return nil
-	}
-
-	sort.Strings(listers)
-	keys := make([]interface{}, len(listers)*2)
-	for i, d := range listers {
-		keys[i*2] = id
-		keys[i*2+1] = d
-	}
-
-	_, err := tx.Exec(queryPersistLayerListers(len(listers)), keys...)
-	if err != nil {
-		return handleError("queryPersistLayerDetectors", err)
-	}
-	return nil
-}
+func (tx *pgSession) persistAllLayerNamespaces(layerID int64, namespaces []database.LayerNamespace) error {
+	detectorMap, err := tx.findAllDetectors()
+	if err != nil {
+		return err
+	}
+
+	// TODO(sidac): This kind of type conversion is very useless and wasteful,
+	// we need interfaces around the database models to reduce these kind of
+	// operations.
+	rawNamespaces := make([]database.Namespace, 0, len(namespaces))
+	for _, ns := range namespaces {
+		rawNamespaces = append(rawNamespaces, ns.Namespace)
+	}
+
+	rawNamespaceIDs, err := tx.findNamespaceIDs(rawNamespaces)
+	if err != nil {
+		return err
+	}
+
+	dbLayerNamespaces := make([]dbLayerNamespace, 0, len(namespaces))
+	for i, ns := range namespaces {
+		detectorID := detectorMap.byValue[ns.By]
+		namespaceID := rawNamespaceIDs[i].Int64
+		if !rawNamespaceIDs[i].Valid {
+			return database.ErrMissingEntities
+		}
+
+		dbLayerNamespaces = append(dbLayerNamespaces, dbLayerNamespace{layerID, namespaceID, detectorID})
+	}
+
+	return tx.persistLayerNamespaces(dbLayerNamespaces)
+}
+
+func (tx *pgSession) persistAllLayerFeatures(layerID int64, features []database.LayerFeature) error {
+	detectorMap, err := tx.findAllDetectors()
+	if err != nil {
+		return err
+	}
+
+	rawFeatures := make([]database.Feature, 0, len(features))
+	for _, f := range features {
+		rawFeatures = append(rawFeatures, f.Feature)
+	}
+
+	featureIDs, err := tx.findFeatureIDs(rawFeatures)
+	if err != nil {
+		return err
+	}
+
+	dbFeatures := make([]dbLayerFeature, 0, len(features))
+	for i, f := range features {
+		detectorID := detectorMap.byValue[f.By]
+		featureID := featureIDs[i].Int64
+		if !featureIDs[i].Valid {
+			return database.ErrMissingEntities
+		}
+
+		dbFeatures = append(dbFeatures, dbLayerFeature{layerID, featureID, detectorID})
+	}
+
+	if err := tx.persistLayerFeatures(dbFeatures); err != nil {
+		return err
+	}
+
+	return nil
+}

-func (tx *pgSession) persistLayerFeatures(id int64, features []database.Feature) error {
+func (tx *pgSession) persistLayerFeatures(features []dbLayerFeature) error {
 	if len(features) == 0 {
 		return nil
 	}

-	fIDs, err := tx.findFeatureIDs(features)
-	if err != nil {
-		return err
-	}
-
-	ids := make([]int, len(fIDs))
-	for i, fID := range fIDs {
-		if !fID.Valid {
-			return errNamespaceNotFound
-		}
-		ids[i] = int(fID.Int64)
-	}
-
-	sort.IntSlice(ids).Sort()
-	keys := make([]interface{}, len(features)*2)
-	for i, fID := range ids {
-		keys[i*2] = id
-		keys[i*2+1] = fID
-	}
-
-	_, err = tx.Exec(queryPersistLayerFeature(len(features)), keys...)
+	sort.Slice(features, func(i, j int) bool {
+		return features[i].featureID < features[j].featureID
+	})
+
+	keys := make([]interface{}, len(features)*3)
+	for i, feature := range features {
+		keys[i*3] = feature.layerID
+		keys[i*3+1] = feature.featureID
+		keys[i*3+2] = feature.detectorID
+	}
+
+	_, err := tx.Exec(queryPersistLayerFeature(len(features)), keys...)
 	if err != nil {
 		return handleError("queryPersistLayerFeature", err)
 	}
 	return nil
 }

-func (tx *pgSession) persistLayerNamespace(id int64, namespaces []database.Namespace) error {
+func (tx *pgSession) persistLayerNamespaces(namespaces []dbLayerNamespace) error {
 	if len(namespaces) == 0 {
 		return nil
 	}

-	nsIDs, err := tx.findNamespaceIDs(namespaces)
-	if err != nil {
-		return err
-	}
-
 	// for every bulk persist operation, the input data should be sorted.
-	ids := make([]int, len(nsIDs))
-	for i, nsID := range nsIDs {
-		if !nsID.Valid {
-			panic(errNamespaceNotFound)
-		}
-		ids[i] = int(nsID.Int64)
-	}
-
-	sort.IntSlice(ids).Sort()
-
-	keys := make([]interface{}, len(namespaces)*2)
-	for i, nsID := range ids {
-		keys[i*2] = id
-		keys[i*2+1] = nsID
-	}
-
-	_, err = tx.Exec(queryPersistLayerNamespace(len(namespaces)), keys...)
+	sort.Slice(namespaces, func(i, j int) bool {
+		return namespaces[i].namespaceID < namespaces[j].namespaceID
+	})
+
+	elementSize := 3
+	keys := make([]interface{}, len(namespaces)*elementSize)
+	for i, row := range namespaces {
+		keys[i*3] = row.layerID
+		keys[i*3+1] = row.namespaceID
+		keys[i*3+2] = row.detectorID
+	}
+
+	_, err := tx.Exec(queryPersistLayerNamespace(len(namespaces)), keys...)
 	if err != nil {
 		return handleError("queryPersistLayerNamespace", err)
 	}
 	return nil
 }

-func (tx *pgSession) persistProcessors(listerQuery, listerQueryName, detectorQuery, detectorQueryName string, id int64, processors database.Processors) error {
-	stmt, err := tx.Prepare(listerQuery)
-	if err != nil {
-		return handleError(listerQueryName, err)
-	}
-
-	for _, l := range processors.Listers {
-		_, err := stmt.Exec(id, l)
-		if err != nil {
-			stmt.Close()
-			return handleError(listerQueryName, err)
-		}
-	}
-
-	if err := stmt.Close(); err != nil {
-		return handleError(listerQueryName, err)
-	}
-
-	stmt, err = tx.Prepare(detectorQuery)
-	if err != nil {
-		return handleError(detectorQueryName, err)
-	}
-
-	for _, d := range processors.Detectors {
-		_, err := stmt.Exec(id, d)
-		if err != nil {
-			stmt.Close()
-			return handleError(detectorQueryName, err)
-		}
-	}
-
-	if err := stmt.Close(); err != nil {
-		return handleError(detectorQueryName, err)
-	}
-
-	return nil
-}
-
-func (tx *pgSession) findLayerNamespaces(layerID int64) ([]database.Namespace, error) {
-	var namespaces []database.Namespace
-
-	rows, err := tx.Query(searchLayerNamespaces, layerID)
+func (tx *pgSession) findLayerNamespaces(layerID int64, detectors detectorMap) ([]database.LayerNamespace, error) {
+	rows, err := tx.Query(findLayerNamespaces, layerID)
 	if err != nil {
-		return nil, handleError("searchLayerFeatures", err)
+		return nil, handleError("findLayerNamespaces", err)
 	}

+	namespaces := []database.LayerNamespace{}
 	for rows.Next() {
-		ns := database.Namespace{}
-		err := rows.Scan(&ns.Name, &ns.VersionFormat)
-		if err != nil {
+		var (
+			namespace  database.LayerNamespace
+			detectorID int64
+		)
+
+		if err := rows.Scan(&namespace.Name, &namespace.VersionFormat, &detectorID); err != nil {
 			return nil, err
 		}
-		namespaces = append(namespaces, ns)
+
+		namespace.By = detectors.byID[detectorID]
+		namespaces = append(namespaces, namespace)
 	}

 	return namespaces, nil
 }

-func (tx *pgSession) findLayerFeatures(layerID int64) ([]database.Feature, error) {
-	var features []database.Feature
-
-	rows, err := tx.Query(searchLayerFeatures, layerID)
+func (tx *pgSession) findLayerFeatures(layerID int64, detectors detectorMap) ([]database.LayerFeature, error) {
+	rows, err := tx.Query(findLayerFeatures, layerID)
 	if err != nil {
-		return nil, handleError("searchLayerFeatures", err)
+		return nil, handleError("findLayerFeatures", err)
 	}
+	defer rows.Close()

-	for rows.Next() {
-		f := database.Feature{}
-		err := rows.Scan(&f.Name, &f.Version, &f.VersionFormat)
-		if err != nil {
-			return nil, err
-		}
-		features = append(features, f)
+	features := []database.LayerFeature{}
+	for rows.Next() {
+		var (
+			detectorID int64
+			feature    database.LayerFeature
+		)
+		if err := rows.Scan(&feature.Name, &feature.Version, &feature.VersionFormat, &detectorID); err != nil {
+			return nil, handleError("findLayerFeatures", err)
+		}
+
+		feature.By = detectors.byID[detectorID]
+		features = append(features, feature)
 	}

 	return features, nil
 }

-func (tx *pgSession) findLayer(hash string) (database.LayerMetadata, int64, bool, error) {
-	var (
-		layerID int64
-		layer   = database.LayerMetadata{Hash: hash, ProcessedBy: database.Processors{}}
-	)
-
-	if hash == "" {
-		return layer, layerID, false, commonerr.NewBadRequestError("Empty Layer Hash is not allowed")
-	}
-
-	err := tx.QueryRow(searchLayer, hash).Scan(&layerID)
+func (tx *pgSession) findLayerID(hash string) (int64, bool, error) {
+	var layerID int64
+	err := tx.QueryRow(findLayerID, hash).Scan(&layerID)
 	if err != nil {
 		if err == sql.ErrNoRows {
-			return layer, layerID, false, nil
+			return layerID, false, nil
 		}
-		return layer, layerID, false, err
+
+		return layerID, false, handleError("findLayerID", err)
 	}

-	layer.ProcessedBy, err = tx.findLayerProcessors(layerID)
-	return layer, layerID, true, err
+	return layerID, true, nil
 }

-func (tx *pgSession) findLayerProcessors(id int64) (database.Processors, error) {
-	var (
-		err        error
-		processors database.Processors
-	)
-
-	if processors.Detectors, err = tx.findProcessors(searchLayerDetectors, id); err != nil {
-		return processors, handleError("searchLayerDetectors", err)
-	}
-
-	if processors.Listers, err = tx.findProcessors(searchLayerListers, id); err != nil {
-		return processors, handleError("searchLayerListers", err)
-	}
-
-	return processors, nil
+func (tx *pgSession) findLayerIDs(hashes []string) ([]int64, bool, error) {
+	layerIDs := make([]int64, 0, len(hashes))
+	for _, hash := range hashes {
+		id, ok, err := tx.findLayerID(hash)
+		if !ok {
+			return nil, false, nil
+		}
+
+		if err != nil {
+			return nil, false, err
+		}
+
+		layerIDs = append(layerIDs, id)
+	}
+
+	return layerIDs, true, nil
+}
+
+func (tx *pgSession) soiLayer(hash string) (int64, error) {
+	var id int64
+	if err := tx.QueryRow(soiLayer, hash).Scan(&id); err != nil {
+		return 0, handleError("soiLayer", err)
+	}
+
+	return id, nil
 }
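[Editor's note] The bulk persists above share one recipe: sort the rows by their numeric ID so that concurrent transactions take row locks in a consistent order (the deadlock comment), flatten each row into three positional arguments, and execute one generated multi-VALUES statement. queryPersistLayerFeature and queryPersistLayerNamespace are assumed to be such statement generators elsewhere in the package; a self-contained sketch of the idea:

package pgsql_sketch

import (
	"fmt"
	"strings"
)

// buildBulkInsert renders an INSERT with one positional-parameter tuple per
// row, e.g. for 2 rows of 3 columns:
//   INSERT INTO t (a, b, c) VALUES ($1, $2, $3), ($4, $5, $6)
func buildBulkInsert(table string, columns []string, rowCount int) string {
	var b strings.Builder
	fmt.Fprintf(&b, "INSERT INTO %s (%s) VALUES ", table, strings.Join(columns, ", "))
	for row := 0; row < rowCount; row++ {
		if row > 0 {
			b.WriteString(", ")
		}
		b.WriteByte('(')
		for col := 0; col < len(columns); col++ {
			if col > 0 {
				b.WriteString(", ")
			}
			// Parameters are numbered row-major: $1..$3 for row 0, and so on.
			fmt.Fprintf(&b, "$%d", row*len(columns)+col+1)
		}
		b.WriteByte(')')
	}
	return b.String()
}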
@ -22,105 +22,169 @@ import (
|
|||||||
"github.com/coreos/clair/database"
|
"github.com/coreos/clair/database"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var persistLayerTests = []struct {
|
||||||
|
title string
|
||||||
|
name string
|
||||||
|
by []database.Detector
|
||||||
|
features []database.LayerFeature
|
||||||
|
namespaces []database.LayerNamespace
|
||||||
|
layer *database.Layer
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
title: "invalid layer name",
|
||||||
|
name: "",
|
||||||
|
err: "expected non-empty layer hash",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
title: "layer with inconsistent feature and detectors",
|
||||||
|
name: "random-forest",
|
||||||
|
by: []database.Detector{realDetectors[2]},
|
||||||
|
features: []database.LayerFeature{
|
||||||
|
{realFeatures[1], realDetectors[1]},
|
||||||
|
},
|
||||||
|
err: "database: parameters are not valid",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
title: "layer with non-existing feature",
|
||||||
|
name: "random-forest",
|
||||||
|
err: "database: associated immutable entities are missing in the database",
|
||||||
|
by: []database.Detector{realDetectors[2]},
|
||||||
|
features: []database.LayerFeature{
|
||||||
|
{fakeFeatures[1], realDetectors[2]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
title: "layer with non-existing namespace",
|
||||||
|
name: "random-forest2",
|
||||||
|
err: "database: associated immutable entities are missing in the database",
|
||||||
|
by: []database.Detector{realDetectors[1]},
|
||||||
|
namespaces: []database.LayerNamespace{
|
||||||
|
{fakeNamespaces[1], realDetectors[1]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
title: "layer with non-existing detector",
|
||||||
|
name: "random-forest3",
|
||||||
|
err: "database: associated immutable entities are missing in the database",
|
||||||
|
by: []database.Detector{fakeDetector[1]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
title: "valid layer",
|
||||||
|
name: "hamsterhouse",
|
||||||
|
by: []database.Detector{realDetectors[1], realDetectors[2]},
|
||||||
|
features: []database.LayerFeature{
|
||||||
|
{realFeatures[1], realDetectors[2]},
|
||||||
|
{realFeatures[2], realDetectors[2]},
|
||||||
|
},
|
||||||
|
namespaces: []database.LayerNamespace{
|
||||||
|
{realNamespaces[1], realDetectors[1]},
|
||||||
|
},
|
||||||
|
layer: &database.Layer{
|
||||||
|
Hash: "hamsterhouse",
|
||||||
|
By: []database.Detector{realDetectors[1], realDetectors[2]},
|
||||||
|
Features: []database.LayerFeature{
|
||||||
|
{realFeatures[1], realDetectors[2]},
|
||||||
|
{realFeatures[2], realDetectors[2]},
|
||||||
|
},
|
||||||
|
Namespaces: []database.LayerNamespace{
|
||||||
|
{realNamespaces[1], realDetectors[1]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
title: "update existing layer",
|
||||||
|
name: "layer-1",
|
||||||
|
by: []database.Detector{realDetectors[3], realDetectors[4]},
|
||||||
|
features: []database.LayerFeature{
|
||||||
|
{realFeatures[4], realDetectors[3]},
|
||||||
|
},
|
||||||
|
namespaces: []database.LayerNamespace{
|
||||||
|
{realNamespaces[3], realDetectors[4]},
|
||||||
|
},
|
||||||
|
layer: &database.Layer{
|
||||||
|
Hash: "layer-1",
|
||||||
|
By: []database.Detector{realDetectors[1], realDetectors[2], realDetectors[3], realDetectors[4]},
|
||||||
|
Features: []database.LayerFeature{
|
||||||
|
{realFeatures[1], realDetectors[2]},
|
||||||
|
{realFeatures[2], realDetectors[2]},
|
||||||
|
{realFeatures[4], realDetectors[3]},
|
||||||
|
},
|
||||||
|
Namespaces: []database.LayerNamespace{
|
||||||
|
{realNamespaces[1], realDetectors[1]},
|
||||||
|
{realNamespaces[3], realDetectors[4]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
 func TestPersistLayer(t *testing.T) {
-	datastore, tx := openSessionForTest(t, "PersistLayer", false)
+	datastore, tx := openSessionForTest(t, "PersistLayer", true)
 	defer closeTest(t, datastore, tx)

-	// invalid
-	assert.NotNil(t, tx.PersistLayer("", nil, nil, database.Processors{}))
-	// insert namespaces + features to
-	namespaces := []database.Namespace{
-		{
-			Name:          "sushi shop",
-			VersionFormat: "apk",
-		},
-	}
-
-	features := []database.Feature{
-		{
-			Name:          "blue fin sashimi",
-			Version:       "v1.0",
-			VersionFormat: "apk",
-		},
-	}
-
-	processors := database.Processors{
-		Listers:   []string{"release"},
-		Detectors: []string{"apk"},
-	}
-
-	assert.Nil(t, tx.PersistNamespaces(namespaces))
-	assert.Nil(t, tx.PersistFeatures(features))
-
-	// Valid
-	assert.Nil(t, tx.PersistLayer("RANDOM_FOREST", namespaces, features, processors))
-
-	nonExistingFeature := []database.Feature{{Name: "lobster sushi", Version: "v0.1", VersionFormat: "apk"}}
-	// Invalid:
-	assert.NotNil(t, tx.PersistLayer("RANDOM_FOREST", namespaces, nonExistingFeature, processors))
-
-	assert.Nil(t, tx.PersistFeatures(nonExistingFeature))
-	// Update the layer
-	assert.Nil(t, tx.PersistLayer("RANDOM_FOREST", namespaces, nonExistingFeature, processors))
-
-	// confirm update
-	layer, ok, err := tx.FindLayer("RANDOM_FOREST")
-	assert.Nil(t, err)
-	assert.True(t, ok)
-
-	expectedLayer := database.Layer{
-		LayerMetadata: database.LayerMetadata{
-			Hash:        "RANDOM_FOREST",
-			ProcessedBy: processors,
-		},
-		Features:   append(features, nonExistingFeature...),
-		Namespaces: namespaces,
-	}
-
-	assertLayerWithContentEqual(t, expectedLayer, layer)
+	for _, test := range persistLayerTests {
+		t.Run(test.title, func(t *testing.T) {
+			err := tx.PersistLayer(test.name, test.features, test.namespaces, test.by)
+			if test.err != "" {
+				assert.EqualError(t, err, test.err, "unexpected error")
+				return
+			}
+
+			assert.Nil(t, err)
+			if test.layer != nil {
+				layer, ok, err := tx.FindLayer(test.name)
+				assert.Nil(t, err)
+				assert.True(t, ok)
+				database.AssertLayerEqual(t, test.layer, &layer)
+			}
+		})
+	}
 }

+var findLayerTests = []struct {
+	title string
+	in    string
+	out   *database.Layer
+	err   string
+	ok    bool
+}{
+	{
+		title: "invalid layer name",
+		in:    "",
+		err:   "non empty layer hash is expected.",
+	},
+	{
+		title: "non-existing layer",
+		in:    "layer-non-existing",
+		ok:    false,
+		out:   nil,
+	},
+	{
+		title: "existing layer",
+		in:    "layer-4",
+		ok:    true,
+		out:   takeLayerPointerFromMap(realLayers, 6),
+	},
+}
+
 func TestFindLayer(t *testing.T) {
 	datastore, tx := openSessionForTest(t, "FindLayer", true)
 	defer closeTest(t, datastore, tx)

-	_, _, err := tx.FindLayer("")
-	assert.NotNil(t, err)
-	_, ok, err := tx.FindLayer("layer-non")
-	assert.Nil(t, err)
-	assert.False(t, ok)
-
-	expectedL := database.Layer{
-		LayerMetadata: database.LayerMetadata{
-			Hash: "layer-4",
-			ProcessedBy: database.Processors{
-				Detectors: []string{"os-release", "apt-sources"},
-				Listers:   []string{"dpkg", "rpm"},
-			},
-		},
-		Features: []database.Feature{
-			{Name: "fake", Version: "2.0", VersionFormat: "rpm"},
-			{Name: "openssl", Version: "2.0", VersionFormat: "dpkg"},
-		},
-		Namespaces: []database.Namespace{
-			{Name: "debian:7", VersionFormat: "dpkg"},
-			{Name: "fake:1.0", VersionFormat: "rpm"},
-		},
-	}
-
-	layer, ok2, err := tx.FindLayer("layer-4")
-	if assert.Nil(t, err) && assert.True(t, ok2) {
-		assertLayerWithContentEqual(t, expectedL, layer)
-	}
+	for _, test := range findLayerTests {
+		t.Run(test.title, func(t *testing.T) {
+			layer, ok, err := tx.FindLayer(test.in)
+			if test.err != "" {
+				assert.EqualError(t, err, test.err, "unexpected error")
+				return
+			}
+
+			assert.Nil(t, err)
+			assert.Equal(t, test.ok, ok)
+			if test.ok {
+				database.AssertLayerEqual(t, test.out, &layer)
+			}
+		})
+	}
 }
-
-func assertLayerWithContentEqual(t *testing.T, expected database.Layer, actual database.Layer) bool {
-	return assertLayerEqual(t, expected.LayerMetadata, actual.LayerMetadata) &&
-		assertFeaturesEqual(t, expected.Features, actual.Features) &&
-		assertNamespacesEqual(t, expected.Namespaces, actual.Namespaces)
-}
-
-func assertLayerEqual(t *testing.T, expected database.LayerMetadata, actual database.LayerMetadata) bool {
-	return assertProcessorsEqual(t, expected.ProcessedBy, actual.ProcessedBy) &&
-		assert.Equal(t, expected.Hash, actual.Hash)
-}
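Both tests above now follow Go's table-driven pattern: a package-level fixture slice drives t.Run subtests so each case passes or fails under its own title. A minimal, self-contained sketch of the pattern (a generic example, not Clair code):

package example

import "testing"

func double(x int) int { return x * 2 }

func TestDouble(t *testing.T) {
	tests := []struct {
		title string
		in    int
		want  int
	}{
		{title: "zero", in: 0, want: 0},
		{title: "positive", in: 2, want: 4},
	}
	for _, test := range tests {
		// Each case runs as its own subtest and is reported by title.
		t.Run(test.title, func(t *testing.T) {
			if got := double(test.in); got != test.want {
				t.Errorf("double(%d) = %d, want %d", test.in, got, test.want)
			}
		})
	}
}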
@ -23,6 +23,14 @@ import (
 	"github.com/coreos/clair/pkg/commonerr"
 )

+const (
+	soiLock = `INSERT INTO lock(name, owner, until) VALUES ($1, $2, $3)`
+
+	searchLock        = `SELECT owner, until FROM Lock WHERE name = $1`
+	updateLock        = `UPDATE Lock SET until = $3 WHERE name = $1 AND owner = $2`
+	removeLock        = `DELETE FROM Lock WHERE name = $1 AND owner = $2`
+	removeLockExpired = `DELETE FROM LOCK WHERE until < CURRENT_TIMESTAMP`
+)
+
 var (
 	errLockNotFound = errors.New("lock is not in database")
 )
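These statements pair an insert-if-absent (soiLock) with owner-scoped renewal and removal. A minimal sketch of how they might compose into an acquire-or-renew step, assuming database/sql, the time package, and the constants above; the function and its name are illustrative, not Clair's API:

// acquireOrRenewLock is illustrative only: it renews the expiry when this
// owner already holds the lock, and otherwise tries to create the lock row.
func acquireOrRenewLock(tx *sql.Tx, name, owner string, until time.Time) (bool, error) {
	var currentOwner string
	var currentUntil time.Time
	err := tx.QueryRow(searchLock, name).Scan(&currentOwner, &currentUntil)
	switch {
	case err == sql.ErrNoRows:
		// No lock row yet; try to create it.
		_, err := tx.Exec(soiLock, name, owner, until)
		return err == nil, err
	case err != nil:
		return false, err
	case currentOwner == owner:
		// We already hold it; extend the expiry.
		_, err := tx.Exec(updateLock, name, owner, until)
		return err == nil, err
	default:
		// Held by someone else.
		return false, nil
	}
}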
@ -14,179 +14,223 @@
 package migrations

-import "github.com/remind101/migrate"
-
-func init() {
-	RegisterMigration(migrate.Migration{
-		ID: 1,
-		Up: migrate.Queries([]string{
+var (
+	// entities are the basic building blocks to relate the vulnerabilities with
+	// the ancestry.
+	entities = MigrationQuery{
+		Up: []string{
 			// namespaces
 			`CREATE TABLE IF NOT EXISTS namespace (
 				id SERIAL PRIMARY KEY,
 				name TEXT NULL,
 				version_format TEXT,
 				UNIQUE (name, version_format));`,
 			`CREATE INDEX ON namespace(name);`,

 			// features
 			`CREATE TABLE IF NOT EXISTS feature (
 				id SERIAL PRIMARY KEY,
 				name TEXT NOT NULL,
 				version TEXT NOT NULL,
 				version_format TEXT NOT NULL,
 				UNIQUE (name, version, version_format));`,
 			`CREATE INDEX ON feature(name);`,

 			`CREATE TABLE IF NOT EXISTS namespaced_feature (
 				id SERIAL PRIMARY KEY,
-				namespace_id INT REFERENCES namespace,
-				feature_id INT REFERENCES feature,
+				namespace_id INT REFERENCES namespace ON DELETE CASCADE,
+				feature_id INT REFERENCES feature ON DELETE CASCADE,
 				UNIQUE (namespace_id, feature_id));`,
+		},
+		Down: []string{
+			`DROP TABLE IF EXISTS namespace, feature, namespaced_feature CASCADE;`,
+		},
+	}
+
+	// detector is the analysis extension used by the worker.
+	detector = MigrationQuery{
+		Up: []string{
+			// Detector Type
+			`CREATE TYPE detector_type AS ENUM ('namespace', 'feature');`,
+
+			// Detector
+			`CREATE TABLE IF NOT EXISTS detector (
+				id SERIAL PRIMARY KEY,
+				name TEXT NOT NULL,
+				version TEXT NOT NULL,
+				dtype detector_type NOT NULL,
+				UNIQUE (name, version, dtype));`,
+		},
+		Down: []string{
+			`DROP TABLE IF EXISTS detector CASCADE;`,
+			`DROP TYPE IF EXISTS detector_type;`,
+		},
+	}
+
+	// layer contains all metadata and scanned features and namespaces.
+	layer = MigrationQuery{
+		Up: []string{
 			// layers
 			`CREATE TABLE IF NOT EXISTS layer(
 				id SERIAL PRIMARY KEY,
 				hash TEXT NOT NULL UNIQUE);`,

-			`CREATE TABLE IF NOT EXISTS layer_feature (
+			`CREATE TABLE IF NOT EXISTS layer_detector(
 				id SERIAL PRIMARY KEY,
 				layer_id INT REFERENCES layer ON DELETE CASCADE,
-				feature_id INT REFERENCES feature ON DELETE CASCADE,
-				UNIQUE (layer_id, feature_id));`,
-			`CREATE INDEX ON layer_feature(layer_id);`,
-
-			`CREATE TABLE IF NOT EXISTS layer_lister (
-				id SERIAL PRIMARY KEY,
-				layer_id INT REFERENCES layer ON DELETE CASCADE,
-				lister TEXT NOT NULL,
-				UNIQUE (layer_id, lister));`,
-			`CREATE INDEX ON layer_lister(layer_id);`,
-
-			`CREATE TABLE IF NOT EXISTS layer_detector (
-				id SERIAL PRIMARY KEY,
-				layer_id INT REFERENCES layer ON DELETE CASCADE,
-				detector TEXT,
-				UNIQUE (layer_id, detector));`,
+				detector_id INT REFERENCES detector ON DELETE CASCADE,
+				UNIQUE(layer_id, detector_id));`,
 			`CREATE INDEX ON layer_detector(layer_id);`,

-			`CREATE TABLE IF NOT EXISTS layer_namespace (
+			`CREATE TABLE IF NOT EXISTS layer_feature (
 				id SERIAL PRIMARY KEY,
 				layer_id INT REFERENCES layer ON DELETE CASCADE,
-				namespace_id INT REFERENCES namespace ON DELETE CASCADE,
-				UNIQUE (layer_id, namespace_id));`,
-			`CREATE INDEX ON layer_namespace(layer_id);`,
+				feature_id INT REFERENCES feature ON DELETE CASCADE,
+				detector_id INT REFERENCES detector ON DELETE CASCADE,
+				UNIQUE (layer_id, feature_id));`,
+			`CREATE INDEX ON layer_feature(layer_id);`,
+
+			`CREATE TABLE IF NOT EXISTS layer_namespace (
+				id SERIAL PRIMARY KEY,
+				layer_id INT REFERENCES layer ON DELETE CASCADE,
+				namespace_id INT REFERENCES namespace ON DELETE CASCADE,
+				detector_id INT REFERENCES detector ON DELETE CASCADE,
+				UNIQUE (layer_id, namespace_id));`,
+			`CREATE INDEX ON layer_namespace(layer_id);`,
+		},
+		Down: []string{
+			`DROP TABLE IF EXISTS layer, layer_detector, layer_feature, layer_namespace CASCADE;`,
+		},
+	}
+
+	// ancestry contains all meta information around scanned manifest and its
+	// layers.
+	ancestry = MigrationQuery{
+		Up: []string{
 			// ancestry
 			`CREATE TABLE IF NOT EXISTS ancestry (
 				id SERIAL PRIMARY KEY,
 				name TEXT NOT NULL UNIQUE);`,

 			`CREATE TABLE IF NOT EXISTS ancestry_layer (
 				id SERIAL PRIMARY KEY,
 				ancestry_id INT REFERENCES ancestry ON DELETE CASCADE,
 				ancestry_index INT NOT NULL,
-				layer_id INT REFERENCES layer ON DELETE RESTRICT,
+				layer_id INT NOT NULL REFERENCES layer ON DELETE RESTRICT,
 				UNIQUE (ancestry_id, ancestry_index));`,
 			`CREATE INDEX ON ancestry_layer(ancestry_id);`,

 			`CREATE TABLE IF NOT EXISTS ancestry_feature(
 				id SERIAL PRIMARY KEY,
 				ancestry_layer_id INT REFERENCES ancestry_layer ON DELETE CASCADE,
 				namespaced_feature_id INT REFERENCES namespaced_feature ON DELETE CASCADE,
+				feature_detector_id INT REFERENCES detector ON DELETE CASCADE,
+				namespace_detector_id INT REFERENCES detector ON DELETE CASCADE,
 				UNIQUE (ancestry_layer_id, namespaced_feature_id));`,

-			`CREATE TABLE IF NOT EXISTS ancestry_lister (
+			`CREATE TABLE IF NOT EXISTS ancestry_detector(
 				id SERIAL PRIMARY KEY,
 				ancestry_id INT REFERENCES ancestry ON DELETE CASCADE,
-				lister TEXT,
-				UNIQUE (ancestry_id, lister));`,
-			`CREATE INDEX ON ancestry_lister(ancestry_id);`,
-
-			`CREATE TABLE IF NOT EXISTS ancestry_detector (
-				id SERIAL PRIMARY KEY,
-				ancestry_id INT REFERENCES ancestry ON DELETE CASCADE,
-				detector TEXT,
-				UNIQUE (ancestry_id, detector));`,
+				detector_id INT REFERENCES detector ON DELETE CASCADE,
+				UNIQUE(ancestry_id, detector_id));`,
 			`CREATE INDEX ON ancestry_detector(ancestry_id);`,
+		},
+		Down: []string{
+			`DROP TABLE IF EXISTS ancestry, ancestry_layer, ancestry_feature, ancestry_detector CASCADE;`,
+		},
+	}
+
+	// vulnerability contains the metadata and vulnerability affecting relation.
+	vulnerability = MigrationQuery{
+		Up: []string{
 			`CREATE TYPE severity AS ENUM ('Unknown', 'Negligible', 'Low', 'Medium', 'High', 'Critical', 'Defcon1');`,

 			// vulnerability
 			`CREATE TABLE IF NOT EXISTS vulnerability (
 				id SERIAL PRIMARY KEY,
-				namespace_id INT NOT NULL REFERENCES Namespace,
+				namespace_id INT REFERENCES Namespace,
 				name TEXT NOT NULL,
 				description TEXT NULL,
 				link TEXT NULL,
 				severity severity NOT NULL,
 				metadata TEXT NULL,
 				created_at TIMESTAMP WITH TIME ZONE,
 				deleted_at TIMESTAMP WITH TIME ZONE NULL);`,
 			`CREATE INDEX ON vulnerability(namespace_id, name);`,
 			`CREATE INDEX ON vulnerability(namespace_id);`,

 			`CREATE TABLE IF NOT EXISTS vulnerability_affected_feature (
 				id SERIAL PRIMARY KEY,
-				vulnerability_id INT NOT NULL REFERENCES vulnerability ON DELETE CASCADE,
+				vulnerability_id INT REFERENCES vulnerability ON DELETE CASCADE,
 				feature_name TEXT NOT NULL,
 				affected_version TEXT,
 				fixedin TEXT);`,
 			`CREATE INDEX ON vulnerability_affected_feature(vulnerability_id, feature_name);`,

 			`CREATE TABLE IF NOT EXISTS vulnerability_affected_namespaced_feature(
 				id SERIAL PRIMARY KEY,
-				vulnerability_id INT NOT NULL REFERENCES vulnerability ON DELETE CASCADE,
-				namespaced_feature_id INT NOT NULL REFERENCES namespaced_feature ON DELETE CASCADE,
-				added_by INT NOT NULL REFERENCES vulnerability_affected_feature ON DELETE CASCADE,
+				vulnerability_id INT REFERENCES vulnerability ON DELETE CASCADE,
+				namespaced_feature_id INT REFERENCES namespaced_feature ON DELETE CASCADE,
+				added_by INT REFERENCES vulnerability_affected_feature ON DELETE CASCADE,
 				UNIQUE (vulnerability_id, namespaced_feature_id));`,
 			`CREATE INDEX ON vulnerability_affected_namespaced_feature(namespaced_feature_id);`,
+		},
+		Down: []string{
+			`DROP TYPE IF EXISTS severity;`,
+			`DROP TABLE IF EXISTS vulnerability, vulnerability_affected_feature, vulnerability_affected_namespaced_feature CASCADE;`,
+		},
+	}
+
+	// updaterLock is the lock to be used by updater to prevent multiple
+	// updaters running on the same vulnerability source.
+	updaterLock = MigrationQuery{
+		Up: []string{
 			`CREATE TABLE IF NOT EXISTS KeyValue (
 				id SERIAL PRIMARY KEY,
 				key TEXT NOT NULL UNIQUE,
 				value TEXT);`,

 			`CREATE TABLE IF NOT EXISTS Lock (
 				id SERIAL PRIMARY KEY,
 				name VARCHAR(64) NOT NULL UNIQUE,
 				owner VARCHAR(64) NOT NULL,
 				until TIMESTAMP WITH TIME ZONE);`,
 			`CREATE INDEX ON Lock (owner);`,
+		},
+		Down: []string{
+			`DROP TABLE IF EXISTS KeyValue, Lock CASCADE;`,
+		},
+	}

-			// Notification
+	// notification is the vulnerability notification spawned by the
+	// vulnerability changes.
+	notification = MigrationQuery{
+		Up: []string{
 			`CREATE TABLE IF NOT EXISTS Vulnerability_Notification (
 				id SERIAL PRIMARY KEY,
 				name VARCHAR(64) NOT NULL UNIQUE,
 				created_at TIMESTAMP WITH TIME ZONE,
 				notified_at TIMESTAMP WITH TIME ZONE NULL,
 				deleted_at TIMESTAMP WITH TIME ZONE NULL,
 				old_vulnerability_id INT NULL REFERENCES Vulnerability ON DELETE CASCADE,
 				new_vulnerability_id INT NULL REFERENCES Vulnerability ON DELETE CASCADE);`,
 			`CREATE INDEX ON Vulnerability_Notification (notified_at);`,
-		}),
-		Down: migrate.Queries([]string{
-			`DROP TABLE IF EXISTS
-				ancestry,
-				ancestry_layer,
-				ancestry_detector,
-				ancestry_lister,
-				ancestry_feature,
-				feature,
-				namespaced_feature,
-				keyvalue,
-				layer,
-				layer_detector,
-				layer_feature,
-				layer_lister,
-				layer_namespace,
-				lock,
-				namespace,
-				vulnerability,
-				vulnerability_affected_feature,
-				vulnerability_affected_namespaced_feature,
-				vulnerability_notification
-			CASCADE;`,
-			`DROP TYPE IF EXISTS severity;`,
-		}),
-	})
+		},
+		Down: []string{
+			`DROP TABLE IF EXISTS Vulnerability_Notification CASCADE;`,
+		},
+	}
+)
+
+func init() {
+	RegisterMigration(NewSimpleMigration(1,
+		[]MigrationQuery{
+			entities,
+			detector,
+			layer,
+			ancestry,
+			vulnerability,
+			updaterLock,
+			notification,
+		}))
 }
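The schema change at the heart of this PR is visible here: layer_feature, layer_namespace, and ancestry_feature rows now carry detector ids, so every piece of detected content is versioned by the extension (name, version, dtype) that produced it. An illustrative query over the new tables (not part of this diff), assuming database/sql:

// featuresWithDetector is illustrative only: it lists a layer's features
// together with the name and version of the detector that found each one.
func featuresWithDetector(db *sql.DB, layerID int) (*sql.Rows, error) {
	return db.Query(`
		SELECT f.name, f.version, d.name, d.version
		FROM layer_feature AS lf, feature AS f, detector AS d
		WHERE lf.layer_id = $1
		  AND lf.feature_id = f.id
		  AND lf.detector_id = d.id`, layerID)
}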
database/pgsql/migrations/util.go (new file, 30 lines)
@ -0,0 +1,30 @@
+package migrations
+
+import "github.com/remind101/migrate"
+
+// MigrationQuery contains the Up migration and Down migration in plain strings.
+type MigrationQuery struct {
+	Up   []string
+	Down []string
+}
+
+// ConcatMigrationQueries concatenates migration queries in the given order.
+func ConcatMigrationQueries(qs []MigrationQuery) MigrationQuery {
+	r := MigrationQuery{}
+	for _, q := range qs {
+		r.Up = append(r.Up, q.Up...)
+		r.Down = append(r.Down, q.Down...)
+	}
+	return r
+}
+
+// NewSimpleMigration returns a simple migration plan with all provided
+// migration queries concatenated in order.
+func NewSimpleMigration(id int, qs []MigrationQuery) migrate.Migration {
+	q := ConcatMigrationQueries(qs)
+	return migrate.Migration{
+		ID:   id,
+		Up:   migrate.Queries(q.Up),
+		Down: migrate.Queries(q.Down),
+	}
+}
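The init() in the migrations file above is the intended caller of NewSimpleMigration; the same composition can be written directly, e.g. (a sketch mirroring that init(), not new behavior):

func exampleMigration() migrate.Migration {
	// Concatenate the per-area query groups into one migration with ID 1.
	return NewSimpleMigration(1, []MigrationQuery{
		entities, detector, layer, ancestry, vulnerability, updaterLock, notification,
	})
}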
@ -16,15 +16,14 @@ package pgsql
 import (
 	"database/sql"
-	"errors"
 	"sort"

 	"github.com/coreos/clair/database"
 	"github.com/coreos/clair/pkg/commonerr"
 )

-var (
-	errNamespaceNotFound = errors.New("Requested Namespace is not in database")
+const (
+	searchNamespaceID = `SELECT id FROM Namespace WHERE name = $1 AND version_format = $2`
 )

 // PersistNamespaces soi namespaces into database.
@ -42,42 +42,3 @@ func TestPersistNamespaces(t *testing.T) {
 	assert.Len(t, nsList, 1)
 	assert.Equal(t, ns2, nsList[0])
 }
-
-func assertNamespacesEqual(t *testing.T, expected []database.Namespace, actual []database.Namespace) bool {
-	if assert.Len(t, actual, len(expected)) {
-		has := map[database.Namespace]bool{}
-		for _, i := range expected {
-			has[i] = false
-		}
-		for _, i := range actual {
-			has[i] = true
-		}
-		for key, v := range has {
-			if !assert.True(t, v, key.Name+"is expected") {
-				return false
-			}
-		}
-		return true
-	}
-	return false
-}
-
-func listNamespaces(t *testing.T, tx *pgSession) []database.Namespace {
-	rows, err := tx.Query("SELECT name, version_format FROM namespace")
-	if err != nil {
-		t.FailNow()
-	}
-	defer rows.Close()
-
-	namespaces := []database.Namespace{}
-	for rows.Next() {
-		var ns database.Namespace
-		err := rows.Scan(&ns.Name, &ns.VersionFormat)
-		if err != nil {
-			t.FailNow()
-		}
-		namespaces = append(namespaces, ns)
-	}
-
-	return namespaces
-}
@ -26,6 +26,49 @@ import (
 	"github.com/coreos/clair/pkg/pagination"
 )

+const (
+	insertNotification = `
+		INSERT INTO Vulnerability_Notification(name, created_at, old_vulnerability_id, new_vulnerability_id)
+		VALUES ($1, $2, $3, $4)`
+
+	updatedNotificationAsRead = `
+		UPDATE Vulnerability_Notification
+		SET notified_at = CURRENT_TIMESTAMP
+		WHERE name = $1`
+
+	removeNotification = `
+		UPDATE Vulnerability_Notification
+		SET deleted_at = CURRENT_TIMESTAMP
+		WHERE name = $1 AND deleted_at IS NULL`
+
+	searchNotificationAvailable = `
+		SELECT name, created_at, notified_at, deleted_at
+		FROM Vulnerability_Notification
+		WHERE (notified_at IS NULL OR notified_at < $1)
+			AND deleted_at IS NULL
+			AND name NOT IN (SELECT name FROM Lock)
+		ORDER BY Random()
+		LIMIT 1`
+
+	searchNotification = `
+		SELECT created_at, notified_at, deleted_at, old_vulnerability_id, new_vulnerability_id
+		FROM Vulnerability_Notification
+		WHERE name = $1`
+
+	searchNotificationVulnerableAncestry = `
+		SELECT DISTINCT ON (a.id)
+			a.id, a.name
+		FROM vulnerability_affected_namespaced_feature AS vanf,
+			ancestry_layer AS al, ancestry_feature AS af, ancestry AS a
+		WHERE vanf.vulnerability_id = $1
+			AND a.id >= $2
+			AND al.ancestry_id = a.id
+			AND al.id = af.ancestry_layer_id
+			AND af.namespaced_feature_id = vanf.namespaced_feature_id
+		ORDER BY a.id ASC
+		LIMIT $3;`
+)
+
 var (
 	errNotificationNotFound = errors.New("requested notification is not found")
 )
@ -168,14 +211,12 @@ func (tx *pgSession) findPagedVulnerableAncestries(vulnID int64, limit int, curr
 	vulnPage := database.PagedVulnerableAncestries{Limit: limit}
 	currentPage := Page{0}
 	if currentToken != pagination.FirstPageToken {
-		var err error
-		err = tx.key.UnmarshalToken(currentToken, &currentPage)
-		if err != nil {
+		if err := tx.key.UnmarshalToken(currentToken, &currentPage); err != nil {
 			return vulnPage, err
 		}
 	}

-	err := tx.QueryRow(searchVulnerabilityByID, vulnID).Scan(
+	if err := tx.QueryRow(searchVulnerabilityByID, vulnID).Scan(
 		&vulnPage.Name,
 		&vulnPage.Description,
 		&vulnPage.Link,
@ -183,8 +224,7 @@ func (tx *pgSession) findPagedVulnerableAncestries(vulnID int64, limit int, curr
 		&vulnPage.Metadata,
 		&vulnPage.Namespace.Name,
 		&vulnPage.Namespace.VersionFormat,
-	)
-	if err != nil {
+	); err != nil {
 		return vulnPage, handleError("searchVulnerabilityByID", err)
 	}

@ -247,7 +287,6 @@ func (tx *pgSession) FindVulnerabilityNotification(name string, limit int, oldPa
 	}

 	noti.Name = name
-
 	err := tx.QueryRow(searchNotification, name).Scan(&created, &notified,
 		&deleted, &oldVulnID, &newVulnID)
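The refactor above threads page cursors through the session's pagination key. A sketch of the marshal/unmarshal round trip these calls rely on, assuming the pagination package's Key type as it is used elsewhere in this diff:

// pageRoundTrip is illustrative only: it encodes a page cursor into an opaque
// token and decodes it back, as the session does for Current/Next tokens.
func pageRoundTrip(key pagination.Key) (int64, error) {
	token, err := key.MarshalToken(Page{4})
	if err != nil {
		return 0, err
	}
	var page Page
	if err := key.UnmarshalToken(token, &page); err != nil {
		return 0, err
	}
	return page.StartID, nil // expected to be 4
}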
@ -19,121 +19,144 @@ import (
 	"time"

 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"

 	"github.com/coreos/clair/database"
+	"github.com/coreos/clair/pkg/pagination"
 )

-func TestPagination(t *testing.T) {
-	datastore, tx := openSessionForTest(t, "Pagination", true)
+type findVulnerabilityNotificationIn struct {
+	notificationName        string
+	pageSize                int
+	oldAffectedAncestryPage pagination.Token
+	newAffectedAncestryPage pagination.Token
+}
+
+type findVulnerabilityNotificationOut struct {
+	notification *database.VulnerabilityNotificationWithVulnerable
+	ok           bool
+	err          string
+}
+
+var findVulnerabilityNotificationTests = []struct {
+	title string
+	in    findVulnerabilityNotificationIn
+	out   findVulnerabilityNotificationOut
+}{
+	{
+		title: "find notification with invalid page",
+		in: findVulnerabilityNotificationIn{
+			notificationName:        "test",
+			pageSize:                1,
+			oldAffectedAncestryPage: pagination.FirstPageToken,
+			newAffectedAncestryPage: pagination.Token("random non sense"),
+		},
+		out: findVulnerabilityNotificationOut{
+			err: pagination.ErrInvalidToken.Error(),
+		},
+	},
+	{
+		title: "find non-existing notification",
+		in: findVulnerabilityNotificationIn{
+			notificationName:        "non-existing",
+			pageSize:                1,
+			oldAffectedAncestryPage: pagination.FirstPageToken,
+			newAffectedAncestryPage: pagination.FirstPageToken,
+		},
+		out: findVulnerabilityNotificationOut{
+			ok: false,
+		},
+	},
+	{
+		title: "find existing notification first page",
+		in: findVulnerabilityNotificationIn{
+			notificationName:        "test",
+			pageSize:                1,
+			oldAffectedAncestryPage: pagination.FirstPageToken,
+			newAffectedAncestryPage: pagination.FirstPageToken,
+		},
+		out: findVulnerabilityNotificationOut{
+			&database.VulnerabilityNotificationWithVulnerable{
+				NotificationHook: realNotification[1].NotificationHook,
+				Old: &database.PagedVulnerableAncestries{
+					Vulnerability: realVulnerability[2],
+					Limit:         1,
+					Affected:      make(map[int]string),
+					Current:       mustMarshalToken(testPaginationKey, Page{0}),
+					Next:          mustMarshalToken(testPaginationKey, Page{0}),
+					End:           true,
+				},
+				New: &database.PagedVulnerableAncestries{
+					Vulnerability: realVulnerability[1],
+					Limit:         1,
+					Affected:      map[int]string{3: "ancestry-3"},
+					Current:       mustMarshalToken(testPaginationKey, Page{0}),
+					Next:          mustMarshalToken(testPaginationKey, Page{4}),
+					End:           false,
+				},
+			},
+			true,
+			"",
+		},
+	},
+	{
+		title: "find existing notification of second page of new affected ancestry",
+		in: findVulnerabilityNotificationIn{
+			notificationName:        "test",
+			pageSize:                1,
+			oldAffectedAncestryPage: pagination.FirstPageToken,
+			newAffectedAncestryPage: mustMarshalToken(testPaginationKey, Page{4}),
+		},
+		out: findVulnerabilityNotificationOut{
+			&database.VulnerabilityNotificationWithVulnerable{
+				NotificationHook: realNotification[1].NotificationHook,
+				Old: &database.PagedVulnerableAncestries{
+					Vulnerability: realVulnerability[2],
+					Limit:         1,
+					Affected:      make(map[int]string),
+					Current:       mustMarshalToken(testPaginationKey, Page{0}),
+					Next:          mustMarshalToken(testPaginationKey, Page{0}),
+					End:           true,
+				},
+				New: &database.PagedVulnerableAncestries{
+					Vulnerability: realVulnerability[1],
+					Limit:         1,
+					Affected:      map[int]string{4: "ancestry-4"},
+					Current:       mustMarshalToken(testPaginationKey, Page{4}),
+					Next:          mustMarshalToken(testPaginationKey, Page{0}),
+					End:           true,
+				},
+			},
+			true,
+			"",
+		},
+	},
+}
+
+func TestFindVulnerabilityNotification(t *testing.T) {
+	datastore, tx := openSessionForTest(t, "pagination", true)
 	defer closeTest(t, datastore, tx)

-	ns := database.Namespace{
-		Name:          "debian:7",
-		VersionFormat: "dpkg",
-	}
-
-	vNew := database.Vulnerability{
-		Namespace:   ns,
-		Name:        "CVE-OPENSSL-1-DEB7",
-		Description: "A vulnerability affecting OpenSSL < 2.0 on Debian 7.0",
-		Link:        "http://google.com/#q=CVE-OPENSSL-1-DEB7",
-		Severity:    database.HighSeverity,
-	}
-
-	vOld := database.Vulnerability{
-		Namespace:   ns,
-		Name:        "CVE-NOPE",
-		Description: "A vulnerability affecting nothing",
-		Severity:    database.UnknownSeverity,
-	}
-
-	noti, ok, err := tx.FindVulnerabilityNotification("test", 1, "", "")
-	oldPage := database.PagedVulnerableAncestries{
-		Vulnerability: vOld,
-		Limit:         1,
-		Affected:      make(map[int]string),
-		End:           true,
-	}
-
-	newPage1 := database.PagedVulnerableAncestries{
-		Vulnerability: vNew,
-		Limit:         1,
-		Affected:      map[int]string{3: "ancestry-3"},
-		End:           false,
-	}
-
-	newPage2 := database.PagedVulnerableAncestries{
-		Vulnerability: vNew,
-		Limit:         1,
-		Affected:      map[int]string{4: "ancestry-4"},
-		Next:          "",
-		End:           true,
-	}
-
-	if assert.Nil(t, err) && assert.True(t, ok) {
-		assert.Equal(t, "test", noti.Name)
-		if assert.NotNil(t, noti.Old) && assert.NotNil(t, noti.New) {
-			var oldPage Page
-			err := tx.key.UnmarshalToken(noti.Old.Current, &oldPage)
-			if !assert.Nil(t, err) {
-				assert.FailNow(t, "")
-			}
-
-			assert.Equal(t, int64(0), oldPage.StartID)
-			var newPage Page
-			err = tx.key.UnmarshalToken(noti.New.Current, &newPage)
-			if !assert.Nil(t, err) {
-				assert.FailNow(t, "")
-			}
-			var newPageNext Page
-			err = tx.key.UnmarshalToken(noti.New.Next, &newPageNext)
-			if !assert.Nil(t, err) {
-				assert.FailNow(t, "")
-			}
-			assert.Equal(t, int64(0), newPage.StartID)
-			assert.Equal(t, int64(4), newPageNext.StartID)
-
-			noti.Old.Current = ""
-			noti.New.Current = ""
-			noti.New.Next = ""
-			assert.Equal(t, oldPage, *noti.Old)
-			assert.Equal(t, newPage1, *noti.New)
-		}
-	}
-
-	pageNum1, err := tx.key.MarshalToken(Page{0})
-	if !assert.Nil(t, err) {
-		assert.FailNow(t, "")
-	}
-
-	pageNum2, err := tx.key.MarshalToken(Page{4})
-	if !assert.Nil(t, err) {
-		assert.FailNow(t, "")
-	}
-
-	noti, ok, err = tx.FindVulnerabilityNotification("test", 1, pageNum1, pageNum2)
-	if assert.Nil(t, err) && assert.True(t, ok) {
-		assert.Equal(t, "test", noti.Name)
-		if assert.NotNil(t, noti.Old) && assert.NotNil(t, noti.New) {
-			var oldCurrentPage Page
-			err = tx.key.UnmarshalToken(noti.Old.Current, &oldCurrentPage)
-			if !assert.Nil(t, err) {
-				assert.FailNow(t, "")
-			}
-
-			var newCurrentPage Page
-			err = tx.key.UnmarshalToken(noti.New.Current, &newCurrentPage)
-			if !assert.Nil(t, err) {
-				assert.FailNow(t, "")
-			}
-
-			assert.Equal(t, int64(0), oldCurrentPage.StartID)
-			assert.Equal(t, int64(4), newCurrentPage.StartID)
-			noti.Old.Current = ""
-			noti.New.Current = ""
-			assert.Equal(t, oldPage, *noti.Old)
-			assert.Equal(t, newPage2, *noti.New)
-		}
-	}
-}
+	for _, test := range findVulnerabilityNotificationTests {
+		t.Run(test.title, func(t *testing.T) {
+			notification, ok, err := tx.FindVulnerabilityNotification(test.in.notificationName, test.in.pageSize, test.in.oldAffectedAncestryPage, test.in.newAffectedAncestryPage)
+			if test.out.err != "" {
+				require.EqualError(t, err, test.out.err)
+				return
+			}
+
+			require.Nil(t, err)
+			if !test.out.ok {
+				require.Equal(t, test.out.ok, ok)
+				return
+			}
+
+			require.True(t, ok)
+			assertVulnerabilityNotificationWithVulnerableEqual(t, testPaginationKey, test.out.notification, &notification)
+		})
+	}
+}
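The fixtures above reference a mustMarshalToken helper that is not shown in this hunk. A plausible definition consistent with its call sites (a hypothetical reconstruction, not taken from the diff):

// mustMarshalToken is hypothetical: it wraps Key.MarshalToken and panics on
// error, which is acceptable in test fixture setup.
func mustMarshalToken(key pagination.Key, v interface{}) pagination.Token {
	token, err := key.MarshalToken(v)
	if err != nil {
		panic(err)
	}
	return token
}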
@ -270,6 +270,7 @@ func migrateDatabase(db *sql.DB) error {
 // createDatabase creates a new database.
 // The source parameter should not contain a dbname.
 func createDatabase(source, dbName string) error {
+	log.WithFields(log.Fields{"source": source, "dbName": dbName}).Debug("creating database...")
 	// Open database.
 	db, err := sql.Open("postgres", source)
 	if err != nil {
@ -325,7 +326,7 @@ func handleError(desc string, err error) error {
 		return commonerr.ErrNotFound
 	}

-	log.WithError(err).WithField("Description", desc).Error("Handled Database Error")
+	log.WithError(err).WithField("Description", desc).Error("database: handled database error")
 	promErrorsTotal.WithLabelValues(desc).Inc()

 	if _, o := err.(*pq.Error); o || err == sql.ErrTxDone || strings.HasPrefix(err.Error(), "sql:") {
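handleError is this package's single funnel for driver errors: it maps sql.ErrNoRows to commonerr.ErrNotFound, logs the failure, and bumps the Prometheus counter. A representative call site, sketched after the pattern used throughout this diff (the function shown is illustrative, not part of the change):

// findAncestryID is illustrative only: callers pass a short query description
// so the error metric and log line identify which statement failed.
func findAncestryID(tx *pgSession, name string) (int64, error) {
	var id int64
	if err := tx.QueryRow(searchAncestry, name).Scan(&id); err != nil {
		return 0, handleError("searchAncestry", err)
	}
	return id, nil
}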
@ -37,6 +37,8 @@ var (
 	withFixtureName, withoutFixtureName string
 )

+var testPaginationKey = pagination.Must(pagination.NewKey())
+
 func genTemplateDatabase(name string, loadFixture bool) (sourceURL string, dbName string) {
 	config := generateTestConfig(name, loadFixture, false)
 	source := config.Options["source"].(string)
@ -215,13 +217,15 @@ func generateTestConfig(testName string, loadFixture bool, manageLife bool) data
 		source = fmt.Sprintf(sourceEnv, dbName)
 	}

+	log.Infof("pagination key for current test: %s", testPaginationKey.String())
+
 	return database.RegistrableComponentConfig{
 		Options: map[string]interface{}{
 			"source":                  source,
 			"cachesize":               0,
 			"managedatabaselifecycle": manageLife,
 			"fixturepath":             fixturePath,
-			"paginationkey": pagination.Must(pagination.NewKey()).String(),
+			"paginationkey": testPaginationKey.String(),
 		},
 	}
 }
@ -247,6 +251,8 @@ func openSessionForTest(t *testing.T, name string, loadFixture bool) (*pgSQL, *p
 		t.Error(err)
 		t.FailNow()
 	}

+	log.Infof("transaction pagination key: '%s'", tx.(*pgSession).key.String())
 	return store, tx.(*pgSession)
 }
@ -21,235 +21,6 @@ import (
 	"github.com/lib/pq"
 )

-const (
-	lockVulnerabilityAffects = `LOCK vulnerability_affected_namespaced_feature IN SHARE ROW EXCLUSIVE MODE`
-
-	// keyvalue.go
-	searchKeyValue = `SELECT value FROM KeyValue WHERE key = $1`
-	upsertKeyValue = `
-		INSERT INTO KeyValue(key, value)
-		VALUES ($1, $2)
-		ON CONFLICT ON CONSTRAINT keyvalue_key_key
-		DO UPDATE SET key=$1, value=$2`
-
-	// namespace.go
-
-	searchNamespaceID = `SELECT id FROM Namespace WHERE name = $1 AND version_format = $2`
-
-	// feature.go
-	soiNamespacedFeature = `
-		WITH new_feature_ns AS (
-			INSERT INTO namespaced_feature(feature_id, namespace_id)
-			SELECT CAST ($1 AS INTEGER), CAST ($2 AS INTEGER)
-			WHERE NOT EXISTS ( SELECT id FROM namespaced_feature WHERE namespaced_feature.feature_id = $1 AND namespaced_feature.namespace_id = $2)
-			RETURNING id
-		)
-		SELECT id FROM namespaced_feature WHERE namespaced_feature.feature_id = $1 AND namespaced_feature.namespace_id = $2
-		UNION
-		SELECT id FROM new_feature_ns`
-
-	searchPotentialAffectingVulneraibilities = `
-		SELECT nf.id, v.id, vaf.affected_version, vaf.id
-		FROM vulnerability_affected_feature AS vaf, vulnerability AS v,
-			namespaced_feature AS nf, feature AS f
-		WHERE nf.id = ANY($1)
-			AND nf.feature_id = f.id
-			AND nf.namespace_id = v.namespace_id
-			AND vaf.feature_name = f.name
-			AND vaf.vulnerability_id = v.id
-			AND v.deleted_at IS NULL`
-
-	searchNamespacedFeaturesVulnerabilities = `
-		SELECT vanf.namespaced_feature_id, v.name, v.description, v.link,
-			v.severity, v.metadata, vaf.fixedin, n.name, n.version_format
-		FROM vulnerability_affected_namespaced_feature AS vanf,
-			Vulnerability AS v,
-			vulnerability_affected_feature AS vaf,
-			namespace AS n
-		WHERE vanf.namespaced_feature_id = ANY($1)
-			AND vaf.id = vanf.added_by
-			AND v.id = vanf.vulnerability_id
-			AND n.id = v.namespace_id
-			AND v.deleted_at IS NULL`
-
-	// layer.go
-	soiLayer = `
-		WITH new_layer AS (
-			INSERT INTO layer (hash)
-			SELECT CAST ($1 AS VARCHAR)
-			WHERE NOT EXISTS (SELECT id FROM layer WHERE hash = $1)
-			RETURNING id
-		)
-		SELECT id FROM new_Layer
-		UNION
-		SELECT id FROM layer WHERE hash = $1`
-
-	searchLayerFeatures = `
-		SELECT feature.Name, feature.Version, feature.version_format
-		FROM feature, layer_feature
-		WHERE layer_feature.layer_id = $1
-			AND layer_feature.feature_id = feature.id`
-
-	searchLayerNamespaces = `
-		SELECT namespace.Name, namespace.version_format
-		FROM namespace, layer_namespace
-		WHERE layer_namespace.layer_id = $1
-			AND layer_namespace.namespace_id = namespace.id`
-
-	searchLayer = `SELECT id FROM layer WHERE hash = $1`
-	searchLayerDetectors = `SELECT detector FROM layer_detector WHERE layer_id = $1`
-	searchLayerListers = `SELECT lister FROM layer_lister WHERE layer_id = $1`
-
-	// lock.go
-	soiLock = `INSERT INTO lock(name, owner, until) VALUES ($1, $2, $3)`
-
-	searchLock        = `SELECT owner, until FROM Lock WHERE name = $1`
-	updateLock        = `UPDATE Lock SET until = $3 WHERE name = $1 AND owner = $2`
-	removeLock        = `DELETE FROM Lock WHERE name = $1 AND owner = $2`
-	removeLockExpired = `DELETE FROM LOCK WHERE until < CURRENT_TIMESTAMP`
-
-	// vulnerability.go
-	searchVulnerability = `
-		SELECT v.id, v.description, v.link, v.severity, v.metadata, n.version_format
-		FROM vulnerability AS v, namespace AS n
-		WHERE v.namespace_id = n.id
-			AND v.name = $1
-			AND n.name = $2
-			AND v.deleted_at IS NULL
-		`
-
-	insertVulnerabilityAffected = `
-		INSERT INTO vulnerability_affected_feature(vulnerability_id, feature_name, affected_version, fixedin)
-		VALUES ($1, $2, $3, $4)
-		RETURNING ID
-	`
-
-	searchVulnerabilityAffected = `
-		SELECT vulnerability_id, feature_name, affected_version, fixedin
-		FROM vulnerability_affected_feature
-		WHERE vulnerability_id = ANY($1)
-	`
-
-	searchVulnerabilityByID = `
-		SELECT v.name, v.description, v.link, v.severity, v.metadata, n.name, n.version_format
-		FROM vulnerability AS v, namespace AS n
-		WHERE v.namespace_id = n.id
-			AND v.id = $1`
-
-	searchVulnerabilityPotentialAffected = `
-		WITH req AS (
-			SELECT vaf.id AS vaf_id, n.id AS n_id, vaf.feature_name AS name, v.id AS vulnerability_id
-			FROM vulnerability_affected_feature AS vaf,
-				vulnerability AS v,
-				namespace AS n
-			WHERE vaf.vulnerability_id = ANY($1)
-				AND v.id = vaf.vulnerability_id
-				AND n.id = v.namespace_id
-		)
-		SELECT req.vulnerability_id, nf.id, f.version, req.vaf_id AS added_by
-		FROM feature AS f, namespaced_feature AS nf, req
-		WHERE f.name = req.name
-			AND nf.namespace_id = req.n_id
-			AND nf.feature_id = f.id`
-
-	insertVulnerabilityAffectedNamespacedFeature = `
-		INSERT INTO vulnerability_affected_namespaced_feature(vulnerability_id, namespaced_feature_id, added_by)
-		VALUES ($1, $2, $3)`
-
-	insertVulnerability = `
-		WITH ns AS (
-			SELECT id FROM namespace WHERE name = $6 AND version_format = $7
-		)
-		INSERT INTO Vulnerability(namespace_id, name, description, link, severity, metadata, created_at)
-		VALUES((SELECT id FROM ns), $1, $2, $3, $4, $5, CURRENT_TIMESTAMP)
-		RETURNING id`
-
-	removeVulnerability = `
-		UPDATE Vulnerability
-		SET deleted_at = CURRENT_TIMESTAMP
-		WHERE namespace_id = (SELECT id FROM Namespace WHERE name = $1)
-			AND name = $2
-			AND deleted_at IS NULL
-		RETURNING id`
-
-	// notification.go
-	insertNotification = `
-		INSERT INTO Vulnerability_Notification(name, created_at, old_vulnerability_id, new_vulnerability_id)
-		VALUES ($1, $2, $3, $4)`
-
-	updatedNotificationAsRead = `
-		UPDATE Vulnerability_Notification
-		SET notified_at = CURRENT_TIMESTAMP
-		WHERE name = $1`
-
-	removeNotification = `
-		UPDATE Vulnerability_Notification
-		SET deleted_at = CURRENT_TIMESTAMP
-		WHERE name = $1 AND deleted_at IS NULL`
-
-	searchNotificationAvailable = `
-		SELECT name, created_at, notified_at, deleted_at
-		FROM Vulnerability_Notification
-		WHERE (notified_at IS NULL OR notified_at < $1)
-			AND deleted_at IS NULL
-			AND name NOT IN (SELECT name FROM Lock)
-		ORDER BY Random()
-		LIMIT 1`
-
-	searchNotification = `
-		SELECT created_at, notified_at, deleted_at, old_vulnerability_id, new_vulnerability_id
-		FROM Vulnerability_Notification
-		WHERE name = $1`
-
-	searchNotificationVulnerableAncestry = `
-		SELECT DISTINCT ON (a.id)
-			a.id, a.name
-		FROM vulnerability_affected_namespaced_feature AS vanf,
-			ancestry_layer AS al, ancestry_feature AS af
-		WHERE vanf.vulnerability_id = $1
-			AND al.ancestry_id >= $2
-			AND al.id = af.ancestry_layer_id
-			AND af.namespaced_feature_id = vanf.namespaced_feature_id
-		ORDER BY a.id ASC
-		LIMIT $3;`
-
-	// ancestry.go
-	persistAncestryLister = `
-		INSERT INTO ancestry_lister (ancestry_id, lister)
-		SELECT CAST ($1 AS INTEGER), CAST ($2 AS TEXT)
-		WHERE NOT EXISTS (SELECT id FROM ancestry_lister WHERE ancestry_id = $1 AND lister = $2) ON CONFLICT DO NOTHING`
-
-	persistAncestryDetector = `
-		INSERT INTO ancestry_detector (ancestry_id, detector)
-		SELECT CAST ($1 AS INTEGER), CAST ($2 AS TEXT)
-		WHERE NOT EXISTS (SELECT id FROM ancestry_detector WHERE ancestry_id = $1 AND detector = $2) ON CONFLICT DO NOTHING`
-
-	insertAncestry = `INSERT INTO ancestry (name) VALUES ($1) RETURNING id`
-
-	searchAncestryLayer = `
-		SELECT layer.hash, layer.id, ancestry_layer.ancestry_index
-		FROM layer, ancestry_layer
-		WHERE ancestry_layer.ancestry_id = $1
-			AND ancestry_layer.layer_id = layer.id
-		ORDER BY ancestry_layer.ancestry_index ASC`
-
-	searchAncestryFeatures = `
-		SELECT namespace.name, namespace.version_format, feature.name, feature.version, feature.version_format, ancestry_layer.ancestry_index
-		FROM namespace, feature, namespaced_feature, ancestry_layer, ancestry_feature
-		WHERE ancestry_layer.ancestry_id = $1
-			AND ancestry_feature.ancestry_layer_id = ancestry_layer.id
-			AND ancestry_feature.namespaced_feature_id = namespaced_feature.id
-			AND namespaced_feature.feature_id = feature.id
-			AND namespaced_feature.namespace_id = namespace.id`
-
-	searchAncestry = `SELECT id FROM ancestry WHERE name = $1`
-	searchAncestryDetectors = `SELECT detector FROM ancestry_detector WHERE ancestry_id = $1`
-	searchAncestryListers = `SELECT lister FROM ancestry_lister WHERE ancestry_id = $1`
-	removeAncestry = `DELETE FROM ancestry WHERE name = $1`
-	insertAncestryLayer = `INSERT INTO ancestry_layer(ancestry_id, ancestry_index, layer_id) VALUES($1,$2, (SELECT layer.id FROM layer WHERE hash = $3 LIMIT 1)) RETURNING id`
-	insertAncestryLayerFeature = `INSERT INTO ancestry_feature(ancestry_layer_id, namespaced_feature_id) VALUES ($1, $2)`
-)
-
 // NOTE(Sida): Every search query can only have count less than postgres set
 // stack depth. IN will be resolved to nested OR_s and the parser might exceed
 // stack depth. TODO(Sida): Generate different queries for different count: if
@ -350,7 +121,8 @@ func queryPersistLayerFeature(count int) string {
 		"layer_feature",
 		"layer_feature_layer_id_feature_id_key",
 		"layer_id",
-		"feature_id")
+		"feature_id",
+		"detector_id")
 }

 func queryPersistNamespace(count int) string {
@ -361,28 +133,13 @@ func queryPersistNamespace(count int) string {
 		"version_format")
 }

-func queryPersistLayerListers(count int) string {
-	return queryPersist(count,
-		"layer_lister",
-		"layer_lister_layer_id_lister_key",
-		"layer_id",
-		"lister")
-}
-
-func queryPersistLayerDetectors(count int) string {
-	return queryPersist(count,
-		"layer_detector",
-		"layer_detector_layer_id_detector_key",
-		"layer_id",
-		"detector")
-}
-
 func queryPersistLayerNamespace(count int) string {
 	return queryPersist(count,
 		"layer_namespace",
 		"layer_namespace_layer_id_namespace_id_key",
 		"layer_id",
-		"namespace_id")
+		"namespace_id",
+		"detector_id")
 }

 // size of key and array should be both greater than 0
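queryPersist (defined elsewhere in this file) expands a table name, a conflict constraint, and a column list into a batched upsert; the helpers above simply add detector_id to their column lists. A simplified, illustrative generator of the same shape, not the actual implementation:

// illustrativeQueryPersist builds a statement of the form:
//   INSERT INTO <table>(<cols>) VALUES ($1,...),(...)
//   ON CONFLICT ON CONSTRAINT <key> DO NOTHING
func illustrativeQueryPersist(count int, table, constraint string, columns ...string) string {
	rows := make([]string, 0, count)
	for i := 0; i < count; i++ {
		ps := make([]string, 0, len(columns))
		for j := range columns {
			// Parameters are numbered row-major: $1..$n for row 0, and so on.
			ps = append(ps, fmt.Sprintf("$%d", i*len(columns)+j+1))
		}
		rows = append(rows, "("+strings.Join(ps, ",")+")")
	}
	return fmt.Sprintf("INSERT INTO %s(%s) VALUES %s ON CONFLICT ON CONSTRAINT %s DO NOTHING",
		table, strings.Join(columns, ","), strings.Join(rows, ","), constraint)
}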
database/pgsql/testdata/data.sql (vendored, 157 lines changed)
@ -1,57 +1,69 @@
|
|||||||
|
-- initialize entities
|
||||||
INSERT INTO namespace (id, name, version_format) VALUES
|
INSERT INTO namespace (id, name, version_format) VALUES
|
||||||
(1, 'debian:7', 'dpkg'),
|
(1, 'debian:7', 'dpkg'),
|
||||||
(2, 'debian:8', 'dpkg'),
|
(2, 'debian:8', 'dpkg'),
|
||||||
(3, 'fake:1.0', 'rpm');
|
(3, 'fake:1.0', 'rpm');
|
||||||
|
|
||||||
INSERT INTO feature (id, name, version, version_format) VALUES
|
INSERT INTO feature (id, name, version, version_format) VALUES
|
||||||
(1, 'wechat', '0.5', 'dpkg'),
|
(1, 'ourchat', '0.5', 'dpkg'),
|
||||||
(2, 'openssl', '1.0', 'dpkg'),
|
(2, 'openssl', '1.0', 'dpkg'),
|
||||||
(3, 'openssl', '2.0', 'dpkg'),
|
(3, 'openssl', '2.0', 'dpkg'),
|
||||||
(4, 'fake', '2.0', 'rpm');
|
(4, 'fake', '2.0', 'rpm');
|
||||||
|
|
||||||
|
INSERT INTO namespaced_feature(id, feature_id, namespace_id) VALUES
|
||||||
|
(1, 1, 1), -- ourchat 0.5, debian:7
|
||||||
|
(2, 2, 1), -- openssl 1.0, debian:7
|
||||||
|
(3, 2, 2), -- openssl 1.0, debian:8
|
||||||
|
(4, 3, 1); -- openssl 2.0, debian:7
|
||||||
|
|
||||||
|
INSERT INTO detector(id, name, version, dtype) VALUES
|
||||||
|
(1, 'os-release', '1.0', 'namespace'),
|
||||||
|
(2, 'dpkg', '1.0', 'feature'),
|
||||||
|
(3, 'rpm', '1.0', 'feature'),
|
||||||
|
(4, 'apt-sources', '1.0', 'namespace');
|
||||||
|
|
||||||
|
-- initialize layers
|
||||||
INSERT INTO layer (id, hash) VALUES
|
INSERT INTO layer (id, hash) VALUES
|
||||||
(1, 'layer-0'), -- blank
|
(1, 'layer-0'), -- blank
|
||||||
(2, 'layer-1'), -- debian:7; wechat 0.5, openssl 1.0
|
(2, 'layer-1'), -- debian:7; ourchat 0.5, openssl 1.0
|
||||||
(3, 'layer-2'), -- debian:7; wechat 0.5, openssl 2.0
|
(3, 'layer-2'), -- debian:7; ourchat 0.5, openssl 2.0
|
||||||
(4, 'layer-3a'),-- debian:7;
|
(4, 'layer-3a'),-- debian:7;
|
||||||
(5, 'layer-3b'),-- debian:8; wechat 0.5, openssl 1.0
|
(5, 'layer-3b'),-- debian:8; ourchat 0.5, openssl 1.0
|
||||||
(6, 'layer-4'); -- debian:7, fake:1.0; openssl 2.0 (debian), fake 2.0 (fake)
|
(6, 'layer-4'); -- debian:7, fake:1.0; openssl 2.0 (debian), fake 2.0 (fake)
|
||||||
|
|
||||||
INSERT INTO layer_namespace(id, layer_id, namespace_id) VALUES
|
INSERT INTO layer_namespace(id, layer_id, namespace_id, detector_id) VALUES
|
||||||
(1, 2, 1),
|
(1, 2, 1, 1), -- layer-1: debian:7
|
||||||
(2, 3, 1),
|
(2, 3, 1, 1), -- layer-2: debian:7
|
||||||
(3, 4, 1),
|
(3, 4, 1, 1), -- layer-3a: debian:7
|
||||||
(4, 5, 2),
|
(4, 5, 2, 1), -- layer-3b: debian:8
|
||||||
(5, 6, 1),
|
(5, 6, 1, 1), -- layer-4: debian:7
|
||||||
(6, 6, 3);
|
(6, 6, 3, 4); -- layer-4: fake:1.0
|
||||||
|
|
||||||
INSERT INTO layer_feature(id, layer_id, feature_id) VALUES
|
INSERT INTO layer_feature(id, layer_id, feature_id, detector_id) VALUES
|
||||||
(1, 2, 1),
|
(1, 2, 1, 2), -- layer-1: ourchat 0.5
|
||||||
(2, 2, 2),
|
(2, 2, 2, 2), -- layer-1: openssl 1.0
|
||||||
(3, 3, 1),
|
(3, 3, 1, 2), -- layer-2: ourchat 0.5
|
||||||
(4, 3, 3),
|
(4, 3, 3, 2), -- layer-2: openssl 2.0
|
||||||
(5, 5, 1),
|
(5, 5, 1, 2), -- layer-3b: ourchat 0.5
|
||||||
(6, 5, 2),
|
(6, 5, 2, 2), -- layer-3b: openssl 1.0
|
||||||
(7, 6, 4),
|
(7, 6, 4, 3), -- layer-4: fake 2.0
|
||||||
(8, 6, 3);
|
(8, 6, 3, 2); -- layer-4: openssl 2.0
|
||||||
|
|
||||||
INSERT INTO layer_lister(id, layer_id, lister) VALUES
|
INSERT INTO layer_detector(layer_id, detector_id) VALUES
|
||||||
(1, 1, 'dpkg'),
|
(1, 1),
|
||||||
(2, 2, 'dpkg'),
|
(2, 1),
|
||||||
(3, 3, 'dpkg'),
|
(3, 1),
|
||||||
(4, 4, 'dpkg'),
|
(4, 1),
|
||||||
(5, 5, 'dpkg'),
|
(5, 1),
|
||||||
(6, 6, 'dpkg'),
|
(6, 1),
|
||||||
(7, 6, 'rpm');
|
(6, 4),
|
||||||
|
(1, 2),
|
||||||
INSERT INTO layer_detector(id, layer_id, detector) VALUES
|
(2, 2),
|
||||||
(1, 1, 'os-release'),
|
(3, 2),
|
||||||
(2, 2, 'os-release'),
|
(4, 2),
|
||||||
(3, 3, 'os-release'),
|
(5, 2),
|
||||||
(4, 4, 'os-release'),
|
(6, 2),
|
||||||
(5, 5, 'os-release'),
|
(6, 3);
|
||||||
(6, 6, 'os-release'),
|
|
||||||
(7, 6, 'apt-sources');
|
|
||||||
|
|
||||||
INSERT INTO ancestry (id, name) VALUES
|
INSERT INTO ancestry (id, name) VALUES
|
||||||
(1, 'ancestry-1'), -- layer-0, layer-1, layer-2, layer-3a
|
(1, 'ancestry-1'), -- layer-0, layer-1, layer-2, layer-3a
|
||||||
@ -59,32 +71,39 @@ INSERT INTO ancestry (id, name) VALUES
|
|||||||
(3, 'ancestry-3'), -- layer-0
(4, 'ancestry-4'); -- layer-0

INSERT INTO ancestry_detector (ancestry_id, detector_id) VALUES
(1, 2),
(2, 2),
(1, 1),
(2, 1);

INSERT INTO ancestry_layer (id, ancestry_id, layer_id, ancestry_index) VALUES
-- ancestry-1: layer-0, layer-1, layer-2, layer-3a
(1, 1, 1, 0),(2, 1, 2, 1),(3, 1, 3, 2),(4, 1, 4, 3),
-- ancestry-2: layer-0, layer-1, layer-2, layer-3b
(5, 2, 1, 0),(6, 2, 2, 1),(7, 2, 3, 2),(8, 2, 5, 3),
-- ancestry-3: layer-1
(9, 3, 2, 0),
-- ancestry-4: layer-1
(10, 4, 2, 0);

-- assume that ancestry-3 and ancestry-4 are vulnerable.
INSERT INTO ancestry_feature (id, ancestry_layer_id, namespaced_feature_id, feature_detector_id, namespace_detector_id) VALUES
-- ancestry-1:
-- layer-2: ourchat 0.5 <- detected by dpkg 1.0 (2); debian:7 <- detected by os-release 1.0 (1)
-- layer-2: openssl 2.0, debian:7
(1, 3, 1, 2, 1), (2, 3, 4, 2, 1),
-- ancestry-2:
-- 1(ourchat 0.5; debian:7 layer-2)
-- 3(openssl 1.0; debian:8 layer-3b)
(3, 7, 1, 2, 1), (4, 8, 3, 2, 1),
-- ancestry-3:
-- 2(openssl 1.0, debian:7 layer-1)
-- 1(ourchat 0.5, debian:7 layer-1)
(5, 9, 2, 2, 1), (6, 9, 1, 2, 1), -- vulnerable
-- ancestry-4:
-- same as ancestry-3
(7, 10, 2, 2, 1), (8, 10, 1, 2, 1); -- vulnerable

INSERT INTO vulnerability (id, namespace_id, name, description, link, severity) VALUES
(1, 1, 'CVE-OPENSSL-1-DEB7', 'A vulnerability affecting OpenSSL < 2.0 on Debian 7.0', 'http://google.com/#q=CVE-OPENSSL-1-DEB7', 'High'),

@ -103,19 +122,23 @@ INSERT INTO vulnerability_affected_namespaced_feature(id, vulnerability_id, name

INSERT INTO vulnerability_notification(id, name, created_at, notified_at, deleted_at, old_vulnerability_id, new_vulnerability_id) VALUES
(1, 'test', NULL, NULL, NULL, 2, 1); -- 'CVE-NOPE' -> 'CVE-OPENSSL-1-DEB7'

SELECT pg_catalog.setval(pg_get_serial_sequence('feature', 'id'), (SELECT MAX(id) FROM feature)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('namespace', 'id'), (SELECT MAX(id) FROM namespace)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('namespaced_feature', 'id'), (SELECT MAX(id) FROM namespaced_feature)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('detector', 'id'), (SELECT MAX(id) FROM detector)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('ancestry', 'id'), (SELECT MAX(id) FROM ancestry)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('ancestry_layer', 'id'), (SELECT MAX(id) FROM ancestry_layer)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('ancestry_feature', 'id'), (SELECT MAX(id) FROM ancestry_feature)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('ancestry_detector', 'id'), (SELECT MAX(id) FROM ancestry_detector)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('layer', 'id'), (SELECT MAX(id) FROM layer)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('layer_feature', 'id'), (SELECT MAX(id) FROM layer_feature)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('layer_namespace', 'id'), (SELECT MAX(id) FROM layer_namespace)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('layer_detector', 'id'), (SELECT MAX(id) FROM layer_detector)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability', 'id'), (SELECT MAX(id) FROM vulnerability)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability_affected_feature', 'id'), (SELECT MAX(id) FROM vulnerability_affected_feature)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability_affected_namespaced_feature', 'id'), (SELECT MAX(id) FROM vulnerability_affected_namespaced_feature)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability_notification', 'id'), (SELECT MAX(id) FROM vulnerability_notification)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('detector', 'id'), (SELECT MAX(id) FROM detector)+1);
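The new detector_id columns tie every detected feature and namespace to the extension (and version) that reported it. As a rough sketch of what that buys, not code from this PR, a Go program could join the tables above to recover that provenance; the connection string and error handling here are assumptions:

package main

import (
    "database/sql"
    "fmt"

    _ "github.com/lib/pq"
)

func main() {
    // Assumed DSN; any reachable Postgres with the test data loaded works.
    db, err := sql.Open("postgres", "postgres://localhost/clair?sslmode=disable")
    if err != nil {
        panic(err)
    }
    defer db.Close()

    // Which detector reported each feature of a layer, per the schema above.
    rows, err := db.Query(`
        SELECT l.hash, f.name, f.version, d.name, d.version
        FROM layer_feature AS lf
            JOIN layer AS l ON l.id = lf.layer_id
            JOIN feature AS f ON f.id = lf.feature_id
            JOIN detector AS d ON d.id = lf.detector_id
        WHERE l.hash = $1`, "layer-1")
    if err != nil {
        panic(err)
    }
    defer rows.Close()

    for rows.Next() {
        var layer, feature, fVersion, detector, dVersion string
        if err := rows.Scan(&layer, &feature, &fVersion, &detector, &dVersion); err != nil {
            panic(err)
        }
        // e.g. "layer-1: ourchat 0.5 (found by dpkg 1.0)"
        fmt.Printf("%s: %s %s (found by %s %s)\n", layer, feature, fVersion, detector, dVersion)
    }
}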
262
database/pgsql/testutil.go
Normal file
@ -0,0 +1,262 @@
// Copyright 2018 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pgsql

import (
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/coreos/clair/database"
    "github.com/coreos/clair/pkg/pagination"
)

// int keys must be consistent with the database IDs.
var (
    realFeatures = map[int]database.Feature{
        1: {"ourchat", "0.5", "dpkg"},
        2: {"openssl", "1.0", "dpkg"},
        3: {"openssl", "2.0", "dpkg"},
        4: {"fake", "2.0", "rpm"},
    }

    realNamespaces = map[int]database.Namespace{
        1: {"debian:7", "dpkg"},
        2: {"debian:8", "dpkg"},
        3: {"fake:1.0", "rpm"},
    }

    realNamespacedFeatures = map[int]database.NamespacedFeature{
        1: {realFeatures[1], realNamespaces[1]},
        2: {realFeatures[2], realNamespaces[1]},
        3: {realFeatures[2], realNamespaces[2]},
        4: {realFeatures[3], realNamespaces[1]},
    }

    realDetectors = map[int]database.Detector{
        1: database.NewNamespaceDetector("os-release", "1.0"),
        2: database.NewFeatureDetector("dpkg", "1.0"),
        3: database.NewFeatureDetector("rpm", "1.0"),
        4: database.NewNamespaceDetector("apt-sources", "1.0"),
    }

    realLayers = map[int]database.Layer{
        2: {
            Hash: "layer-1",
            By:   []database.Detector{realDetectors[1], realDetectors[2]},
            Features: []database.LayerFeature{
                {realFeatures[1], realDetectors[2]},
                {realFeatures[2], realDetectors[2]},
            },
            Namespaces: []database.LayerNamespace{
                {realNamespaces[1], realDetectors[1]},
            },
        },
        6: {
            Hash: "layer-4",
            By:   []database.Detector{realDetectors[1], realDetectors[2], realDetectors[3], realDetectors[4]},
            Features: []database.LayerFeature{
                {realFeatures[4], realDetectors[3]},
                {realFeatures[3], realDetectors[2]},
            },
            Namespaces: []database.LayerNamespace{
                {realNamespaces[1], realDetectors[1]},
                {realNamespaces[3], realDetectors[4]},
            },
        },
    }

    realAncestries = map[int]database.Ancestry{
        2: {
            Name: "ancestry-2",
            By:   []database.Detector{realDetectors[2], realDetectors[1]},
            Layers: []database.AncestryLayer{
                {
                    "layer-0",
                    []database.AncestryFeature{},
                },
                {
                    "layer-1",
                    []database.AncestryFeature{},
                },
                {
                    "layer-2",
                    []database.AncestryFeature{
                        {
                            realNamespacedFeatures[1],
                            realDetectors[2],
                            realDetectors[1],
                        },
                    },
                },
                {
                    "layer-3b",
                    []database.AncestryFeature{
                        {
                            realNamespacedFeatures[3],
                            realDetectors[2],
                            realDetectors[1],
                        },
                    },
                },
            },
        },
    }

    realVulnerability = map[int]database.Vulnerability{
        1: {
            Name:        "CVE-OPENSSL-1-DEB7",
            Namespace:   realNamespaces[1],
            Description: "A vulnerability affecting OpenSSL < 2.0 on Debian 7.0",
            Link:        "http://google.com/#q=CVE-OPENSSL-1-DEB7",
            Severity:    database.HighSeverity,
        },
        2: {
            Name:        "CVE-NOPE",
            Namespace:   realNamespaces[1],
            Description: "A vulnerability affecting nothing",
            Severity:    database.UnknownSeverity,
        },
    }

    realNotification = map[int]database.VulnerabilityNotification{
        1: {
            NotificationHook: database.NotificationHook{
                Name: "test",
            },
            Old: takeVulnerabilityPointerFromMap(realVulnerability, 2),
            New: takeVulnerabilityPointerFromMap(realVulnerability, 1),
        },
    }

    fakeFeatures = map[int]database.Feature{
        1: {
            Name:          "ourchat",
            Version:       "0.6",
            VersionFormat: "dpkg",
        },
    }

    fakeNamespaces = map[int]database.Namespace{
        1: {"green hat", "rpm"},
    }

    fakeNamespacedFeatures = map[int]database.NamespacedFeature{
        1: {
            Feature:   fakeFeatures[0],
            Namespace: realNamespaces[0],
        },
    }

    fakeDetector = map[int]database.Detector{
        1: {
            Name:    "fake",
            Version: "1.0",
            DType:   database.FeatureDetectorType,
        },
        2: {
            Name:    "fake2",
            Version: "2.0",
            DType:   database.NamespaceDetectorType,
        },
    }
)

func takeVulnerabilityPointerFromMap(m map[int]database.Vulnerability, id int) *database.Vulnerability {
    x := m[id]
    return &x
}

func takeAncestryPointerFromMap(m map[int]database.Ancestry, id int) *database.Ancestry {
    x := m[id]
    return &x
}

func takeLayerPointerFromMap(m map[int]database.Layer, id int) *database.Layer {
    x := m[id]
    return &x
}

func listNamespaces(t *testing.T, tx *pgSession) []database.Namespace {
    rows, err := tx.Query("SELECT name, version_format FROM namespace")
    if err != nil {
        t.FailNow()
    }
    defer rows.Close()

    namespaces := []database.Namespace{}
    for rows.Next() {
        var ns database.Namespace
        err := rows.Scan(&ns.Name, &ns.VersionFormat)
        if err != nil {
            t.FailNow()
        }
        namespaces = append(namespaces, ns)
    }

    return namespaces
}

func assertVulnerabilityNotificationWithVulnerableEqual(t *testing.T, key pagination.Key, expected, actual *database.VulnerabilityNotificationWithVulnerable) bool {
    if expected == actual {
        return true
    }

    if expected == nil || actual == nil {
        return assert.Equal(t, expected, actual)
    }

    return assert.Equal(t, expected.NotificationHook, actual.NotificationHook) &&
        AssertPagedVulnerableAncestriesEqual(t, key, expected.Old, actual.Old) &&
        AssertPagedVulnerableAncestriesEqual(t, key, expected.New, actual.New)
}

func AssertPagedVulnerableAncestriesEqual(t *testing.T, key pagination.Key, expected, actual *database.PagedVulnerableAncestries) bool {
    if expected == actual {
        return true
    }

    if expected == nil || actual == nil {
        return assert.Equal(t, expected, actual)
    }

    return database.AssertVulnerabilityEqual(t, &expected.Vulnerability, &actual.Vulnerability) &&
        assert.Equal(t, expected.Limit, actual.Limit) &&
        assert.Equal(t, mustUnmarshalToken(key, expected.Current), mustUnmarshalToken(key, actual.Current)) &&
        assert.Equal(t, mustUnmarshalToken(key, expected.Next), mustUnmarshalToken(key, actual.Next)) &&
        assert.Equal(t, expected.End, actual.End) &&
        database.AssertIntStringMapEqual(t, expected.Affected, actual.Affected)
}

func mustUnmarshalToken(key pagination.Key, token pagination.Token) Page {
    if token == pagination.FirstPageToken {
        return Page{}
    }

    p := Page{}
    if err := key.UnmarshalToken(token, &p); err != nil {
        panic(err)
    }

    return p
}

func mustMarshalToken(key pagination.Key, v interface{}) pagination.Token {
    token, err := key.MarshalToken(v)
    if err != nil {
        panic(err)
    }

    return token
}
@ -26,6 +26,73 @@ import (
    "github.com/coreos/clair/ext/versionfmt"
)

const (
    lockVulnerabilityAffects = `LOCK vulnerability_affected_namespaced_feature IN SHARE ROW EXCLUSIVE MODE`

    searchVulnerability = `
        SELECT v.id, v.description, v.link, v.severity, v.metadata, n.version_format
        FROM vulnerability AS v, namespace AS n
        WHERE v.namespace_id = n.id
            AND v.name = $1
            AND n.name = $2
            AND v.deleted_at IS NULL
    `

    insertVulnerabilityAffected = `
        INSERT INTO vulnerability_affected_feature(vulnerability_id, feature_name, affected_version, fixedin)
        VALUES ($1, $2, $3, $4)
        RETURNING ID
    `

    searchVulnerabilityAffected = `
        SELECT vulnerability_id, feature_name, affected_version, fixedin
        FROM vulnerability_affected_feature
        WHERE vulnerability_id = ANY($1)
    `

    searchVulnerabilityByID = `
        SELECT v.name, v.description, v.link, v.severity, v.metadata, n.name, n.version_format
        FROM vulnerability AS v, namespace AS n
        WHERE v.namespace_id = n.id
            AND v.id = $1`

    searchVulnerabilityPotentialAffected = `
        WITH req AS (
            SELECT vaf.id AS vaf_id, n.id AS n_id, vaf.feature_name AS name, v.id AS vulnerability_id
            FROM vulnerability_affected_feature AS vaf,
                vulnerability AS v,
                namespace AS n
            WHERE vaf.vulnerability_id = ANY($1)
                AND v.id = vaf.vulnerability_id
                AND n.id = v.namespace_id
        )
        SELECT req.vulnerability_id, nf.id, f.version, req.vaf_id AS added_by
        FROM feature AS f, namespaced_feature AS nf, req
        WHERE f.name = req.name
            AND nf.namespace_id = req.n_id
            AND nf.feature_id = f.id`

    insertVulnerabilityAffectedNamespacedFeature = `
        INSERT INTO vulnerability_affected_namespaced_feature(vulnerability_id, namespaced_feature_id, added_by)
        VALUES ($1, $2, $3)`

    insertVulnerability = `
        WITH ns AS (
            SELECT id FROM namespace WHERE name = $6 AND version_format = $7
        )
        INSERT INTO Vulnerability(namespace_id, name, description, link, severity, metadata, created_at)
        VALUES((SELECT id FROM ns), $1, $2, $3, $4, $5, CURRENT_TIMESTAMP)
        RETURNING id`

    removeVulnerability = `
        UPDATE Vulnerability
        SET deleted_at = CURRENT_TIMESTAMP
        WHERE namespace_id = (SELECT id FROM Namespace WHERE name = $1)
            AND name = $2
            AND deleted_at IS NULL
        RETURNING id`
)

var (
    errVulnerabilityNotFound = errors.New("vulnerability is not in database")
)
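A minimal sketch of how one of these parameterized queries might be driven from Go. This is illustrative only: findAffected is a hypothetical helper, and it assumes the vendored lib/pq driver, whose pq.Array adapts a Go slice to the ANY($1) array parameter:

import (
    "database/sql"

    "github.com/lib/pq"
)

// findAffected runs searchVulnerabilityAffected for a set of vulnerability
// IDs and groups the affected feature names by vulnerability.
func findAffected(tx *sql.Tx, vulnIDs []int64) (map[int64][]string, error) {
    rows, err := tx.Query(searchVulnerabilityAffected, pq.Array(vulnIDs))
    if err != nil {
        return nil, err
    }
    defer rows.Close()

    affected := map[int64][]string{}
    for rows.Next() {
        var (
            id                       int64
            name                     string
            affectedVersion, fixedIn sql.NullString
        )
        if err := rows.Scan(&id, &name, &affectedVersion, &fixedIn); err != nil {
            return nil, err
        }
        affected[id] = append(affected[id], name)
    }
    return affected, rows.Err()
}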
@ -306,14 +306,14 @@ func TestFindVulnerabilityIDs(t *testing.T) {
    ids, err := tx.findLatestDeletedVulnerabilityIDs([]database.VulnerabilityID{{Name: "CVE-DELETED", Namespace: "debian:7"}})
    if assert.Nil(t, err) {
        if !(assert.Len(t, ids, 1) && assert.True(t, ids[0].Valid) && assert.Equal(t, 3, int(ids[0].Int64))) {
            assert.Fail(t, "")
        }
    }

    ids, err = tx.findNotDeletedVulnerabilityIDs([]database.VulnerabilityID{{Name: "CVE-NOPE", Namespace: "debian:7"}})
    if assert.Nil(t, err) {
        if !(assert.Len(t, ids, 1) && assert.True(t, ids[0].Valid) && assert.Equal(t, 2, int(ids[0].Int64))) {
            assert.Fail(t, "")
        }
    }
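The only change in this hunk is the added int(...) conversion. testify's assert.Equal uses reflect-based equality, so the untyped constant 3 (an int) never equals the int64 held in a sql.NullInt64. A tiny hypothetical test, not from this PR, illustrating the pitfall:

import (
    "database/sql"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestEqualIsTypeSensitive(t *testing.T) {
    v := sql.NullInt64{Int64: 3, Valid: true}
    // An int literal is not equal to an int64 value under reflect equality...
    assert.False(t, assert.ObjectsAreEqual(3, v.Int64))
    // ...while converting to int makes the comparison succeed.
    assert.True(t, assert.ObjectsAreEqual(3, int(v.Int64)))
}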
297
database/testutil.go
Normal file
@ -0,0 +1,297 @@
// Copyright 2018 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package database

import (
    "encoding/json"
    "sort"
    "testing"

    "github.com/deckarep/golang-set"
    "github.com/stretchr/testify/assert"
)

// AssertDetectorsEqual asserts actual detectors are content-wise equal to
// expected detectors regardless of the ordering.
func AssertDetectorsEqual(t *testing.T, expected, actual []Detector) bool {
    if len(expected) != len(actual) {
        return assert.Fail(t, "detectors are not equal", "expected: '%v', actual: '%v'", expected, actual)
    }

    sort.Slice(expected, func(i, j int) bool {
        return expected[i].String() < expected[j].String()
    })

    sort.Slice(actual, func(i, j int) bool {
        return actual[i].String() < actual[j].String()
    })

    for i := range expected {
        if expected[i] != actual[i] {
            return assert.Fail(t, "detectors are not equal", "expected: '%v', actual: '%v'", expected, actual)
        }
    }

    return true
}

// AssertAncestryEqual asserts actual ancestry equals to expected ancestry
// content-wise.
func AssertAncestryEqual(t *testing.T, expected, actual *Ancestry) bool {
    if expected == actual {
        return true
    }

    if actual == nil || expected == nil {
        return assert.Equal(t, expected, actual)
    }

    if !assert.Equal(t, expected.Name, actual.Name) || !AssertDetectorsEqual(t, expected.By, actual.By) {
        return false
    }

    if assert.Equal(t, len(expected.Layers), len(actual.Layers)) {
        for index := range expected.Layers {
            if !AssertAncestryLayerEqual(t, &expected.Layers[index], &actual.Layers[index]) {
                return false
            }
        }
        return true
    }
    return false
}

// AssertAncestryLayerEqual asserts actual ancestry layer equals to expected
// ancestry layer content-wise.
func AssertAncestryLayerEqual(t *testing.T, expected, actual *AncestryLayer) bool {
    if !assert.Equal(t, expected.Hash, actual.Hash) {
        return false
    }

    if !assert.Equal(t, len(expected.Features), len(actual.Features),
        "layer: %s\nExpected: %v\n Actual: %v",
        expected.Hash, expected.Features, actual.Features,
    ) {
        return false
    }

    // feature -> is in actual layer
    hitCounter := map[AncestryFeature]bool{}
    for _, f := range expected.Features {
        hitCounter[f] = false
    }

    // if there are no extra features and no duplicated features, since expected
    // and actual have the same length, their result must equal.
    for _, f := range actual.Features {
        v, ok := hitCounter[f]
        assert.True(t, ok, "unexpected feature %s", f)
        assert.False(t, v, "duplicated feature %s", f)
        hitCounter[f] = true
    }

    for f, visited := range hitCounter {
        assert.True(t, visited, "missing feature %s", f)
    }

    return true
}

// AssertElementsEqual asserts that content in actual equals to content in
// expected array regardless of ordering.
//
// Note: This function uses interface-wise comparison.
func AssertElementsEqual(t *testing.T, expected, actual []interface{}) bool {
    counter := map[interface{}]bool{}
    for _, f := range expected {
        counter[f] = false
    }

    for _, f := range actual {
        v, ok := counter[f]
        if !assert.True(t, ok, "unexpected element %v\nExpected: %v\n Actual: %v\n", f, expected, actual) {
            return false
        }

        if !assert.False(t, v, "duplicated element %v\nExpected: %v\n Actual: %v\n", f, expected, actual) {
            return false
        }

        counter[f] = true
    }

    for f, visited := range counter {
        if !assert.True(t, visited, "missing feature %v\nExpected: %v\n Actual: %v\n", f, expected, actual) {
            return false
        }
    }

    return true
}

// AssertFeaturesEqual asserts content in actual equals content in expected
// regardless of ordering.
func AssertFeaturesEqual(t *testing.T, expected, actual []Feature) bool {
    if assert.Len(t, actual, len(expected)) {
        has := map[Feature]bool{}
        for _, nf := range expected {
            has[nf] = false
        }

        for _, nf := range actual {
            has[nf] = true
        }

        for nf, visited := range has {
            if !assert.True(t, visited, nf.Name+" is expected") {
                return false
            }
            return true
        }
    }
    return false
}

// AssertLayerFeaturesEqual asserts content in actual equals to content in
// expected regardless of ordering.
func AssertLayerFeaturesEqual(t *testing.T, expected, actual []LayerFeature) bool {
    if !assert.Len(t, actual, len(expected)) {
        return false
    }

    expectedInterfaces := []interface{}{}
    for _, e := range expected {
        expectedInterfaces = append(expectedInterfaces, e)
    }

    actualInterfaces := []interface{}{}
    for _, a := range actual {
        actualInterfaces = append(actualInterfaces, a)
    }

    return AssertElementsEqual(t, expectedInterfaces, actualInterfaces)
}

// AssertNamespacesEqual asserts content in actual equals to content in
// expected regardless of ordering.
func AssertNamespacesEqual(t *testing.T, expected, actual []Namespace) bool {
    expectedInterfaces := []interface{}{}
    for _, e := range expected {
        expectedInterfaces = append(expectedInterfaces, e)
    }

    actualInterfaces := []interface{}{}
    for _, a := range actual {
        actualInterfaces = append(actualInterfaces, a)
    }

    return AssertElementsEqual(t, expectedInterfaces, actualInterfaces)
}

// AssertLayerNamespacesEqual asserts content in actual equals to content in
// expected regardless of ordering.
func AssertLayerNamespacesEqual(t *testing.T, expected, actual []LayerNamespace) bool {
    expectedInterfaces := []interface{}{}
    for _, e := range expected {
        expectedInterfaces = append(expectedInterfaces, e)
    }

    actualInterfaces := []interface{}{}
    for _, a := range actual {
        actualInterfaces = append(actualInterfaces, a)
    }

    return AssertElementsEqual(t, expectedInterfaces, actualInterfaces)
}

// AssertLayerEqual asserts actual layer equals to expected layer content-wise.
func AssertLayerEqual(t *testing.T, expected, actual *Layer) bool {
    if expected == actual {
        return true
    }

    if expected == nil || actual == nil {
        return assert.Equal(t, expected, actual)
    }

    return assert.Equal(t, expected.Hash, actual.Hash) &&
        AssertDetectorsEqual(t, expected.By, actual.By) &&
        AssertLayerFeaturesEqual(t, expected.Features, actual.Features) &&
        AssertLayerNamespacesEqual(t, expected.Namespaces, actual.Namespaces)
}

// AssertIntStringMapEqual asserts two maps with integer as key and string as
// value are equal.
func AssertIntStringMapEqual(t *testing.T, expected, actual map[int]string) bool {
    checked := mapset.NewSet()
    for k, v := range expected {
        assert.Equal(t, v, actual[k])
        checked.Add(k)
    }

    for k := range actual {
        if !assert.True(t, checked.Contains(k)) {
            return false
        }
    }

    return true
}

// AssertVulnerabilityEqual asserts two vulnerabilities are equal.
func AssertVulnerabilityEqual(t *testing.T, expected, actual *Vulnerability) bool {
    return assert.Equal(t, expected.Name, actual.Name) &&
        assert.Equal(t, expected.Link, actual.Link) &&
        assert.Equal(t, expected.Description, actual.Description) &&
        assert.Equal(t, expected.Namespace, actual.Namespace) &&
        assert.Equal(t, expected.Severity, actual.Severity) &&
        AssertMetadataMapEqual(t, expected.Metadata, actual.Metadata)
}

func castMetadataMapToInterface(metadata MetadataMap) map[string]interface{} {
    content, err := json.Marshal(metadata)
    if err != nil {
        panic(err)
    }

    data := make(map[string]interface{})
    if err := json.Unmarshal(content, &data); err != nil {
        panic(err)
    }

    return data
}

// AssertMetadataMapEqual asserts two metadata maps are equal.
func AssertMetadataMapEqual(t *testing.T, expected, actual MetadataMap) bool {
    expectedMap := castMetadataMapToInterface(expected)
    actualMap := castMetadataMapToInterface(actual)
    checked := mapset.NewSet()
    for k, v := range expectedMap {
        if !assert.Equal(t, v, (actualMap)[k]) {
            return false
        }

        checked.Add(k)
    }

    for k := range actual {
        if !assert.True(t, checked.Contains(k)) {
            return false
        }
    }

    return true
}
@ -29,7 +29,7 @@ import (
)

func init() {
    featurefmt.RegisterLister("apk", "1.0", &lister{})
}

type lister struct{}
@ -37,7 +37,7 @@ var (
type lister struct{}

func init() {
    featurefmt.RegisterLister("dpkg", "1.0", &lister{})
}

func (l lister) ListFeatures(files tarutil.FilesMap) ([]database.Feature, error) {
@ -31,9 +31,8 @@ import (
)

var (
    listersM sync.RWMutex
    listers  = make(map[string]lister)
)

// Lister represents an ability to list the features present in an image layer.
@ -48,13 +47,19 @@ type Lister interface {
    RequiredFilenames() []string
}

type lister struct {
    Lister

    info database.Detector
}

// RegisterLister makes a Lister available by the provided name.
//
// If called twice with the same name, the name is blank, or if the provided
// Lister is nil, this function panics.
func RegisterLister(name string, version string, l Lister) {
    if name == "" || version == "" {
        panic("featurefmt: could not register a Lister with an empty name or version")
    }
    if l == nil {
        panic("featurefmt: could not register a nil Lister")
@ -67,51 +72,65 @@ func RegisterLister(name string, versionfmt string, l Lister) {
        panic("featurefmt: RegisterLister called twice for " + name)
    }

    listers[name] = lister{l, database.NewFeatureDetector(name, version)}
}

// ListFeatures produces the list of Features in an image layer using
// every registered Lister.
func ListFeatures(files tarutil.FilesMap, toUse []database.Detector) ([]database.LayerFeature, error) {
    listersM.RLock()
    defer listersM.RUnlock()

    features := []database.LayerFeature{}
    for _, d := range toUse {
        // Only use the detector with the same type
        if d.DType != database.FeatureDetectorType {
            continue
        }

        if lister, ok := listers[d.Name]; ok {
            fs, err := lister.ListFeatures(files)
            if err != nil {
                return nil, err
            }

            for _, f := range fs {
                features = append(features, database.LayerFeature{
                    Feature: f,
                    By:      lister.info,
                })
            }
        } else {
            log.WithField("Name", d).Fatal("unknown feature detector")
        }
    }

    return features, nil
}

// RequiredFilenames returns all files required by the given extensions. Any
// extension metadata that has non feature-detector type will be skipped.
func RequiredFilenames(toUse []database.Detector) (files []string) {
    listersM.RLock()
    defer listersM.RUnlock()

    for _, d := range toUse {
        if d.DType != database.FeatureDetectorType {
            continue
        }

        files = append(files, listers[d.Name].RequiredFilenames()...)
    }

    return
}

// ListListers returns the names of all the registered feature listers.
func ListListers() []database.Detector {
    r := []database.Detector{}
    for _, d := range listers {
        r = append(r, d.info)
    }
    return r
}
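Every lister now registers itself as a versioned feature detector, so bumping the version string is what invalidates previously detected content for that extension. A minimal sketch of a custom lister under the new API; the "noop" package is hypothetical (the real listers are dpkg, apk, and rpm):

package noop

import (
    "github.com/coreos/clair/database"
    "github.com/coreos/clair/ext/featurefmt"
    "github.com/coreos/clair/pkg/tarutil"
)

type lister struct{}

func init() {
    // Registers as feature detector "noop", version "1.0".
    featurefmt.RegisterLister("noop", "1.0", &lister{})
}

// ListFeatures reports no features; a real lister parses files here.
func (l lister) ListFeatures(files tarutil.FilesMap) ([]database.Feature, error) {
    return nil, nil
}

// RequiredFilenames names the files this lister needs from the layer tarball.
func (l lister) RequiredFilenames() []string {
    return nil
}

Callers now pass a []database.Detector (for example, a layer's By list) to ListFeatures and RequiredFilenames, and any non-feature detectors in that slice are skipped.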
@ -35,7 +35,7 @@ import (
type lister struct{}

func init() {
    featurefmt.RegisterLister("rpm", "1.0", &lister{})
}

func (l lister) ListFeatures(files tarutil.FilesMap) ([]database.Feature, error) {
@ -36,7 +36,7 @@ const (
var versionRegexp = regexp.MustCompile(`^(\d)+\.(\d)+\.(\d)+$`)

func init() {
    featurens.RegisterDetector("alpine-release", "1.0", &detector{})
}

type detector struct{}
@ -32,7 +32,7 @@ import (
type detector struct{}

func init() {
    featurens.RegisterDetector("apt-sources", "1.0", &detector{})
}

func (d detector) Detect(files tarutil.FilesMap) (*database.Namespace, error) {
@ -29,7 +29,7 @@ import (

var (
    detectorsM sync.RWMutex
    detectors  = make(map[string]detector)
)

// Detector represents an ability to detect a namespace used for organizing
@ -46,13 +46,19 @@ type Detector interface {
    RequiredFilenames() []string
}

type detector struct {
    Detector

    info database.Detector
}

// RegisterDetector makes a detector available by the provided name.
//
// If called twice with the same name, the name is blank, or if the provided
// Detector is nil, this function panics.
func RegisterDetector(name string, version string, d Detector) {
    if name == "" || version == "" {
        panic("namespace: could not register a Detector with an empty name or version")
    }
    if d == nil {
        panic("namespace: could not register a nil Detector")
@ -61,60 +67,69 @@ func RegisterDetector(name string, d Detector) {
    detectorsM.Lock()
    defer detectorsM.Unlock()

    if _, ok := detectors[name]; ok {
        panic("namespace: RegisterDetector called twice for " + name)
    }

    detectors[name] = detector{d, database.NewNamespaceDetector(name, version)}
}

// Detect uses the specified detectors to retrieve the detection result.
func Detect(files tarutil.FilesMap, toUse []database.Detector) ([]database.LayerNamespace, error) {
    detectorsM.RLock()
    defer detectorsM.RUnlock()

    namespaces := []database.LayerNamespace{}
    for _, d := range toUse {
        // Only use the detector with the same type
        if d.DType != database.NamespaceDetectorType {
            continue
        }

        if detector, ok := detectors[d.Name]; ok {
            namespace, err := detector.Detect(files)
            if err != nil {
                log.WithError(err).WithField("detector", d).Warning("failed while attempting to detect namespace")
                return nil, err
            }

            if namespace != nil {
                log.WithFields(log.Fields{"detector": d, "namespace": namespace.Name}).Debug("detected namespace")
                namespaces = append(namespaces, database.LayerNamespace{
                    Namespace: *namespace,
                    By:        detector.info,
                })
            }
        } else {
            log.WithField("detector", d).Fatal("unknown namespace detector")
        }
    }

    return namespaces, nil
}

// RequiredFilenames returns all files required by the given extensions. Any
// extension metadata that has non namespace-detector type will be skipped.
func RequiredFilenames(toUse []database.Detector) (files []string) {
    detectorsM.RLock()
    defer detectorsM.RUnlock()

    for _, d := range toUse {
        if d.DType != database.NamespaceDetectorType {
            continue
        }

        files = append(files, detectors[d.Name].RequiredFilenames()...)
    }

    return
}

// ListDetectors returns the info of all registered namespace detectors.
func ListDetectors() []database.Detector {
    r := make([]database.Detector, 0, len(detectors))
    for _, d := range detectors {
        r = append(r, d.info)
    }
    return r
}
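The namespace side mirrors featurefmt: callers pass the set of detectors to run, and Detect skips any detector whose type is not namespace. A hedged sketch of the calling convention, assuming a populated files map:

func detectExample(files tarutil.FilesMap) ([]database.LayerNamespace, error) {
    // Run every registered namespace detector:
    all, err := featurens.Detect(files, featurens.ListDetectors())
    if err != nil {
        return nil, err
    }
    _ = all

    // Or only a specific, versioned detector; feature-type detectors in the
    // slice would simply be skipped by Detect.
    return featurens.Detect(files, []database.Detector{
        database.NewNamespaceDetector("os-release", "1.0"),
    })
}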
@ -16,40 +16,14 @@ import (
    _ "github.com/coreos/clair/ext/featurens/redhatrelease"
)

var namespaceDetectorTests = []struct {
    in  tarutil.FilesMap
    out []database.LayerNamespace
    err string
}{
    {
        in: tarutil.FilesMap{
            "etc/os-release": []byte(`
PRETTY_NAME="Debian GNU/Linux 8 (jessie)"
NAME="Debian GNU/Linux"
VERSION_ID="8"
@ -58,9 +32,23 @@ ID=debian
HOME_URL="http://www.debian.org/"
SUPPORT_URL="http://www.debian.org/support/"
BUG_REPORT_URL="https://bugs.debian.org/"`),
            "etc/alpine-release": []byte(`3.3.4`),
        },
        out: []database.LayerNamespace{
            {database.Namespace{"debian:8", "dpkg"}, database.NewNamespaceDetector("os-release", "1.0")},
            {database.Namespace{"alpine:v3.3", "dpkg"}, database.NewNamespaceDetector("alpine-release", "1.0")},
        },
    },
}

func TestNamespaceDetector(t *testing.T) {
    for _, test := range namespaceDetectorTests {
        out, err := featurens.Detect(test.in, featurens.ListDetectors())
        if test.err != "" {
            assert.EqualError(t, err, test.err)
            return
        }

        database.AssertLayerNamespacesEqual(t, test.out, out)
    }
}
@ -38,7 +38,7 @@ var (
type detector struct{}

func init() {
    featurens.RegisterDetector("lsb-release", "1.0", &detector{})
}

func (d detector) Detect(files tarutil.FilesMap) (*database.Namespace, error) {
@ -45,7 +45,7 @@ var (
type detector struct{}

func init() {
    featurens.RegisterDetector("os-release", "1.0", &detector{})
}

func (d detector) Detect(files tarutil.FilesMap) (*database.Namespace, error) {
@ -38,7 +38,7 @@ var (
type detector struct{}

func init() {
    featurens.RegisterDetector("redhat-release", "1.0", &detector{})
}

func (d detector) Detect(files tarutil.FilesMap) (*database.Namespace, error) {
@ -33,6 +33,7 @@ import (
    log "github.com/sirupsen/logrus"

    "github.com/coreos/clair/pkg/commonerr"
    "github.com/coreos/clair/pkg/strutil"
    "github.com/coreos/clair/pkg/tarutil"
)

@ -106,7 +107,7 @@ func UnregisterExtractor(name string) {
func Extract(format, path string, headers map[string]string, toExtract []string) (tarutil.FilesMap, error) {
    var layerReader io.ReadCloser
    if strings.HasPrefix(path, "http://") || strings.HasPrefix(path, "https://") {
        log.WithField("path", strutil.CleanURL(path)).Debug("start downloading layer blob...")
        request, err := http.NewRequest("GET", path, nil)
        if err != nil {
            return nil, ErrCouldNotFindLayer
@ -127,21 +128,23 @@ func Extract(format, path string, headers map[string]string, toExtract []string)
        client := &http.Client{Transport: tr}
        r, err := client.Do(request)
        if err != nil {
            log.WithError(err).Error("could not download layer")
            return nil, ErrCouldNotFindLayer
        }

        // Fail if we don't receive a 2xx HTTP status code.
        if math.Floor(float64(r.StatusCode/100)) != 2 {
            log.WithError(ErrCouldNotFindLayer).WithField("status code", r.StatusCode).Error("could not download layer: expected 2XX")
            return nil, ErrCouldNotFindLayer
        }

        layerReader = r.Body
    } else {
        log.WithField("path", strutil.CleanURL(path)).Debug("start reading layer blob from local file system...")
        var err error
        layerReader, err = os.Open(path)
        if err != nil {
            log.WithError(ErrCouldNotFindLayer).Error("could not open layer")
            return nil, ErrCouldNotFindLayer
        }
    }
6
glide.lock
generated
@ -1,5 +1,5 @@
hash: 3fd0e471868863d6ef4cd32bbcdc9b3d061911a15b458e7edd26cfba4faa62db
updated: 2018-09-17T13:13:44.344244-04:00
imports:
- name: github.com/beorn7/perks
  version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
|||||||
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
|
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
|
||||||
subpackages:
|
subpackages:
|
||||||
- spew
|
- spew
|
||||||
|
- name: github.com/deckarep/golang-set
|
||||||
|
version: cbaa98ba5575e67703b32b4b19f73c91f3c4159e
|
||||||
- name: github.com/fernet/fernet-go
|
- name: github.com/fernet/fernet-go
|
||||||
version: 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
|
version: 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
|
||||||
- name: github.com/golang/protobuf
|
- name: github.com/golang/protobuf
|
||||||
|
@ -28,3 +28,5 @@ import:
  - assert
- package: gopkg.in/yaml.v2
- package: github.com/cockroachdb/cmux
- package: github.com/deckarep/golang-set
  version: ^1.7.1
@ -14,42 +14,46 @@
 
 package strutil
 
-// CompareStringLists returns the strings that are present in X but not in Y.
-func CompareStringLists(X, Y []string) []string {
-	m := make(map[string]bool)
-
-	for _, y := range Y {
-		m[y] = true
-	}
-
-	diff := []string{}
-	for _, x := range X {
-		if m[x] {
-			continue
-		}
-
-		diff = append(diff, x)
-		m[x] = true
-	}
-
-	return diff
-}
-
-// CompareStringListsInBoth returns the strings that are present in both X and Y.
-func CompareStringListsInBoth(X, Y []string) []string {
-	m := make(map[string]struct{})
-
-	for _, y := range Y {
-		m[y] = struct{}{}
-	}
-
-	diff := []string{}
-	for _, x := range X {
-		if _, e := m[x]; e {
-			diff = append(diff, x)
-			delete(m, x)
-		}
-	}
-
-	return diff
-}
+import (
+	"regexp"
+
+	set "github.com/deckarep/golang-set"
+)
+
+var urlParametersRegexp = regexp.MustCompile(`(\?|\&)([^=]+)\=([^ &]+)`)
+
+func convertToSet(X []string) set.Set {
+	s := set.NewSet()
+	for _, x := range X {
+		s.Add(x)
+	}
+	return s
+}
+
+func setToStringSlice(s set.Set) []string {
+	strs := make([]string, 0, s.Cardinality())
+	for _, str := range s.ToSlice() {
+		strs = append(strs, str.(string))
+	}
+	return strs
+}
+
+// Difference returns the strings that are present in X but not in Y.
+func Difference(X, Y []string) []string {
+	x := convertToSet(X)
+	y := convertToSet(Y)
+	return setToStringSlice(x.Difference(y))
+}
+
+// Intersect returns the strings that are present in both X and Y.
+func Intersect(X, Y []string) []string {
+	x := convertToSet(X)
+	y := convertToSet(Y)
+	return setToStringSlice(x.Intersect(y))
+}
+
+// CleanURL removes all parameters from an URL.
+func CleanURL(str string) string {
+	return urlParametersRegexp.ReplaceAllString(str, "")
+}
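For quick reference while reading the hunk above and the test rename below: the rewritten helpers convert both slices to sets, apply the set operation, and flatten the result back to a []string, so duplicate inputs collapse. The following standalone sketch of that behavior is illustrative only (it is not part of this change) and assumes nothing beyond the vendored golang-set API added here:

```go
package main

import (
	"fmt"

	set "github.com/deckarep/golang-set"
)

func main() {
	// Same inputs as TestStringComparison below; sets dedupe them.
	x := set.NewSet()
	for _, v := range []string{"a", "b", "b", "a"} {
		x.Add(v)
	}
	y := set.NewSet()
	for _, v := range []string{"a", "c"} {
		y.Add(v)
	}

	fmt.Println(x.Difference(y)) // Set{b}: in X but not in Y
	fmt.Println(x.Intersect(y))  // Set{a}: in both X and Y
}
```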
@ -21,12 +21,12 @@ import (
 )
 
 func TestStringComparison(t *testing.T) {
-	cmp := CompareStringLists([]string{"a", "b", "b", "a"}, []string{"a", "c"})
+	cmp := Difference([]string{"a", "b", "b", "a"}, []string{"a", "c"})
 	assert.Len(t, cmp, 1)
 	assert.NotContains(t, cmp, "a")
 	assert.Contains(t, cmp, "b")
 
-	cmp = CompareStringListsInBoth([]string{"a", "a", "b", "c"}, []string{"a", "c", "c"})
+	cmp = Intersect([]string{"a", "a", "b", "c"}, []string{"a", "c", "c"})
 	assert.Len(t, cmp, 2)
 	assert.NotContains(t, cmp, "b")
 	assert.Contains(t, cmp, "a")
@ -208,7 +208,7 @@ func update(datastore database.Datastore, firstUpdate bool) {
 		namespaces = append(namespaces, ns)
 	}
 
-	if err := persistNamespaces(datastore, namespaces); err != nil {
+	if err := database.PersistNamespacesAndCommit(datastore, namespaces); err != nil {
 		log.WithError(err).Error("Unable to insert namespaces")
 		return
 	}
22 vendor/github.com/deckarep/golang-set/.gitignore generated vendored Normal file
@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
11 vendor/github.com/deckarep/golang-set/.travis.yml generated vendored Normal file
@ -0,0 +1,11 @@
language: go

go:
- 1.8
- 1.9
- tip

script:
- go test -race ./...
- go test -bench=.
22 vendor/github.com/deckarep/golang-set/LICENSE generated vendored Normal file
@ -0,0 +1,22 @@
Open Source Initiative OSI - The MIT License (MIT):Licensing

The MIT License (MIT)
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
95 vendor/github.com/deckarep/golang-set/README.md generated vendored Normal file
@ -0,0 +1,95 @@
[](https://travis-ci.org/deckarep/golang-set)
[](https://goreportcard.com/report/github.com/deckarep/golang-set)
[](http://godoc.org/github.com/deckarep/golang-set)

## golang-set

The missing set collection for the Go language. Until Go has sets built-in...use this.

Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python.
You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say simply ignore this repository
and carry-on and to the rest that find this useful please contribute in helping me make it better by:

* Helping to make more idiomatic improvements to the code.
* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~
* Helping to make the unit-tests more robust and kick-ass.
* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set)
* Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.)

I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang)

*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types.

## Features (as of 9/22/2014)

* a CartesianProduct() method has been added with unit-tests: [Read more about the cartesian product](http://en.wikipedia.org/wiki/Cartesian_product)

## Features (as of 9/15/2014)

* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set)

## Features (as of 4/22/2014)

* One common interface to both implementations
* Two set implementations to choose from
  * a thread-safe implementation designed for concurrent use
  * a non-thread-safe implementation designed for performance
* 75 benchmarks for both implementations
* 35 unit tests for both implementations
* 14 concurrent tests for the thread-safe implementation

Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind
however that the Python set is a built-in type and supports additional features and syntax that make it awesome.

## Examples but not exhaustive:

```go
requiredClasses := mapset.NewSet()
requiredClasses.Add("Cooking")
requiredClasses.Add("English")
requiredClasses.Add("Math")
requiredClasses.Add("Biology")

scienceSlice := []interface{}{"Biology", "Chemistry"}
scienceClasses := mapset.NewSetFromSlice(scienceSlice)

electiveClasses := mapset.NewSet()
electiveClasses.Add("Welding")
electiveClasses.Add("Music")
electiveClasses.Add("Automotive")

bonusClasses := mapset.NewSet()
bonusClasses.Add("Go Programming")
bonusClasses.Add("Python Programming")

//Show me all the available classes I can take
allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses)
fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming}

//Is cooking considered a science class?
fmt.Println(scienceClasses.Contains("Cooking")) //false

//Show me all classes that are not science classes, since I hate science.
fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding}

//Which science classes are also required classes?
fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology}

//How many bonus classes do you offer?
fmt.Println(bonusClasses.Cardinality()) //2

//Do you have the following classes? Welding, Automotive and English?
fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true
```

Thanks!

-Ralph

[](https://bitdeli.com/free "Bitdeli Badge")

[](https://github.com/igrigorik/ga-beacon)
674 vendor/github.com/deckarep/golang-set/bench_test.go generated vendored Normal file
@ -0,0 +1,674 @@
package mapset

import (
	"math/rand"
	"testing"
)

func nrand(n int) []int {
	i := make([]int, n)
	for ind := range i {
		i[ind] = rand.Int()
	}
	return i
}

func toInterfaces(i []int) []interface{} {
	ifs := make([]interface{}, len(i))
	for ind, v := range i {
		ifs[ind] = v
	}
	return ifs
}

func benchAdd(b *testing.B, s Set) {
	nums := nrand(b.N)
	b.ResetTimer()
	for _, v := range nums {
		s.Add(v)
	}
}

func BenchmarkAddSafe(b *testing.B) {
	benchAdd(b, NewSet())
}

func BenchmarkAddUnsafe(b *testing.B) {
	benchAdd(b, NewThreadUnsafeSet())
}

func benchRemove(b *testing.B, s Set) {
	nums := nrand(b.N)
	for _, v := range nums {
		s.Add(v)
	}

	b.ResetTimer()
	for _, v := range nums {
		s.Remove(v)
	}
}

func BenchmarkRemoveSafe(b *testing.B) {
	benchRemove(b, NewSet())
}

func BenchmarkRemoveUnsafe(b *testing.B) {
	benchRemove(b, NewThreadUnsafeSet())
}

func benchCardinality(b *testing.B, s Set) {
	for i := 0; i < b.N; i++ {
		s.Cardinality()
	}
}

func BenchmarkCardinalitySafe(b *testing.B) {
	benchCardinality(b, NewSet())
}

func BenchmarkCardinalityUnsafe(b *testing.B) {
	benchCardinality(b, NewThreadUnsafeSet())
}

func benchClear(b *testing.B, s Set) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Clear()
	}
}

func BenchmarkClearSafe(b *testing.B) {
	benchClear(b, NewSet())
}

func BenchmarkClearUnsafe(b *testing.B) {
	benchClear(b, NewThreadUnsafeSet())
}

func benchClone(b *testing.B, n int, s Set) {
	nums := toInterfaces(nrand(n))
	for _, v := range nums {
		s.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Clone()
	}
}

func BenchmarkClone1Safe(b *testing.B) {
	benchClone(b, 1, NewSet())
}

func BenchmarkClone1Unsafe(b *testing.B) {
	benchClone(b, 1, NewThreadUnsafeSet())
}

func BenchmarkClone10Safe(b *testing.B) {
	benchClone(b, 10, NewSet())
}

func BenchmarkClone10Unsafe(b *testing.B) {
	benchClone(b, 10, NewThreadUnsafeSet())
}

func BenchmarkClone100Safe(b *testing.B) {
	benchClone(b, 100, NewSet())
}

func BenchmarkClone100Unsafe(b *testing.B) {
	benchClone(b, 100, NewThreadUnsafeSet())
}

func benchContains(b *testing.B, n int, s Set) {
	nums := toInterfaces(nrand(n))
	for _, v := range nums {
		s.Add(v)
	}

	nums[n-1] = -1 // Definitely not in s

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Contains(nums...)
	}
}

func BenchmarkContains1Safe(b *testing.B) {
	benchContains(b, 1, NewSet())
}

func BenchmarkContains1Unsafe(b *testing.B) {
	benchContains(b, 1, NewThreadUnsafeSet())
}

func BenchmarkContains10Safe(b *testing.B) {
	benchContains(b, 10, NewSet())
}

func BenchmarkContains10Unsafe(b *testing.B) {
	benchContains(b, 10, NewThreadUnsafeSet())
}

func BenchmarkContains100Safe(b *testing.B) {
	benchContains(b, 100, NewSet())
}

func BenchmarkContains100Unsafe(b *testing.B) {
	benchContains(b, 100, NewThreadUnsafeSet())
}

func benchEqual(b *testing.B, n int, s, t Set) {
	nums := nrand(n)
	for _, v := range nums {
		s.Add(v)
		t.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Equal(t)
	}
}

func BenchmarkEqual1Safe(b *testing.B) {
	benchEqual(b, 1, NewSet(), NewSet())
}

func BenchmarkEqual1Unsafe(b *testing.B) {
	benchEqual(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkEqual10Safe(b *testing.B) {
	benchEqual(b, 10, NewSet(), NewSet())
}

func BenchmarkEqual10Unsafe(b *testing.B) {
	benchEqual(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkEqual100Safe(b *testing.B) {
	benchEqual(b, 100, NewSet(), NewSet())
}

func BenchmarkEqual100Unsafe(b *testing.B) {
	benchEqual(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func benchDifference(b *testing.B, n int, s, t Set) {
	nums := nrand(n)
	for _, v := range nums {
		s.Add(v)
	}
	for _, v := range nums[:n/2] {
		t.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Difference(t)
	}
}

func benchIsSubset(b *testing.B, n int, s, t Set) {
	nums := nrand(n)
	for _, v := range nums {
		s.Add(v)
		t.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.IsSubset(t)
	}
}

func BenchmarkIsSubset1Safe(b *testing.B) {
	benchIsSubset(b, 1, NewSet(), NewSet())
}

func BenchmarkIsSubset1Unsafe(b *testing.B) {
	benchIsSubset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkIsSubset10Safe(b *testing.B) {
	benchIsSubset(b, 10, NewSet(), NewSet())
}

func BenchmarkIsSubset10Unsafe(b *testing.B) {
	benchIsSubset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkIsSubset100Safe(b *testing.B) {
	benchIsSubset(b, 100, NewSet(), NewSet())
}

func BenchmarkIsSubset100Unsafe(b *testing.B) {
	benchIsSubset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func benchIsSuperset(b *testing.B, n int, s, t Set) {
	nums := nrand(n)
	for _, v := range nums {
		s.Add(v)
		t.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.IsSuperset(t)
	}
}

func BenchmarkIsSuperset1Safe(b *testing.B) {
	benchIsSuperset(b, 1, NewSet(), NewSet())
}

func BenchmarkIsSuperset1Unsafe(b *testing.B) {
	benchIsSuperset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkIsSuperset10Safe(b *testing.B) {
	benchIsSuperset(b, 10, NewSet(), NewSet())
}

func BenchmarkIsSuperset10Unsafe(b *testing.B) {
	benchIsSuperset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkIsSuperset100Safe(b *testing.B) {
	benchIsSuperset(b, 100, NewSet(), NewSet())
}

func BenchmarkIsSuperset100Unsafe(b *testing.B) {
	benchIsSuperset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func benchIsProperSubset(b *testing.B, n int, s, t Set) {
	nums := nrand(n)
	for _, v := range nums {
		s.Add(v)
		t.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.IsProperSubset(t)
	}
}

func BenchmarkIsProperSubset1Safe(b *testing.B) {
	benchIsProperSubset(b, 1, NewSet(), NewSet())
}

func BenchmarkIsProperSubset1Unsafe(b *testing.B) {
	benchIsProperSubset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkIsProperSubset10Safe(b *testing.B) {
	benchIsProperSubset(b, 10, NewSet(), NewSet())
}

func BenchmarkIsProperSubset10Unsafe(b *testing.B) {
	benchIsProperSubset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkIsProperSubset100Safe(b *testing.B) {
	benchIsProperSubset(b, 100, NewSet(), NewSet())
}

func BenchmarkIsProperSubset100Unsafe(b *testing.B) {
	benchIsProperSubset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func benchIsProperSuperset(b *testing.B, n int, s, t Set) {
	nums := nrand(n)
	for _, v := range nums {
		s.Add(v)
		t.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.IsProperSuperset(t)
	}
}

func BenchmarkIsProperSuperset1Safe(b *testing.B) {
	benchIsProperSuperset(b, 1, NewSet(), NewSet())
}

func BenchmarkIsProperSuperset1Unsafe(b *testing.B) {
	benchIsProperSuperset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkIsProperSuperset10Safe(b *testing.B) {
	benchIsProperSuperset(b, 10, NewSet(), NewSet())
}

func BenchmarkIsProperSuperset10Unsafe(b *testing.B) {
	benchIsProperSuperset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkIsProperSuperset100Safe(b *testing.B) {
	benchIsProperSuperset(b, 100, NewSet(), NewSet())
}

func BenchmarkIsProperSuperset100Unsafe(b *testing.B) {
	benchIsProperSuperset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkDifference1Safe(b *testing.B) {
	benchDifference(b, 1, NewSet(), NewSet())
}

func BenchmarkDifference1Unsafe(b *testing.B) {
	benchDifference(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkDifference10Safe(b *testing.B) {
	benchDifference(b, 10, NewSet(), NewSet())
}

func BenchmarkDifference10Unsafe(b *testing.B) {
	benchDifference(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkDifference100Safe(b *testing.B) {
	benchDifference(b, 100, NewSet(), NewSet())
}

func BenchmarkDifference100Unsafe(b *testing.B) {
	benchDifference(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func benchIntersect(b *testing.B, n int, s, t Set) {
	nums := nrand(int(float64(n) * float64(1.5)))
	for _, v := range nums[:n] {
		s.Add(v)
	}
	for _, v := range nums[n/2:] {
		t.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Intersect(t)
	}
}

func BenchmarkIntersect1Safe(b *testing.B) {
	benchIntersect(b, 1, NewSet(), NewSet())
}

func BenchmarkIntersect1Unsafe(b *testing.B) {
	benchIntersect(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkIntersect10Safe(b *testing.B) {
	benchIntersect(b, 10, NewSet(), NewSet())
}

func BenchmarkIntersect10Unsafe(b *testing.B) {
	benchIntersect(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkIntersect100Safe(b *testing.B) {
	benchIntersect(b, 100, NewSet(), NewSet())
}

func BenchmarkIntersect100Unsafe(b *testing.B) {
	benchIntersect(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func benchSymmetricDifference(b *testing.B, n int, s, t Set) {
	nums := nrand(int(float64(n) * float64(1.5)))
	for _, v := range nums[:n] {
		s.Add(v)
	}
	for _, v := range nums[n/2:] {
		t.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.SymmetricDifference(t)
	}
}

func BenchmarkSymmetricDifference1Safe(b *testing.B) {
	benchSymmetricDifference(b, 1, NewSet(), NewSet())
}

func BenchmarkSymmetricDifference1Unsafe(b *testing.B) {
	benchSymmetricDifference(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkSymmetricDifference10Safe(b *testing.B) {
	benchSymmetricDifference(b, 10, NewSet(), NewSet())
}

func BenchmarkSymmetricDifference10Unsafe(b *testing.B) {
	benchSymmetricDifference(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkSymmetricDifference100Safe(b *testing.B) {
	benchSymmetricDifference(b, 100, NewSet(), NewSet())
}

func BenchmarkSymmetricDifference100Unsafe(b *testing.B) {
	benchSymmetricDifference(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func benchUnion(b *testing.B, n int, s, t Set) {
	nums := nrand(n)
	for _, v := range nums[:n/2] {
		s.Add(v)
	}
	for _, v := range nums[n/2:] {
		t.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Union(t)
	}
}

func BenchmarkUnion1Safe(b *testing.B) {
	benchUnion(b, 1, NewSet(), NewSet())
}

func BenchmarkUnion1Unsafe(b *testing.B) {
	benchUnion(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkUnion10Safe(b *testing.B) {
	benchUnion(b, 10, NewSet(), NewSet())
}

func BenchmarkUnion10Unsafe(b *testing.B) {
	benchUnion(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func BenchmarkUnion100Safe(b *testing.B) {
	benchUnion(b, 100, NewSet(), NewSet())
}

func BenchmarkUnion100Unsafe(b *testing.B) {
	benchUnion(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
}

func benchEach(b *testing.B, n int, s Set) {
	nums := nrand(n)
	for _, v := range nums {
		s.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Each(func(elem interface{}) bool {
			return false
		})
	}
}

func BenchmarkEach1Safe(b *testing.B) {
	benchEach(b, 1, NewSet())
}

func BenchmarkEach1Unsafe(b *testing.B) {
	benchEach(b, 1, NewThreadUnsafeSet())
}

func BenchmarkEach10Safe(b *testing.B) {
	benchEach(b, 10, NewSet())
}

func BenchmarkEach10Unsafe(b *testing.B) {
	benchEach(b, 10, NewThreadUnsafeSet())
}

func BenchmarkEach100Safe(b *testing.B) {
	benchEach(b, 100, NewSet())
}

func BenchmarkEach100Unsafe(b *testing.B) {
	benchEach(b, 100, NewThreadUnsafeSet())
}

func benchIter(b *testing.B, n int, s Set) {
	nums := nrand(n)
	for _, v := range nums {
		s.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c := s.Iter()
		for range c {

		}
	}
}

func BenchmarkIter1Safe(b *testing.B) {
	benchIter(b, 1, NewSet())
}

func BenchmarkIter1Unsafe(b *testing.B) {
	benchIter(b, 1, NewThreadUnsafeSet())
}

func BenchmarkIter10Safe(b *testing.B) {
	benchIter(b, 10, NewSet())
}

func BenchmarkIter10Unsafe(b *testing.B) {
	benchIter(b, 10, NewThreadUnsafeSet())
}

func BenchmarkIter100Safe(b *testing.B) {
	benchIter(b, 100, NewSet())
}

func BenchmarkIter100Unsafe(b *testing.B) {
	benchIter(b, 100, NewThreadUnsafeSet())
}

func benchIterator(b *testing.B, n int, s Set) {
	nums := nrand(n)
	for _, v := range nums {
		s.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c := s.Iterator().C
		for range c {

		}
	}
}

func BenchmarkIterator1Safe(b *testing.B) {
	benchIterator(b, 1, NewSet())
}

func BenchmarkIterator1Unsafe(b *testing.B) {
	benchIterator(b, 1, NewThreadUnsafeSet())
}

func BenchmarkIterator10Safe(b *testing.B) {
	benchIterator(b, 10, NewSet())
}

func BenchmarkIterator10Unsafe(b *testing.B) {
	benchIterator(b, 10, NewThreadUnsafeSet())
}

func BenchmarkIterator100Safe(b *testing.B) {
	benchIterator(b, 100, NewSet())
}

func BenchmarkIterator100Unsafe(b *testing.B) {
	benchIterator(b, 100, NewThreadUnsafeSet())
}

func benchString(b *testing.B, n int, s Set) {
	nums := nrand(n)
	for _, v := range nums {
		s.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = s.String()
	}
}

func BenchmarkString1Safe(b *testing.B) {
	benchString(b, 1, NewSet())
}

func BenchmarkString1Unsafe(b *testing.B) {
	benchString(b, 1, NewThreadUnsafeSet())
}

func BenchmarkString10Safe(b *testing.B) {
	benchString(b, 10, NewSet())
}

func BenchmarkString10Unsafe(b *testing.B) {
	benchString(b, 10, NewThreadUnsafeSet())
}

func BenchmarkString100Safe(b *testing.B) {
	benchString(b, 100, NewSet())
}

func BenchmarkString100Unsafe(b *testing.B) {
	benchString(b, 100, NewThreadUnsafeSet())
}

func benchToSlice(b *testing.B, s Set) {
	nums := nrand(b.N)
	for _, v := range nums {
		s.Add(v)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.ToSlice()
	}
}

func BenchmarkToSliceSafe(b *testing.B) {
	benchToSlice(b, NewSet())
}

func BenchmarkToSliceUnsafe(b *testing.B) {
	benchToSlice(b, NewThreadUnsafeSet())
}
58 vendor/github.com/deckarep/golang-set/iterator.go generated vendored Normal file
@ -0,0 +1,58 @@
/*
Open Source Initiative OSI - The MIT License (MIT):Licensing

The MIT License (MIT)
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

package mapset

// Iterator defines an iterator over a Set, its C channel can be used to range over the Set's
// elements.
type Iterator struct {
	C    <-chan interface{}
	stop chan struct{}
}

// Stop stops the Iterator, no further elements will be received on C, C will be closed.
func (i *Iterator) Stop() {
	// Allows for Stop() to be called multiple times
	// (close() panics when called on already closed channel)
	defer func() {
		recover()
	}()

	close(i.stop)

	// Exhaust any remaining elements.
	for range i.C {
	}
}

// newIterator returns a new Iterator instance together with its item and stop channels.
func newIterator() (*Iterator, chan<- interface{}, <-chan struct{}) {
	itemChan := make(chan interface{})
	stopChan := make(chan struct{})
	return &Iterator{
		C:    itemChan,
		stop: stopChan,
	}, itemChan, stopChan
}
32 vendor/github.com/deckarep/golang-set/iterator_example_test.go generated vendored Normal file
@ -0,0 +1,32 @@
package mapset

import (
	"fmt"
)

type YourType struct {
	Name string
}

func ExampleIterator() {
	set := NewSetFromSlice([]interface{}{
		&YourType{Name: "Alise"},
		&YourType{Name: "Bob"},
		&YourType{Name: "John"},
		&YourType{Name: "Nick"},
	})

	var found *YourType
	it := set.Iterator()

	for elem := range it.C {
		if elem.(*YourType).Name == "John" {
			found = elem.(*YourType)
			it.Stop()
		}
	}

	fmt.Printf("Found %+v\n", found)

	// Output: Found &{Name:John}
}
217 vendor/github.com/deckarep/golang-set/set.go generated vendored Normal file
@ -0,0 +1,217 @@
/*
Open Source Initiative OSI - The MIT License (MIT):Licensing

The MIT License (MIT)
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

// Package mapset implements a simple and generic set collection.
// Items stored within it are unordered and unique. It supports
// typical set operations: membership testing, intersection, union,
// difference, symmetric difference and cloning.
//
// Package mapset provides two implementations of the Set
// interface. The default implementation is safe for concurrent
// access, but a non-thread-safe implementation is also provided for
// programs that can benefit from the slight speed improvement and
// that can enforce mutual exclusion through other means.
package mapset

// Set is the primary interface provided by the mapset package. It
// represents an unordered set of data and a large number of
// operations that can be applied to that set.
type Set interface {
	// Adds an element to the set. Returns whether
	// the item was added.
	Add(i interface{}) bool

	// Returns the number of elements in the set.
	Cardinality() int

	// Removes all elements from the set, leaving
	// the empty set.
	Clear()

	// Returns a clone of the set using the same
	// implementation, duplicating all keys.
	Clone() Set

	// Returns whether the given items
	// are all in the set.
	Contains(i ...interface{}) bool

	// Returns the difference between this set
	// and other. The returned set will contain
	// all elements of this set that are not also
	// elements of other.
	//
	// Note that the argument to Difference
	// must be of the same type as the receiver
	// of the method. Otherwise, Difference will
	// panic.
	Difference(other Set) Set

	// Determines if two sets are equal to each
	// other. If they have the same cardinality
	// and contain the same elements, they are
	// considered equal. The order in which
	// the elements were added is irrelevant.
	//
	// Note that the argument to Equal must be
	// of the same type as the receiver of the
	// method. Otherwise, Equal will panic.
	Equal(other Set) bool

	// Returns a new set containing only the elements
	// that exist only in both sets.
	//
	// Note that the argument to Intersect
	// must be of the same type as the receiver
	// of the method. Otherwise, Intersect will
	// panic.
	Intersect(other Set) Set

	// Determines if every element in this set is in
	// the other set but the two sets are not equal.
	//
	// Note that the argument to IsProperSubset
	// must be of the same type as the receiver
	// of the method. Otherwise, IsProperSubset
	// will panic.
	IsProperSubset(other Set) bool

	// Determines if every element in the other set
	// is in this set but the two sets are not
	// equal.
	//
	// Note that the argument to IsSuperset
	// must be of the same type as the receiver
	// of the method. Otherwise, IsSuperset will
	// panic.
	IsProperSuperset(other Set) bool

	// Determines if every element in this set is in
	// the other set.
	//
	// Note that the argument to IsSubset
	// must be of the same type as the receiver
	// of the method. Otherwise, IsSubset will
	// panic.
	IsSubset(other Set) bool

	// Determines if every element in the other set
	// is in this set.
	//
	// Note that the argument to IsSuperset
	// must be of the same type as the receiver
	// of the method. Otherwise, IsSuperset will
	// panic.
	IsSuperset(other Set) bool

	// Iterates over elements and executes the passed func against each element.
	// If passed func returns true, stop iteration at the time.
	Each(func(interface{}) bool)

	// Returns a channel of elements that you can
	// range over.
	Iter() <-chan interface{}

	// Returns an Iterator object that you can
	// use to range over the set.
	Iterator() *Iterator

	// Remove a single element from the set.
	Remove(i interface{})

	// Provides a convenient string representation
	// of the current state of the set.
	String() string

	// Returns a new set with all elements which are
	// in either this set or the other set but not in both.
	//
	// Note that the argument to SymmetricDifference
	// must be of the same type as the receiver
	// of the method. Otherwise, SymmetricDifference
	// will panic.
	SymmetricDifference(other Set) Set

	// Returns a new set with all elements in both sets.
	//
	// Note that the argument to Union must be of the
	// same type as the receiver of the method.
	// Otherwise, IsSuperset will panic.
	Union(other Set) Set

	// Pop removes and returns an arbitrary item from the set.
	Pop() interface{}

	// Returns all subsets of a given set (Power Set).
	PowerSet() Set

	// Returns the Cartesian Product of two sets.
	CartesianProduct(other Set) Set

	// Returns the members of the set as a slice.
	ToSlice() []interface{}
}

// NewSet creates and returns a reference to an empty set. Operations
// on the resulting set are thread-safe.
func NewSet(s ...interface{}) Set {
	set := newThreadSafeSet()
	for _, item := range s {
		set.Add(item)
	}
	return &set
}

// NewSetWith creates and returns a new set with the given elements.
// Operations on the resulting set are thread-safe.
func NewSetWith(elts ...interface{}) Set {
	return NewSetFromSlice(elts)
}

// NewSetFromSlice creates and returns a reference to a set from an
// existing slice. Operations on the resulting set are thread-safe.
func NewSetFromSlice(s []interface{}) Set {
	a := NewSet(s...)
	return a
}

// NewThreadUnsafeSet creates and returns a reference to an empty set.
// Operations on the resulting set are not thread-safe.
func NewThreadUnsafeSet() Set {
	set := newThreadUnsafeSet()
	return &set
}

// NewThreadUnsafeSetFromSlice creates and returns a reference to a
// set from an existing slice. Operations on the resulting set are
// not thread-safe.
func NewThreadUnsafeSetFromSlice(s []interface{}) Set {
	a := NewThreadUnsafeSet()
	for _, item := range s {
		a.Add(item)
	}
	return a
}
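One caveat worth calling out from the interface above: the binary operations (Union, Intersect, Equal, and friends) type-assert their argument to the receiver's concrete type, so both operands must come from the same constructor family, as the doc comments warn. A minimal sketch, illustrative only and assuming the vendored package is imported as mapset:

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	// Thread-safe sets: every operation takes a sync.RWMutex internally.
	a := mapset.NewSetFromSlice([]interface{}{1, 2})
	b := mapset.NewSetFromSlice([]interface{}{1, 2, 3})
	fmt.Println(a.IsSubset(b)) // true

	// Thread-unsafe sets: no locking, single-goroutine use only.
	c := mapset.NewThreadUnsafeSet()
	c.Add(1)
	fmt.Println(c.Contains(1)) // true

	// Do not mix families: b.Union(c) would panic, because Union
	// asserts its argument to the receiver's concrete type.
}
```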
1200 vendor/github.com/deckarep/golang-set/set_test.go generated vendored Normal file
File diff suppressed because it is too large
283 vendor/github.com/deckarep/golang-set/threadsafe.go generated vendored Normal file
@ -0,0 +1,283 @@
/*
Open Source Initiative OSI - The MIT License (MIT):Licensing

The MIT License (MIT)
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

package mapset

import "sync"

type threadSafeSet struct {
	s threadUnsafeSet
	sync.RWMutex
}

func newThreadSafeSet() threadSafeSet {
	return threadSafeSet{s: newThreadUnsafeSet()}
}

func (set *threadSafeSet) Add(i interface{}) bool {
	set.Lock()
	ret := set.s.Add(i)
	set.Unlock()
	return ret
}

func (set *threadSafeSet) Contains(i ...interface{}) bool {
	set.RLock()
	ret := set.s.Contains(i...)
	set.RUnlock()
	return ret
}

func (set *threadSafeSet) IsSubset(other Set) bool {
	o := other.(*threadSafeSet)

	set.RLock()
	o.RLock()

	ret := set.s.IsSubset(&o.s)
	set.RUnlock()
	o.RUnlock()
	return ret
}

func (set *threadSafeSet) IsProperSubset(other Set) bool {
	o := other.(*threadSafeSet)

	set.RLock()
	defer set.RUnlock()
	o.RLock()
	defer o.RUnlock()

	return set.s.IsProperSubset(&o.s)
}

func (set *threadSafeSet) IsSuperset(other Set) bool {
	return other.IsSubset(set)
}

func (set *threadSafeSet) IsProperSuperset(other Set) bool {
	return other.IsProperSubset(set)
}

func (set *threadSafeSet) Union(other Set) Set {
	o := other.(*threadSafeSet)

	set.RLock()
	o.RLock()

	unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet)
	ret := &threadSafeSet{s: *unsafeUnion}
	set.RUnlock()
	o.RUnlock()
	return ret
}

func (set *threadSafeSet) Intersect(other Set) Set {
	o := other.(*threadSafeSet)

	set.RLock()
	o.RLock()

	unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet)
	ret := &threadSafeSet{s: *unsafeIntersection}
	set.RUnlock()
	o.RUnlock()
	return ret
}

func (set *threadSafeSet) Difference(other Set) Set {
	o := other.(*threadSafeSet)

	set.RLock()
	o.RLock()

	unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet)
	ret := &threadSafeSet{s: *unsafeDifference}
	set.RUnlock()
	o.RUnlock()
	return ret
}

func (set *threadSafeSet) SymmetricDifference(other Set) Set {
	o := other.(*threadSafeSet)

	set.RLock()
	o.RLock()

	unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet)
	ret := &threadSafeSet{s: *unsafeDifference}
	set.RUnlock()
	o.RUnlock()
	return ret
}

func (set *threadSafeSet) Clear() {
	set.Lock()
	set.s = newThreadUnsafeSet()
	set.Unlock()
}

func (set *threadSafeSet) Remove(i interface{}) {
	set.Lock()
	delete(set.s, i)
	set.Unlock()
}

func (set *threadSafeSet) Cardinality() int {
	set.RLock()
	defer set.RUnlock()
	return len(set.s)
}

func (set *threadSafeSet) Each(cb func(interface{}) bool) {
	set.RLock()
	for elem := range set.s {
		if cb(elem) {
			break
		}
	}
	set.RUnlock()
}

func (set *threadSafeSet) Iter() <-chan interface{} {
	ch := make(chan interface{})
	go func() {
		set.RLock()

		for elem := range set.s {
			ch <- elem
		}
		close(ch)
		set.RUnlock()
	}()

	return ch
}

func (set *threadSafeSet) Iterator() *Iterator {
	iterator, ch, stopCh := newIterator()

	go func() {
		set.RLock()
	L:
		for elem := range set.s {
			select {
			case <-stopCh:
				break L
			case ch <- elem:
			}
		}
		close(ch)
		set.RUnlock()
	}()

	return iterator
}

func (set *threadSafeSet) Equal(other Set) bool {
	o := other.(*threadSafeSet)

	set.RLock()
	o.RLock()

	ret := set.s.Equal(&o.s)
	set.RUnlock()
	o.RUnlock()
	return ret
}

func (set *threadSafeSet) Clone() Set {
	set.RLock()

	unsafeClone := set.s.Clone().(*threadUnsafeSet)
	ret := &threadSafeSet{s: *unsafeClone}
	set.RUnlock()
	return ret
}

func (set *threadSafeSet) String() string {
	set.RLock()
	ret := set.s.String()
	set.RUnlock()
	return ret
}

func (set *threadSafeSet) PowerSet() Set {
	set.RLock()
	unsafePowerSet := set.s.PowerSet().(*threadUnsafeSet)
	set.RUnlock()

	ret := &threadSafeSet{s: newThreadUnsafeSet()}
	for subset := range unsafePowerSet.Iter() {
		unsafeSubset := subset.(*threadUnsafeSet)
		ret.Add(&threadSafeSet{s: *unsafeSubset})
	}
	return ret
}

func (set *threadSafeSet) Pop() interface{} {
	set.Lock()
	defer set.Unlock()
	return set.s.Pop()
}

func (set *threadSafeSet) CartesianProduct(other Set) Set {
	o := other.(*threadSafeSet)

	set.RLock()
	o.RLock()

	unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet)
	ret := &threadSafeSet{s: *unsafeCartProduct}
	set.RUnlock()
	o.RUnlock()
	return ret
}

func (set *threadSafeSet) ToSlice() []interface{} {
	keys := make([]interface{}, 0, set.Cardinality())
	set.RLock()
	for elem := range set.s {
		keys = append(keys, elem)
	}
	set.RUnlock()
	return keys
}

func (set *threadSafeSet) MarshalJSON() ([]byte, error) {
	set.RLock()
	b, err := set.s.MarshalJSON()
	set.RUnlock()

	return b, err
}

func (set *threadSafeSet) UnmarshalJSON(p []byte) error {
	set.RLock()
	err := set.s.UnmarshalJSON(p)
	set.RUnlock()

	return err
}
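Note the difference between Iter() and Iterator() above: Iter() unconditionally sends every element, while Iterator()'s producer goroutine selects between sending and the stop channel, so a consumer that bails out early does not strand the producer on a blocked send. A sketch of that early-exit pattern, illustrative only and assuming the mapset import alias:

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	s := mapset.NewSetFromSlice([]interface{}{1, 2, 3, 4, 5})

	it := s.Iterator()
	for elem := range it.C {
		if elem.(int)%2 == 0 {
			// Map iteration order is random; this is just the first even seen.
			fmt.Println("even:", elem)
			// Stop closes the stop channel and drains C, so the producing
			// goroutine exits instead of blocking forever.
			it.Stop()
		}
	}
}
```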
524 vendor/github.com/deckarep/golang-set/threadsafe_test.go generated vendored Normal file
@ -0,0 +1,524 @@
/*
Open Source Initiative OSI - The MIT License (MIT):Licensing

The MIT License (MIT)
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

package mapset

import (
	"encoding/json"
	"math/rand"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
)

const N = 1000

func Test_AddConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s := NewSet()
	ints := rand.Perm(N)

	var wg sync.WaitGroup
	wg.Add(len(ints))
	for i := 0; i < len(ints); i++ {
		go func(i int) {
			s.Add(i)
			wg.Done()
		}(i)
	}

	wg.Wait()
	for _, i := range ints {
		if !s.Contains(i) {
			t.Errorf("Set is missing element: %v", i)
		}
	}
}

func Test_CardinalityConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s := NewSet()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		elems := s.Cardinality()
		for i := 0; i < N; i++ {
			newElems := s.Cardinality()
			if newElems < elems {
				t.Errorf("Cardinality shrunk from %v to %v", elems, newElems)
			}
		}
		wg.Done()
	}()

	for i := 0; i < N; i++ {
		s.Add(rand.Int())
	}
	wg.Wait()
}

func Test_ClearConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s := NewSet()
	ints := rand.Perm(N)

	var wg sync.WaitGroup
	wg.Add(len(ints))
	for i := 0; i < len(ints); i++ {
		go func() {
			s.Clear()
			wg.Done()
		}()
		go func(i int) {
			s.Add(i)
		}(i)
	}

	wg.Wait()
}

func Test_CloneConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s := NewSet()
	ints := rand.Perm(N)

	for _, v := range ints {
		s.Add(v)
	}

	var wg sync.WaitGroup
	wg.Add(len(ints))
	for i := range ints {
		go func(i int) {
			s.Remove(i)
			wg.Done()
		}(i)
	}

	s.Clone()
}

func Test_ContainsConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s := NewSet()
	ints := rand.Perm(N)
	interfaces := make([]interface{}, 0)
	for _, v := range ints {
		s.Add(v)
		interfaces = append(interfaces, v)
	}

	var wg sync.WaitGroup
	for range ints {
		wg.Add(1)
		go func() {
			s.Contains(interfaces...)
			wg.Done()
		}()
	}
	wg.Wait()
}

func Test_DifferenceConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s, ss := NewSet(), NewSet()
	ints := rand.Perm(N)
	for _, v := range ints {
		s.Add(v)
		ss.Add(v)
	}

	var wg sync.WaitGroup
	for range ints {
		wg.Add(1)
		go func() {
			s.Difference(ss)
			wg.Done()
		}()
	}
	wg.Wait()
}

func Test_EqualConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s, ss := NewSet(), NewSet()
	ints := rand.Perm(N)
	for _, v := range ints {
		s.Add(v)
		ss.Add(v)
	}

	var wg sync.WaitGroup
	for range ints {
		wg.Add(1)
		go func() {
			s.Equal(ss)
			wg.Done()
		}()
	}
	wg.Wait()
}

func Test_IntersectConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s, ss := NewSet(), NewSet()
	ints := rand.Perm(N)
	for _, v := range ints {
		s.Add(v)
		ss.Add(v)
	}

	var wg sync.WaitGroup
	for range ints {
		wg.Add(1)
		go func() {
			s.Intersect(ss)
			wg.Done()
		}()
	}
	wg.Wait()
}

func Test_IsSubsetConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s, ss := NewSet(), NewSet()
	ints := rand.Perm(N)
	for _, v := range ints {
		s.Add(v)
		ss.Add(v)
	}

	var wg sync.WaitGroup
	for range ints {
		wg.Add(1)
		go func() {
			s.IsSubset(ss)
			wg.Done()
		}()
	}
	wg.Wait()
}

func Test_IsProperSubsetConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s, ss := NewSet(), NewSet()
	ints := rand.Perm(N)
	for _, v := range ints {
		s.Add(v)
		ss.Add(v)
	}

	var wg sync.WaitGroup
	for range ints {
		wg.Add(1)
		go func() {
			s.IsProperSubset(ss)
			wg.Done()
		}()
	}
	wg.Wait()
}

func Test_IsSupersetConcurrent(t *testing.T) {
	runtime.GOMAXPROCS(2)

	s, ss := NewSet(), NewSet()
	ints := rand.Perm(N)
	for _, v := range ints {
		s.Add(v)
		ss.Add(v)
	}

	var wg sync.WaitGroup
	for range ints {
		wg.Add(1)
		go func() {
			s.IsSuperset(ss)
			wg.Done()
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_IsProperSupersetConcurrent(t *testing.T) {
|
||||||
|
runtime.GOMAXPROCS(2)
|
||||||
|
|
||||||
|
s, ss := NewSet(), NewSet()
|
||||||
|
ints := rand.Perm(N)
|
||||||
|
for _, v := range ints {
|
||||||
|
s.Add(v)
|
||||||
|
ss.Add(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for range ints {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
s.IsProperSuperset(ss)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_EachConcurrent(t *testing.T) {
|
||||||
|
runtime.GOMAXPROCS(2)
|
||||||
|
concurrent := 10
|
||||||
|
|
||||||
|
s := NewSet()
|
||||||
|
ints := rand.Perm(N)
|
||||||
|
for _, v := range ints {
|
||||||
|
s.Add(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
var count int64
|
||||||
|
wg := new(sync.WaitGroup)
|
||||||
|
wg.Add(concurrent)
|
||||||
|
for n := 0; n < concurrent; n++ {
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
s.Each(func(elem interface{}) bool {
|
||||||
|
atomic.AddInt64(&count, 1)
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if count != int64(N*concurrent) {
|
||||||
|
t.Errorf("%v != %v", count, int64(N*concurrent))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_IterConcurrent(t *testing.T) {
|
||||||
|
runtime.GOMAXPROCS(2)
|
||||||
|
|
||||||
|
s := NewSet()
|
||||||
|
ints := rand.Perm(N)
|
||||||
|
for _, v := range ints {
|
||||||
|
s.Add(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
cs := make([]<-chan interface{}, 0)
|
||||||
|
for range ints {
|
||||||
|
cs = append(cs, s.Iter())
|
||||||
|
}
|
||||||
|
|
||||||
|
c := make(chan interface{})
|
||||||
|
go func() {
|
||||||
|
for n := 0; n < len(ints)*N; {
|
||||||
|
for _, d := range cs {
|
||||||
|
select {
|
||||||
|
case <-d:
|
||||||
|
n++
|
||||||
|
c <- nil
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
close(c)
|
||||||
|
}()
|
||||||
|
|
||||||
|
for range c {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_RemoveConcurrent(t *testing.T) {
|
||||||
|
runtime.GOMAXPROCS(2)
|
||||||
|
|
||||||
|
s := NewSet()
|
||||||
|
ints := rand.Perm(N)
|
||||||
|
for _, v := range ints {
|
||||||
|
s.Add(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(ints))
|
||||||
|
for _, v := range ints {
|
||||||
|
go func(i int) {
|
||||||
|
s.Remove(i)
|
||||||
|
wg.Done()
|
||||||
|
}(v)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if s.Cardinality() != 0 {
|
||||||
|
t.Errorf("Expected cardinality 0; got %v", s.Cardinality())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_StringConcurrent(t *testing.T) {
|
||||||
|
runtime.GOMAXPROCS(2)
|
||||||
|
|
||||||
|
s := NewSet()
|
||||||
|
ints := rand.Perm(N)
|
||||||
|
for _, v := range ints {
|
||||||
|
s.Add(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(ints))
|
||||||
|
for range ints {
|
||||||
|
go func() {
|
||||||
|
_ = s.String()
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_SymmetricDifferenceConcurrent(t *testing.T) {
|
||||||
|
runtime.GOMAXPROCS(2)
|
||||||
|
|
||||||
|
s, ss := NewSet(), NewSet()
|
||||||
|
ints := rand.Perm(N)
|
||||||
|
for _, v := range ints {
|
||||||
|
s.Add(v)
|
||||||
|
ss.Add(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for range ints {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
s.SymmetricDifference(ss)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_ToSlice(t *testing.T) {
|
||||||
|
runtime.GOMAXPROCS(2)
|
||||||
|
|
||||||
|
s := NewSet()
|
||||||
|
ints := rand.Perm(N)
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(ints))
|
||||||
|
for i := 0; i < len(ints); i++ {
|
||||||
|
go func(i int) {
|
||||||
|
s.Add(i)
|
||||||
|
wg.Done()
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
setAsSlice := s.ToSlice()
|
||||||
|
if len(setAsSlice) != s.Cardinality() {
|
||||||
|
t.Errorf("Set length is incorrect: %v", len(setAsSlice))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, i := range setAsSlice {
|
||||||
|
if !s.Contains(i) {
|
||||||
|
t.Errorf("Set is missing element: %v", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test_ToSliceDeadlock - fixes issue: https://github.com/deckarep/golang-set/issues/36
|
||||||
|
// This code reveals the deadlock however it doesn't happen consistently.
|
||||||
|
func Test_ToSliceDeadlock(t *testing.T) {
|
||||||
|
runtime.GOMAXPROCS(2)
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
set := NewSet()
|
||||||
|
workers := 10
|
||||||
|
wg.Add(workers)
|
||||||
|
for i := 1; i <= workers; i++ {
|
||||||
|
go func() {
|
||||||
|
for j := 0; j < 1000; j++ {
|
||||||
|
set.Add(1)
|
||||||
|
set.ToSlice()
|
||||||
|
}
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_UnmarshalJSON(t *testing.T) {
|
||||||
|
s := []byte(`["test", 1, 2, 3, ["4,5,6"]]`)
|
||||||
|
expected := NewSetFromSlice(
|
||||||
|
[]interface{}{
|
||||||
|
json.Number("1"),
|
||||||
|
json.Number("2"),
|
||||||
|
json.Number("3"),
|
||||||
|
"test",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
actual := NewSet()
|
||||||
|
err := json.Unmarshal(s, actual)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error should be nil: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !expected.Equal(actual) {
|
||||||
|
t.Errorf("Expected no difference, got: %v", expected.Difference(actual))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_MarshalJSON(t *testing.T) {
|
||||||
|
expected := NewSetFromSlice(
|
||||||
|
[]interface{}{
|
||||||
|
json.Number("1"),
|
||||||
|
"test",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
b, err := json.Marshal(
|
||||||
|
NewSetFromSlice(
|
||||||
|
[]interface{}{
|
||||||
|
1,
|
||||||
|
"test",
|
||||||
|
},
|
||||||
|
),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error should be nil: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
actual := NewSet()
|
||||||
|
err = json.Unmarshal(b, actual)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error should be nil: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !expected.Equal(actual) {
|
||||||
|
t.Errorf("Expected no difference, got: %v", expected.Difference(actual))
|
||||||
|
}
|
||||||
|
}
|
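The tests above share one recipe: hammer a single Set from many goroutines behind a sync.WaitGroup, and let the race detector (go test -race) catch unsynchronized access rather than asserting on any particular ordering. A minimal standalone version of that pattern is sketched below; mutexSet is a hypothetical stand-in for the package's mutex-guarded wrapper, not part of the vendored code.

package example

import (
    "sync"
    "testing"
)

// mutexSet is a minimal stand-in for a thread-safe set: a map guarded by a
// RWMutex, mirroring the design the vendored package wraps around its
// thread-unsafe map type.
type mutexSet struct {
    sync.RWMutex
    m map[int]struct{}
}

func (s *mutexSet) Add(v int) {
    s.Lock()
    s.m[v] = struct{}{}
    s.Unlock()
}

func (s *mutexSet) Contains(v int) bool {
    s.RLock()
    _, ok := s.m[v]
    s.RUnlock()
    return ok
}

// TestMutexSetConcurrent exercises Add from many goroutines; running it with
// `go test -race` flags any unsynchronized map access.
func TestMutexSetConcurrent(t *testing.T) {
    s := &mutexSet{m: map[int]struct{}{}}
    var wg sync.WaitGroup
    for i := 0; i < 1000; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            s.Add(i)
        }(i)
    }
    wg.Wait()
    for i := 0; i < 1000; i++ {
        if !s.Contains(i) {
            t.Errorf("missing element: %v", i)
        }
    }
}

Without the mutex, the race detector fails this test immediately, which is exactly the failure mode the vendored suite is guarding against.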
337 vendor/github.com/deckarep/golang-set/threadunsafe.go generated vendored Normal file
@ -0,0 +1,337 @@
/*
Open Source Initiative OSI - The MIT License (MIT):Licensing

The MIT License (MIT)
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

package mapset

import (
    "bytes"
    "encoding/json"
    "fmt"
    "reflect"
    "strings"
)

type threadUnsafeSet map[interface{}]struct{}

// An OrderedPair represents a 2-tuple of values.
type OrderedPair struct {
    First  interface{}
    Second interface{}
}

func newThreadUnsafeSet() threadUnsafeSet {
    return make(threadUnsafeSet)
}

// Equal says whether two 2-tuples contain the same values in the same order.
func (pair *OrderedPair) Equal(other OrderedPair) bool {
    if pair.First == other.First &&
        pair.Second == other.Second {
        return true
    }

    return false
}

func (set *threadUnsafeSet) Add(i interface{}) bool {
    _, found := (*set)[i]
    if found {
        return false // False if it existed already
    }

    (*set)[i] = struct{}{}
    return true
}

func (set *threadUnsafeSet) Contains(i ...interface{}) bool {
    for _, val := range i {
        if _, ok := (*set)[val]; !ok {
            return false
        }
    }
    return true
}
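threadUnsafeSet is the classic Go set idiom: a map keyed by the elements, with zero-byte struct{} values, so Add can report whether an element was new simply by probing the map first. A tiny self-contained illustration of the same idiom (nothing here depends on the vendored package):

package main

import "fmt"

func main() {
    // The empty struct{} occupies zero bytes, so a map used this way stores
    // only its keys -- the representation threadUnsafeSet is built on.
    set := make(map[interface{}]struct{})

    add := func(i interface{}) bool {
        if _, found := set[i]; found {
            return false // already present, mirroring Add's contract
        }
        set[i] = struct{}{}
        return true
    }

    fmt.Println(add("a")) // true
    fmt.Println(add("a")) // false: the second insert reports the duplicate
}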
func (set *threadUnsafeSet) IsSubset(other Set) bool {
    _ = other.(*threadUnsafeSet)
    for elem := range *set {
        if !other.Contains(elem) {
            return false
        }
    }
    return true
}

func (set *threadUnsafeSet) IsProperSubset(other Set) bool {
    return set.IsSubset(other) && !set.Equal(other)
}

func (set *threadUnsafeSet) IsSuperset(other Set) bool {
    return other.IsSubset(set)
}

func (set *threadUnsafeSet) IsProperSuperset(other Set) bool {
    return set.IsSuperset(other) && !set.Equal(other)
}

func (set *threadUnsafeSet) Union(other Set) Set {
    o := other.(*threadUnsafeSet)

    unionedSet := newThreadUnsafeSet()

    for elem := range *set {
        unionedSet.Add(elem)
    }
    for elem := range *o {
        unionedSet.Add(elem)
    }
    return &unionedSet
}

func (set *threadUnsafeSet) Intersect(other Set) Set {
    o := other.(*threadUnsafeSet)

    intersection := newThreadUnsafeSet()
    // loop over smaller set
    if set.Cardinality() < other.Cardinality() {
        for elem := range *set {
            if other.Contains(elem) {
                intersection.Add(elem)
            }
        }
    } else {
        for elem := range *o {
            if set.Contains(elem) {
                intersection.Add(elem)
            }
        }
    }
    return &intersection
}

func (set *threadUnsafeSet) Difference(other Set) Set {
    _ = other.(*threadUnsafeSet)

    difference := newThreadUnsafeSet()
    for elem := range *set {
        if !other.Contains(elem) {
            difference.Add(elem)
        }
    }
    return &difference
}

func (set *threadUnsafeSet) SymmetricDifference(other Set) Set {
    _ = other.(*threadUnsafeSet)

    aDiff := set.Difference(other)
    bDiff := other.Difference(set)
    return aDiff.Union(bDiff)
}
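SymmetricDifference above is literally the textbook identity A △ B = (A − B) ∪ (B − A), composed from the Difference and Union methods. A quick sanity check through the package's exported API (NewSet is the thread-safe constructor used by the tests earlier in this diff):

package main

import (
    "fmt"

    "github.com/deckarep/golang-set"
)

func main() {
    a, b := mapset.NewSet(), mapset.NewSet()
    for _, v := range []int{1, 2, 3} {
        a.Add(v)
    }
    for _, v := range []int{3, 4} {
        b.Add(v)
    }

    // SymmetricDifference is defined as (A - B) union (B - A).
    lhs := a.SymmetricDifference(b)
    rhs := a.Difference(b).Union(b.Difference(a))
    fmt.Println(lhs.Equal(rhs)) // true: {1, 2, 4} either way
}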
func (set *threadUnsafeSet) Clear() {
    *set = newThreadUnsafeSet()
}

func (set *threadUnsafeSet) Remove(i interface{}) {
    delete(*set, i)
}

func (set *threadUnsafeSet) Cardinality() int {
    return len(*set)
}

func (set *threadUnsafeSet) Each(cb func(interface{}) bool) {
    for elem := range *set {
        if cb(elem) {
            break
        }
    }
}

func (set *threadUnsafeSet) Iter() <-chan interface{} {
    ch := make(chan interface{})
    go func() {
        for elem := range *set {
            ch <- elem
        }
        close(ch)
    }()

    return ch
}

func (set *threadUnsafeSet) Iterator() *Iterator {
    iterator, ch, stopCh := newIterator()

    go func() {
    L:
        for elem := range *set {
            select {
            case <-stopCh:
                break L
            case ch <- elem:
            }
        }
        close(ch)
    }()

    return iterator
}
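Iter's producer goroutine exits only after pushing every element and closing the channel, so a consumer that abandons the loop early leaks a goroutine blocked on the send; Iterator adds a stop channel so the producer's select can bail out instead. A usage sketch, assuming the Iterator type's exported C channel and Stop method defined in the same vendored package (its iterator.go is not shown in this excerpt):

package main

import (
    "fmt"

    "github.com/deckarep/golang-set"
)

func main() {
    s := mapset.NewSet()
    for _, v := range []string{"x", "y", "z"} {
        s.Add(v)
    }

    // Iter must be drained fully: its producer goroutine only exits after
    // sending every element and closing the channel.
    for elem := range s.Iter() {
        fmt.Println(elem)
    }

    // Iterator supports early exit: Stop closes the stop channel, letting
    // the producer's select bail out instead of blocking forever.
    it := s.Iterator()
    for elem := range it.C {
        if elem == "y" {
            it.Stop()
        }
    }
}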
func (set *threadUnsafeSet) Equal(other Set) bool {
    _ = other.(*threadUnsafeSet)

    if set.Cardinality() != other.Cardinality() {
        return false
    }
    for elem := range *set {
        if !other.Contains(elem) {
            return false
        }
    }
    return true
}

func (set *threadUnsafeSet) Clone() Set {
    clonedSet := newThreadUnsafeSet()
    for elem := range *set {
        clonedSet.Add(elem)
    }
    return &clonedSet
}

func (set *threadUnsafeSet) String() string {
    items := make([]string, 0, len(*set))

    for elem := range *set {
        items = append(items, fmt.Sprintf("%v", elem))
    }
    return fmt.Sprintf("Set{%s}", strings.Join(items, ", "))
}

// String outputs a 2-tuple in the form "(A, B)".
func (pair OrderedPair) String() string {
    return fmt.Sprintf("(%v, %v)", pair.First, pair.Second)
}

func (set *threadUnsafeSet) Pop() interface{} {
    for item := range *set {
        delete(*set, item)
        return item
    }
    return nil
}
func (set *threadUnsafeSet) PowerSet() Set {
    powSet := NewThreadUnsafeSet()
    nullset := newThreadUnsafeSet()
    powSet.Add(&nullset)

    for es := range *set {
        u := newThreadUnsafeSet()
        j := powSet.Iter()
        for er := range j {
            p := newThreadUnsafeSet()
            if reflect.TypeOf(er).Name() == "" {
                k := er.(*threadUnsafeSet)
                for ek := range *(k) {
                    p.Add(ek)
                }
            } else {
                p.Add(er)
            }
            p.Add(es)
            u.Add(&p)
        }

        powSet = powSet.Union(&u)
    }

    return powSet
}

func (set *threadUnsafeSet) CartesianProduct(other Set) Set {
    o := other.(*threadUnsafeSet)
    cartProduct := NewThreadUnsafeSet()

    for i := range *set {
        for j := range *o {
            elem := OrderedPair{First: i, Second: j}
            cartProduct.Add(elem)
        }
    }

    return cartProduct
}
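PowerSet grows the result one element at a time (each round unions in a copy of every existing subset extended by the new element), and CartesianProduct emits one OrderedPair per combination, so for finite sets the cardinalities should come out to 2^n and |A|·|B|. A quick check via the exported API:

package main

import (
    "fmt"

    "github.com/deckarep/golang-set"
)

func main() {
    a := mapset.NewSetFromSlice([]interface{}{1, 2, 3})
    b := mapset.NewSetFromSlice([]interface{}{"x", "y"})

    fmt.Println(a.PowerSet().Cardinality())          // 8 = 2^3
    fmt.Println(a.CartesianProduct(b).Cardinality()) // 6 = 3*2
}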
func (set *threadUnsafeSet) ToSlice() []interface{} {
    keys := make([]interface{}, 0, set.Cardinality())
    for elem := range *set {
        keys = append(keys, elem)
    }

    return keys
}

// MarshalJSON creates a JSON array from the set, it marshals all elements
func (set *threadUnsafeSet) MarshalJSON() ([]byte, error) {
    items := make([]string, 0, set.Cardinality())

    for elem := range *set {
        b, err := json.Marshal(elem)
        if err != nil {
            return nil, err
        }

        items = append(items, string(b))
    }

    return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil
}

// UnmarshalJSON recreates a set from a JSON array, it only decodes
// primitive types. Numbers are decoded as json.Number.
func (set *threadUnsafeSet) UnmarshalJSON(b []byte) error {
    var i []interface{}

    d := json.NewDecoder(bytes.NewReader(b))
    d.UseNumber()
    err := d.Decode(&i)
    if err != nil {
        return err
    }

    for _, v := range i {
        switch t := v.(type) {
        case []interface{}, map[string]interface{}:
            continue
        default:
            set.Add(t)
        }
    }

    return nil
}
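UnmarshalJSON deliberately skips nested arrays and objects and decodes numbers as json.Number, which is why Test_UnmarshalJSON in the test file above compares against json.Number values rather than ints. A small sketch showing the consequence, using the same Unmarshal-into-a-Set shape as that test:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/deckarep/golang-set"
)

func main() {
    s := mapset.NewSet()
    // The nested array [2, 3] is skipped, and 1 decodes as json.Number.
    if err := json.Unmarshal([]byte(`["test", 1, [2, 3]]`), s); err != nil {
        panic(err)
    }
    fmt.Println(s.Contains("test"))           // true
    fmt.Println(s.Contains(json.Number("1"))) // true
    fmt.Println(s.Contains(1))                // false: decoded as json.Number
}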
611 worker.go
@ -16,9 +16,9 @@ package clair
 
 import (
     "errors"
-    "regexp"
     "sync"
 
+    "github.com/deckarep/golang-set"
     log "github.com/sirupsen/logrus"
 
     "github.com/coreos/clair/database"
@ -27,10 +27,7 @@ import (
     "github.com/coreos/clair/ext/imagefmt"
     "github.com/coreos/clair/pkg/commonerr"
     "github.com/coreos/clair/pkg/strutil"
+    "github.com/coreos/clair/pkg/tarutil"
 )
 
-const (
-    logLayerName = "layer"
-)
-
 var (
@ -38,25 +35,10 @@ var (
     // manager is not supported.
     ErrUnsupported = commonerr.NewBadRequestError("worker: OS and/or package manager are not supported")
 
-    // ErrParentUnknown is the error that should be raised when a parent layer
-    // has yet to be processed for the current layer.
-    ErrParentUnknown = commonerr.NewBadRequestError("worker: parent layer is unknown, it must be processed first")
-
-    urlParametersRegexp = regexp.MustCompile(`(\?|\&)([^=]+)\=([^ &]+)`)
-
-    // Processors contain the names of namespace detectors and feature listers
-    // enabled in this instance of Clair.
-    //
-    // Processors are initialized during booting and configured in the
-    // configuration file.
-    Processors database.Processors
+    // EnabledDetectors are detectors to be used to scan the layers.
+    EnabledDetectors []database.Detector
 )
 
-type WorkerConfig struct {
-    EnabledDetectors []string `yaml:"namespace_detectors"`
-    EnabledListers   []string `yaml:"feature_listers"`
-}
-
 // LayerRequest represents all information necessary to download and process a
 // layer.
 type LayerRequest struct {
@ -65,296 +47,176 @@ type LayerRequest struct {
     Headers map[string]string
 }
 
-// partialLayer stores layer's content detected by `processedBy` processors.
-type partialLayer struct {
-    hash        string
-    processedBy database.Processors
-    namespaces  []database.Namespace
-    features    []database.Feature
-
-    err error
+type processResult struct {
+    existingLayer   *database.Layer
+    newLayerContent *database.Layer
+    err             error
 }
 
-// processRequest stores parameters used for processing layers.
+// processRequest stores parameters used for processing a layer.
 type processRequest struct {
-    request LayerRequest
-    // notProcessedBy represents a set of processors used to process the
-    // request.
-    notProcessedBy database.Processors
+    LayerRequest
+
+    existingLayer *database.Layer
+    detectors     []database.Detector
 }
 
-// cleanURL removes all parameters from an URL.
-func cleanURL(str string) string {
-    return urlParametersRegexp.ReplaceAllString(str, "")
+type introducedFeature struct {
+    feature    database.AncestryFeature
+    layerIndex int
 }
 
-// processLayers in parallel processes a set of requests for unique set of layers
+// processRequests in parallel processes a set of requests for unique set of layers
 // and returns sets of unique namespaces, features and layers to be inserted
 // into the database.
-func processRequests(imageFormat string, toDetect []processRequest) ([]database.Namespace, []database.Feature, map[string]partialLayer, error) {
+func processRequests(imageFormat string, toDetect map[string]*processRequest) (map[string]*processResult, error) {
     wg := &sync.WaitGroup{}
     wg.Add(len(toDetect))
-    results := make([]partialLayer, len(toDetect))
-    for i := range toDetect {
-        go func(req *processRequest, res *partialLayer) {
-            res.hash = req.request.Hash
-            res.processedBy = req.notProcessedBy
-            res.namespaces, res.features, res.err = detectContent(imageFormat, req.request.Hash, req.request.Path, req.request.Headers, req.notProcessedBy)
-            wg.Done()
-        }(&toDetect[i], &results[i])
-    }
-    wg.Wait()
-    distinctNS := map[database.Namespace]struct{}{}
-    distinctF := map[database.Feature]struct{}{}
 
+    results := map[string]*processResult{}
+    for i := range toDetect {
+        results[i] = nil
+    }
+
+    for i := range toDetect {
+        result := processResult{}
+        results[i] = &result
+        go func(req *processRequest, res *processResult) {
+            *res = *detectContent(imageFormat, req)
+            wg.Done()
+        }(toDetect[i], &result)
+    }
+
+    wg.Wait()
     errs := []error{}
     for _, r := range results {
         errs = append(errs, r.err)
     }
 
     if err := commonerr.CombineErrors(errs...); err != nil {
-        return nil, nil, nil, err
+        return nil, err
     }
 
-    updates := map[string]partialLayer{}
-    for _, r := range results {
-        for _, ns := range r.namespaces {
-            distinctNS[ns] = struct{}{}
-        }
-
-        for _, f := range r.features {
-            distinctF[f] = struct{}{}
-        }
-
-        if _, ok := updates[r.hash]; !ok {
-            updates[r.hash] = r
-        } else {
-            return nil, nil, nil, errors.New("Duplicated updates is not allowed")
-        }
-    }
-
-    namespaces := make([]database.Namespace, 0, len(distinctNS))
-    features := make([]database.Feature, 0, len(distinctF))
-
-    for ns := range distinctNS {
-        namespaces = append(namespaces, ns)
-    }
-
-    for f := range distinctF {
-        features = append(features, f)
-    }
-    return namespaces, features, updates, nil
+    return results, nil
 }
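The new processRequests pre-allocates one processResult per layer hash and hands each goroutine a pointer to its own slot, so no mutex is needed: wg.Wait() orders every goroutine's writes before the error-collection loop reads them. A stripped-down sketch of that fan-out shape, with hypothetical names in place of the Clair types:

package main

import (
    "fmt"
    "sync"
)

type result struct {
    value string
    err   error
}

// fanOut mirrors processRequests' shape: one goroutine per key, each writing
// only to its own pre-allocated result, so the map itself is never mutated
// concurrently; wg.Wait() makes every write happen before the caller reads.
func fanOut(inputs map[string]string) map[string]*result {
    wg := &sync.WaitGroup{}
    wg.Add(len(inputs))

    results := map[string]*result{}
    for k := range inputs {
        r := &result{}
        results[k] = r
        go func(in string, res *result) {
            defer wg.Done()
            res.value = "processed:" + in // stand-in for detectContent
        }(inputs[k], r)
    }

    wg.Wait()
    return results
}

func main() {
    out := fanOut(map[string]string{"layer-a": "a", "layer-b": "b"})
    fmt.Println(out["layer-a"].value) // processed:a
}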
-func getLayer(datastore database.Datastore, req LayerRequest) (layer database.Layer, preq *processRequest, err error) {
-    var (
-        tx database.Session
-        ok bool
-    )
-
-    if tx, err = datastore.Begin(); err != nil {
-        return
-    }
-
-    defer tx.Rollback()
-
-    if layer, ok, err = tx.FindLayer(req.Hash); err != nil {
+func getProcessRequest(datastore database.Datastore, req LayerRequest) (preq *processRequest, err error) {
+    layer, ok, err := database.FindLayerAndRollback(datastore, req.Hash)
+    if err != nil {
         return
     }
 
     if !ok {
-        layer = database.Layer{
-            LayerMetadata: database.LayerMetadata{
-                Hash: req.Hash,
-            },
-        }
-
+        log.WithField("layer", req.Hash).Debug("found no existing layer in database")
         preq = &processRequest{
-            request:        req,
-            notProcessedBy: Processors,
+            LayerRequest:  req,
+            existingLayer: &database.Layer{Hash: req.Hash},
+            detectors:     EnabledDetectors,
         }
     } else {
-        notProcessed := getNotProcessedBy(layer.ProcessedBy)
-        if !(len(notProcessed.Detectors) == 0 && len(notProcessed.Listers) == 0 && ok) {
-            preq = &processRequest{
-                request:        req,
-                notProcessedBy: notProcessed,
-            }
+        log.WithFields(log.Fields{
+            "layer":           layer.Hash,
+            "detectors":       layer.By,
+            "feature count":   len(layer.Features),
+            "namespace count": len(layer.Namespaces),
+        }).Debug("found existing layer in database")
+
+        preq = &processRequest{
+            LayerRequest:  req,
+            existingLayer: &layer,
+            detectors:     database.DiffDetectors(EnabledDetectors, layer.By),
         }
     }
 
     return
 }
 
+func persistProcessResult(datastore database.Datastore, results map[string]*processResult) error {
+    features := []database.Feature{}
+    namespaces := []database.Namespace{}
+    for _, r := range results {
+        features = append(features, r.newLayerContent.GetFeatures()...)
+        namespaces = append(namespaces, r.newLayerContent.GetNamespaces()...)
+    }
+
+    features = database.DeduplicateFeatures(features...)
+    namespaces = database.DeduplicateNamespaces(namespaces...)
+    if err := database.PersistNamespacesAndCommit(datastore, namespaces); err != nil {
+        return err
+    }
+
+    if err := database.PersistFeaturesAndCommit(datastore, features); err != nil {
+        return err
+    }
+
+    for _, layer := range results {
+        if err := database.PersistPartialLayerAndCommit(datastore, layer.newLayerContent); err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
 // processLayers processes a set of post layer requests, stores layers and
 // returns an ordered list of processed layers with detected features and
 // namespaces.
 func processLayers(datastore database.Datastore, imageFormat string, requests []LayerRequest) ([]database.Layer, error) {
-    toDetect := []processRequest{}
-    layers := map[string]database.Layer{}
-    for _, req := range requests {
-        if _, ok := layers[req.Hash]; ok {
-            continue
-        }
-        layer, preq, err := getLayer(datastore, req)
+    var (
+        reqMap = make(map[string]*processRequest)
+        err    error
+    )
+
+    for _, r := range requests {
+        reqMap[r.Hash], err = getProcessRequest(datastore, r)
         if err != nil {
             return nil, err
         }
-        layers[req.Hash] = layer
-        if preq != nil {
-            toDetect = append(toDetect, *preq)
-        }
     }
 
-    namespaces, features, partialLayers, err := processRequests(imageFormat, toDetect)
+    results, err := processRequests(imageFormat, reqMap)
     if err != nil {
         return nil, err
     }
 
-    // Store partial results.
-    if err := persistNamespaces(datastore, namespaces); err != nil {
+    if err := persistProcessResult(datastore, results); err != nil {
         return nil, err
     }
 
-    if err := persistFeatures(datastore, features); err != nil {
-        return nil, err
+    completeLayers := getProcessResultLayers(results)
+    layers := make([]database.Layer, 0, len(requests))
+    for _, r := range requests {
+        layers = append(layers, completeLayers[r.Hash])
     }
 
-    for _, layer := range partialLayers {
-        if err := persistPartialLayer(datastore, layer); err != nil {
-            return nil, err
-        }
-
-        log.WithFields(log.Fields{
-            "Hash":                layer.hash,
-            "namespace count":     len(layer.namespaces),
-            "feature count":       len(layer.features),
-            "namespace detectors": layer.processedBy.Detectors,
-            "feature listers":     layer.processedBy.Listers,
-        }).Debug("saved layer")
-    }
-
-    // NOTE(Sida): The full layers are computed using partially
-    // processed layers in current database session. If any other instances of
-    // Clair are changing some layers in this set of layers, it might generate
-    // different results especially when the other Clair is with different
-    // processors.
-    completeLayers := []database.Layer{}
-    for _, req := range requests {
-        if partialLayer, ok := partialLayers[req.Hash]; ok {
-            completeLayers = append(completeLayers, combineLayers(layers[req.Hash], partialLayer))
-        } else {
-            completeLayers = append(completeLayers, layers[req.Hash])
-        }
-    }
-
-    return completeLayers, nil
+    return layers, nil
 }
 
-func persistPartialLayer(datastore database.Datastore, layer partialLayer) error {
-    tx, err := datastore.Begin()
-    if err != nil {
-        return err
-    }
-    defer tx.Rollback()
-
-    if err := tx.PersistLayer(layer.hash, layer.namespaces, layer.features, layer.processedBy); err != nil {
-        return err
-    }
-
-    return tx.Commit()
-}
-
-func persistFeatures(datastore database.Datastore, features []database.Feature) error {
-    tx, err := datastore.Begin()
-    if err != nil {
-        return err
-    }
-    defer tx.Rollback()
-
-    if err := tx.PersistFeatures(features); err != nil {
-        return err
-    }
-    return tx.Commit()
-}
-
-func persistNamespaces(datastore database.Datastore, namespaces []database.Namespace) error {
-    tx, err := datastore.Begin()
-    if err != nil {
-        return err
-    }
-    defer tx.Rollback()
-
-    if err := tx.PersistNamespaces(namespaces); err != nil {
-        return err
-    }
-
-    return tx.Commit()
-}
-
-// combineLayers merges `layer` and `partial` without duplicated content.
-func combineLayers(layer database.Layer, partial partialLayer) database.Layer {
-    mapF := map[database.Feature]struct{}{}
-    mapNS := map[database.Namespace]struct{}{}
-    for _, f := range layer.Features {
-        mapF[f] = struct{}{}
-    }
-    for _, ns := range layer.Namespaces {
-        mapNS[ns] = struct{}{}
-    }
-    for _, f := range partial.features {
-        mapF[f] = struct{}{}
-    }
-    for _, ns := range partial.namespaces {
-        mapNS[ns] = struct{}{}
-    }
-    features := make([]database.Feature, 0, len(mapF))
-    namespaces := make([]database.Namespace, 0, len(mapNS))
-    for f := range mapF {
-        features = append(features, f)
-    }
-    for ns := range mapNS {
-        namespaces = append(namespaces, ns)
-    }
-
-    layer.ProcessedBy.Detectors = append(layer.ProcessedBy.Detectors, strutil.CompareStringLists(partial.processedBy.Detectors, layer.ProcessedBy.Detectors)...)
-    layer.ProcessedBy.Listers = append(layer.ProcessedBy.Listers, strutil.CompareStringLists(partial.processedBy.Listers, layer.ProcessedBy.Listers)...)
-    return database.Layer{
-        LayerMetadata: database.LayerMetadata{
-            Hash:        layer.Hash,
-            ProcessedBy: layer.ProcessedBy,
-        },
-        Features:   features,
-        Namespaces: namespaces,
-    }
+func getProcessResultLayers(results map[string]*processResult) map[string]database.Layer {
+    layers := map[string]database.Layer{}
+    for name, r := range results {
+        layers[name] = *database.MergeLayers(r.existingLayer, r.newLayerContent)
+    }
+
+    return layers
 }
 func isAncestryProcessed(datastore database.Datastore, name string) (bool, error) {
-    tx, err := datastore.Begin()
-    if err != nil {
-        return false, err
-    }
-    defer tx.Rollback()
-    ancestry, ok, err := tx.FindAncestry(name)
-    if err != nil {
-        return false, err
-    }
-    if !ok {
-        return false, nil
+    ancestry, ok, err := database.FindAncestryAndRollback(datastore, name)
+    if err != nil || !ok {
+        return ok, err
     }
 
-    notProcessed := getNotProcessedBy(ancestry.ProcessedBy)
-    return len(notProcessed.Detectors) == 0 && len(notProcessed.Listers) == 0, nil
+    return len(database.DiffDetectors(EnabledDetectors, ancestry.By)) == 0, nil
 }
 
 // ProcessAncestry downloads and scans an ancestry if it's not scanned by all
 // enabled processors in this instance of Clair.
 func ProcessAncestry(datastore database.Datastore, imageFormat, name string, layerRequest []LayerRequest) error {
     var (
         err    error
         ok     bool
         layers []database.Layer
-        commonProcessors database.Processors
     )
 
     if name == "" {
@ -365,10 +227,12 @@ func ProcessAncestry(datastore database.Datastore, imageFormat, name string, lay
         return commonerr.NewBadRequestError("could not process a layer which does not have a format")
     }
 
+    log.WithField("ancestry", name).Debug("start processing ancestry...")
     if ok, err = isAncestryProcessed(datastore, name); err != nil {
+        log.WithError(err).Error("could not determine if ancestry is processed")
         return err
     } else if ok {
-        log.WithField("name", name).Debug("ancestry is already processed")
+        log.WithField("ancestry", name).Debug("ancestry is already processed")
         return nil
     }
 
@ -376,155 +240,100 @@ func ProcessAncestry(datastore database.Datastore, imageFormat, name string, lay
         return err
     }
 
-    if commonProcessors, err = getProcessors(layers); err != nil {
-        return err
-    }
-
-    return processAncestry(datastore, name, layers, commonProcessors)
+    return processAncestry(datastore, name, layers)
 }
 
-// getNamespacedFeatures extracts the namespaced features introduced in each
-// layer into one array.
-func getNamespacedFeatures(layers []database.AncestryLayer) []database.NamespacedFeature {
-    features := []database.NamespacedFeature{}
-    for _, layer := range layers {
-        features = append(features, layer.DetectedFeatures...)
-    }
-    return features
-}
-
-func processAncestry(datastore database.Datastore, name string, layers []database.Layer, commonProcessors database.Processors) error {
+func processAncestry(datastore database.Datastore, name string, layers []database.Layer) error {
     var (
-        ancestry database.Ancestry
+        ancestry = database.Ancestry{Name: name}
         err      error
     )
 
-    ancestry.Name = name
-    ancestry.ProcessedBy = commonProcessors
-    ancestry.Layers, err = computeAncestryLayers(layers, commonProcessors)
+    ancestry.Layers, ancestry.By, err = computeAncestryLayers(layers)
     if err != nil {
         return err
     }
 
-    ancestryFeatures := getNamespacedFeatures(ancestry.Layers)
+    ancestryFeatures := database.GetAncestryFeatures(ancestry)
     log.WithFields(log.Fields{
         "ancestry":           name,
-        "number of features": len(ancestryFeatures),
-        "processed by":       Processors,
-        "number of layers":   len(ancestry.Layers),
+        "processed by":       EnabledDetectors,
+        "features count":     len(ancestryFeatures),
+        "layer count":        len(ancestry.Layers),
     }).Debug("compute ancestry features")
 
-    if err := persistNamespacedFeatures(datastore, ancestryFeatures); err != nil {
+    if err := database.PersistNamespacedFeaturesAndCommit(datastore, ancestryFeatures); err != nil {
+        log.WithField("ancestry", name).WithError(err).Error("could not persist namespaced features for ancestry")
         return err
     }
 
-    tx, err := datastore.Begin()
-    if err != nil {
+    if err := database.CacheRelatedVulnerabilityAndCommit(datastore, ancestryFeatures); err != nil {
+        log.WithField("ancestry", name).WithError(err).Error("failed to cache feature related vulnerability")
         return err
     }
 
-    err = tx.UpsertAncestry(ancestry)
-    if err != nil {
-        tx.Rollback()
+    if err := database.UpsertAncestryAndCommit(datastore, ancestry); err != nil {
+        log.WithField("ancestry", name).WithError(err).Error("could not upsert ancestry")
         return err
     }
 
-    err = tx.Commit()
-    if err != nil {
-        return err
-    }
     return nil
 }
 
-func persistNamespacedFeatures(datastore database.Datastore, features []database.NamespacedFeature) error {
-    tx, err := datastore.Begin()
-    if err != nil {
-        return err
-    }
-
-    if err := tx.PersistNamespacedFeatures(features); err != nil {
-        tx.Rollback()
-        return err
-    }
-
-    if err := tx.Commit(); err != nil {
-        return err
-    }
-
-    tx, err = datastore.Begin()
-    if err != nil {
-        return err
-    }
-
-    if err := tx.CacheAffectedNamespacedFeatures(features); err != nil {
-        tx.Rollback()
-        return err
-    }
-
-    return tx.Commit()
-}
-
-// getProcessors retrieves common subset of the processors of each layer.
-func getProcessors(layers []database.Layer) (database.Processors, error) {
-    if len(layers) == 0 {
-        return database.Processors{}, nil
-    }
-
-    detectors := layers[0].ProcessedBy.Detectors
-    listers := layers[0].ProcessedBy.Listers
-
-    detectorsLen := len(detectors)
-    listersLen := len(listers)
-
-    for _, l := range layers[1:] {
-        detectors := strutil.CompareStringListsInBoth(detectors, l.ProcessedBy.Detectors)
-        listers := strutil.CompareStringListsInBoth(listers, l.ProcessedBy.Listers)
-
-        if len(detectors) != detectorsLen || len(listers) != listersLen {
-            // This error might be triggered because of multiple workers are
-            // processing the same instance with different processors.
-            // TODO(sidchen): Once the features can be associated with
-            // Detectors/Listers, we can support dynamically generating ancestry's
-            // detector/lister based on the layers.
-            return database.Processors{}, errors.New("processing layers with different Clair instances is currently unsupported")
-        }
-    }
-    return database.Processors{
-        Detectors: detectors,
-        Listers:   listers,
-    }, nil
-}
-
-type introducedFeature struct {
-    feature    database.NamespacedFeature
-    layerIndex int
+func getCommonDetectors(layers []database.Layer) mapset.Set {
+    // find the common detector for all layers and filter the namespaces and
+    // features based on that.
+    commonDetectors := mapset.NewSet()
+    for _, d := range layers[0].By {
+        commonDetectors.Add(d)
+    }
+
+    for _, l := range layers {
+        detectors := mapset.NewSet()
+        for _, d := range l.By {
+            detectors.Add(d)
+        }
+
+        commonDetectors = commonDetectors.Intersect(detectors)
+    }
+
+    return commonDetectors
 }
 
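getCommonDetectors seeds a set from the first layer's detectors and folds Intersect across all layers, leaving exactly the detectors every layer was processed with; features and namespaces found by any other detector are filtered out later. The same fold, with plain strings standing in for database.Detector values:

package main

import (
    "fmt"

    "github.com/deckarep/golang-set"
)

func main() {
    // Detector names stand in for database.Detector values here.
    layers := [][]string{
        {"os-release", "dpkg", "apk"},
        {"os-release", "dpkg"},
        {"os-release", "dpkg", "rpm"},
    }

    common := mapset.NewSet()
    for _, d := range layers[0] {
        common.Add(d)
    }
    for _, l := range layers {
        s := mapset.NewSet()
        for _, d := range l {
            s.Add(d)
        }
        common = common.Intersect(s)
    }

    fmt.Println(common) // Set{os-release, dpkg} (iteration order not guaranteed)
}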
 // computeAncestryLayers computes ancestry's layers along with what features are
 // introduced.
-func computeAncestryLayers(layers []database.Layer, commonProcessors database.Processors) ([]database.AncestryLayer, error) {
-    // TODO(sidchen): Once the features are linked to specific processor, we
-    // will use commonProcessors to filter out the features for this ancestry.
+func computeAncestryLayers(layers []database.Layer) ([]database.AncestryLayer, []database.Detector, error) {
+    if len(layers) == 0 {
+        return nil, nil, nil
+    }
+
+    commonDetectors := getCommonDetectors(layers)
     // version format -> namespace
-    namespaces := map[string]database.Namespace{}
+    namespaces := map[string]database.LayerNamespace{}
     // version format -> feature ID -> feature
     features := map[string]map[string]introducedFeature{}
     ancestryLayers := []database.AncestryLayer{}
     for index, layer := range layers {
-        // Initialize the ancestry Layer
-        initializedLayer := database.AncestryLayer{LayerMetadata: layer.LayerMetadata, DetectedFeatures: []database.NamespacedFeature{}}
+        initializedLayer := database.AncestryLayer{Hash: layer.Hash}
         ancestryLayers = append(ancestryLayers, initializedLayer)
 
         // Precondition: namespaces and features contain the result from union
         // of all parents.
         for _, ns := range layer.Namespaces {
+            if !commonDetectors.Contains(ns.By) {
+                continue
+            }
+
             namespaces[ns.VersionFormat] = ns
         }
 
         // version format -> feature ID -> feature
         currentFeatures := map[string]map[string]introducedFeature{}
         for _, f := range layer.Features {
+            if !commonDetectors.Contains(f.By) {
+                continue
+            }
+
             if ns, ok := namespaces[f.VersionFormat]; ok {
                 var currentMap map[string]introducedFeature
                 if currentMap, ok = currentFeatures[f.VersionFormat]; !ok {
@ -542,16 +351,20 @@ func computeAncestryLayers(layers []database.Layer, commonProcessors database.Pr
 
                 if !inherited {
                     currentMap[f.Name+":"+f.Version] = introducedFeature{
-                        feature: database.NamespacedFeature{
-                            Feature:   f,
-                            Namespace: ns,
+                        feature: database.AncestryFeature{
+                            NamespacedFeature: database.NamespacedFeature{
+                                Feature:   f.Feature,
+                                Namespace: ns.Namespace,
+                            },
+                            NamespaceBy: ns.By,
+                            FeatureBy:   f.By,
                         },
                         layerIndex: index,
                     }
                 }
 
             } else {
-                return nil, errors.New("No corresponding version format")
+                return nil, nil, errors.New("No corresponding version format")
             }
         }
 
@ -569,57 +382,97 @@ func computeAncestryLayers(layers []database.Layer, commonProcessors database.Pr
 
     for _, featureMap := range features {
         for _, feature := range featureMap {
-            ancestryLayers[feature.layerIndex].DetectedFeatures = append(
-                ancestryLayers[feature.layerIndex].DetectedFeatures,
+            ancestryLayers[feature.layerIndex].Features = append(
+                ancestryLayers[feature.layerIndex].Features,
                 feature.feature,
             )
         }
     }
 
-    return ancestryLayers, nil
+    detectors := make([]database.Detector, 0, commonDetectors.Cardinality())
+    for d := range commonDetectors.Iter() {
+        detectors = append(detectors, d.(database.Detector))
+    }
+
+    return ancestryLayers, detectors, nil
 }
 
-// getNotProcessedBy returns a processors, which contains the detectors and
-// listers not in `processedBy` but implemented in the current clair instance.
-func getNotProcessedBy(processedBy database.Processors) database.Processors {
-    notProcessedLister := strutil.CompareStringLists(Processors.Listers, processedBy.Listers)
-    notProcessedDetector := strutil.CompareStringLists(Processors.Detectors, processedBy.Detectors)
-    return database.Processors{
-        Listers:   notProcessedLister,
-        Detectors: notProcessedDetector,
-    }
+func extractRequiredFiles(imageFormat string, req *processRequest) (tarutil.FilesMap, error) {
+    requiredFiles := append(featurefmt.RequiredFilenames(req.detectors), featurens.RequiredFilenames(req.detectors)...)
+    if len(requiredFiles) == 0 {
+        log.WithFields(log.Fields{
+            "layer":     req.Hash,
+            "detectors": req.detectors,
+        }).Info("layer requires no file to extract")
+        return make(tarutil.FilesMap), nil
+    }
+
+    files, err := imagefmt.Extract(imageFormat, req.Path, req.Headers, requiredFiles)
+    if err != nil {
+        log.WithError(err).WithFields(log.Fields{
+            "layer": req.Hash,
+            "path":  strutil.CleanURL(req.Path),
+        }).Error("failed to extract data from path")
+        return nil, err
+    }
+
+    return files, err
 }
 
 // detectContent downloads a layer and detects all features and namespaces.
-func detectContent(imageFormat, name, path string, headers map[string]string, toProcess database.Processors) (namespaces []database.Namespace, featureVersions []database.Feature, err error) {
-    log.WithFields(log.Fields{"Hash": name}).Debug("Process Layer")
-    totalRequiredFiles := append(featurefmt.RequiredFilenames(toProcess.Listers), featurens.RequiredFilenames(toProcess.Detectors)...)
-    files, err := imagefmt.Extract(imageFormat, path, headers, totalRequiredFiles)
-    if err != nil {
-        log.WithError(err).WithFields(log.Fields{
-            logLayerName: name,
-            "path":       cleanURL(path),
-        }).Error("failed to extract data from path")
+func detectContent(imageFormat string, req *processRequest) (res *processResult) {
+    var (
+        files tarutil.FilesMap
+        layer = database.Layer{Hash: req.Hash, By: req.detectors}
+    )
+
+    res = &processResult{req.existingLayer, &layer, nil}
+    log.WithFields(log.Fields{
+        "layer":     req.Hash,
+        "detectors": req.detectors,
+    }).Info("detecting layer content...")
+
+    files, res.err = extractRequiredFiles(imageFormat, req)
+    if res.err != nil {
         return
     }
 
-    namespaces, err = featurens.Detect(files, toProcess.Detectors)
-    if err != nil {
+    if layer.Namespaces, res.err = featurens.Detect(files, req.detectors); res.err != nil {
         return
     }
 
-    if len(featureVersions) > 0 {
-        log.WithFields(log.Fields{logLayerName: name, "count": len(namespaces)}).Debug("detected layer namespaces")
-    }
-
-    featureVersions, err = featurefmt.ListFeatures(files, toProcess.Listers)
-    if err != nil {
+    if layer.Features, res.err = featurefmt.ListFeatures(files, req.detectors); res.err != nil {
         return
     }
 
-    if len(featureVersions) > 0 {
-        log.WithFields(log.Fields{logLayerName: name, "count": len(featureVersions)}).Debug("detected layer features")
-    }
+    log.WithFields(log.Fields{
+        "layer":           req.Hash,
+        "detectors":       req.detectors,
+        "namespace count": len(layer.Namespaces),
+        "feature count":   len(layer.Features),
+    }).Info("processed layer")
 
     return
 }
+
+// InitWorker initializes the worker.
+func InitWorker(datastore database.Datastore) {
+    if len(EnabledDetectors) == 0 {
+        log.Warn("no enabled detector, and therefore, no ancestry will be processed.")
+        return
+    }
+
+    tx, err := datastore.Begin()
+    if err != nil {
+        log.WithError(err).Fatal("cannot connect to database to initialize worker")
+    }
+
+    defer tx.Rollback()
+    if err := tx.PersistDetectors(EnabledDetectors); err != nil {
+        log.WithError(err).Fatal("cannot insert detectors to initialize worker")
+    }
+
+    if err := tx.Commit(); err != nil {
+        log.WithError(err).Fatal("cannot commit detector changes to initialize worker")
+    }
+}
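InitWorker persists the enabled detectors once at boot so that layers and ancestries can later be versioned against them. A hedged sketch of the boot-time wiring this implies; ListListers and ListDetectors are assumed helper names for enumerating the registered extensions, not confirmed by this excerpt:

package main

import (
    "github.com/coreos/clair"
    "github.com/coreos/clair/database"
    "github.com/coreos/clair/ext/featurefmt"
    "github.com/coreos/clair/ext/featurens"
)

// boot fills EnabledDetectors from the registered feature listers and
// namespace detectors, then persists them before any layer is processed.
// The two List* helpers are assumptions made for illustration only.
func boot(datastore database.Datastore) {
    clair.EnabledDetectors = append(featurefmt.ListListers(), featurens.ListDetectors()...)
    clair.InitWorker(datastore)
}

func main() {
    // Wire a real database.Datastore here before calling boot.
}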
443 worker_test.go
@ -22,12 +22,12 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/coreos/clair/database"
|
"github.com/coreos/clair/database"
|
||||||
"github.com/coreos/clair/ext/featurefmt"
|
"github.com/coreos/clair/ext/featurefmt"
|
||||||
"github.com/coreos/clair/ext/featurens"
|
"github.com/coreos/clair/ext/featurens"
|
||||||
"github.com/coreos/clair/ext/versionfmt/dpkg"
|
"github.com/coreos/clair/ext/versionfmt/dpkg"
|
||||||
"github.com/coreos/clair/pkg/strutil"
|
|
||||||
|
|
||||||
// Register the required detectors.
|
// Register the required detectors.
|
||||||
_ "github.com/coreos/clair/ext/featurefmt/dpkg"
|
_ "github.com/coreos/clair/ext/featurefmt/dpkg"
|
||||||
@ -58,55 +58,27 @@ type mockSession struct {
func copyDatastore(md *mockDatastore) mockDatastore {
	layers := map[string]database.Layer{}
	for k, l := range md.layers {
-		features := append([]database.Feature(nil), l.Features...)
-		namespaces := append([]database.Namespace(nil), l.Namespaces...)
-		listers := append([]string(nil), l.ProcessedBy.Listers...)
-		detectors := append([]string(nil), l.ProcessedBy.Detectors...)
		layers[k] = database.Layer{
-			LayerMetadata: database.LayerMetadata{
-				Hash: l.Hash,
-				ProcessedBy: database.Processors{
-					Listers:   listers,
-					Detectors: detectors,
-				},
-			},
-			Features:   features,
-			Namespaces: namespaces,
+			Hash:       l.Hash,
+			By:         append([]database.Detector{}, l.By...),
+			Features:   append([]database.LayerFeature{}, l.Features...),
+			Namespaces: append([]database.LayerNamespace{}, l.Namespaces...),
		}
	}

	ancestry := map[string]database.Ancestry{}
	for k, a := range md.ancestry {
		ancestryLayers := []database.AncestryLayer{}
-		layers := []database.LayerMetadata{}
-
		for _, layer := range a.Layers {
-			layers = append(layers, database.LayerMetadata{
-				Hash: layer.Hash,
-				ProcessedBy: database.Processors{
-					Detectors: append([]string(nil), layer.LayerMetadata.ProcessedBy.Detectors...),
-					Listers:   append([]string(nil), layer.LayerMetadata.ProcessedBy.Listers...),
-				},
-			})
-
			ancestryLayers = append(ancestryLayers, database.AncestryLayer{
-				LayerMetadata: database.LayerMetadata{
-					Hash: layer.Hash,
-					ProcessedBy: database.Processors{
-						Detectors: append([]string(nil), layer.LayerMetadata.ProcessedBy.Detectors...),
-						Listers:   append([]string(nil), layer.LayerMetadata.ProcessedBy.Listers...),
-					},
-				},
-				DetectedFeatures: append([]database.NamespacedFeature(nil), layer.DetectedFeatures...),
+				Hash:     layer.Hash,
+				Features: append([]database.AncestryFeature{}, layer.Features...),
			})
		}

		ancestry[k] = database.Ancestry{
			Name: a.Name,
-			ProcessedBy: database.Processors{
-				Detectors: append([]string(nil), a.ProcessedBy.Detectors...),
-				Listers:   append([]string(nil), a.ProcessedBy.Listers...),
-			},
+			By:     append([]database.Detector{}, a.By...),
			Layers: ancestryLayers,
		}
	}
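The deep copy above leans on a standard Go idiom: appending a slice onto an empty literal allocates fresh backing storage, so one mock session can mutate its copy of the datastore without touching the original. A self-contained illustration (the values are illustrative only):

	src := []database.Detector{database.NewNamespaceDetector("os-release", "1.0")}
	dst := append([]database.Detector{}, src...) // fresh backing array
	dst[0] = database.NewFeatureDetector("dpkg", "1.0")
	// src[0] still names the os-release detector; the two slices are independent.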
@ -125,6 +97,7 @@ func copyDatastore(md *mockDatastore) mockDatastore {
	for k, f := range md.namespacedFeatures {
		namespacedFeatures[k] = f
	}

	return mockDatastore{
		layers:   layers,
		ancestry: ancestry,
@ -194,10 +167,7 @@ func newMockDatastore() *mockDatastore {
			return errSessionDone
		}
		for _, n := range ns {
-			_, ok := session.copy.namespaces[n.Name]
-			if !ok {
-				session.copy.namespaces[n.Name] = n
-			}
+			session.copy.namespaces[NamespaceKey(&n)] = n
		}
		return nil
	}
@ -207,63 +177,36 @@ func newMockDatastore() *mockDatastore {
			return errSessionDone
		}
		for _, f := range fs {
-			key := FeatureKey(&f)
-			_, ok := session.copy.features[key]
-			if !ok {
-				session.copy.features[key] = f
-			}
+			session.copy.features[FeatureKey(&f)] = f
		}

		return nil
	}

-	session.FctPersistLayer = func(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
+	session.FctPersistLayer = func(hash string, features []database.LayerFeature, namespaces []database.LayerNamespace, by []database.Detector) error {
		if session.terminated {
			return errSessionDone
		}

-		// update the layer
-		_, ok := session.copy.layers[hash]
-		if !ok {
-			session.copy.layers[hash] = database.Layer{}
-		}
-
-		layer, ok := session.copy.layers[hash]
-		if !ok {
-			return errors.New("Failed to insert layer")
-		}
-
-		layerFeatures := map[string]database.Feature{}
-		layerNamespaces := map[string]database.Namespace{}
-		for _, f := range layer.Features {
-			layerFeatures[FeatureKey(&f)] = f
-		}
-		for _, n := range layer.Namespaces {
-			layerNamespaces[n.Name] = n
-		}
-
-		// ensure that all the namespaces, features are in the database
		for _, ns := range namespaces {
-			if _, ok := session.copy.namespaces[ns.Name]; !ok {
-				return errors.New("Namespaces should be in the database")
-			}
-			if _, ok := layerNamespaces[ns.Name]; !ok {
-				layer.Namespaces = append(layer.Namespaces, ns)
-				layerNamespaces[ns.Name] = ns
+			if _, ok := session.copy.namespaces[NamespaceKey(&ns.Namespace)]; !ok {
+				panic("")
			}
		}

		for _, f := range features {
-			if _, ok := session.copy.features[FeatureKey(&f)]; !ok {
-				return errors.New("Namespaces should be in the database")
-			}
-			if _, ok := layerFeatures[FeatureKey(&f)]; !ok {
-				layer.Features = append(layer.Features, f)
-				layerFeatures[FeatureKey(&f)] = f
+			if _, ok := session.copy.features[FeatureKey(&f.Feature)]; !ok {
+				panic("")
			}
		}

-		layer.ProcessedBy.Detectors = append(layer.ProcessedBy.Detectors, strutil.CompareStringLists(processedBy.Detectors, layer.ProcessedBy.Detectors)...)
-		layer.ProcessedBy.Listers = append(layer.ProcessedBy.Listers, strutil.CompareStringLists(processedBy.Listers, layer.ProcessedBy.Listers)...)
+		layer, _ := session.copy.layers[hash]
+		database.MergeLayers(&layer, &database.Layer{
+			Hash:       hash,
+			By:         by,
+			Namespaces: namespaces,
+			Features:   features,
+		})

		session.copy.layers[hash] = layer
		return nil
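Two behavioural shifts in the rewritten FctPersistLayer are worth noting: missing prerequisites now panic("") rather than returning an error, treating them as bugs in the test harness instead of conditions under test, and the hand-rolled strutil list merging is replaced by database.MergeLayers. Judging only from this call site, MergeLayers appears to union the source layer's By, Namespaces and Features into the target; a hedged sketch of the usage pattern (semantics inferred, not confirmed by this diff):

	// Sketch only: maps hold values in Go, so the merged layer must be
	// read out, merged, and written back.
	layer := session.copy.layers[hash] // zero-value Layer if hash is new
	database.MergeLayers(&layer, &database.Layer{
		Hash: hash,
		By:   by, // the detectors that produced this scan
	})
	session.copy.layers[hash] = layer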
@ -274,11 +217,12 @@ func newMockDatastore() *mockDatastore {
			return errSessionDone
		}

-		features := getNamespacedFeatures(ancestry.Layers)
-		// ensure features are in the database
-		for _, f := range features {
-			if _, ok := session.copy.namespacedFeatures[NamespacedFeatureKey(&f)]; !ok {
-				return errors.New("namespaced feature not in db")
+		// ensure the namespaces features are in the code base
+		for _, l := range ancestry.Layers {
+			for _, f := range l.GetFeatures() {
+				if _, ok := session.copy.namespacedFeatures[NamespacedFeatureKey(&f)]; !ok {
+					panic("")
+				}
			}
		}
@ -288,6 +232,14 @@ func newMockDatastore() *mockDatastore {

	session.FctPersistNamespacedFeatures = func(namespacedFeatures []database.NamespacedFeature) error {
		for i, f := range namespacedFeatures {
+			if _, ok := session.copy.features[FeatureKey(&f.Feature)]; !ok {
+				panic("")
+			}
+
+			if _, ok := session.copy.namespaces[NamespaceKey(&f.Namespace)]; !ok {
+				panic("")
+			}
+
			session.copy.namespacedFeatures[NamespacedFeatureKey(&f)] = namespacedFeatures[i]
		}
		return nil
@ -304,10 +256,7 @@ func newMockDatastore() *mockDatastore {
	}

func TestMain(m *testing.M) {
-	Processors = database.Processors{
-		Listers:   featurefmt.ListListers(),
-		Detectors: featurens.ListDetectors(),
-	}
+	EnabledDetectors = append(featurefmt.ListListers(), featurens.ListDetectors()...)
	m.Run()
}
@ -315,11 +264,16 @@ func FeatureKey(f *database.Feature) string {
	return strings.Join([]string{f.Name, f.VersionFormat, f.Version}, "__")
}

+func NamespaceKey(ns *database.Namespace) string {
+	return strings.Join([]string{ns.Name, ns.VersionFormat}, "__")
+}
+
func NamespacedFeatureKey(f *database.NamespacedFeature) string {
	return strings.Join([]string{f.Name, f.Namespace.Name}, "__")
}

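These key helpers flatten a struct into a single string so the mock maps can deduplicate by value. Given the join order shown above, the keys come out as follows (example values are illustrative only):

	ns := database.Namespace{Name: "debian:8", VersionFormat: "dpkg"}
	_ = NamespaceKey(&ns) // "debian:8__dpkg"

	f := database.Feature{Name: "openssl", VersionFormat: "dpkg", Version: "1.1.0"}
	_ = FeatureKey(&f) // "openssl__dpkg__1.1.0"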
func TestProcessAncestryWithDistUpgrade(t *testing.T) {
+	// TODO(sidac): Change to use table driven tests.
	// Create the list of Features that should not been upgraded from one layer to another.
	nonUpgradedFeatures := []database.Feature{
		{Name: "libtext-wrapi18n-perl", Version: "0.06-7"},
@ -358,7 +312,12 @@ func TestProcessAncestryWithDistUpgrade(t *testing.T) {
	assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock", layers))

	// check the ancestry features
-	features := getNamespacedFeatures(datastore.ancestry["Mock"].Layers)
+	features := []database.AncestryFeature{}
+	for i, l := range datastore.ancestry["Mock"].Layers {
+		assert.Equal(t, layers[i].Hash, l.Hash)
+		features = append(features, l.Features...)
+	}
+
	assert.Len(t, features, 74)
	for _, f := range features {
		if _, ok := nonUpgradedMap[f.Feature]; ok {
@ -367,12 +326,6 @@ func TestProcessAncestryWithDistUpgrade(t *testing.T) {
			assert.Equal(t, "debian:8", f.Namespace.Name)
		}
	}

-	assert.Equal(t, []database.LayerMetadata{
-		{Hash: "blank"},
-		{Hash: "wheezy"},
-		{Hash: "jessie"},
-	}, datastore.ancestry["Mock"].Layers)
}

func TestProcessLayers(t *testing.T) {
@ -404,8 +357,7 @@ func TestProcessLayers(t *testing.T) {

	// Ensure each layer has expected namespaces and features detected
	if blank, ok := datastore.layers["blank"]; ok {
-		assert.Equal(t, blank.ProcessedBy.Detectors, Processors.Detectors)
-		assert.Equal(t, blank.ProcessedBy.Listers, Processors.Listers)
+		database.AssertDetectorsEqual(t, EnabledDetectors, blank.By)
		assert.Len(t, blank.Namespaces, 0)
		assert.Len(t, blank.Features, 0)
	} else {
@ -414,9 +366,11 @@ func TestProcessLayers(t *testing.T) {
	}

	if wheezy, ok := datastore.layers["wheezy"]; ok {
-		assert.Equal(t, wheezy.ProcessedBy.Detectors, Processors.Detectors)
-		assert.Equal(t, wheezy.ProcessedBy.Listers, Processors.Listers)
-		assert.Equal(t, wheezy.Namespaces, []database.Namespace{{Name: "debian:7", VersionFormat: dpkg.ParserName}})
+		database.AssertDetectorsEqual(t, EnabledDetectors, wheezy.By)
+		assert.Equal(t, []database.LayerNamespace{
+			{database.Namespace{"debian:7", dpkg.ParserName}, database.NewNamespaceDetector("os-release", "1.0")},
+		}, wheezy.Namespaces)
+
		assert.Len(t, wheezy.Features, 52)
	} else {
		assert.Fail(t, "wheezy is not stored")
@ -424,9 +378,10 @@ func TestProcessLayers(t *testing.T) {
	}

	if jessie, ok := datastore.layers["jessie"]; ok {
-		assert.Equal(t, jessie.ProcessedBy.Detectors, Processors.Detectors)
-		assert.Equal(t, jessie.ProcessedBy.Listers, Processors.Listers)
-		assert.Equal(t, jessie.Namespaces, []database.Namespace{{Name: "debian:8", VersionFormat: dpkg.ParserName}})
+		database.AssertDetectorsEqual(t, EnabledDetectors, jessie.By)
+		assert.Equal(t, []database.LayerNamespace{
+			{database.Namespace{"debian:8", dpkg.ParserName}, database.NewNamespaceDetector("os-release", "1.0")},
+		}, jessie.Namespaces)
		assert.Len(t, jessie.Features, 74)
	} else {
		assert.Fail(t, "jessie is not stored")
@ -434,157 +389,124 @@ func TestProcessLayers(t *testing.T) {
	}
}

-// TestUpgradeClair checks if a clair is upgraded and certain ancestry's
-// features should not change. We assume that Clair should only upgrade
-func TestClairUpgrade(t *testing.T) {
-	_, f, _, _ := runtime.Caller(0)
-	testDataPath := filepath.Join(filepath.Dir(f)) + "/testdata/DistUpgrade/"
-
-	datastore := newMockDatastore()
-
-	// suppose there are two ancestries.
-	layers := []LayerRequest{
-		{Hash: "blank", Path: testDataPath + "blank.tar.gz"},
-		{Hash: "wheezy", Path: testDataPath + "wheezy.tar.gz"},
-		{Hash: "jessie", Path: testDataPath + "jessie.tar.gz"},
+func getFeatures(a database.Ancestry) []database.AncestryFeature {
+	features := []database.AncestryFeature{}
+	for _, l := range a.Layers {
+		features = append(features, l.Features...)
	}

-	layers2 := []LayerRequest{
-		{Hash: "blank", Path: testDataPath + "blank.tar.gz"},
-		{Hash: "wheezy", Path: testDataPath + "wheezy.tar.gz"},
-	}
-
-	// Suppose user scan an ancestry with an old instance of Clair.
-	Processors = database.Processors{
-		Detectors: []string{"os-release"},
-		Listers:   []string{"rpm"},
-	}
-
-	assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock", layers))
-	assert.Len(t, getNamespacedFeatures(datastore.ancestry["Mock"].Layers), 0)
-
-	assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock2", layers2))
-	assert.Len(t, getNamespacedFeatures(datastore.ancestry["Mock2"].Layers), 0)
-
-	// Clair is upgraded to use a new namespace detector. The expected
-	// behavior is that all layers will be rescanned with "apt-sources" and
-	// the ancestry's features are recalculated.
-	Processors = database.Processors{
-		Detectors: []string{"os-release", "apt-sources"},
-		Listers:   []string{"rpm"},
-	}
-
-	// Even though Clair processors are upgraded, the ancestry's features should
-	// not be upgraded without posting the ancestry to Clair again.
-	assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock", layers))
-	assert.Len(t, getNamespacedFeatures(datastore.ancestry["Mock"].Layers), 0)
-
-	// Clair is upgraded to use a new feature lister. The expected behavior is
-	// that all layers will be rescanned with "dpkg" and the ancestry's features
-	// are invalidated and recalculated.
-	Processors = database.Processors{
-		Detectors: []string{"os-release", "apt-sources"},
-		Listers:   []string{"rpm", "dpkg"},
-	}
-
-	assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock", layers))
-	assert.Len(t, getNamespacedFeatures(datastore.ancestry["Mock"].Layers), 74)
-	assert.Nil(t, ProcessAncestry(datastore, "Docker", "Mock2", layers2))
-	assert.Len(t, getNamespacedFeatures(datastore.ancestry["Mock2"].Layers), 52)
-
-	// check the namespaces are correct
-	for _, f := range getNamespacedFeatures(datastore.ancestry["Mock"].Layers) {
-		if !assert.NotEqual(t, database.Namespace{}, f.Namespace) {
-			assert.Fail(t, "Every feature should have a namespace attached")
-		}
-	}
-
-	for _, f := range getNamespacedFeatures(datastore.ancestry["Mock2"].Layers) {
-		if !assert.NotEqual(t, database.Namespace{}, f.Namespace) {
-			assert.Fail(t, "Every feature should have a namespace attached")
-		}
-	}
+	return features
}

-// TestMultipleNamespaces tests computing ancestry features
func TestComputeAncestryFeatures(t *testing.T) {
	vf1 := "format 1"
	vf2 := "format 2"

-	ns1a := database.Namespace{
-		Name:          "namespace 1:a",
-		VersionFormat: vf1,
+	nd1 := database.NewNamespaceDetector("apk", "1.0")
+	fd1 := database.NewFeatureDetector("fd1", "1.0")
+	// this detector only scans one layer with one extra feature, this one
+	// should be omitted.
+	fd2 := database.NewFeatureDetector("fd2", "1.0")
+
+	ns1a := database.LayerNamespace{
+		database.Namespace{
+			Name:          "namespace 1:a",
+			VersionFormat: vf1,
+		}, nd1,
	}

-	ns1b := database.Namespace{
-		Name:          "namespace 1:b",
-		VersionFormat: vf1,
-	}
+	ns1b := database.LayerNamespace{
+		database.Namespace{
+			Name:          "namespace 1:b",
+			VersionFormat: vf1,
+		}, nd1}

-	ns2a := database.Namespace{
-		Name:          "namespace 2:a",
-		VersionFormat: vf2,
-	}
+	ns2a := database.LayerNamespace{
+		database.Namespace{
+			Name:          "namespace 2:a",
+			VersionFormat: vf2,
+		}, nd1}

-	ns2b := database.Namespace{
-		Name:          "namespace 2:b",
-		VersionFormat: vf2,
-	}
+	ns2b := database.LayerNamespace{
+		database.Namespace{
+			Name:          "namespace 2:b",
+			VersionFormat: vf2,
+		}, nd1}

-	f1 := database.Feature{
-		Name:          "feature 1",
-		Version:       "0.1",
-		VersionFormat: vf1,
-	}
+	f1 := database.LayerFeature{
+		database.Feature{
+			Name:          "feature 1",
+			Version:       "0.1",
+			VersionFormat: vf1,
+		}, fd1}

-	f2 := database.Feature{
+	f2 := database.LayerFeature{database.Feature{
		Name:          "feature 2",
		Version:       "0.2",
		VersionFormat: vf1,
-	}
+	}, fd2}

-	f3 := database.Feature{
-		Name:          "feature 1",
-		Version:       "0.3",
-		VersionFormat: vf2,
-	}
+	f3 := database.LayerFeature{
+		database.Feature{
+			Name:          "feature 1",
+			Version:       "0.3",
+			VersionFormat: vf2,
+		}, fd1}

-	f4 := database.Feature{
-		Name:          "feature 2",
-		Version:       "0.3",
-		VersionFormat: vf2,
+	f4 := database.LayerFeature{
+		database.Feature{
+			Name:          "feature 2",
+			Version:       "0.3",
+			VersionFormat: vf2,
+		}, fd1}
+
+	f5 := database.LayerFeature{
+		database.Feature{
+			Name:          "feature 3",
+			Version:       "0.3",
+			VersionFormat: vf2,
+		},
+		fd2,
	}

	// Suppose Clair is watching two files for namespaces one containing ns1
	// changes e.g. os-release and the other one containing ns2 changes e.g.
	// node.
-	blank := database.Layer{LayerMetadata: database.LayerMetadata{Hash: "blank"}}
+	blank := database.Layer{
+		Hash: "blank",
+		By:   []database.Detector{nd1, fd1, fd1},
+	}
	initNS1a := database.Layer{
-		LayerMetadata: database.LayerMetadata{Hash: "init ns1a"},
-		Namespaces:    []database.Namespace{ns1a},
-		Features:      []database.Feature{f1, f2},
+		Hash:       "initNS1a",
+		By:         []database.Detector{nd1, fd1, fd1},
+		Namespaces: []database.LayerNamespace{ns1a},
+		Features:   []database.LayerFeature{f1, f2},
	}

	upgradeNS2b := database.Layer{
-		LayerMetadata: database.LayerMetadata{Hash: "upgrade ns2b"},
-		Namespaces:    []database.Namespace{ns2b},
+		Hash:       "upgradeNS2b",
+		By:         []database.Detector{nd1, fd1, fd1},
+		Namespaces: []database.LayerNamespace{ns2b},
	}

	upgradeNS1b := database.Layer{
-		LayerMetadata: database.LayerMetadata{Hash: "upgrade ns1b"},
-		Namespaces:    []database.Namespace{ns1b},
-		Features:      []database.Feature{f1, f2},
+		Hash:       "upgradeNS1b",
+		By:         []database.Detector{nd1, fd1, fd1, fd2},
+		Namespaces: []database.LayerNamespace{ns1b},
+		Features:   []database.LayerFeature{f1, f2, f5},
	}

	initNS2a := database.Layer{
-		LayerMetadata: database.LayerMetadata{Hash: "init ns2a"},
-		Namespaces:    []database.Namespace{ns2a},
-		Features:      []database.Feature{f3, f4},
+		Hash:       "initNS2a",
+		By:         []database.Detector{nd1, fd1, fd1},
+		Namespaces: []database.LayerNamespace{ns2a},
+		Features:   []database.LayerFeature{f3, f4},
	}

	removeF2 := database.Layer{
-		LayerMetadata: database.LayerMetadata{Hash: "remove f2"},
-		Features:      []database.Feature{f1},
+		Hash:     "removeF2",
+		By:       []database.Detector{nd1, fd1, fd1},
+		Features: []database.LayerFeature{f1},
	}

	// blank -> ns1:a, f1 f2 (init)
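The fixtures above encode the core of this change: every namespace and feature carries the detector that produced it, and every layer records the detectors that scanned it in By. Based on the composite literals here and the f1.Feature / f1.By accesses in the expectations below, LayerFeature is assumed to be a plain Feature paired with a By detector field:

	// Field names inferred from this diff (f1.Feature, f1.By); sketch only.
	lf := database.LayerFeature{
		Feature: database.Feature{Name: "feature 1", Version: "0.1", VersionFormat: vf1},
		By:      fd1, // the feature detector that found it
	}
	_ = lf.Feature // the detector-agnostic feature
	_ = lf.By      // provenance: which detector, at which version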
@ -597,44 +519,65 @@ func TestComputeAncestryFeatures(t *testing.T) {
	// -> blank (empty)

	layers := []database.Layer{
-		blank,
-		initNS1a,
-		removeF2,
-		initNS2a,
-		upgradeNS2b,
-		blank,
-		upgradeNS1b,
-		removeF2,
+		blank,       // empty
+		initNS1a,    // namespace: NS1a, features: f1, f2
+		removeF2,    // namespace: , features: f1
+		initNS2a,    // namespace: NS2a, features: f3, f4 ( under NS2a )
+		upgradeNS2b, // namespace: NS2b, ( f3, f4 are now under NS2b )
+		blank,       // empty
+		upgradeNS1b, // namespace: NS1b, ( f1, f2 are now under NS1b, and they are introduced in this layer. )
+		removeF2,    // namespace: , features: f1
		blank,
	}

-	expected := map[database.NamespacedFeature]bool{
-		{
-			Feature:   f1,
-			Namespace: ns1a,
-		}: false,
-		{
-			Feature:   f3,
-			Namespace: ns2a,
-		}: false,
-		{
-			Feature:   f4,
-			Namespace: ns2a,
-		}: false,
+	expected := []database.AncestryLayer{
+		{
+			"blank",
+			[]database.AncestryFeature{},
+		},
+		{
+			"initNS1a",
+			[]database.AncestryFeature{{database.NamespacedFeature{f1.Feature, ns1a.Namespace}, f1.By, ns1a.By}},
+		},
+		{
+			"removeF2",
+			[]database.AncestryFeature{},
+		},
+		{
+			"initNS2a",
+			[]database.AncestryFeature{
+				{database.NamespacedFeature{f3.Feature, ns2a.Namespace}, f3.By, ns2a.By},
+				{database.NamespacedFeature{f4.Feature, ns2a.Namespace}, f4.By, ns2a.By},
+			},
+		},
+		{
+			"upgradeNS2b",
+			[]database.AncestryFeature{},
+		},
+		{
+			"blank",
+			[]database.AncestryFeature{},
+		},
+		{
+			"upgradeNS1b",
+			[]database.AncestryFeature{},
+		},
+		{
+			"removeF2",
+			[]database.AncestryFeature{},
+		},
+		{
+			"blank",
+			[]database.AncestryFeature{},
+		},
	}

-	ancestryLayers, err := computeAncestryLayers(layers, database.Processors{})
-	assert.Nil(t, err)
-	features := getNamespacedFeatures(ancestryLayers)
-	for _, f := range features {
-		if assert.Contains(t, expected, f) {
-			if assert.False(t, expected[f]) {
-				expected[f] = true
-			}
-		}
-	}
-
-	for f, visited := range expected {
-		assert.True(t, visited, "expected feature is missing : "+f.Namespace.Name+":"+f.Name)
+	expectedDetectors := []database.Detector{nd1, fd1}
+	ancestryLayers, detectors, err := computeAncestryLayers(layers)
+	require.Nil(t, err)
+
+	database.AssertDetectorsEqual(t, expectedDetectors, detectors)
+	for i := range expected {
+		database.AssertAncestryLayerEqual(t, &expected[i], &ancestryLayers[i])
	}
}
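Reading the expectations as a whole: each namespaced feature is attributed to the ancestry layer that introduces it (f1 lands on initNS1a, and its re-introduction in upgradeNS1b contributes nothing), and expectedDetectors omits fd2, which scanned only one layer, so its feature f5 never surfaces. That suggests the ancestry's detector set is the set of detectors common to every layer; a hedged sketch of that invariant, assuming database.Detector is a comparable struct (the helper name is hypothetical):

	// commonDetectors returns the detectors present in every layer's By list.
	func commonDetectors(layers []database.Layer) []database.Detector {
		counts := map[database.Detector]int{}
		for _, l := range layers {
			seen := map[database.Detector]bool{}
			for _, d := range l.By {
				if !seen[d] { // By may repeat a detector (see the fixtures above)
					seen[d] = true
					counts[d]++
				}
			}
		}
		common := []database.Detector{}
		for d, n := range counts {
			if n == len(layers) {
				common = append(common, d)
			}
		}
		return common
	}

Applied to the nine layers above, this yields {nd1, fd1}, matching expectedDetectors.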