Compare commits
275 Commits
experiment
...
master
| Author | SHA1 | Date |
|---|---|---|
Andy | 0085d3e042 | 5 years ago |
Andy | 0e5c4b6445 | 5 years ago |
Jimmy Zelinskie | d0bd4c7ab8 | 5 years ago |
Dominik Csapak | e08fe792ef | 5 years ago |
Jimmy Zelinskie | 5fef44dd04 | 5 years ago |
Eric Sim | 32cd4f1ec3 | 5 years ago |
Eric Sim | 6617f560cc | 5 years ago |
Eric Sim | adde75975f | 5 years ago |
Eric Sim | 684ae2be1d | 5 years ago |
Eric Sim | 8e98ee878a | 5 years ago |
Eric Sim | 803cf4a29e | 5 years ago |
Eric Sim | 8fb9097dbd | 5 years ago |
Jimmy Zelinskie | d79827690c | 5 years ago |
Ales Raszka | 4e49aaf346 | 5 years ago |
Jimmy Zelinskie | 3316e7e1ef | 5 years ago |
Jimmy Zelinskie | e8bd0c4f78 | 5 years ago |
Jimmy Zelinskie | 4af5afe305 | 5 years ago |
Chris Northwood | a3a37072b5 | 5 years ago |
Chris Northwood | afd7fe2554 | 5 years ago |
Sida Chen | 1234a8d2f0 | 5 years ago |
Sida Chen | 098cb2ef2c | 5 years ago |
Kate Hill | 710c65530f | 5 years ago |
Sida Chen | 88f506918b | 5 years ago |
Ales Raszka | f2ce8325b9 | 5 years ago |
Li Joe | 3f13184ad6 | 5 years ago |
Ales Raszka | f326b6f664 | 5 years ago |
Sida Chen | 2c7838eac7 | 5 years ago |
Sida Chen | 2d1ac2c4d5 | 5 years ago |
Sida Chen | 0731df972c | 5 years ago |
Sida Chen | dfa07f6d86 | 5 years ago |
Sida Chen | 921acb26fe | 5 years ago |
Sida Chen | 7cc83ccbc5 | 5 years ago |
Sida Chen | 497b79a293 | 5 years ago |
Sida Chen | ea418cffd4 | 5 years ago |
Sida Chen | 176c69e59d | 5 years ago |
Sida Chen | 98e81ff5f1 | 5 years ago |
Sida Chen | ba50d7c626 | 5 years ago |
Sida Chen | 0b32b36cf7 | 5 years ago |
Sida Chen | c50a2339b7 | 5 years ago |
Sida Chen | 43f3ea87d8 | 5 years ago |
Sida Chen | a33050637b | 5 years ago |
Sida Chen | 8bebea3643 | 5 years ago |
Sida Chen | 3fafb73c4f | 5 years ago |
Jimmy Zelinskie | a2d6508730 | 5 years ago |
Jimmy Zelinskie | c4a32543e8 | 5 years ago |
Sida Chen | a689f1f1dc | 5 years ago |
Ales Raszka | d77dc0f0ae | 5 years ago |
Ales Raszka | a8a91379d9 | 5 years ago |
Ales Raszka | 60b0bd27fa | 5 years ago |
Ales Raszka | 60ef726677 | 5 years ago |
Ales Raszka | 44c4a6f3ce | 5 years ago |
Ales Raszka | 34c2d96b36 | 5 years ago |
Sida Chen | b3fe95e152 | 5 years ago |
Sida Chen | 1b9ed99646 | 5 years ago |
Sida Chen | 1f0bc1ea5f | 5 years ago |
Sida Chen | 58f014c297 | 5 years ago |
Sida Chen | b03f1bc3a6 | 5 years ago |
Sida Chen | ed9c6baf4f | 5 years ago |
Sida Chen | f6759b2a15 | 5 years ago |
Sida Chen | 046b0e49d1 | 5 years ago |
Sida Chen | 07a97e30c6 | 5 years ago |
Jimmy Zelinskie | 4fa03d1c78 | 5 years ago |
Jimmy Zelinskie | 961c7d4680 | 5 years ago |
Jimmy Zelinskie | a4e7873d14 | 5 years ago |
Sida Chen | fb209d32a0 | 5 years ago |
Sida Chen | c6497dda0a | 5 years ago |
Sida Chen | f7e54c1a28 | 5 years ago |
Sida Chen | 76081864c9 | 5 years ago |
Sida Chen | 5bf8365f7b | 5 years ago |
Sida Chen | 465687fa94 | 5 years ago |
Sida Chen | 8aae73f1c8 | 5 years ago |
Sida Chen | 5b2376498b | 5 years ago |
Sida Chen | 891ce1697d | 5 years ago |
Sida Chen | dd239762f6 | 5 years ago |
Sida Chen | 73bc2bc36b | 5 years ago |
Sida Chen | 5a94499fdb | 5 years ago |
Sida Chen | 32b11e54eb | 5 years ago |
Sida Chen | 870e812376 | 5 years ago |
Sida Chen | 23ccd9b53b | 5 years ago |
Sida Chen | 79af05e67d | 5 years ago |
Sida Chen | 5fa1ac89b9 | 5 years ago |
Sida Chen | 073c685c5b | 5 years ago |
Sida Chen | f61675355e | 5 years ago |
Sida Chen | 0e0d8b38bb | 5 years ago |
Sida Chen | 7dd989c0f2 | 5 years ago |
Sida Chen | 00eed77b45 | 5 years ago |
Sida Chen | c6c8fce39a | 5 years ago |
Jimmy Zelinskie | cafe0976a4 | 5 years ago |
Jimmy Zelinskie | dd91597f19 | 5 years ago |
Jimmy Zelinskie | f64bd117b2 | 5 years ago |
Jimmy Zelinskie | aa8682947e | 5 years ago |
Jimmy Zelinskie | 11b26b3857 | 5 years ago |
Dustin Specker | 49b5621d73 | 5 years ago |
Jelto Wodstrcil | 4505fcea32 | 5 years ago |
Jimmy Zelinskie | cc8d1152c4 | 5 years ago |
Sergey | a57d806717 | 5 years ago |
Ales Raszka | 015a79fd5a | 5 years ago |
Ales Raszka | 90f5592095 | 5 years ago |
Jimmy Zelinskie | 97b4b1ac33 | 5 years ago |
Jimmy Zelinskie | 162e8cdafc | 5 years ago |
Jimmy Zelinskie | bafe45db2d | 5 years ago |
Jimmy Zelinskie | 3e6896c6a4 | 5 years ago |
Jimmy Zelinskie | 165c397f16 | 5 years ago |
Jimmy Zelinskie | 7084a226ae | 5 years ago |
Jimmy Zelinskie | 25078ac838 | 5 years ago |
Jimmy Zelinskie | e16d17dda9 | 5 years ago |
Jimmy Zelinskie | 0d41968acd | 5 years ago |
Jimmy Zelinskie | 6c5be7e1c6 | 5 years ago |
Jimmy Zelinskie | 399deab100 | 5 years ago |
Jimmy Zelinskie | effe1552fb | 5 years ago |
Jimmy Zelinskie | 45ecf18815 | 5 years ago |
Jimmy Zelinskie | b08ad9b8e6 | 5 years ago |
Flavio Castelli | 1105102b84 | 5 years ago |
Flavio Castelli | 5a4d4913c1 | 5 years ago |
Jimmy Zelinskie | 5cd6a8cc92 | 5 years ago |
Tamal Saha | 0ed4126240 | 5 years ago |
Ales Raszka | bd7102d963 | 5 years ago |
Jimmy Zelinskie | 3947073b9e | 6 years ago |
Jeff Knurek | 81430ffbb2 | 6 years ago |
Jeff Knurek | 6a94d8ccd2 | 6 years ago |
Jimmy Zelinskie | 300bb52696 | 6 years ago |
Jimmy Zelinskie | 4fbeb9ced5 | 6 years ago |
Jimmy Zelinskie | 504f0f3af3 | 6 years ago |
Geoff Baskwill | 3503ddb96f | 6 years ago |
Jimmy Zelinskie | 93e7a4cfa8 | 6 years ago |
Jimmy Zelinskie | 4c08c8f959 | 6 years ago |
Bryan Rosander | 00db964497 | 6 years ago |
Kate Murphy | 6c682da3e1 | 6 years ago |
Jimmy Zelinskie | c123c95590 | 6 years ago |
Harald Nordgren | be24096183 | 6 years ago |
Sida Chen | 05cbf328aa | 6 years ago |
Sida Chen | 4106322107 | 6 years ago |
Sida Chen | 72674ca871 | 6 years ago |
Sida Chen | a3f7387ff1 | 6 years ago |
Sida Chen | c3904c9696 | 6 years ago |
Sida Chen | 1ee1b95afc | 6 years ago |
Jimmy Zelinskie | 0c2e5e73c2 | 6 years ago |
Kate Murphy | 081ae34af1 | 6 years ago |
Kate Murphy | 4f0da12b12 | 6 years ago |
Jimmy Zelinskie | 8efc3e4038 | 6 years ago |
Jimmy Zelinskie | 699d1143e5 | 6 years ago |
Sida Chen | 335cb65917 | 6 years ago |
Sida Chen | 2236b0a5c9 | 6 years ago |
Sida Chen | 00fadfc3e3 | 6 years ago |
Sida Chen | 11b67e612c | 6 years ago |
Kate Murphy | b81e4454fb | 6 years ago |
Kate Murphy | 14277a8f5d | 6 years ago |
Kate Murphy | aab46f5658 | 6 years ago |
Sida Chen | 17539bda60 | 6 years ago |
Sida Chen | f759dd54c0 | 6 years ago |
Jimmy Zelinskie | 2ac088dd0f | 6 years ago |
Sida Chen | fe614f2b01 | 6 years ago |
Kate Murphy | 8d5a0131c4 | 6 years ago |
Sida Chen | 2cc61f9fc0 | 6 years ago |
Sida Chen | a057e4a943 | 6 years ago |
Sida Chen | 4ac046642f | 6 years ago |
Sida Chen | 1c40e7d016 | 6 years ago |
Sida Chen | 3fe894c5ad | 6 years ago |
Jimmy Zelinskie | ddaf19b3a6 | 6 years ago |
Sida Chen | 3c72fa29a6 | 6 years ago |
Jimmy Zelinskie | 74efdf6b51 | 6 years ago |
Sida Chen | 69c0c84348 | 6 years ago |
Sida Chen | a3e9b5b55d | 6 years ago |
Sida Chen | e657d26313 | 6 years ago |
Sida Chen | 0c1b80b2ed | 6 years ago |
Sida Chen | 028324014b | 6 years ago |
Sida Chen | 48427e9b88 | 6 years ago |
Sida Chen | 9c49d9dc55 | 6 years ago |
Sida Chen | 53bf19aecf | 6 years ago |
Sida Chen | 34d0e516e0 | 6 years ago |
Sida Chen | dca2d4e597 | 6 years ago |
Sida Chen | db2db8bbe8 | 6 years ago |
Sida Chen | 9f5d1ea4e1 | 6 years ago |
Jimmy Zelinskie | 8cf7ad454c | 6 years ago |
Jimmy Zelinskie | 5d1c30218e | 6 years ago |
Jimmy Zelinskie | 9b1f205833 | 6 years ago |
Jimmy Zelinskie | 0ca9431235 | 6 years ago |
Hayden Hughes | d3facfd7cd | 6 years ago |
Sida Chen | 0609ed964b | 6 years ago |
Sida Chen | 53433090a3 | 6 years ago |
Jimmy Zelinskie | 44ae4bc959 | 6 years ago |
Jimmy Zelinskie | c2d887f9e9 | 6 years ago |
Jimmy Zelinskie | d0a3fe9206 | 6 years ago |
Grégoire Unbekandt | c4ffa0c370 | 6 years ago |
Grégoire Unbekandt | a90db713a2 | 6 years ago |
Grégoire Unbekandt | 8b3338ef56 | 6 years ago |
Grégoire Unbekandt | 4e4e98f328 | 6 years ago |
Grégoire Unbekandt | ac86a36740 | 6 years ago |
Grégoire Unbekandt | 4ab98cfe54 | 6 years ago |
Sida Chen | f98ff58afd | 6 years ago |
Sida Chen | e160616723 | 6 years ago |
Jean Michel MacKay | 30848d9eb7 | 6 years ago |
Jean Michel MacKay | 56b4f23ae2 | 6 years ago |
Jean Michel MacKay | f34f94320a | 6 years ago |
Jean Michel MacKay | 3959f416fa | 6 years ago |
Jean Michel MacKay | 49cbdd7a7c | 6 years ago |
Jimmy Zelinskie | 089a4e0f0a | 6 years ago |
Jimmy Zelinskie | 1ec2759550 | 6 years ago |
Sida Chen | ff9303905b | 6 years ago |
Sida Chen | 6c69377343 | 6 years ago |
Jimmy Zelinskie | dc6be5d1b0 | 6 years ago |
Sida Chen | 5d725e67b0 | 6 years ago |
Jimmy Zelinskie | e5c2e378a2 | 6 years ago |
Jimmy Zelinskie | 0565938956 | 6 years ago |
Jimmy Zelinskie | d193b46449 | 6 years ago |
Jimmy Zelinskie | b20482e0ae | 6 years ago |
Jimmy Zelinskie | fffb67f137 | 6 years ago |
Jimmy Zelinskie | 55ecf1e58a | 6 years ago |
Jimmy Zelinskie | 30644fcc01 | 6 years ago |
Sida Chen | 2bbbad393b | 6 years ago |
Sida Chen | 2827b9342b | 6 years ago |
Jimmy Zelinskie | 06b257cc97 | 6 years ago |
Jimmy Zelinskie | 4fd86fd518 | 6 years ago |
Jimmy Zelinskie | ce15f73501 | 6 years ago |
Jimmy Zelinskie | 52ecf35ca6 | 6 years ago |
Jean Michel MacKay | 9df4f5bd70 | 6 years ago |
Sida Chen | 4b64151330 | 6 years ago |
Sida Chen | 6a44052e31 | 6 years ago |
Jimmy Zelinskie | 9f2cc4e533 | 6 years ago |
Jimmy Zelinskie | ce6b00887b | 6 years ago |
Sida Chen | dfc3023372 | 6 years ago |
Sida Chen | d28f3214ce | 6 years ago |
Jimmy Zelinskie | 7f9c0b1b07 | 6 years ago |
Daniel Jiang | 9e4a347ecd | 6 years ago |
Jimmy Zelinskie | ddeb64339d | 6 years ago |
Fabian Hinz | 690d26edba | 6 years ago |
Jimmy Zelinskie | 1d690bbacf | 6 years ago |
Roberto Soares | bc6f37f1ae | 6 years ago |
Jimmy Zelinskie | c26154ab74 | 6 years ago |
Bryan Rosander | f3e156a46e | 6 years ago |
Jimmy Zelinskie | b1cd092319 | 6 years ago |
Jimmy Zelinskie | f32f438a98 | 6 years ago |
Jimmy Zelinskie | 3babbafb2f | 6 years ago |
honglichang(常红立) | 0d5f300c5b | 6 years ago |
Jimmy Zelinskie | 9a9b1f7a13 | 6 years ago |
Andy | 921ba54152 | 6 years ago |
ErikThoreson | df1dd5c149 | 6 years ago |
Jimmy Zelinskie | 158bb31b77 | 6 years ago |
Jimmy Zelinskie | 027f239e1f | 6 years ago |
Jimmy Zelinskie | 5caa821c80 | 6 years ago |
Jimmy Zelinskie | 456af5f48c | 6 years ago |
Jimmy Zelinskie | c031f8ea0c | 6 years ago |
Jimmy Zelinskie | 4c2be5285e | 6 years ago |
Jimmy Zelinskie | e907e4d263 | 6 years ago |
Mark Eisenblaetter | 07a08a4f53 | 6 years ago |
Jimmy Zelinskie | 34c1382d9e | 6 years ago |
usr42 | db5dbbe4e9 | 6 years ago |
Jimmy Zelinskie | 7492aa31ba | 6 years ago |
Jimmy Zelinskie | f550dd16a0 | 6 years ago |
Jimmy Zelinskie | d7a751e0d4 | 6 years ago |
Jimmy Zelinskie | 6b9f668ea0 | 6 years ago |
Jimmy Zelinskie | a5b3e747a0 | 6 years ago |
Jimmy Zelinskie | 8c71427375 | 6 years ago |
Jimmy Zelinskie | ec5014f8a1 | 6 years ago |
Jimmy Zelinskie | 389b6e9927 | 6 years ago |
Tomer H | e649f8f149 | 6 years ago |
Jimmy Zelinskie | e73051fc0a | 6 years ago |
Sida Chen | 4db72b8c26 | 6 years ago |
Brad Ison | 8a2ed864b9 | 6 years ago |
Jimmy Zelinskie | 3d2c12cd56 | 6 years ago |
Sida Chen | 7a06a7a2b4 | 6 years ago |
Jimmy Zelinskie | 5e7b450be9 | 6 years ago |
Diederik van der Boor | e454314beb | 6 years ago |
Jimmy Zelinskie | 01eb48bc84 | 6 years ago |
Jimmy Zelinskie | 69cfe9213f | 6 years ago |
Diederik van der Boor | 64c2853e75 | 6 years ago |
Grégoire Unbekandt | c1a58bf922 | 6 years ago |
Jelto Wodstrcil | e9dba0fa8f | 6 years ago |
Jimmy Zelinskie | ce0699c59d | 6 years ago |
Jimmy Zelinskie | 12b47b0854 | 6 years ago |
Sida Chen | a75b8ac7ff | 6 years ago |
Leandro Repolho | 45dfabbfea | 6 years ago |
Joe Ray | 947a8aa00c | 6 years ago |
Jimmy Zelinskie | 52a42b8503 | 6 years ago |
Eric Chiang | e43ec26965 | 6 years ago |
@ -0,0 +1,47 @@
|
||||
---
|
||||
kind: pipeline
|
||||
name: default
|
||||
|
||||
platform:
|
||||
os: linux
|
||||
arch: amd64
|
||||
|
||||
steps:
|
||||
- name: publish
|
||||
pull: default
|
||||
image: plugins/docker:18.09
|
||||
settings:
|
||||
registry: https://registry.nixaid.com
|
||||
repo: "registry.nixaid.com/${DRONE_REPO_NAMESPACE}/${DRONE_REPO_NAME}"
|
||||
tags:
|
||||
- latest
|
||||
username:
|
||||
from_secret: docker_username
|
||||
password:
|
||||
from_secret: docker_password
|
||||
# storage_path: /drone/docker
|
||||
# storage_driver: aufs
|
||||
# ipv6: false
|
||||
# debug: true
|
||||
when:
|
||||
branch:
|
||||
- master
|
||||
event:
|
||||
- push
|
||||
- tag
|
||||
|
||||
- name: notify
|
||||
pull: default
|
||||
image: drillster/drone-email:latest
|
||||
settings:
|
||||
from: "Drone CI <noreply@nixaid.com>"
|
||||
host: mx.nixaid.com
|
||||
port: 587
|
||||
subject: "NIXAID Drone Pipeline {{#success build.status}}SUCCESS{{else}}FAILURE{{/success}} Notification"
|
||||
when:
|
||||
event:
|
||||
- push
|
||||
- tag
|
||||
status:
|
||||
- success
|
||||
- failure
|
@ -0,0 +1,4 @@
|
||||
comment: "This issue is closed because it does not meet our issue template. Please read it."
|
||||
issueConfigs:
|
||||
- content:
|
||||
- "### Environment"
|
@ -0,0 +1,12 @@
|
||||
daysUntilStale: 60
|
||||
daysUntilClose: 7
|
||||
exemptLabels:
|
||||
- lifecycle/preserve
|
||||
exemptProjects: true
|
||||
exemptMilestones: true
|
||||
staleLabel: lifecycle/stale
|
||||
markComment: >
|
||||
This issue has been automatically marked as stale because it has not had
|
||||
recent activity. It will be closed if no further activity occurs. Thank you
|
||||
for your contributions.
|
||||
limitPerRun: 30
|
@ -0,0 +1,145 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clair
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/ext/featurefmt"
|
||||
"github.com/coreos/clair/ext/featurens"
|
||||
"github.com/coreos/clair/ext/imagefmt"
|
||||
)
|
||||
|
||||
// AnalyzeError represents an failure when analyzing layer or constructing
|
||||
// ancestry.
|
||||
type AnalyzeError string
|
||||
|
||||
func (e AnalyzeError) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
var (
|
||||
// StorageError represents an analyze error caused by the storage
|
||||
StorageError = AnalyzeError("failed to query the database.")
|
||||
// RetrieveBlobError represents an analyze error caused by failure of
|
||||
// downloading or extracting layer blobs.
|
||||
RetrieveBlobError = AnalyzeError("failed to download layer blob.")
|
||||
// ExtractBlobError represents an analyzer error caused by failure of
|
||||
// extracting a layer blob by imagefmt.
|
||||
ExtractBlobError = AnalyzeError("failed to extract files from layer blob.")
|
||||
// FeatureDetectorError is an error caused by failure of feature listing by
|
||||
// featurefmt.
|
||||
FeatureDetectorError = AnalyzeError("failed to scan feature from layer blob files.")
|
||||
// NamespaceDetectorError is an error caused by failure of namespace
|
||||
// detection by featurens.
|
||||
NamespaceDetectorError = AnalyzeError("failed to scan namespace from layer blob files.")
|
||||
)
|
||||
|
||||
// AnalyzeLayer retrieves the clair layer with all extracted features and namespaces.
|
||||
// If a layer is already scanned by all enabled detectors in the Clair instance, it returns directly.
|
||||
// Otherwise, it re-download the layer blob and scan the features and namespaced again.
|
||||
func AnalyzeLayer(ctx context.Context, store database.Datastore, blobSha256 string, blobFormat string, downloadURI string, downloadHeaders map[string]string) (*database.Layer, error) {
|
||||
layer, found, err := database.FindLayerAndRollback(store, blobSha256)
|
||||
logFields := log.Fields{"layer.Hash": blobSha256}
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logFields).Error("failed to find layer in the storage")
|
||||
return nil, StorageError
|
||||
}
|
||||
|
||||
var scannedBy []database.Detector
|
||||
if found {
|
||||
scannedBy = layer.By
|
||||
}
|
||||
|
||||
// layer will be scanned by detectors not scanned the layer already.
|
||||
toScan := database.DiffDetectors(EnabledDetectors(), scannedBy)
|
||||
if len(toScan) != 0 {
|
||||
log.WithFields(logFields).Debug("scan layer blob not already scanned")
|
||||
newLayerScanResult := &database.Layer{Hash: blobSha256, By: toScan}
|
||||
blob, err := retrieveLayerBlob(ctx, downloadURI, downloadHeaders)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logFields).Error("failed to retrieve layer blob")
|
||||
return nil, RetrieveBlobError
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := blob.Close(); err != nil {
|
||||
log.WithFields(logFields).Error("failed to close layer blob reader")
|
||||
}
|
||||
}()
|
||||
|
||||
files := append(featurefmt.RequiredFilenames(toScan), featurens.RequiredFilenames(toScan)...)
|
||||
fileMap, err := imagefmt.Extract(blobFormat, blob, files)
|
||||
if err != nil {
|
||||
log.WithFields(logFields).WithError(err).Error("failed to extract layer blob")
|
||||
return nil, ExtractBlobError
|
||||
}
|
||||
|
||||
newLayerScanResult.Features, err = featurefmt.ListFeatures(fileMap, toScan)
|
||||
if err != nil {
|
||||
log.WithFields(logFields).WithError(err).Error("failed to detect features")
|
||||
return nil, FeatureDetectorError
|
||||
}
|
||||
|
||||
newLayerScanResult.Namespaces, err = featurens.Detect(fileMap, toScan)
|
||||
if err != nil {
|
||||
log.WithFields(logFields).WithError(err).Error("failed to detect namespaces")
|
||||
return nil, NamespaceDetectorError
|
||||
}
|
||||
|
||||
if err = saveLayerChange(store, newLayerScanResult); err != nil {
|
||||
log.WithFields(logFields).WithError(err).Error("failed to store layer change")
|
||||
return nil, StorageError
|
||||
}
|
||||
|
||||
layer = database.MergeLayers(layer, newLayerScanResult)
|
||||
} else {
|
||||
log.WithFields(logFields).Debug("found scanned layer blob")
|
||||
}
|
||||
|
||||
return layer, nil
|
||||
}
|
||||
|
||||
// EnabledDetectors retrieves a list of all detectors installed in the Clair
|
||||
// instance.
|
||||
func EnabledDetectors() []database.Detector {
|
||||
return append(featurefmt.ListListers(), featurens.ListDetectors()...)
|
||||
}
|
||||
|
||||
// RegisterConfiguredDetectors populates the database with registered detectors.
|
||||
func RegisterConfiguredDetectors(store database.Datastore) {
|
||||
if err := database.PersistDetectorsAndCommit(store, EnabledDetectors()); err != nil {
|
||||
panic("failed to initialize Clair analyzer")
|
||||
}
|
||||
}
|
||||
|
||||
func saveLayerChange(store database.Datastore, layer *database.Layer) error {
|
||||
if err := database.PersistFeaturesAndCommit(store, layer.GetFeatures()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := database.PersistNamespacesAndCommit(store, layer.GetNamespaces()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := database.PersistPartialLayerAndCommit(store, layer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -0,0 +1,355 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clair
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
)
|
||||
|
||||
type layerIndexedFeature struct {
|
||||
Feature *database.LayerFeature
|
||||
Namespace *layerIndexedNamespace
|
||||
IntroducedIn int
|
||||
}
|
||||
|
||||
type layerIndexedNamespace struct {
|
||||
Namespace database.LayerNamespace `json:"namespace"`
|
||||
IntroducedIn int `json:"introducedIn"`
|
||||
}
|
||||
|
||||
// AncestryBuilder builds an Ancestry, which contains an ordered list of layers
|
||||
// and their features.
|
||||
type AncestryBuilder struct {
|
||||
layerIndex int
|
||||
layerNames []string
|
||||
detectors []database.Detector
|
||||
namespaces []layerIndexedNamespace // unique namespaces
|
||||
features map[database.Detector][]layerIndexedFeature
|
||||
}
|
||||
|
||||
// NewAncestryBuilder creates a new ancestry builder.
|
||||
//
|
||||
// ancestry builder takes in the extracted layer information and produce a set of
|
||||
// namespaces, features, and the relation between features for the whole image.
|
||||
func NewAncestryBuilder(detectors []database.Detector) *AncestryBuilder {
|
||||
return &AncestryBuilder{
|
||||
layerIndex: 0,
|
||||
detectors: detectors,
|
||||
namespaces: make([]layerIndexedNamespace, 0),
|
||||
features: make(map[database.Detector][]layerIndexedFeature),
|
||||
}
|
||||
}
|
||||
|
||||
// AddLeafLayer adds a leaf layer to the ancestry builder, and computes the
|
||||
// namespaced features.
|
||||
func (b *AncestryBuilder) AddLeafLayer(layer *database.Layer) {
|
||||
b.layerNames = append(b.layerNames, layer.Hash)
|
||||
for i := range layer.Namespaces {
|
||||
b.updateNamespace(&layer.Namespaces[i])
|
||||
}
|
||||
|
||||
allFeatureMap := map[database.Detector][]database.LayerFeature{}
|
||||
for i := range layer.Features {
|
||||
layerFeature := layer.Features[i]
|
||||
allFeatureMap[layerFeature.By] = append(allFeatureMap[layerFeature.By], layerFeature)
|
||||
}
|
||||
|
||||
// we only care about the ones specified by builder's detectors
|
||||
featureMap := map[database.Detector][]database.LayerFeature{}
|
||||
for i := range b.detectors {
|
||||
detector := b.detectors[i]
|
||||
featureMap[detector] = allFeatureMap[detector]
|
||||
}
|
||||
|
||||
for detector := range featureMap {
|
||||
b.addLayerFeatures(detector, featureMap[detector])
|
||||
}
|
||||
|
||||
b.layerIndex++
|
||||
}
|
||||
|
||||
// Every detector inspects a set of files for the features
|
||||
// therefore, if that set of files gives a different set of features, it
|
||||
// should replace the existing features.
|
||||
func (b *AncestryBuilder) addLayerFeatures(detector database.Detector, features []database.LayerFeature) {
|
||||
if len(features) == 0 {
|
||||
// TODO(sidac): we need to differentiate if the detector finds that all
|
||||
// features are removed ( a file change ), or the package installer is
|
||||
// removed ( a file deletion ), or there's no change in the file ( file
|
||||
// does not exist in the blob ) Right now, we're just assuming that no
|
||||
// change in the file because that's the most common case.
|
||||
return
|
||||
}
|
||||
|
||||
existingFeatures := b.features[detector]
|
||||
currentFeatures := make([]layerIndexedFeature, 0, len(features))
|
||||
// Features that are not in the current layer should be removed.
|
||||
for i := range existingFeatures {
|
||||
feature := existingFeatures[i]
|
||||
for j := range features {
|
||||
if features[j] == *feature.Feature {
|
||||
currentFeatures = append(currentFeatures, feature)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Features that newly introduced in the current layer should be added.
|
||||
for i := range features {
|
||||
found := false
|
||||
for j := range existingFeatures {
|
||||
if *existingFeatures[j].Feature == features[i] {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
namespace, found := b.lookupNamespace(&features[i])
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
|
||||
currentFeatures = append(currentFeatures, b.createLayerIndexedFeature(namespace, &features[i]))
|
||||
}
|
||||
}
|
||||
|
||||
b.features[detector] = currentFeatures
|
||||
}
|
||||
|
||||
// updateNamespace update the namespaces for the ancestry. It does the following things:
|
||||
// 1. when a detector detects a new namespace, it's added to the ancestry.
|
||||
// 2. when a detector detects a difference in the detected namespace, it
|
||||
// replaces the namespace, and also move all features under that namespace to
|
||||
// the new namespace.
|
||||
func (b *AncestryBuilder) updateNamespace(layerNamespace *database.LayerNamespace) {
|
||||
var (
|
||||
previous *layerIndexedNamespace
|
||||
foundUpgrade bool
|
||||
)
|
||||
|
||||
newNSNames := strings.Split(layerNamespace.Name, ":")
|
||||
if len(newNSNames) != 2 {
|
||||
log.Error("invalid namespace name")
|
||||
}
|
||||
|
||||
newNSName := newNSNames[0]
|
||||
newNSVersion := newNSNames[1]
|
||||
for i, ns := range b.namespaces {
|
||||
nsNames := strings.Split(ns.Namespace.Name, ":")
|
||||
if len(nsNames) != 2 {
|
||||
log.Error("invalid namespace name")
|
||||
continue
|
||||
}
|
||||
|
||||
nsName := nsNames[0]
|
||||
nsVersion := nsNames[1]
|
||||
if ns.Namespace.VersionFormat == layerNamespace.VersionFormat && nsName == newNSName {
|
||||
if nsVersion != newNSVersion {
|
||||
previous = &b.namespaces[i]
|
||||
foundUpgrade = true
|
||||
break
|
||||
} else {
|
||||
// not changed
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// we didn't found the namespace is a upgrade from another namespace, so we
|
||||
// simply add it.
|
||||
if !foundUpgrade {
|
||||
b.namespaces = append(b.namespaces, layerIndexedNamespace{
|
||||
Namespace: *layerNamespace,
|
||||
IntroducedIn: b.layerIndex,
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// All features referencing to this namespace are now pointing to the new namespace.
|
||||
// Also those features are now treated as introduced in the same layer as
|
||||
// when this new namespace is introduced.
|
||||
previous.Namespace = *layerNamespace
|
||||
previous.IntroducedIn = b.layerIndex
|
||||
|
||||
for _, features := range b.features {
|
||||
for i, feature := range features {
|
||||
if feature.Namespace == previous {
|
||||
features[i].IntroducedIn = previous.IntroducedIn
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *AncestryBuilder) createLayerIndexedFeature(namespace *layerIndexedNamespace, feature *database.LayerFeature) layerIndexedFeature {
|
||||
return layerIndexedFeature{
|
||||
Feature: feature,
|
||||
Namespace: namespace,
|
||||
IntroducedIn: b.layerIndex,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *AncestryBuilder) lookupNamespace(feature *database.LayerFeature) (*layerIndexedNamespace, bool) {
|
||||
matchedNamespaces := []*layerIndexedNamespace{}
|
||||
if feature.PotentialNamespace.Name != "" {
|
||||
a := &layerIndexedNamespace{
|
||||
Namespace: database.LayerNamespace{
|
||||
Namespace: feature.PotentialNamespace,
|
||||
},
|
||||
IntroducedIn: b.layerIndex,
|
||||
}
|
||||
matchedNamespaces = append(matchedNamespaces, a)
|
||||
} else {
|
||||
|
||||
for i, namespace := range b.namespaces {
|
||||
if namespace.Namespace.VersionFormat == feature.VersionFormat {
|
||||
matchedNamespaces = append(matchedNamespaces, &b.namespaces[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(matchedNamespaces) == 1 {
|
||||
return matchedNamespaces[0], true
|
||||
}
|
||||
|
||||
serialized, _ := json.Marshal(matchedNamespaces)
|
||||
fields := log.Fields{
|
||||
"feature.Name": feature.Name,
|
||||
"feature.VersionFormat": feature.VersionFormat,
|
||||
"ancestryBuilder.namespaces": string(serialized),
|
||||
}
|
||||
|
||||
if len(matchedNamespaces) > 1 {
|
||||
log.WithFields(fields).Warn("skip features with ambiguous namespaces")
|
||||
} else {
|
||||
log.WithFields(fields).Warn("skip features with no matching namespace")
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (b *AncestryBuilder) ancestryFeatures(index int) []database.AncestryFeature {
|
||||
ancestryFeatures := []database.AncestryFeature{}
|
||||
for detector, features := range b.features {
|
||||
for _, feature := range features {
|
||||
if feature.IntroducedIn == index {
|
||||
ancestryFeatures = append(ancestryFeatures, database.AncestryFeature{
|
||||
NamespacedFeature: database.NamespacedFeature{
|
||||
Feature: feature.Feature.Feature,
|
||||
Namespace: feature.Namespace.Namespace.Namespace,
|
||||
},
|
||||
FeatureBy: detector,
|
||||
NamespaceBy: feature.Namespace.Namespace.By,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ancestryFeatures
|
||||
}
|
||||
|
||||
func (b *AncestryBuilder) ancestryLayers() []database.AncestryLayer {
|
||||
layers := make([]database.AncestryLayer, 0, b.layerIndex)
|
||||
for i := 0; i < b.layerIndex; i++ {
|
||||
layers = append(layers, database.AncestryLayer{
|
||||
Hash: b.layerNames[i],
|
||||
Features: b.ancestryFeatures(i),
|
||||
})
|
||||
}
|
||||
|
||||
return layers
|
||||
}
|
||||
|
||||
// Ancestry produces an Ancestry from the builder.
|
||||
func (b *AncestryBuilder) Ancestry(name string) *database.Ancestry {
|
||||
if name == "" {
|
||||
// TODO(sidac): we'll use the computed ancestry name in the future.
|
||||
// During the transition, it still requires the user to use the correct
|
||||
// ancestry name.
|
||||
name = ancestryName(b.layerNames)
|
||||
log.WithField("ancestry.Name", name).Warn("generated ancestry name since it's not specified")
|
||||
}
|
||||
|
||||
return &database.Ancestry{
|
||||
Name: name,
|
||||
By: b.detectors,
|
||||
Layers: b.ancestryLayers(),
|
||||
}
|
||||
}
|
||||
|
||||
// SaveAncestry saves an ancestry to the datastore.
|
||||
func SaveAncestry(store database.Datastore, ancestry *database.Ancestry) error {
|
||||
log.WithField("ancestry.Name", ancestry.Name).Debug("saving ancestry")
|
||||
features := []database.NamespacedFeature{}
|
||||
for _, layer := range ancestry.Layers {
|
||||
features = append(features, layer.GetFeatures()...)
|
||||
}
|
||||
|
||||
if err := database.PersistNamespacedFeaturesAndCommit(store, features); err != nil {
|
||||
return StorageError
|
||||
}
|
||||
|
||||
if err := database.UpsertAncestryAndCommit(store, ancestry); err != nil {
|
||||
return StorageError
|
||||
}
|
||||
|
||||
if err := database.CacheRelatedVulnerabilityAndCommit(store, features); err != nil {
|
||||
return StorageError
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsAncestryCached checks if the ancestry is already cached in the database with the current set of detectors.
|
||||
func IsAncestryCached(store database.Datastore, name string, layerHashes []string) (bool, error) {
|
||||
if name == "" {
|
||||
// TODO(sidac): we'll use the computed ancestry name in the future.
|
||||
// During the transition, it still requires the user to use the correct
|
||||
// ancestry name.
|
||||
name = ancestryName(layerHashes)
|
||||
log.WithField("ancestry.Name", name).Warn("generated ancestry name since it's not specified")
|
||||
}
|
||||
|
||||
ancestry, found, err := database.FindAncestryAndRollback(store, name)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("ancestry.Name", name).Error("failed to query ancestry in database")
|
||||
return false, StorageError
|
||||
}
|
||||
|
||||
if found {
|
||||
if len(database.DiffDetectors(EnabledDetectors(), ancestry.By)) == 0 {
|
||||
log.WithField("ancestry.Name", name).Debug("found cached ancestry")
|
||||
} else {
|
||||
log.WithField("ancestry.Name", name).Debug("found outdated ancestry cache")
|
||||
}
|
||||
} else {
|
||||
log.WithField("ancestry.Name", name).Debug("ancestry not cached")
|
||||
}
|
||||
|
||||
return found && len(database.DiffDetectors(EnabledDetectors(), ancestry.By)) == 0, nil
|
||||
}
|
||||
|
||||
func ancestryName(layerHashes []string) string {
|
||||
tag := sha256.Sum256([]byte(strings.Join(layerHashes, ",")))
|
||||
return hex.EncodeToString(tag[:])
|
||||
}
|
@ -0,0 +1,297 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clair
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
)
|
||||
|
||||
var (
|
||||
dpkg = database.NewFeatureDetector("dpkg", "1.0")
|
||||
rpm = database.NewFeatureDetector("rpm", "1.0")
|
||||
pip = database.NewFeatureDetector("pip", "1.0")
|
||||
python = database.NewNamespaceDetector("python", "1.0")
|
||||
osrelease = database.NewNamespaceDetector("os-release", "1.0")
|
||||
aptsources = database.NewNamespaceDetector("apt-sources", "1.0")
|
||||
ubuntu = *database.NewNamespace("ubuntu:14.04", "dpkg")
|
||||
ubuntu16 = *database.NewNamespace("ubuntu:16.04", "dpkg")
|
||||
rhel7 = *database.NewNamespace("cpe:/o:redhat:enterprise_linux:7::computenode", "rpm")
|
||||
debian = *database.NewNamespace("debian:7", "dpkg")
|
||||
python2 = *database.NewNamespace("python:2", "pip")
|
||||
sed = *database.NewSourcePackage("sed", "4.4-2", "dpkg")
|
||||
sedByRPM = *database.NewBinaryPackage("sed", "4.4-2", "rpm")
|
||||
sedBin = *database.NewBinaryPackage("sed", "4.4-2", "dpkg")
|
||||
tar = *database.NewBinaryPackage("tar", "1.29b-2", "dpkg")
|
||||
scipy = *database.NewSourcePackage("scipy", "3.0.0", "pip")
|
||||
|
||||
emptyNamespace = database.Namespace{}
|
||||
|
||||
detectors = []database.Detector{dpkg, osrelease, rpm}
|
||||
multinamespaceDetectors = []database.Detector{dpkg, osrelease, pip}
|
||||
)
|
||||
|
||||
type ancestryBuilder struct {
|
||||
ancestry *database.Ancestry
|
||||
}
|
||||
|
||||
func newAncestryBuilder(name string) *ancestryBuilder {
|
||||
return &ancestryBuilder{&database.Ancestry{Name: name}}
|
||||
}
|
||||
|
||||
func (b *ancestryBuilder) addDetectors(d ...database.Detector) *ancestryBuilder {
|
||||
b.ancestry.By = append(b.ancestry.By, d...)
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *ancestryBuilder) addLayer(hash string, f ...database.AncestryFeature) *ancestryBuilder {
|
||||
l := database.AncestryLayer{Hash: hash}
|
||||
l.Features = append(l.Features, f...)
|
||||
b.ancestry.Layers = append(b.ancestry.Layers, l)
|
||||
return b
|
||||
}
|
||||
|
||||
func ancestryFeature(namespace database.Namespace, feature database.Feature, nsBy database.Detector, fBy database.Detector) database.AncestryFeature {
|
||||
return database.AncestryFeature{
|
||||
NamespacedFeature: database.NamespacedFeature{feature, namespace},
|
||||
FeatureBy: fBy,
|
||||
NamespaceBy: nsBy,
|
||||
}
|
||||
}
|
||||
|
||||
// layerBuilder is for helping constructing the layer test artifacts.
|
||||
type layerBuilder struct {
|
||||
layer *database.Layer
|
||||
}
|
||||
|
||||
func newLayerBuilder(hash string) *layerBuilder {
|
||||
return &layerBuilder{&database.Layer{Hash: hash, By: detectors}}
|
||||
}
|
||||
|
||||
func newLayerBuilderWithoutDetector(hash string) *layerBuilder {
|
||||
return &layerBuilder{&database.Layer{Hash: hash}}
|
||||
}
|
||||
|
||||
func (b *layerBuilder) addDetectors(d ...database.Detector) *layerBuilder {
|
||||
b.layer.By = append(b.layer.By, d...)
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *layerBuilder) addNamespace(detector database.Detector, ns database.Namespace) *layerBuilder {
|
||||
b.layer.Namespaces = append(b.layer.Namespaces, database.LayerNamespace{
|
||||
Namespace: ns,
|
||||
By: detector,
|
||||
})
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *layerBuilder) addFeature(detector database.Detector, f database.Feature, ns database.Namespace) *layerBuilder {
|
||||
b.layer.Features = append(b.layer.Features, database.LayerFeature{
|
||||
Feature: f,
|
||||
By: detector,
|
||||
PotentialNamespace: ns,
|
||||
})
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
var testImage = []*database.Layer{
|
||||
// empty layer
|
||||
newLayerBuilder("0").layer,
|
||||
// ubuntu namespace
|
||||
newLayerBuilder("1").addNamespace(osrelease, ubuntu).layer,
|
||||
// install sed
|
||||
newLayerBuilder("2").addFeature(dpkg, sed, emptyNamespace).layer,
|
||||
// install tar
|
||||
newLayerBuilder("3").addFeature(dpkg, sed, emptyNamespace).addFeature(dpkg, tar, emptyNamespace).layer,
|
||||
// remove tar
|
||||
newLayerBuilder("4").addFeature(dpkg, sed, emptyNamespace).layer,
|
||||
// upgrade ubuntu
|
||||
newLayerBuilder("5").addNamespace(osrelease, ubuntu16).layer,
|
||||
// no change to the detectable files
|
||||
newLayerBuilder("6").layer,
|
||||
// change to the package installer database but no features are affected.
|
||||
newLayerBuilder("7").addFeature(dpkg, sed, emptyNamespace).layer,
|
||||
}
|
||||
|
||||
var invalidNamespace = []*database.Layer{
|
||||
// add package without namespace, this indicates that the namespace detector
|
||||
// could not detect the namespace.
|
||||
newLayerBuilder("0").addFeature(dpkg, sed, emptyNamespace).layer,
|
||||
}
|
||||
|
||||
var noMatchingNamespace = []*database.Layer{
|
||||
newLayerBuilder("0").addFeature(rpm, sedByRPM, emptyNamespace).addFeature(dpkg, sed, emptyNamespace).addNamespace(osrelease, ubuntu).layer,
|
||||
}
|
||||
|
||||
var multiplePackagesOnFirstLayer = []*database.Layer{
|
||||
newLayerBuilder("0").addFeature(dpkg, sed, emptyNamespace).addFeature(dpkg, tar, emptyNamespace).addFeature(dpkg, sedBin, emptyNamespace).addNamespace(osrelease, ubuntu16).layer,
|
||||
}
|
||||
|
||||
var twoNamespaceDetectorsWithSameResult = []*database.Layer{
|
||||
newLayerBuilderWithoutDetector("0").addDetectors(dpkg, aptsources, osrelease).addFeature(dpkg, sed, emptyNamespace).addNamespace(aptsources, ubuntu).addNamespace(osrelease, ubuntu).layer,
|
||||
}
|
||||
|
||||
var sameVersionFormatDiffName = []*database.Layer{
|
||||
newLayerBuilder("0").addFeature(dpkg, sed, emptyNamespace).addNamespace(aptsources, ubuntu).addNamespace(osrelease, debian).layer,
|
||||
}
|
||||
|
||||
var potentialFeatureNamespace = []*database.Layer{
|
||||
newLayerBuilder("0").addFeature(rpm, sed, rhel7).layer,
|
||||
}
|
||||
|
||||
func TestAddLayer(t *testing.T) {
|
||||
cases := []struct {
|
||||
title string
|
||||
image []*database.Layer
|
||||
nonDefaultDetectors []database.Detector
|
||||
expectedAncestry database.Ancestry
|
||||
}{
|
||||
{
|
||||
title: "empty image",
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{})).addDetectors(detectors...).ancestry,
|
||||
},
|
||||
{
|
||||
title: "empty layer",
|
||||
image: testImage[:1],
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0"})).addDetectors(detectors...).
|
||||
addLayer("0").ancestry,
|
||||
},
|
||||
{
|
||||
title: "ubuntu",
|
||||
image: testImage[:2],
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0", "1"})).addDetectors(detectors...).
|
||||
addLayer("0").
|
||||
addLayer("1").ancestry,
|
||||
},
|
||||
{
|
||||
title: "ubuntu install sed",
|
||||
image: testImage[:3],
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0", "1", "2"})).addDetectors(detectors...).
|
||||
addLayer("0").
|
||||
addLayer("1").
|
||||
addLayer("2", ancestryFeature(ubuntu, sed, osrelease, dpkg)).ancestry,
|
||||
},
|
||||
{
|
||||
title: "ubuntu install tar",
|
||||
image: testImage[:4],
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0", "1", "2", "3"})).addDetectors(detectors...).
|
||||
addLayer("0").
|
||||
addLayer("1").
|
||||
addLayer("2", ancestryFeature(ubuntu, sed, osrelease, dpkg)).
|
||||
addLayer("3", ancestryFeature(ubuntu, tar, osrelease, dpkg)).ancestry,
|
||||
}, {
|
||||
title: "ubuntu uninstall tar",
|
||||
image: testImage[:5],
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0", "1", "2", "3", "4"})).addDetectors(detectors...).
|
||||
addLayer("0").
|
||||
addLayer("1").
|
||||
addLayer("2", ancestryFeature(ubuntu, sed, osrelease, dpkg)).
|
||||
addLayer("3").
|
||||
addLayer("4").ancestry,
|
||||
}, {
|
||||
title: "ubuntu upgrade",
|
||||
image: testImage[:6],
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0", "1", "2", "3", "4", "5"})).addDetectors(detectors...).
|
||||
addLayer("0").
|
||||
addLayer("1").
|
||||
addLayer("2").
|
||||
addLayer("3").
|
||||
addLayer("4").
|
||||
addLayer("5", ancestryFeature(ubuntu16, sed, osrelease, dpkg)).ancestry,
|
||||
}, {
|
||||
title: "no change to the detectable files",
|
||||
image: testImage[:7],
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0", "1", "2", "3", "4", "5", "6"})).addDetectors(detectors...).
|
||||
addLayer("0").
|
||||
addLayer("1").
|
||||
addLayer("2").
|
||||
addLayer("3").
|
||||
addLayer("4").
|
||||
addLayer("5", ancestryFeature(ubuntu16, sed, osrelease, dpkg)).
|
||||
addLayer("6").ancestry,
|
||||
}, {
|
||||
title: "change to the package installer database but no features are affected.",
|
||||
image: testImage[:8],
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0", "1", "2", "3", "4", "5", "6", "7"})).addDetectors(detectors...).
|
||||
addLayer("0").
|
||||
addLayer("1").
|
||||
addLayer("2").
|
||||
addLayer("3").
|
||||
addLayer("4").
|
||||
addLayer("5", ancestryFeature(ubuntu16, sed, osrelease, dpkg)).
|
||||
addLayer("6").
|
||||
addLayer("7").ancestry,
|
||||
}, {
|
||||
title: "layers with features and namespace.",
|
||||
image: multiplePackagesOnFirstLayer,
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0"})).addDetectors(detectors...).
|
||||
addLayer("0",
|
||||
ancestryFeature(ubuntu16, sed, osrelease, dpkg),
|
||||
ancestryFeature(ubuntu16, sedBin, osrelease, dpkg),
|
||||
ancestryFeature(ubuntu16, tar, osrelease, dpkg)).
|
||||
ancestry,
|
||||
}, {
|
||||
title: "two namespace detectors giving same namespace.",
|
||||
image: twoNamespaceDetectorsWithSameResult,
|
||||
nonDefaultDetectors: []database.Detector{osrelease, aptsources, dpkg},
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0"})).addDetectors(osrelease, aptsources, dpkg).
|
||||
addLayer("0", ancestryFeature(ubuntu, sed, aptsources, dpkg)).
|
||||
ancestry,
|
||||
}, {
|
||||
title: "feature without namespace",
|
||||
image: invalidNamespace,
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0"})).addDetectors(detectors...).
|
||||
addLayer("0").
|
||||
ancestry,
|
||||
}, {
|
||||
title: "two namespaces with the same version format but different names",
|
||||
image: sameVersionFormatDiffName,
|
||||
// failure of matching a namespace will result in the package not being added.
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0"})).addDetectors(detectors...).
|
||||
addLayer("0").
|
||||
ancestry,
|
||||
}, {
|
||||
title: "noMatchingNamespace",
|
||||
image: noMatchingNamespace,
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0"})).addDetectors(detectors...).addLayer("0", ancestryFeature(ubuntu, sed, osrelease, dpkg)).ancestry,
|
||||
}, {
|
||||
title: "featureWithPotentialNamespace",
|
||||
image: potentialFeatureNamespace,
|
||||
expectedAncestry: *newAncestryBuilder(ancestryName([]string{"0"})).addDetectors(detectors...).addLayer("0", ancestryFeature(rhel7, sed, database.Detector{}, rpm)).ancestry,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range cases {
|
||||
t.Run(test.title, func(t *testing.T) {
|
||||
var builder *AncestryBuilder
|
||||
if len(test.nonDefaultDetectors) != 0 {
|
||||
builder = NewAncestryBuilder(test.nonDefaultDetectors)
|
||||
} else {
|
||||
builder = NewAncestryBuilder(detectors)
|
||||
}
|
||||
|
||||
for _, layer := range test.image {
|
||||
builder.AddLeafLayer(layer)
|
||||
}
|
||||
|
||||
ancestry := builder.Ancestry("")
|
||||
require.True(t, database.AssertAncestryEqual(t, &test.expectedAncestry, ancestry))
|
||||
})
|
||||
}
|
||||
}
|
@ -1,24 +0,0 @@
|
||||
package httputil
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GetClientAddr returns the first value in X-Forwarded-For if it exists
|
||||
// otherwise fall back to use RemoteAddr
|
||||
func GetClientAddr(r *http.Request) string {
|
||||
addr := r.RemoteAddr
|
||||
if s := r.Header.Get("X-Forwarded-For"); s != "" {
|
||||
ips := strings.Split(s, ",")
|
||||
// assume the first one is the client address
|
||||
if len(ips) != 0 {
|
||||
// validate the ip
|
||||
if realIP := net.ParseIP(ips[0]); realIP != nil {
|
||||
addr = strings.TrimSpace(ips[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
return addr
|
||||
}
|
@ -0,0 +1,7 @@
|
||||
FROM golang:alpine
|
||||
|
||||
RUN apk add --update --no-cache git bash protobuf-dev
|
||||
|
||||
RUN go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
|
||||
RUN go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
|
||||
RUN go get -u github.com/golang/protobuf/protoc-gen-go
|
@ -1,17 +0,0 @@
|
||||
all:
|
||||
protoc -I/usr/local/include -I. \
|
||||
-I${GOPATH}/src \
|
||||
-I${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
|
||||
--go_out=plugins=grpc:. \
|
||||
clair.proto
|
||||
protoc -I/usr/local/include -I. \
|
||||
-I${GOPATH}/src \
|
||||
-I${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
|
||||
--grpc-gateway_out=logtostderr=true:. \
|
||||
clair.proto
|
||||
protoc -I/usr/local/include -I. \
|
||||
-I${GOPATH}/src \
|
||||
-I${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
|
||||
--swagger_out=logtostderr=true:. \
|
||||
clair.proto
|
||||
go generate .
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,28 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 clair authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
DOCKER_REPO_ROOT="$GOPATH/src/github.com/coreos/clair"
|
||||
IMAGE=${IMAGE:-"quay.io/coreos/clair-gen-proto"}
|
||||
|
||||
docker run --rm -it \
|
||||
-v "$DOCKER_REPO_ROOT":"$DOCKER_REPO_ROOT" \
|
||||
-w "$DOCKER_REPO_ROOT" \
|
||||
"$IMAGE" \
|
||||
"./api/v3/clairpb/run_in_docker.sh"
|
@ -0,0 +1,3 @@
|
||||
protoc_version: 3.5.1
|
||||
protoc_includes:
|
||||
- ../../../vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis
|
@ -0,0 +1,29 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 clair authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
protoc -I/usr/include -I. \
|
||||
-I"${GOPATH}/src" \
|
||||
-I"${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis" \
|
||||
--go_out=plugins=grpc:. \
|
||||
--grpc-gateway_out=logtostderr=true:. \
|
||||
--swagger_out=logtostderr=true:. \
|
||||
./api/v3/clairpb/clair.proto
|
||||
|
||||
go generate .
|
@ -0,0 +1,92 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3
|
||||
|
||||
import (
|
||||
"github.com/coreos/clair"
|
||||
pb "github.com/coreos/clair/api/v3/clairpb"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// GetClairStatus retrieves the current status of Clair and wrap it inside
|
||||
// protobuf struct.
|
||||
func GetClairStatus(store database.Datastore) (*pb.ClairStatus, error) {
|
||||
status := &pb.ClairStatus{
|
||||
Detectors: pb.DetectorsFromDatabaseModel(clair.EnabledDetectors()),
|
||||
}
|
||||
|
||||
t, firstUpdate, err := clair.GetLastUpdateTime(store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if firstUpdate {
|
||||
return status, nil
|
||||
}
|
||||
|
||||
status.LastUpdateTime, err = ptypes.TimestampProto(t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
|
||||
// GetPbAncestryLayer retrieves an ancestry layer with vulnerabilities and
|
||||
// features in an ancestry based on the provided database layer.
|
||||
func (s *AncestryServer) GetPbAncestryLayer(layer database.AncestryLayer) (*pb.GetAncestryResponse_AncestryLayer, error) {
|
||||
pbLayer := &pb.GetAncestryResponse_AncestryLayer{
|
||||
Layer: &pb.Layer{
|
||||
Hash: layer.Hash,
|
||||
},
|
||||
}
|
||||
|
||||
features := layer.GetFeatures()
|
||||
affectedFeatures, err := database.FindAffectedNamespacedFeaturesAndRollback(s.Store, features)
|
||||
if err != nil {
|
||||
return nil, newRPCErrorWithClairError(codes.Internal, err)
|
||||
}
|
||||
|
||||
for _, feature := range affectedFeatures {
|
||||
if !feature.Valid {
|
||||
panic("feature is missing in the database, it indicates the database is corrupted.")
|
||||
}
|
||||
|
||||
for _, detectedFeature := range layer.Features {
|
||||
if detectedFeature.NamespacedFeature != feature.NamespacedFeature {
|
||||
continue
|
||||
}
|
||||
|
||||
var (
|
||||
pbFeature = pb.NamespacedFeatureFromDatabaseModel(detectedFeature)
|
||||
pbVuln *pb.Vulnerability
|
||||
err error
|
||||
)
|
||||
|
||||
for _, vuln := range feature.AffectedBy {
|
||||
if pbVuln, err = pb.VulnerabilityWithFixedInFromDatabaseModel(vuln); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
pbFeature.Vulnerabilities = append(pbFeature.Vulnerabilities, pbVuln)
|
||||
}
|
||||
|
||||
pbLayer.DetectedFeatures = append(pbLayer.DetectedFeatures, pbFeature)
|
||||
}
|
||||
}
|
||||
|
||||
return pbLayer, nil
|
||||
}
|
@ -1,132 +1,77 @@
|
||||
[
|
||||
{
|
||||
"project": "github.com/beorn7/perks/quantile",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "MIT License",
|
||||
"confidence": 0.9891304347826086
|
||||
}
|
||||
]
|
||||
"project": "github.com/coreos/clair",
|
||||
"license": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
},
|
||||
{
|
||||
"project": "github.com/coreos/clair",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
}
|
||||
]
|
||||
"project": "github.com/beorn7/perks/quantile",
|
||||
"license": "MIT License",
|
||||
"confidence": 0.989
|
||||
},
|
||||
{
|
||||
"project": "github.com/coreos/pkg/timeutil",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
}
|
||||
]
|
||||
"license": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
},
|
||||
{
|
||||
"project": "github.com/golang/protobuf/proto",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "BSD 3-clause \"New\" or \"Revised\" License",
|
||||
"confidence": 0.92
|
||||
}
|
||||
]
|
||||
"license": "BSD 3-clause \"New\" or \"Revised\" License",
|
||||
"confidence": 0.92
|
||||
},
|
||||
{
|
||||
"project": "github.com/google/uuid",
|
||||
"license": "BSD 3-clause \"New\" or \"Revised\" License",
|
||||
"confidence": 0.966
|
||||
},
|
||||
{
|
||||
"project": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
}
|
||||
]
|
||||
"license": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
},
|
||||
{
|
||||
"project": "github.com/pborman/uuid",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "BSD 3-clause \"New\" or \"Revised\" License",
|
||||
"confidence": 0.9663865546218487
|
||||
}
|
||||
]
|
||||
"license": "BSD 3-clause \"New\" or \"Revised\" License",
|
||||
"confidence": 0.966
|
||||
},
|
||||
{
|
||||
"project": "github.com/prometheus/client_golang/prometheus",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
}
|
||||
]
|
||||
"license": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
},
|
||||
{
|
||||
"project": "github.com/prometheus/client_model/go",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
}
|
||||
]
|
||||
"license": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
},
|
||||
{
|
||||
"project": "github.com/prometheus/common",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
}
|
||||
]
|
||||
"license": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
},
|
||||
{
|
||||
"project": "github.com/prometheus/procfs",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
}
|
||||
]
|
||||
"project": "github.com/prometheus/procfs/xfs",
|
||||
"license": "Apache License 2.0",
|
||||
"confidence": 1
|
||||
},
|
||||
{
|
||||
"project": "github.com/sirupsen/logrus",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "MIT License",
|
||||
"confidence": 1
|
||||
}
|
||||
]
|
||||
"license": "MIT License",
|
||||
"confidence": 1
|
||||
},
|
||||
{
|
||||
"project": "github.com/stretchr/testify/assert",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "MIT License",
|
||||
"confidence": 0.9430051813471503
|
||||
},
|
||||
{
|
||||
"type": "MIT License",
|
||||
"confidence": 0.9430051813471503
|
||||
}
|
||||
]
|
||||
"license": "MIT License",
|
||||
"confidence": 0.943
|
||||
},
|
||||
{
|
||||
"project": "github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "ISC License",
|
||||
"confidence": 0.9850746268656716
|
||||
}
|
||||
]
|
||||
"license": "ISC License",
|
||||
"confidence": 0.985
|
||||
},
|
||||
{
|
||||
"project": "github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib",
|
||||
"licenses": [
|
||||
{
|
||||
"type": "BSD 3-clause \"New\" or \"Revised\" License",
|
||||
"confidence": 0.9830508474576272
|
||||
}
|
||||
]
|
||||
"license": "BSD 3-clause \"New\" or \"Revised\" License",
|
||||
"confidence": 0.983
|
||||
}
|
||||
]
|
||||
|
@ -0,0 +1,43 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clair
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/pkg/httputil"
|
||||
)
|
||||
|
||||
func retrieveLayerBlob(ctx context.Context, path string, headers map[string]string) (io.ReadCloser, error) {
|
||||
if strings.HasPrefix(path, "http://") || strings.HasPrefix(path, "https://") {
|
||||
httpHeaders := make(http.Header)
|
||||
for key, value := range headers {
|
||||
httpHeaders[key] = []string{value}
|
||||
}
|
||||
|
||||
reader, err := httputil.GetWithContext(ctx, path, httpHeaders)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
return os.Open(path)
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
dependencies:
|
||||
- name: postgresql
|
||||
version: "*"
|
||||
version: "1.0.0"
|
||||
condition: postgresql.enabled
|
||||
repository: "alias:stable"
|
@ -0,0 +1,83 @@
|
||||
clair:
|
||||
database:
|
||||
# Database driver.
|
||||
type: pgsql
|
||||
options:
|
||||
# PostgreSQL Connection string.
|
||||
# https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
|
||||
{{- if .Values.config.postgresURI }}
|
||||
source: "{{ .Values.config.postgresURI }}"
|
||||
{{ else }}
|
||||
source: "host={{ template "postgresql.fullname" . }} port=5432 user={{ .Values.postgresql.postgresUser }} password={{ .Values.postgresql.postgresPassword }} dbname={{ .Values.postgresql.postgresDatabase }} sslmode=disable statement_timeout=60000"
|
||||
{{ end }}
|
||||
|
||||
# Number of elements kept in the cache.
|
||||
# Values unlikely to change (e.g. namespaces) are cached in order to save prevent needless roundtrips to the database.
|
||||
cachesize: 16384
|
||||
|
||||
# 32-bit URL-safe base64 key used to encrypt pagination tokens.
|
||||
# If one is not provided, it will be generated.
|
||||
# Multiple clair instances in the same cluster need the same value.
|
||||
paginationkey: "{{ .Values.config.paginationKey }}"
|
||||
api:
|
||||
# v3 grpc/RESTful API server address.
|
||||
addr: "0.0.0.0:{{ .Values.service.internalApiPort }}"
|
||||
|
||||
# Health server address.
|
||||
# This is an unencrypted endpoint useful for load balancers to check to healthiness of the clair server.
|
||||
healthaddr: "0.0.0.0:{{ .Values.service.internalHealthPort }}"
|
||||
|
||||
# Deadline before an API request will respond with a 503.
|
||||
timeout: 900s
|
||||
|
||||
# Optional PKI configuration.
|
||||
# If you want to easily generate client certificates and CAs, try the following projects:
|
||||
# https://github.com/coreos/etcd-ca
|
||||
# https://github.com/cloudflare/cfssl
|
||||
servername:
|
||||
cafile:
|
||||
keyfile:
|
||||
certfile:
|
||||
|
||||
worker:
|
||||
namespace_detectors:
|
||||
{{- range $key, $value := .Values.config.enabledNamespaceDetectors }}
|
||||
- {{ $value }}
|
||||
{{- end }}
|
||||
|
||||
feature_listers:
|
||||
{{- range $key, $value := .Values.config.enabledFeatureListers }}
|
||||
- {{ $value }}
|
||||
{{- end }}
|
||||
|
||||
updater:
|
||||
# Frequency the database will be updated with vulnerabilities from the default data sources.
|
||||
# The value 0 disables the updater entirely.
|
||||
interval: "{{ .Values.config.updateInterval }}"
|
||||
enabledupdaters:
|
||||
{{- range $key, $value := .Values.config.enabledUpdaters }}
|
||||
- {{ $value }}
|
||||
{{- end }}
|
||||
|
||||
notifier:
|
||||
# Number of attempts before the notification is marked as failed to be sent.
|
||||
attempts: 3
|
||||
|
||||
# Duration before a failed notification is retried.
|
||||
renotifyinterval: 2h
|
||||
|
||||
http:
|
||||
# Optional endpoint that will receive notifications via POST requests.
|
||||
endpoint: "{{ .Values.config.notificationWebhookEndpoint }}"
|
||||
|
||||
# Optional PKI configuration.
|
||||
# If you want to easily generate client certificates and CAs, try the following projects:
|
||||
# https://github.com/cloudflare/cfssl
|
||||
# https://github.com/coreos/etcd-ca
|
||||
servername:
|
||||
cafile:
|
||||
keyfile:
|
||||
certfile:
|
||||
|
||||
# Optional HTTP Proxy: must be a valid URL (including the scheme).
|
||||
proxy:
|
@ -1,92 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "fullname" . }}
|
||||
labels:
|
||||
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
data:
|
||||
config.yaml: |
|
||||
clair:
|
||||
database:
|
||||
# Database driver
|
||||
type: pgsql
|
||||
options:
|
||||
# PostgreSQL Connection string
|
||||
# https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
|
||||
# This should be done using secrets or Vault, but for now this will also work
|
||||
{{- if .Values.config.postgresURI -}}
|
||||
source: "{{ .Values.config.postgresURI }}"
|
||||
{{ else }}
|
||||
source: "postgres://{{ .Values.postgresql.postgresUser }}:{{ .Values.postgresql.postgresPassword }}@{{ template "postgresql.fullname" . }}:5432/{{ .Values.postgresql.postgresDatabase }}?sslmode=disable"
|
||||
{{ end }}
|
||||
|
||||
# Number of elements kept in the cache
|
||||
# Values unlikely to change (e.g. namespaces) are cached in order to save prevent needless roundtrips to the database.
|
||||
cachesize: 16384
|
||||
|
||||
# 32-bit URL-safe base64 key used to encrypt pagination tokens
|
||||
# If one is not provided, it will be generated.
|
||||
# Multiple clair instances in the same cluster need the same value.
|
||||
paginationkey: "{{ .Values.config.paginationKey }}"
|
||||
api:
|
||||
# v3 grpc/RESTful API server address
|
||||
addr: "0.0.0.0:6060"
|
||||
|
||||
# Health server address
|
||||
# This is an unencrypted endpoint useful for load balancers to check to healthiness of the clair server.
|
||||
healthaddr: "0.0.0.0:6061"
|
||||
|
||||
# Deadline before an API request will respond with a 503
|
||||
timeout: 900s
|
||||
|
||||
# Optional PKI configuration
|
||||
# If you want to easily generate client certificates and CAs, try the following projects:
|
||||
# https://github.com/coreos/etcd-ca
|
||||
# https://github.com/cloudflare/cfssl
|
||||
servername:
|
||||
cafile:
|
||||
keyfile:
|
||||
certfile:
|
||||
|
||||
worker:
|
||||
namespace_detectors:
|
||||
{{- range $key, $value := .Values.config.enabledNamespaceDetectors }}
|
||||
- {{ $value }}
|
||||
{{- end }}
|
||||
|
||||
feature_listers:
|
||||
{{- range $key, $value := .Values.config.enabledFeatureListers }}
|
||||
- {{ $value }}
|
||||
{{- end }}
|
||||
|
||||
updater:
|
||||
# Frequency the database will be updated with vulnerabilities from the default data sources
|
||||
# The value 0 disables the updater entirely.
|
||||
interval: "{{ .Values.config.updateInterval }}"
|
||||
enabledupdaters:
|
||||
{{- range $key, $value := .Values.config.enabledUpdaters }}
|
||||
- {{ $value }}
|
||||
{{- end }}
|
||||
|
||||
notifier:
|
||||
# Number of attempts before the notification is marked as failed to be sent
|
||||
attempts: 3
|
||||
|
||||
# Duration before a failed notification is retried
|
||||
renotifyinterval: 2h
|
||||
|
||||
http:
|
||||
# Optional endpoint that will receive notifications via POST requests
|
||||
endpoint: "{{ .Values.config.notificationWebhookEndpoint }}"
|
||||
|
||||
# Optional PKI configuration
|
||||
# If you want to easily generate client certificates and CAs, try the following projects:
|
||||
# https://github.com/cloudflare/cfssl
|
||||
# https://github.com/coreos/etcd-ca
|
||||
servername:
|
||||
cafile:
|
||||
keyfile:
|
||||
certfile:
|
||||
|
||||
# Optional HTTP Proxy: must be a valid URL (including the scheme).
|
||||
proxy:
|
@ -0,0 +1,13 @@
|
||||
# Kubernetes Secret carrying the rendered Clair configuration.
# The config template is rendered, base64-encoded (Secret data must be
# base64), and stored under the conventional "config.yaml" key.
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "clair.fullname" . }}
  labels:
    heritage: {{ .Release.Service | quote }}
    release: {{ .Release.Name | quote }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
    app: {{ template "clair.fullname" . }}
type: Opaque
data:
  config.yaml: |-
{{ include (print .Template.BasePath "/_config.yaml.tpl") . | b64enc | indent 4 }}
|
@ -1,21 +1,28 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "fullname" . }}
|
||||
name: {{ template "clair.fullname" . }}
|
||||
labels:
|
||||
heritage: {{ .Release.Service | quote }}
|
||||
release: {{ .Release.Name | quote }}
|
||||
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
|
||||
app: {{ template "clair.fullname" . }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- name: clair-api
|
||||
- name: "{{ .Chart.Name }}-api"
|
||||
port: {{ .Values.service.externalApiPort }}
|
||||
targetPort: {{ .Values.service.internalApiPort }}
|
||||
protocol: TCP
|
||||
name: "{{ .Values.service.name }}-api"
|
||||
- name: clair-health
|
||||
{{- if and (.Values.service.apiNodePort) (eq .Values.service.type "NodePort") }}
|
||||
nodePort: {{ .Values.service.apiNodePort }}
|
||||
{{- end }}
|
||||
- name: "{{ .Chart.Name }}-health"
|
||||
port: {{ .Values.service.externalHealthPort }}
|
||||
targetPort: {{ .Values.service.internalHealthPort }}
|
||||
protocol: TCP
|
||||
name: "{{ .Values.service.name }}-health"
|
||||
{{- if and (.Values.service.healthNodePort) (eq .Values.service.type "NodePort") }}
|
||||
nodePort: {{ .Values.service.healthNodePort }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: {{ template "fullname" . }}
|
||||
app: {{ template "clair.fullname" . }}
|
||||
|
@ -0,0 +1,96 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
// Ancestry is a manifest that keeps all layers in an image in order.
|
||||
type Ancestry struct {
|
||||
// Name is a globally unique value for a set of layers. This is often the
|
||||
// sha256 digest of an OCI/Docker manifest.
|
||||
Name string `json:"name"`
|
||||
// By contains the processors that are used when computing the
|
||||
// content of this ancestry.
|
||||
By []Detector `json:"by"`
|
||||
// Layers should be ordered and i_th layer is the parent of i+1_th layer in
|
||||
// the slice.
|
||||
Layers []AncestryLayer `json:"layers"`
|
||||
}
|
||||
|
||||
// Valid checks if the ancestry is compliant to spec.
|
||||
func (a *Ancestry) Valid() bool {
|
||||
if a == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if a.Name == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, d := range a.By {
|
||||
if !d.Valid() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for _, l := range a.Layers {
|
||||
if !l.Valid() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// AncestryLayer is a layer with all detected namespaced features.
|
||||
type AncestryLayer struct {
|
||||
// Hash is the sha-256 tarsum on the layer's blob content.
|
||||
Hash string `json:"hash"`
|
||||
// Features are the features introduced by this layer when it was
|
||||
// processed.
|
||||
Features []AncestryFeature `json:"features"`
|
||||
}
|
||||
|
||||
// Valid checks if the Ancestry Layer is compliant to the spec.
|
||||
func (l *AncestryLayer) Valid() bool {
|
||||
if l == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if l.Hash == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// GetFeatures returns the Ancestry's features.
|
||||
func (l *AncestryLayer) GetFeatures() []NamespacedFeature {
|
||||
nsf := make([]NamespacedFeature, 0, len(l.Features))
|
||||
for _, f := range l.Features {
|
||||
nsf = append(nsf, f.NamespacedFeature)
|
||||
}
|
||||
|
||||
return nsf
|
||||
}
|
||||
|
||||
// AncestryFeature is a namespaced feature with the detectors used to
|
||||
// find this feature.
|
||||
type AncestryFeature struct {
|
||||
NamespacedFeature `json:"namespacedFeature"`
|
||||
|
||||
// FeatureBy is the detector that detected the feature.
|
||||
FeatureBy Detector `json:"featureBy"`
|
||||
// NamespaceBy is the detector that detected the namespace.
|
||||
NamespaceBy Detector `json:"namespaceBy"`
|
||||
}
|
@ -0,0 +1,539 @@
|
||||
// Copyright 2018 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/coreos/clair/pkg/commonerr"
|
||||
"github.com/coreos/clair/pkg/pagination"
|
||||
"github.com/deckarep/golang-set"
|
||||
)
|
||||
|
||||
// DeduplicateNamespaces deduplicates a list of namespaces.
|
||||
func DeduplicateNamespaces(namespaces ...Namespace) []Namespace {
|
||||
nsSet := mapset.NewSet()
|
||||
for _, ns := range namespaces {
|
||||
nsSet.Add(ns)
|
||||
}
|
||||
|
||||
uniqueNamespaces := make([]Namespace, 0, nsSet.Cardinality())
|
||||
for ns := range nsSet.Iter() {
|
||||
uniqueNamespaces = append(uniqueNamespaces, ns.(Namespace))
|
||||
}
|
||||
|
||||
return uniqueNamespaces
|
||||
}
|
||||
|
||||
// DeduplicateFeatures deduplicates a list of list of features.
|
||||
func DeduplicateFeatures(features ...Feature) []Feature {
|
||||
fSet := mapset.NewSet()
|
||||
for _, f := range features {
|
||||
fSet.Add(f)
|
||||
}
|
||||
|
||||
return ConvertFeatureSetToFeatures(fSet)
|
||||
}
|
||||
|
||||
// ConvertFeatureSetToFeatures converts a feature set to an array of features
|
||||
func ConvertFeatureSetToFeatures(features mapset.Set) []Feature {
|
||||
uniqueFeatures := make([]Feature, 0, features.Cardinality())
|
||||
for f := range features.Iter() {
|
||||
uniqueFeatures = append(uniqueFeatures, f.(Feature))
|
||||
}
|
||||
|
||||
return uniqueFeatures
|
||||
}
|
||||
|
||||
func ConvertFeatureSetToLayerFeatures(features mapset.Set) []LayerFeature {
|
||||
uniqueLayerFeatures := make([]LayerFeature, 0, features.Cardinality())
|
||||
for f := range features.Iter() {
|
||||
feature := f.(Feature)
|
||||
layerFeature := LayerFeature{
|
||||
Feature: feature,
|
||||
}
|
||||
uniqueLayerFeatures = append(uniqueLayerFeatures, layerFeature)
|
||||
}
|
||||
|
||||
return uniqueLayerFeatures
|
||||
}
|
||||
|
||||
// FindKeyValueAndRollback wraps session FindKeyValue function with begin and
|
||||
// roll back.
|
||||
func FindKeyValueAndRollback(datastore Datastore, key string) (value string, ok bool, err error) {
|
||||
var tx Session
|
||||
tx, err = datastore.Begin()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
value, ok, err = tx.FindKeyValue(key)
|
||||
return
|
||||
}
|
||||
|
||||
// PersistPartialLayerAndCommit wraps session PersistLayer function with begin and
|
||||
// commit.
|
||||
func PersistPartialLayerAndCommit(datastore Datastore, layer *Layer) error {
|
||||
tx, err := datastore.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
if err := tx.PersistLayer(layer.Hash, layer.Features, layer.Namespaces, layer.By); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// PersistFeaturesAndCommit wraps session PersistFeaturesAndCommit function with begin and commit.
|
||||
func PersistFeaturesAndCommit(datastore Datastore, features []Feature) error {
|
||||
tx, err := datastore.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
if err := tx.PersistFeatures(features); err != nil {
|
||||
serialized, _ := json.Marshal(features)
|
||||
log.WithError(err).WithField("feature", string(serialized)).Error("failed to store features")
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// PersistNamespacesAndCommit wraps session PersistNamespaces function with
|
||||
// begin and commit.
|
||||
func PersistNamespacesAndCommit(datastore Datastore, namespaces []Namespace) error {
|
||||
tx, err := datastore.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
if err := tx.PersistNamespaces(namespaces); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// FindAncestryAndRollback wraps session FindAncestry function with begin and
|
||||
// rollback.
|
||||
func FindAncestryAndRollback(datastore Datastore, name string) (Ancestry, bool, error) {
|
||||
tx, err := datastore.Begin()
|
||||
defer tx.Rollback()
|
||||
|
||||
if err != nil {
|
||||
return Ancestry{}, false, err
|
||||
}
|
||||
|
||||
return tx.FindAncestry(name)
|
||||
}
|
||||
|
||||
// FindLayerAndRollback wraps session FindLayer function with begin and rollback.
|
||||
func FindLayerAndRollback(datastore Datastore, hash string) (layer *Layer, ok bool, err error) {
|
||||
var tx Session
|
||||
if tx, err = datastore.Begin(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer tx.Rollback()
|
||||
// TODO(sidac): In order to make the session interface more idiomatic, we'll
|
||||
// return the pointer value in the future.
|
||||
var dereferencedLayer Layer
|
||||
dereferencedLayer, ok, err = tx.FindLayer(hash)
|
||||
layer = &dereferencedLayer
|
||||
return
|
||||
}
|
||||
|
||||
// DeduplicateNamespacedFeatures returns a copy of all unique features in the
|
||||
// input.
|
||||
func DeduplicateNamespacedFeatures(features []NamespacedFeature) []NamespacedFeature {
|
||||
nsSet := mapset.NewSet()
|
||||
for _, ns := range features {
|
||||
nsSet.Add(ns)
|
||||
}
|
||||
|
||||
uniqueFeatures := make([]NamespacedFeature, 0, nsSet.Cardinality())
|
||||
for ns := range nsSet.Iter() {
|
||||
uniqueFeatures = append(uniqueFeatures, ns.(NamespacedFeature))
|
||||
}
|
||||
|
||||
return uniqueFeatures
|
||||
}
|
||||
|
||||
// GetAncestryFeatures returns a list of unique namespaced features in the
|
||||
// ancestry.
|
||||
func GetAncestryFeatures(ancestry Ancestry) []NamespacedFeature {
|
||||
features := []NamespacedFeature{}
|
||||
for _, layer := range ancestry.Layers {
|
||||
features = append(features, layer.GetFeatures()...)
|
||||
}
|
||||
|
||||
return DeduplicateNamespacedFeatures(features)
|
||||
}
|
||||
|
||||
// UpsertAncestryAndCommit wraps session UpsertAncestry function with begin and commit.
|
||||
func UpsertAncestryAndCommit(datastore Datastore, ancestry *Ancestry) error {
|
||||
tx, err := datastore.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = tx.UpsertAncestry(*ancestry); err != nil {
|
||||
log.WithError(err).Error("failed to upsert the ancestry")
|
||||
serialized, _ := json.Marshal(ancestry)
|
||||
log.Debug(string(serialized))
|
||||
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
if err = tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PersistNamespacedFeaturesAndCommit wraps session PersistNamespacedFeatures function
|
||||
// with begin and commit.
|
||||
func PersistNamespacedFeaturesAndCommit(datastore Datastore, features []NamespacedFeature) error {
|
||||
tx, err := datastore.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.PersistNamespacedFeatures(features); err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CacheRelatedVulnerabilityAndCommit wraps session CacheAffectedNamespacedFeatures
|
||||
// function with begin and commit.
|
||||
func CacheRelatedVulnerabilityAndCommit(datastore Datastore, features []NamespacedFeature) error {
|
||||
tx, err := datastore.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.CacheAffectedNamespacedFeatures(features); err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// IntersectDetectors returns the detectors in both d1 and d2.
|
||||
func IntersectDetectors(d1 []Detector, d2 []Detector) []Detector {
|
||||
d1Set := mapset.NewSet()
|
||||
for _, d := range d1 {
|
||||
d1Set.Add(d)
|
||||
}
|
||||
|
||||
d2Set := mapset.NewSet()
|
||||
for _, d := range d2 {
|
||||
d2Set.Add(d)
|
||||
}
|
||||
|
||||
inter := d1Set.Intersect(d2Set)
|
||||
detectors := make([]Detector, 0, inter.Cardinality())
|
||||
for d := range inter.Iter() {
|
||||
detectors = append(detectors, d.(Detector))
|
||||
}
|
||||
|
||||
return detectors
|
||||
}
|
||||
|
||||
// DiffDetectors returns the detectors belongs to d1 but not d2
|
||||
func DiffDetectors(d1 []Detector, d2 []Detector) []Detector {
|
||||
d1Set := mapset.NewSet()
|
||||
for _, d := range d1 {
|
||||
d1Set.Add(d)
|
||||
}
|
||||
|
||||
d2Set := mapset.NewSet()
|
||||
for _, d := range d2 {
|
||||
d2Set.Add(d)
|
||||
}
|
||||
|
||||
diff := d1Set.Difference(d2Set)
|
||||
detectors := make([]Detector, 0, diff.Cardinality())
|
||||
for d := range diff.Iter() {
|
||||
detectors = append(detectors, d.(Detector))
|
||||
}
|
||||
|
||||
return detectors
|
||||
}
|
||||
|
||||
// MergeLayers merges all content in new layer to l, where the content is
|
||||
// updated.
|
||||
func MergeLayers(l *Layer, new *Layer) *Layer {
|
||||
featureSet := mapset.NewSet()
|
||||
namespaceSet := mapset.NewSet()
|
||||
bySet := mapset.NewSet()
|
||||
|
||||
for _, f := range l.Features {
|
||||
featureSet.Add(f)
|
||||
}
|
||||
|
||||
for _, ns := range l.Namespaces {
|
||||
namespaceSet.Add(ns)
|
||||
}
|
||||
|
||||
for _, d := range l.By {
|
||||
bySet.Add(d)
|
||||
}
|
||||
|
||||
for _, feature := range new.Features {
|
||||
if !featureSet.Contains(feature) {
|
||||
l.Features = append(l.Features, feature)
|
||||
featureSet.Add(feature)
|
||||
}
|
||||
}
|
||||
|
||||
for _, namespace := range new.Namespaces {
|
||||
if !namespaceSet.Contains(namespace) {
|
||||
l.Namespaces = append(l.Namespaces, namespace)
|
||||
namespaceSet.Add(namespace)
|
||||
}
|
||||
}
|
||||
|
||||
for _, detector := range new.By {
|
||||
if !bySet.Contains(detector) {
|
||||
l.By = append(l.By, detector)
|
||||
bySet.Add(detector)
|
||||
}
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// AcquireLock acquires a named global lock for a duration.
|
||||
func AcquireLock(datastore Datastore, name, owner string, duration time.Duration) (acquired bool, expiration time.Time) {
|
||||
tx, err := datastore.Begin()
|
||||
if err != nil {
|
||||
return false, time.Time{}
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
locked, t, err := tx.AcquireLock(name, owner, duration)
|
||||
if err != nil {
|
||||
return false, time.Time{}
|
||||
}
|
||||
|
||||
if locked {
|
||||
if err := tx.Commit(); err != nil {
|
||||
return false, time.Time{}
|
||||
}
|
||||
}
|
||||
|
||||
return locked, t
|
||||
}
|
||||
|
||||
// ExtendLock extends the duration of an existing global lock for the given
|
||||
// duration.
|
||||
func ExtendLock(ds Datastore, name, whoami string, desiredLockDuration time.Duration) (bool, time.Time) {
|
||||
tx, err := ds.Begin()
|
||||
if err != nil {
|
||||
return false, time.Time{}
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
locked, expiration, err := tx.ExtendLock(name, whoami, desiredLockDuration)
|
||||
if err != nil {
|
||||
return false, time.Time{}
|
||||
}
|
||||
|
||||
if locked {
|
||||
if err := tx.Commit(); err == nil {
|
||||
return locked, expiration
|
||||
}
|
||||
}
|
||||
|
||||
return false, time.Time{}
|
||||
}
|
||||
|
||||
// ReleaseLock releases a named global lock.
|
||||
func ReleaseLock(datastore Datastore, name, owner string) {
|
||||
tx, err := datastore.Begin()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
if err := tx.ReleaseLock(name, owner); err != nil {
|
||||
return
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// PersistDetectorsAndCommit stores the detectors in the data store.
|
||||
func PersistDetectorsAndCommit(store Datastore, detectors []Detector) error {
|
||||
tx, err := store.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer tx.Rollback()
|
||||
if err := tx.PersistDetectors(detectors); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkNotificationAsReadAndCommit marks a notification as read.
|
||||
func MarkNotificationAsReadAndCommit(store Datastore, name string) (bool, error) {
|
||||
tx, err := store.Begin()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
defer tx.Rollback()
|
||||
err = tx.DeleteNotification(name)
|
||||
if err == commonerr.ErrNotFound {
|
||||
return false, nil
|
||||
} else if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// FindAffectedNamespacedFeaturesAndRollback finds the vulnerabilities on each
|
||||
// feature.
|
||||
func FindAffectedNamespacedFeaturesAndRollback(store Datastore, features []NamespacedFeature) ([]NullableAffectedNamespacedFeature, error) {
|
||||
tx, err := store.Begin()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer tx.Rollback()
|
||||
nullableFeatures, err := tx.FindAffectedNamespacedFeatures(features)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nullableFeatures, nil
|
||||
}
|
||||
|
||||
// FindVulnerabilityNotificationAndRollback finds the vulnerability notification
|
||||
// and rollback.
|
||||
func FindVulnerabilityNotificationAndRollback(store Datastore, name string, limit int, oldVulnerabilityPage pagination.Token, newVulnerabilityPage pagination.Token) (VulnerabilityNotificationWithVulnerable, bool, error) {
|
||||
tx, err := store.Begin()
|
||||
if err != nil {
|
||||
return VulnerabilityNotificationWithVulnerable{}, false, err
|
||||
}
|
||||
|
||||
defer tx.Rollback()
|
||||
return tx.FindVulnerabilityNotification(name, limit, oldVulnerabilityPage, newVulnerabilityPage)
|
||||
}
|
||||
|
||||
// FindNewNotification finds notifications either never notified or notified
|
||||
// before the given time.
|
||||
func FindNewNotification(store Datastore, notifiedBefore time.Time) (NotificationHook, bool, error) {
|
||||
tx, err := store.Begin()
|
||||
if err != nil {
|
||||
return NotificationHook{}, false, err
|
||||
}
|
||||
|
||||
defer tx.Rollback()
|
||||
return tx.FindNewNotification(notifiedBefore)
|
||||
}
|
||||
|
||||
// UpdateKeyValueAndCommit stores the key value to storage.
|
||||
func UpdateKeyValueAndCommit(store Datastore, key, value string) error {
|
||||
tx, err := store.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer tx.Rollback()
|
||||
if err = tx.UpdateKeyValue(key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// InsertVulnerabilityNotificationsAndCommit inserts the notifications into db
|
||||
// and commit.
|
||||
func InsertVulnerabilityNotificationsAndCommit(store Datastore, notifications []VulnerabilityNotification) error {
|
||||
tx, err := store.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
if err := tx.InsertVulnerabilityNotifications(notifications); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// FindVulnerabilitiesAndRollback finds the vulnerabilities based on given ids.
|
||||
func FindVulnerabilitiesAndRollback(store Datastore, ids []VulnerabilityID) ([]NullableVulnerability, error) {
|
||||
tx, err := store.Begin()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer tx.Rollback()
|
||||
return tx.FindVulnerabilities(ids)
|
||||
}
|
||||
|
||||
func UpdateVulnerabilitiesAndCommit(store Datastore, toRemove []VulnerabilityID, toAdd []VulnerabilityWithAffected) error {
|
||||
tx, err := store.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.DeleteVulnerabilities(toRemove); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.InsertVulnerabilities(toAdd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
@ -0,0 +1,144 @@
|
||||
// Copyright 2018 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// NamespaceDetectorType is a type of detector that extracts the namespaces.
	NamespaceDetectorType DetectorType = "namespace"
	// FeatureDetectorType is a type of detector that extracts the features.
	FeatureDetectorType DetectorType = "feature"
)

// DetectorTypes contains all detector types.
var (
	DetectorTypes = []DetectorType{
		NamespaceDetectorType,
		FeatureDetectorType,
	}
	// ErrFailedToParseDetectorType is the error returned when a detector type could
	// not be parsed from a string.
	ErrFailedToParseDetectorType = errors.New("failed to parse DetectorType from input")
	// ErrInvalidDetector is the error returned when a detector from database has
	// invalid name or version or type.
	ErrInvalidDetector = errors.New("the detector has invalid metadata")
)

// DetectorType is the type of a detector.
type DetectorType string

// Value implements the database/sql/driver.Valuer interface.
func (s DetectorType) Value() (driver.Value, error) {
	return string(s), nil
}

// Scan implements the database/sql.Scanner interface.
func (s *DetectorType) Scan(value interface{}) error {
	// Drivers may return the column as either []byte or string; the original
	// accepted only []byte and its error message referred to the wrong type
	// ("Severity", a copy-paste slip).
	var str string
	switch v := value.(type) {
	case []byte:
		str = string(v)
	case string:
		str = v
	default:
		return errors.New("could not scan a DetectorType from a non-string input")
	}

	var err error
	*s, err = NewDetectorType(str)
	return err
}

// NewDetectorType attempts to parse a string into a standard DetectorType
// value. Matching is case-insensitive.
func NewDetectorType(s string) (DetectorType, error) {
	for _, t := range DetectorTypes {
		if strings.EqualFold(s, string(t)) {
			return t, nil
		}
	}

	return "", ErrFailedToParseDetectorType
}

// Valid checks if a detector type is defined.
func (s DetectorType) Valid() bool {
	for _, t := range DetectorTypes {
		if s == t {
			return true
		}
	}

	return false
}

// Detector is a versioned Clair extension.
type Detector struct {
	// Name of an extension should be non-empty and uniquely identifies the
	// extension.
	Name string `json:"name"`
	// Version of an extension should be non-empty.
	Version string `json:"version"`
	// DType is the type of the extension and should be one of the types in
	// DetectorTypes.
	DType DetectorType `json:"dtype"`
}

// Valid checks if all fields in the detector satisfies the spec.
func (d Detector) Valid() bool {
	return d.Name != "" && d.Version != "" && d.DType.Valid()
}

// String returns a unique string representation of the detector.
//
// NOTE(review): DType is not part of the representation, so a feature and a
// namespace detector sharing name and version serialize identically —
// presumably intentional, but worth confirming before relying on uniqueness.
func (d Detector) String() string {
	return fmt.Sprintf("%s:%s", d.Name, d.Version)
}

// NewNamespaceDetector returns a new namespace detector.
func NewNamespaceDetector(name, version string) Detector {
	return Detector{
		Name:    name,
		Version: version,
		DType:   NamespaceDetectorType,
	}
}

// NewFeatureDetector returns a new feature detector.
func NewFeatureDetector(name, version string) Detector {
	return Detector{
		Name:    name,
		Version: version,
		DType:   FeatureDetectorType,
	}
}

// SerializeDetectors returns the string representation of given detectors.
func SerializeDetectors(detectors []Detector) []string {
	strDetectors := make([]string, 0, len(detectors))
	for _, d := range detectors {
		strDetectors = append(strDetectors, d.String())
	}

	return strDetectors
}
|
@ -0,0 +1,35 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
// StorageError is database error
type StorageError struct {
	// reason is the human-readable message returned by Error.
	reason string
	// original is the underlying cause, if any; exposed via Unwrap.
	original error
}

// Error implements the error interface.
func (e *StorageError) Error() string {
	return e.reason
}

// Unwrap returns the wrapped internal error (possibly nil) so that callers
// can inspect the cause with errors.Is / errors.As. The original type stored
// the cause but provided no way to access it.
func (e *StorageError) Unwrap() error {
	return e.original
}

// NewStorageErrorWithInternalError creates a new database error
func NewStorageErrorWithInternalError(reason string, originalError error) *StorageError {
	return &StorageError{reason, originalError}
}

// NewStorageError creates a new database error
func NewStorageError(reason string) *StorageError {
	return &StorageError{reason, nil}
}
|
@ -0,0 +1,96 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
// Feature represents a package detected in a layer but the namespace is not
|
||||
// determined.
|
||||
//
|
||||
// e.g. Name: Libssl1.0, Version: 1.0, VersionFormat: dpkg, Type: binary
|
||||
// dpkg is the version format of the installer package manager, which in this
|
||||
// case could be dpkg or apk.
|
||||
type Feature struct {
|
||||
Name string `json:"name"`
|
||||
Version string `json:"version"`
|
||||
VersionFormat string `json:"versionFormat"`
|
||||
Type FeatureType `json:"type"`
|
||||
}
|
||||
|
||||
// NamespacedFeature is a feature with determined namespace and can be affected
|
||||
// by vulnerabilities.
|
||||
//
|
||||
// e.g. OpenSSL 1.0 dpkg Debian:7.
|
||||
type NamespacedFeature struct {
|
||||
Feature `json:"feature"`
|
||||
|
||||
Namespace Namespace `json:"namespace"`
|
||||
}
|
||||
|
||||
// AffectedNamespacedFeature is a namespaced feature affected by the
|
||||
// vulnerabilities with fixed-in versions for this feature.
|
||||
type AffectedNamespacedFeature struct {
|
||||
NamespacedFeature
|
||||
|
||||
AffectedBy []VulnerabilityWithFixedIn
|
||||
}
|
||||
|
||||
// VulnerabilityWithFixedIn is used for AffectedNamespacedFeature to retrieve
|
||||
// the affecting vulnerabilities and the fixed-in versions for the feature.
|
||||
type VulnerabilityWithFixedIn struct {
|
||||
Vulnerability
|
||||
|
||||
FixedInVersion string
|
||||
}
|
||||
|
||||
// AffectedFeature is used to determine whether a namespaced feature is affected
|
||||
// by a Vulnerability. Namespace and Feature Name is unique. Affected Feature is
|
||||
// bound to vulnerability.
|
||||
type AffectedFeature struct {
|
||||
// FeatureType determines which type of package it affects.
|
||||
FeatureType FeatureType
|
||||
Namespace Namespace
|
||||
FeatureName string
|
||||
// FixedInVersion is known next feature version that's not affected by the
|
||||
// vulnerability. Empty FixedInVersion means the unaffected version is
|
||||
// unknown.
|
||||
FixedInVersion string
|
||||
// AffectedVersion contains the version range to determine whether or not a
|
||||
// feature is affected.
|
||||
AffectedVersion string
|
||||
}
|
||||
|
||||
// NullableAffectedNamespacedFeature is an affectednamespacedfeature with
|
||||
// whether it's found in datastore.
|
||||
type NullableAffectedNamespacedFeature struct {
|
||||
AffectedNamespacedFeature
|
||||
|
||||
Valid bool
|
||||
}
|
||||
|
||||
func NewFeature(name string, version string, versionFormat string, featureType FeatureType) *Feature {
|
||||
return &Feature{name, version, versionFormat, featureType}
|
||||
}
|
||||
|
||||
func NewBinaryPackage(name string, version string, versionFormat string) *Feature {
|
||||
return &Feature{name, version, versionFormat, BinaryPackage}
|
||||
}
|
||||
|
||||
func NewSourcePackage(name string, version string, versionFormat string) *Feature {
|
||||
return &Feature{name, version, versionFormat, SourcePackage}
|
||||
}
|
||||
|
||||
func NewNamespacedFeature(namespace *Namespace, feature *Feature) *NamespacedFeature {
|
||||
// TODO: namespaced feature should use pointer values
|
||||
return &NamespacedFeature{*feature, *namespace}
|
||||
}
|
@ -0,0 +1,52 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// FeatureType indicates the type of feature that a vulnerability
// affects: a source package or a binary package.
type FeatureType string

const (
	// SourcePackage is a package in source form.
	SourcePackage FeatureType = "source"
	// BinaryPackage is a package in compiled/installed form.
	BinaryPackage FeatureType = "binary"
)

// featureTypes enumerates every valid FeatureType; Scan validates against it.
var featureTypes = []FeatureType{
	SourcePackage,
	BinaryPackage,
}

// Scan implements the database/sql.Scanner interface.
//
// It accepts string and []byte values (drivers may return either for text
// columns) and, per the Scanner contract, returns an error — rather than
// panicking — when the value is not a recognized feature type.
func (t *FeatureType) Scan(value interface{}) error {
	var val string
	switch v := value.(type) {
	case string:
		val = v
	case []byte:
		val = string(v)
	default:
		return fmt.Errorf("unsupported type %T for FeatureType", value)
	}

	for _, ft := range featureTypes {
		if string(ft) == val {
			*t = ft
			return nil
		}
	}

	return fmt.Errorf("invalid feature type received from database: '%s'", val)
}

// Value implements the database/sql/driver.Valuer interface.
func (t *FeatureType) Value() (driver.Value, error) {
	return string(*t), nil
}
|
@ -0,0 +1,65 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
// Layer is a layer with all the detected features and namespaces.
type Layer struct {
	// Hash is the sha-256 tarsum on the layer's blob content.
	Hash string `json:"hash"`
	// By contains the detectors that scanned this layer.
	By         []Detector       `json:"by"`
	Namespaces []LayerNamespace `json:"namespaces"`
	Features   []LayerFeature   `json:"features"`
}
|
||||
|
||||
func (l *Layer) GetFeatures() []Feature {
|
||||
features := make([]Feature, 0, len(l.Features))
|
||||
for _, f := range l.Features {
|
||||
features = append(features, f.Feature)
|
||||
}
|
||||
|
||||
return features
|
||||
}
|
||||
|
||||
func (l *Layer) GetNamespaces() []Namespace {
|
||||
namespaces := make([]Namespace, 0, len(l.Namespaces)+len(l.Features))
|
||||
for _, ns := range l.Namespaces {
|
||||
namespaces = append(namespaces, ns.Namespace)
|
||||
}
|
||||
for _, f := range l.Features {
|
||||
if f.PotentialNamespace.Valid() {
|
||||
namespaces = append(namespaces, f.PotentialNamespace)
|
||||
}
|
||||
}
|
||||
|
||||
return namespaces
|
||||
}
|
||||
|
||||
// LayerNamespace is a namespace with detection information.
type LayerNamespace struct {
	Namespace `json:"namespace"`

	// By is the detector that found the namespace.
	By Detector `json:"by"`
}
|
||||
|
||||
// LayerFeature is a feature with detection information.
type LayerFeature struct {
	Feature `json:"feature"`

	// By is the detector that found the feature.
	By Detector `json:"by"`
	// PotentialNamespace is a namespace candidate attached directly to the
	// feature; Layer.GetNamespaces includes it only when it is Valid.
	PotentialNamespace Namespace `json:"potentialNamespace"`
}
|
@ -0,0 +1,41 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
)
|
||||
|
||||
// MetadataMap is for storing the metadata returned by vulnerability database.
type MetadataMap map[string]interface{}

// Scan implements the database/sql.Scanner interface. A NULL column leaves
// the map untouched; any other value is decoded as JSON into the map.
func (mm *MetadataMap) Scan(value interface{}) error {
	if value == nil {
		return nil
	}

	// github.com/lib/pq decodes TEXT/VARCHAR fields into strings, but other
	// drivers may hand back []byte; accept both and report anything else as
	// an error per the Scanner contract instead of panicking.
	var data []byte
	switch v := value.(type) {
	case string:
		data = []byte(v)
	case []byte:
		data = v
	default:
		return fmt.Errorf("unsupported type %T for MetadataMap", value)
	}
	return json.Unmarshal(data, mm)
}

// Value implements the database/sql/driver.Valuer interface by encoding the
// map as a JSON string.
func (mm *MetadataMap) Value() (driver.Value, error) {
	// Named "encoded" so the json package is not shadowed.
	encoded, err := json.Marshal(*mm)
	return string(encoded), err
}
|
@ -1,235 +0,0 @@
|
||||
// Copyright 2017 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Processors are extensions to scan a layer's content.
type Processors struct {
	Listers   []string
	Detectors []string
}

// Ancestry is a manifest that keeps all layers in an image in order.
type Ancestry struct {
	Name string
	// Layers should be ordered and i_th layer is the parent of i+1_th layer in
	// the slice.
	Layers []Layer
}

// AncestryWithFeatures is an ancestry with namespaced features detected in the
// ancestry, which is processed by `ProcessedBy`.
type AncestryWithFeatures struct {
	Ancestry

	ProcessedBy Processors
	Features    []NamespacedFeature
}

// Layer corresponds to a layer in an image processed by `ProcessedBy`.
type Layer struct {
	// Hash is content hash of the layer.
	Hash string
}

// LayerWithContent is a layer with its detected namespaces and features by
// ProcessedBy.
type LayerWithContent struct {
	Layer

	ProcessedBy Processors
	Namespaces  []Namespace
	Features    []Feature
}
|
||||
|
||||
// Namespace is the contextual information around features.
//
// e.g. Debian:7, NodeJS.
type Namespace struct {
	Name          string
	VersionFormat string
}

// Feature represents a package detected in a layer but the namespace is not
// determined.
//
// e.g. Name: OpenSSL, Version: 1.0, VersionFormat: dpkg.
// dpkg implies the installer package manager but the namespace (might be
// debian:7, debian:8, ...) could not be determined.
type Feature struct {
	Name          string
	Version       string
	VersionFormat string
}

// NamespacedFeature is a feature with a determined namespace and can be
// affected by vulnerabilities.
//
// e.g. OpenSSL 1.0 dpkg Debian:7.
type NamespacedFeature struct {
	Feature

	Namespace Namespace
}

// AffectedNamespacedFeature is a namespaced feature affected by the
// vulnerabilities with fixed-in versions for this feature.
type AffectedNamespacedFeature struct {
	NamespacedFeature

	AffectedBy []VulnerabilityWithFixedIn
}

// VulnerabilityWithFixedIn is used for AffectedNamespacedFeature to retrieve
// the affecting vulnerabilities and the fixed-in versions for the feature.
type VulnerabilityWithFixedIn struct {
	Vulnerability

	FixedInVersion string
}
|
||||
|
||||
// AffectedFeature is used to determine whether a namespaced feature is affected
// by a Vulnerability. Namespace and FeatureName together are unique. An
// AffectedFeature is bound to a vulnerability.
type AffectedFeature struct {
	Namespace   Namespace
	FeatureName string
	// FixedInVersion is known next feature version that's not affected by the
	// vulnerability. Empty FixedInVersion means the unaffected version is
	// unknown.
	FixedInVersion string
	// AffectedVersion contains the version range to determine whether or not a
	// feature is affected.
	AffectedVersion string
}

// VulnerabilityID is an identifier for every vulnerability. Every vulnerability
// has a unique namespace and name.
type VulnerabilityID struct {
	Name      string
	Namespace string
}

// Vulnerability represents CVE or similar vulnerability reports.
type Vulnerability struct {
	Name      string
	Namespace Namespace

	Description string
	Link        string
	Severity    Severity

	Metadata MetadataMap
}

// VulnerabilityWithAffected is a vulnerability with all known affected
// features.
type VulnerabilityWithAffected struct {
	Vulnerability

	Affected []AffectedFeature
}
|
||||
|
||||
// PagedVulnerableAncestries is a vulnerability with a page of affected
// ancestries each with a special index attached for streaming purpose. The
// current page number and next page number are used for navigation.
type PagedVulnerableAncestries struct {
	Vulnerability

	// Affected is a map of special indexes to Ancestries, which the pair
	// should be unique in a stream. Every index in the map should be larger
	// than those in the previous page.
	Affected map[int]string

	Limit   int
	Current PageNumber
	Next    PageNumber

	// End signals the end of the pages.
	End bool
}

// NotificationHook is a message sent to another service to inform of a change
// to a Vulnerability or the Ancestries affected by a Vulnerability. It contains
// the name of a notification that should be read and marked as read via the
// API.
type NotificationHook struct {
	Name string

	Created  time.Time
	Notified time.Time
	Deleted  time.Time
}

// VulnerabilityNotification is a notification for vulnerability changes.
type VulnerabilityNotification struct {
	NotificationHook

	Old *Vulnerability
	New *Vulnerability
}

// VulnerabilityNotificationWithVulnerable is a notification for vulnerability
// changes with vulnerable ancestries.
type VulnerabilityNotificationWithVulnerable struct {
	NotificationHook

	Old *PagedVulnerableAncestries
	New *PagedVulnerableAncestries
}

// PageNumber is used to do pagination.
type PageNumber string

// MetadataMap stores the metadata returned by a vulnerability database.
type MetadataMap map[string]interface{}

// NullableAffectedNamespacedFeature is an AffectedNamespacedFeature with
// whether it's found in the datastore.
type NullableAffectedNamespacedFeature struct {
	AffectedNamespacedFeature

	Valid bool
}

// NullableVulnerability is a vulnerability with whether the vulnerability is
// found in the datastore.
type NullableVulnerability struct {
	VulnerabilityWithAffected

	Valid bool
}
|
||||
|
||||
// Scan implements the database/sql.Scanner interface for MetadataMap.
// A NULL column leaves the map untouched; otherwise the column is decoded as
// JSON into the map.
func (mm *MetadataMap) Scan(value interface{}) error {
	if value == nil {
		return nil
	}

	// github.com/lib/pq decodes TEXT/VARCHAR fields into strings.
	// NOTE(review): the panic message mentions []byte but the assertion is on
	// string; the Scanner contract prefers returning an error over panicking.
	val, ok := value.(string)
	if !ok {
		panic("got type other than []byte from database")
	}
	return json.Unmarshal([]byte(val), mm)
}

// Value implements the database/sql/driver.Valuer interface by encoding the
// map as a JSON string.
func (mm *MetadataMap) Value() (driver.Value, error) {
	// NOTE(review): the local variable shadows the json package.
	json, err := json.Marshal(*mm)
	return string(json), err
}
|
@ -0,0 +1,34 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
// Namespace is the contextual information around features.
//
// e.g. Debian:7, NodeJS.
type Namespace struct {
	Name          string `json:"name"`
	VersionFormat string `json:"versionFormat"`
}

// NewNamespace constructs a Namespace from its name and version format.
func NewNamespace(name string, versionFormat string) *Namespace {
	return &Namespace{Name: name, VersionFormat: versionFormat}
}

// Valid reports whether both the name and the version format are non-empty.
func (ns *Namespace) Valid() bool {
	return ns.Name != "" && ns.VersionFormat != ""
}
|
@ -0,0 +1,69 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/pkg/pagination"
|
||||
)
|
||||
|
||||
// NotificationHook is a message sent to another service to inform of a change
// to a Vulnerability or the Ancestries affected by a Vulnerability. It contains
// the name of a notification that should be read and marked as read via the
// API.
type NotificationHook struct {
	Name string

	// Lifecycle timestamps of the notification.
	Created  time.Time
	Notified time.Time
	Deleted  time.Time
}
|
||||
|
||||
// VulnerabilityNotification is a notification for vulnerability changes,
// carrying the vulnerability before and after the change.
type VulnerabilityNotification struct {
	NotificationHook

	Old *Vulnerability
	New *Vulnerability
}
|
||||
|
||||
// VulnerabilityNotificationWithVulnerable is a notification for vulnerability
// changes with vulnerable ancestries, paged for streaming.
type VulnerabilityNotificationWithVulnerable struct {
	NotificationHook

	Old *PagedVulnerableAncestries
	New *PagedVulnerableAncestries
}
|
||||
|
||||
// PagedVulnerableAncestries is a vulnerability with a page of affected
// ancestries each with a special index attached for streaming purpose. The
// current and next page tokens are used for navigation.
type PagedVulnerableAncestries struct {
	Vulnerability

	// Affected is a map of special indexes to Ancestries, which the pair
	// should be unique in a stream. Every index in the map should be larger
	// than those in the previous page.
	Affected map[int]string

	Limit   int
	Current pagination.Token
	Next    pagination.Token

	// End signals the end of the pages.
	End bool
}
|
@ -1,261 +0,0 @@
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/lib/pq"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/pkg/commonerr"
|
||||
)
|
||||
|
||||
// UpsertAncestry replaces the stored ancestry of the given name with the
// supplied layers, features and processors. The name and at least one layer
// are required.
func (tx *pgSession) UpsertAncestry(ancestry database.Ancestry, features []database.NamespacedFeature, processedBy database.Processors) error {
	if ancestry.Name == "" {
		log.Warning("Empty ancestry name is not allowed")
		return commonerr.NewBadRequestError("could not insert an ancestry with empty name")
	}

	if len(ancestry.Layers) == 0 {
		log.Warning("Empty ancestry is not allowed")
		return commonerr.NewBadRequestError("could not insert an ancestry with 0 layers")
	}

	// Upsert is implemented as delete-then-insert within the transaction.
	err := tx.deleteAncestry(ancestry.Name)
	if err != nil {
		return err
	}

	var ancestryID int64
	err = tx.QueryRow(insertAncestry, ancestry.Name).Scan(&ancestryID)
	if err != nil {
		// A unique violation means a concurrent routine re-inserted the same
		// ancestry between our delete and insert.
		if isErrUniqueViolation(err) {
			return handleError("insertAncestry", errors.New("Other Go-routine is processing this ancestry (skip)."))
		}
		return handleError("insertAncestry", err)
	}

	err = tx.insertAncestryLayers(ancestryID, ancestry.Layers)
	if err != nil {
		return err
	}

	err = tx.insertAncestryFeatures(ancestryID, features)
	if err != nil {
		return err
	}

	// Finally record which listers/detectors produced this ancestry.
	return tx.persistProcessors(persistAncestryLister,
		"persistAncestryLister",
		persistAncestryDetector,
		"persistAncestryDetector",
		ancestryID, processedBy)
}
|
||||
|
||||
// FindAncestry retrieves an ancestry by name together with the processors
// that produced it. The boolean result is false when no such ancestry exists.
func (tx *pgSession) FindAncestry(name string) (database.Ancestry, database.Processors, bool, error) {
	ancestry := database.Ancestry{Name: name}
	processed := database.Processors{}

	var ancestryID int64
	err := tx.QueryRow(searchAncestry, name).Scan(&ancestryID)
	if err != nil {
		// Missing row is "not found", not an error.
		if err == sql.ErrNoRows {
			return ancestry, processed, false, nil
		}
		return ancestry, processed, false, handleError("searchAncestry", err)
	}

	ancestry.Layers, err = tx.findAncestryLayers(ancestryID)
	if err != nil {
		return ancestry, processed, false, err
	}

	processed.Detectors, err = tx.findProcessors(searchAncestryDetectors, "searchAncestryDetectors", "detector", ancestryID)
	if err != nil {
		return ancestry, processed, false, err
	}

	processed.Listers, err = tx.findProcessors(searchAncestryListers, "searchAncestryListers", "lister", ancestryID)
	if err != nil {
		return ancestry, processed, false, err
	}

	return ancestry, processed, true, nil
}
|
||||
|
||||
func (tx *pgSession) FindAncestryFeatures(name string) (database.AncestryWithFeatures, bool, error) {
|
||||
var (
|
||||
awf database.AncestryWithFeatures
|
||||
ok bool
|
||||
err error
|
||||
)
|
||||
awf.Ancestry, awf.ProcessedBy, ok, err = tx.FindAncestry(name)
|
||||
if err != nil {
|
||||
return awf, false, err
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return awf, false, nil
|
||||
}
|
||||
|
||||
rows, err := tx.Query(searchAncestryFeatures, name)
|
||||
if err != nil {
|
||||
return awf, false, handleError("searchAncestryFeatures", err)
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
nf := database.NamespacedFeature{}
|
||||
err := rows.Scan(&nf.Namespace.Name, &nf.Namespace.VersionFormat, &nf.Feature.Name, &nf.Feature.Version)
|
||||
if err != nil {
|
||||
return awf, false, handleError("searchAncestryFeatures", err)
|
||||
}
|
||||
nf.Feature.VersionFormat = nf.Namespace.VersionFormat
|
||||
awf.Features = append(awf.Features, nf)
|
||||
}
|
||||
|
||||
return awf, true, nil
|
||||
}
|
||||
|
||||
func (tx *pgSession) deleteAncestry(name string) error {
|
||||
result, err := tx.Exec(removeAncestry, name)
|
||||
if err != nil {
|
||||
return handleError("removeAncestry", err)
|
||||
}
|
||||
|
||||
_, err = result.RowsAffected()
|
||||
if err != nil {
|
||||
return handleError("removeAncestry", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tx *pgSession) findProcessors(query, queryName, processorType string, id int64) ([]string, error) {
|
||||
rows, err := tx.Query(query, id)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
log.Warning("No " + processorType + " are used")
|
||||
return nil, nil
|
||||
}
|
||||
return nil, handleError(queryName, err)
|
||||
}
|
||||
|
||||
var (
|
||||
processors []string
|
||||
processor string
|
||||
)
|
||||
|
||||
for rows.Next() {
|
||||
err := rows.Scan(&processor)
|
||||
if err != nil {
|
||||
return nil, handleError(queryName, err)
|
||||
}
|
||||
processors = append(processors, processor)
|
||||
}
|
||||
|
||||
return processors, nil
|
||||
}
|
||||
|
||||
func (tx *pgSession) findAncestryLayers(ancestryID int64) ([]database.Layer, error) {
|
||||
rows, err := tx.Query(searchAncestryLayer, ancestryID)
|
||||
if err != nil {
|
||||
return nil, handleError("searchAncestryLayer", err)
|
||||
}
|
||||
layers := []database.Layer{}
|
||||
for rows.Next() {
|
||||
var layer database.Layer
|
||||
err := rows.Scan(&layer.Hash)
|
||||
if err != nil {
|
||||
return nil, handleError("searchAncestryLayer", err)
|
||||
}
|
||||
layers = append(layers, layer)
|
||||
}
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
func (tx *pgSession) insertAncestryLayers(ancestryID int64, layers []database.Layer) error {
|
||||
layerIDs := map[string]sql.NullInt64{}
|
||||
for _, l := range layers {
|
||||
layerIDs[l.Hash] = sql.NullInt64{}
|
||||
}
|
||||
|
||||
layerHashes := []string{}
|
||||
for hash := range layerIDs {
|
||||
layerHashes = append(layerHashes, hash)
|
||||
}
|
||||
|
||||
rows, err := tx.Query(searchLayerIDs, pq.Array(layerHashes))
|
||||
if err != nil {
|
||||
return handleError("searchLayerIDs", err)
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
var (
|
||||
layerID sql.NullInt64
|
||||
layerName string
|
||||
)
|
||||
err := rows.Scan(&layerID, &layerName)
|
||||
if err != nil {
|
||||
return handleError("searchLayerIDs", err)
|
||||
}
|
||||
layerIDs[layerName] = layerID
|
||||
}
|
||||
|
||||
notFound := []string{}
|
||||
for hash, id := range layerIDs {
|
||||
if !id.Valid {
|
||||
notFound = append(notFound, hash)
|
||||
}
|
||||
}
|
||||
|
||||
if len(notFound) > 0 {
|
||||
return handleError("searchLayerIDs", fmt.Errorf("Layer %s is not found in database", strings.Join(notFound, ",")))
|
||||
}
|
||||
|
||||
//TODO(Sida): use bulk insert.
|
||||
stmt, err := tx.Prepare(insertAncestryLayer)
|
||||
if err != nil {
|
||||
return handleError("insertAncestryLayer", err)
|
||||
}
|
||||
|
||||
defer stmt.Close()
|
||||
for index, layer := range layers {
|
||||
_, err := stmt.Exec(ancestryID, index, layerIDs[layer.Hash].Int64)
|
||||
if err != nil {
|
||||
return handleError("insertAncestryLayer", commonerr.CombineErrors(err, stmt.Close()))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tx *pgSession) insertAncestryFeatures(ancestryID int64, features []database.NamespacedFeature) error {
|
||||
featureIDs, err := tx.findNamespacedFeatureIDs(features)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//TODO(Sida): use bulk insert.
|
||||
stmtFeatures, err := tx.Prepare(insertAncestryFeature)
|
||||
if err != nil {
|
||||
return handleError("insertAncestryFeature", err)
|
||||
}
|
||||
|
||||
defer stmtFeatures.Close()
|
||||
|
||||
for _, id := range featureIDs {
|
||||
if !id.Valid {
|
||||
return errors.New("requested namespaced feature is not in database")
|
||||
}
|
||||
|
||||
_, err := stmtFeatures.Exec(ancestryID, id)
|
||||
if err != nil {
|
||||
return handleError("insertAncestryFeature", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -0,0 +1,160 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ancestry
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/detector"
|
||||
"github.com/coreos/clair/database/pgsql/layer"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
)
|
||||
|
||||
// SQL statements for the ancestry table and its feature association.
const (
	insertAncestry = `
		INSERT INTO ancestry (name) VALUES ($1) RETURNING id`

	findAncestryID = `SELECT id FROM ancestry WHERE name = $1`
	removeAncestry = `DELETE FROM ancestry WHERE name = $1`

	insertAncestryFeatures = `
		INSERT INTO ancestry_feature
		(ancestry_layer_id, namespaced_feature_id, feature_detector_id, namespace_detector_id) VALUES
		($1, $2, $3, $4)`
)
|
||||
|
||||
func FindAncestry(tx *sql.Tx, name string) (database.Ancestry, bool, error) {
|
||||
var (
|
||||
ancestry = database.Ancestry{Name: name}
|
||||
err error
|
||||
)
|
||||
|
||||
id, ok, err := FindAncestryID(tx, name)
|
||||
if !ok || err != nil {
|
||||
return ancestry, ok, err
|
||||
}
|
||||
|
||||
if ancestry.By, err = FindAncestryDetectors(tx, id); err != nil {
|
||||
return ancestry, false, err
|
||||
}
|
||||
|
||||
if ancestry.Layers, err = FindAncestryLayers(tx, id); err != nil {
|
||||
return ancestry, false, err
|
||||
}
|
||||
|
||||
return ancestry, true, nil
|
||||
}
|
||||
|
||||
// UpsertAncestry replaces any stored ancestry of the same name with the
// given one, persisting its detectors, layers and per-layer features.
func UpsertAncestry(tx *sql.Tx, ancestry database.Ancestry) error {
	if !ancestry.Valid() {
		return database.ErrInvalidParameters
	}

	// Upsert is implemented as remove-then-insert within the transaction.
	if err := RemoveAncestry(tx, ancestry.Name); err != nil {
		return err
	}

	id, err := InsertAncestry(tx, ancestry.Name)
	if err != nil {
		return err
	}

	detectorIDs, err := detector.FindDetectorIDs(tx, ancestry.By)
	if err != nil {
		return err
	}

	// insert ancestry metadata
	if err := InsertAncestryDetectors(tx, id, detectorIDs); err != nil {
		return err
	}

	// Resolve the layer hashes to database ids; every layer must already be
	// persisted before the ancestry referencing it is upserted.
	layers := make([]string, 0, len(ancestry.Layers))
	for _, l := range ancestry.Layers {
		layers = append(layers, l.Hash)
	}

	layerIDs, ok, err := layer.FindLayerIDs(tx, layers)
	if err != nil {
		return err
	}

	if !ok {
		log.Error("layer cannot be found, this indicates that the internal logic of calling UpsertAncestry is wrong or the database is corrupted.")
		return database.ErrMissingEntities
	}

	ancestryLayerIDs, err := InsertAncestryLayers(tx, id, layerIDs)
	if err != nil {
		return err
	}

	// ancestryLayerIDs[i] corresponds to ancestry.Layers[i]; attach each
	// layer's features to its ancestry_layer row.
	for i, id := range ancestryLayerIDs {
		if err := InsertAncestryFeatures(tx, id, ancestry.Layers[i]); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
func InsertAncestry(tx *sql.Tx, name string) (int64, error) {
|
||||
var id int64
|
||||
err := tx.QueryRow(insertAncestry, name).Scan(&id)
|
||||
if err != nil {
|
||||
if util.IsErrUniqueViolation(err) {
|
||||
return 0, util.HandleError("insertAncestry", errors.New("other Go-routine is processing this ancestry (skip)"))
|
||||
}
|
||||
|
||||
return 0, util.HandleError("insertAncestry", err)
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func FindAncestryID(tx *sql.Tx, name string) (int64, bool, error) {
|
||||
var id sql.NullInt64
|
||||
if err := tx.QueryRow(findAncestryID, name).Scan(&id); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return 0, false, nil
|
||||
}
|
||||
|
||||
return 0, false, util.HandleError("findAncestryID", err)
|
||||
}
|
||||
|
||||
return id.Int64, true, nil
|
||||
}
|
||||
|
||||
func RemoveAncestry(tx *sql.Tx, name string) error {
|
||||
result, err := tx.Exec(removeAncestry, name)
|
||||
if err != nil {
|
||||
return util.HandleError("removeAncestry", err)
|
||||
}
|
||||
|
||||
affected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return util.HandleError("removeAncestry", err)
|
||||
}
|
||||
|
||||
if affected != 0 {
|
||||
log.WithField("ancestry", name).Debug("removed ancestry")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -0,0 +1,48 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ancestry
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/detector"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
)
|
||||
|
||||
// selectAncestryDetectors fetches the detectors recorded for an ancestry.
var selectAncestryDetectors = `
	SELECT d.name, d.version, d.dtype
	FROM ancestry_detector, detector AS d
	WHERE ancestry_detector.detector_id = d.id AND ancestry_detector.ancestry_id = $1;`

// insertAncestryDetectors records a detector for an ancestry, skipping pairs
// that already exist.
var insertAncestryDetectors = `
	INSERT INTO ancestry_detector (ancestry_id, detector_id)
	SELECT $1, $2
	WHERE NOT EXISTS (SELECT id FROM ancestry_detector WHERE ancestry_id = $1 AND detector_id = $2)`
|
||||
|
||||
func FindAncestryDetectors(tx *sql.Tx, id int64) ([]database.Detector, error) {
|
||||
detectors, err := detector.GetDetectors(tx, selectAncestryDetectors, id)
|
||||
return detectors, err
|
||||
}
|
||||
|
||||
func InsertAncestryDetectors(tx *sql.Tx, ancestryID int64, detectorIDs []int64) error {
|
||||
for _, detectorID := range detectorIDs {
|
||||
if _, err := tx.Exec(insertAncestryDetectors, ancestryID, detectorID); err != nil {
|
||||
return util.HandleError("insertAncestryDetectors", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -0,0 +1,146 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ancestry
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/detector"
|
||||
"github.com/coreos/clair/database/pgsql/feature"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
"github.com/coreos/clair/pkg/commonerr"
|
||||
)
|
||||
|
||||
const findAncestryFeatures = `
|
||||
SELECT namespace.name, namespace.version_format, feature.name,
|
||||
feature.version, feature.version_format, feature_type.name, ancestry_layer.ancestry_index,
|
||||
ancestry_feature.feature_detector_id, ancestry_feature.namespace_detector_id
|
||||
FROM namespace, feature, feature_type, namespaced_feature, ancestry_layer, ancestry_feature
|
||||
WHERE ancestry_layer.ancestry_id = $1
|
||||
AND feature_type.id = feature.type
|
||||
AND ancestry_feature.ancestry_layer_id = ancestry_layer.id
|
||||
AND ancestry_feature.namespaced_feature_id = namespaced_feature.id
|
||||
AND namespaced_feature.feature_id = feature.id
|
||||
AND namespaced_feature.namespace_id = namespace.id`
|
||||
|
||||
func FindAncestryFeatures(tx *sql.Tx, ancestryID int64, detectors detector.DetectorMap) (map[int64][]database.AncestryFeature, error) {
|
||||
// ancestry_index -> ancestry features
|
||||
featureMap := make(map[int64][]database.AncestryFeature)
|
||||
// retrieve ancestry layer's namespaced features
|
||||
rows, err := tx.Query(findAncestryFeatures, ancestryID)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("findAncestryFeatures", err)
|
||||
}
|
||||
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var (
|
||||
featureDetectorID int64
|
||||
namespaceDetectorID sql.NullInt64
|
||||
feature database.NamespacedFeature
|
||||
// index is used to determine which layer the feature belongs to.
|
||||
index sql.NullInt64
|
||||
)
|
||||
|
||||
if err := rows.Scan(
|
||||
&feature.Namespace.Name,
|
||||
&feature.Namespace.VersionFormat,
|
||||
&feature.Feature.Name,
|
||||
&feature.Feature.Version,
|
||||
&feature.Feature.VersionFormat,
|
||||
&feature.Feature.Type,
|
||||
&index,
|
||||
&featureDetectorID,
|
||||
&namespaceDetectorID,
|
||||
); err != nil {
|
||||
return nil, util.HandleError("findAncestryFeatures", err)
|
||||
}
|
||||
|
||||
if feature.Feature.VersionFormat != feature.Namespace.VersionFormat {
|
||||
// Feature must have the same version format as the associated
|
||||
// namespace version format.
|
||||
return nil, database.ErrInconsistent
|
||||
}
|
||||
|
||||
fDetector, ok := detectors.ByID[featureDetectorID]
|
||||
if !ok {
|
||||
return nil, database.ErrInconsistent
|
||||
}
|
||||
|
||||
var nsDetector database.Detector
|
||||
if !namespaceDetectorID.Valid {
|
||||
nsDetector = database.Detector{}
|
||||
} else {
|
||||
nsDetector, ok = detectors.ByID[namespaceDetectorID.Int64]
|
||||
if !ok {
|
||||
return nil, database.ErrInconsistent
|
||||
}
|
||||
}
|
||||
|
||||
featureMap[index.Int64] = append(featureMap[index.Int64], database.AncestryFeature{
|
||||
NamespacedFeature: feature,
|
||||
FeatureBy: fDetector,
|
||||
NamespaceBy: nsDetector,
|
||||
})
|
||||
}
|
||||
|
||||
return featureMap, nil
|
||||
}
|
||||
|
||||
func InsertAncestryFeatures(tx *sql.Tx, ancestryLayerID int64, layer database.AncestryLayer) error {
|
||||
detectors, err := detector.FindAllDetectors(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nsFeatureIDs, err := feature.FindNamespacedFeatureIDs(tx, layer.GetFeatures())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// find the detectors for each feature
|
||||
stmt, err := tx.Prepare(insertAncestryFeatures)
|
||||
if err != nil {
|
||||
return util.HandleError("insertAncestryFeatures", err)
|
||||
}
|
||||
|
||||
defer stmt.Close()
|
||||
|
||||
for index, id := range nsFeatureIDs {
|
||||
if !id.Valid {
|
||||
return database.ErrMissingEntities
|
||||
}
|
||||
|
||||
var namespaceDetectorID sql.NullInt64
|
||||
var ok bool
|
||||
namespaceDetectorID.Int64, ok = detectors.ByValue[layer.Features[index].NamespaceBy]
|
||||
if ok {
|
||||
namespaceDetectorID.Valid = true
|
||||
}
|
||||
|
||||
featureDetectorID, ok := detectors.ByValue[layer.Features[index].FeatureBy]
|
||||
if !ok {
|
||||
return database.ErrMissingEntities
|
||||
}
|
||||
|
||||
if _, err := stmt.Exec(ancestryLayerID, id, featureDetectorID, namespaceDetectorID); err != nil {
|
||||
return util.HandleError("insertAncestryFeatures", commonerr.CombineErrors(err, stmt.Close()))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -0,0 +1,131 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ancestry
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/detector"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
"github.com/coreos/clair/pkg/commonerr"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
findAncestryLayerHashes = `
|
||||
SELECT layer.hash, ancestry_layer.ancestry_index
|
||||
FROM layer, ancestry_layer
|
||||
WHERE ancestry_layer.ancestry_id = $1
|
||||
AND ancestry_layer.layer_id = layer.id
|
||||
ORDER BY ancestry_layer.ancestry_index ASC`
|
||||
insertAncestryLayers = `
|
||||
INSERT INTO ancestry_layer (ancestry_id, ancestry_index, layer_id) VALUES ($1, $2, $3)
|
||||
RETURNING id`
|
||||
)
|
||||
|
||||
func FindAncestryLayerHashes(tx *sql.Tx, ancestryID int64) (map[int64]string, error) {
|
||||
// retrieve layer indexes and hashes
|
||||
rows, err := tx.Query(findAncestryLayerHashes, ancestryID)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("findAncestryLayerHashes", err)
|
||||
}
|
||||
|
||||
layerHashes := map[int64]string{}
|
||||
for rows.Next() {
|
||||
var (
|
||||
hash string
|
||||
index int64
|
||||
)
|
||||
|
||||
if err = rows.Scan(&hash, &index); err != nil {
|
||||
return nil, util.HandleError("findAncestryLayerHashes", err)
|
||||
}
|
||||
|
||||
if _, ok := layerHashes[index]; ok {
|
||||
// one ancestry index should correspond to only one layer
|
||||
return nil, database.ErrInconsistent
|
||||
}
|
||||
|
||||
layerHashes[index] = hash
|
||||
}
|
||||
|
||||
return layerHashes, nil
|
||||
}
|
||||
|
||||
// insertAncestryLayers inserts the ancestry layers along with its content into
|
||||
// the database. The layers are 0 based indexed in the original order.
|
||||
func InsertAncestryLayers(tx *sql.Tx, ancestryID int64, layers []int64) ([]int64, error) {
|
||||
stmt, err := tx.Prepare(insertAncestryLayers)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("insertAncestryLayers", err)
|
||||
}
|
||||
|
||||
ancestryLayerIDs := []int64{}
|
||||
for index, layerID := range layers {
|
||||
var ancestryLayerID sql.NullInt64
|
||||
if err := stmt.QueryRow(ancestryID, index, layerID).Scan(&ancestryLayerID); err != nil {
|
||||
return nil, util.HandleError("insertAncestryLayers", commonerr.CombineErrors(err, stmt.Close()))
|
||||
}
|
||||
|
||||
if !ancestryLayerID.Valid {
|
||||
return nil, database.ErrInconsistent
|
||||
}
|
||||
|
||||
ancestryLayerIDs = append(ancestryLayerIDs, ancestryLayerID.Int64)
|
||||
}
|
||||
|
||||
if err := stmt.Close(); err != nil {
|
||||
return nil, util.HandleError("insertAncestryLayers", err)
|
||||
}
|
||||
|
||||
return ancestryLayerIDs, nil
|
||||
}
|
||||
|
||||
func FindAncestryLayers(tx *sql.Tx, id int64) ([]database.AncestryLayer, error) {
|
||||
detectors, err := detector.FindAllDetectors(tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
layerMap, err := FindAncestryLayerHashes(tx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
featureMap, err := FindAncestryFeatures(tx, id, detectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
layers := make([]database.AncestryLayer, len(layerMap))
|
||||
for index, layer := range layerMap {
|
||||
// index MUST match the ancestry layer slice index.
|
||||
if layers[index].Hash == "" && len(layers[index].Features) == 0 {
|
||||
layers[index] = database.AncestryLayer{
|
||||
Hash: layer,
|
||||
Features: featureMap[index],
|
||||
}
|
||||
} else {
|
||||
log.WithFields(log.Fields{
|
||||
"ancestry ID": id,
|
||||
"duplicated ancestry index": index,
|
||||
}).WithError(database.ErrInconsistent).Error("ancestry layers with same ancestry_index is not allowed")
|
||||
return nil, database.ErrInconsistent
|
||||
}
|
||||
}
|
||||
|
||||
return layers, nil
|
||||
}
|
@ -0,0 +1,141 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ancestry
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/testutil"
|
||||
)
|
||||
|
||||
var upsertAncestryTests = []struct {
|
||||
in *database.Ancestry
|
||||
err string
|
||||
title string
|
||||
}{
|
||||
{
|
||||
title: "ancestry with invalid layer",
|
||||
in: &database.Ancestry{
|
||||
Name: "a1",
|
||||
Layers: []database.AncestryLayer{
|
||||
{
|
||||
Hash: "layer-non-existing",
|
||||
},
|
||||
},
|
||||
},
|
||||
err: database.ErrMissingEntities.Error(),
|
||||
},
|
||||
{
|
||||
title: "ancestry with invalid name",
|
||||
in: &database.Ancestry{},
|
||||
err: database.ErrInvalidParameters.Error(),
|
||||
},
|
||||
{
|
||||
title: "new valid ancestry",
|
||||
in: &database.Ancestry{
|
||||
Name: "a",
|
||||
Layers: []database.AncestryLayer{{Hash: "layer-0"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
title: "ancestry with invalid feature",
|
||||
in: &database.Ancestry{
|
||||
Name: "a",
|
||||
By: []database.Detector{testutil.RealDetectors[1], testutil.RealDetectors[2]},
|
||||
Layers: []database.AncestryLayer{{Hash: "layer-1", Features: []database.AncestryFeature{
|
||||
{testutil.FakeNamespacedFeatures[1], testutil.FakeDetector[1], testutil.FakeDetector[2]},
|
||||
}}},
|
||||
},
|
||||
err: database.ErrMissingEntities.Error(),
|
||||
},
|
||||
{
|
||||
title: "replace old ancestry",
|
||||
in: &database.Ancestry{
|
||||
Name: "a",
|
||||
By: []database.Detector{testutil.RealDetectors[1], testutil.RealDetectors[2]},
|
||||
Layers: []database.AncestryLayer{
|
||||
{"layer-1", []database.AncestryFeature{{testutil.RealNamespacedFeatures[1], testutil.RealDetectors[2], testutil.RealDetectors[1]}}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestUpsertAncestry(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTxWithFixtures(t, "TestUpsertAncestry")
|
||||
defer cleanup()
|
||||
|
||||
for _, test := range upsertAncestryTests {
|
||||
t.Run(test.title, func(t *testing.T) {
|
||||
err := UpsertAncestry(tx, *test.in)
|
||||
if test.err != "" {
|
||||
assert.EqualError(t, err, test.err, "unexpected error")
|
||||
return
|
||||
}
|
||||
assert.Nil(t, err)
|
||||
actual, ok, err := FindAncestry(tx, test.in.Name)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, ok)
|
||||
database.AssertAncestryEqual(t, test.in, &actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// findAncestryTests drives TestFindAncestry. Each case names an ancestry to
// look up and the expected outcome: whether it exists (ok), the expected
// error string (empty for none), and the ancestry it should resolve to.
var findAncestryTests = []struct {
	title string
	in    string

	ancestry *database.Ancestry
	err      string
	ok       bool
}{
	{
		// Lookup of a name absent from the fixtures: no error, ok == false.
		title:    "missing ancestry",
		in:       "ancestry-non",
		err:      "",
		ancestry: nil,
		ok:       false,
	},
	{
		// Lookup of a fixture ancestry; expected value comes from testutil.
		title:    "valid ancestry",
		in:       "ancestry-2",
		err:      "",
		ok:       true,
		ancestry: testutil.TakeAncestryPointerFromMap(testutil.RealAncestries, 2),
	},
}
|
||||
|
||||
func TestFindAncestry(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTxWithFixtures(t, "TestFindAncestry")
|
||||
defer cleanup()
|
||||
|
||||
for _, test := range findAncestryTests {
|
||||
t.Run(test.title, func(t *testing.T) {
|
||||
ancestry, ok, err := FindAncestry(tx, test.in)
|
||||
if test.err != "" {
|
||||
assert.EqualError(t, err, test.err, "unexpected error")
|
||||
return
|
||||
}
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, test.ok, ok)
|
||||
if test.ok {
|
||||
database.AssertAncestryEqual(t, test.ancestry, &ancestry)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -1,207 +0,0 @@
|
||||
// Copyright 2017 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
)
|
||||
|
||||
// TestUpsertAncestry exercises the legacy UpsertAncestry API with invalid
// ancestries (unknown layer, empty name), a valid insert, and a replacement
// of an existing ancestry, then reads the result back via
// FindAncestryFeatures.
func TestUpsertAncestry(t *testing.T) {
	store, tx := openSessionForTest(t, "UpsertAncestry", true)
	defer closeTest(t, store, tx)
	// a1 references a layer hash that is not in the fixture data.
	a1 := database.Ancestry{
		Name: "a1",
		Layers: []database.Layer{
			{Hash: "layer-N"},
		},
	}

	// a2 has no name, which is invalid.
	a2 := database.Ancestry{}

	a3 := database.Ancestry{
		Name: "a",
		Layers: []database.Layer{
			{Hash: "layer-0"},
		},
	}

	// a4 reuses the name of a3 so the later upsert replaces it.
	a4 := database.Ancestry{
		Name: "a",
		Layers: []database.Layer{
			{Hash: "layer-1"},
		},
	}

	f1 := database.Feature{
		Name:          "wechat",
		Version:       "0.5",
		VersionFormat: "dpkg",
	}

	// not in database
	f2 := database.Feature{
		Name:          "wechat",
		Version:       "0.6",
		VersionFormat: "dpkg",
	}

	n1 := database.Namespace{
		Name:          "debian:7",
		VersionFormat: "dpkg",
	}

	// Processors deliberately include entries the fixtures do not define.
	p := database.Processors{
		Listers:   []string{"dpkg", "non-existing"},
		Detectors: []string{"os-release", "non-existing"},
	}

	nsf1 := database.NamespacedFeature{
		Namespace: n1,
		Feature:   f1,
	}

	// not in database
	nsf2 := database.NamespacedFeature{
		Namespace: n1,
		Feature:   f2,
	}

	// invalid case
	assert.NotNil(t, tx.UpsertAncestry(a1, nil, database.Processors{}))
	assert.NotNil(t, tx.UpsertAncestry(a2, nil, database.Processors{}))
	// valid case
	assert.Nil(t, tx.UpsertAncestry(a3, nil, database.Processors{}))
	// replace invalid case
	assert.NotNil(t, tx.UpsertAncestry(a4, []database.NamespacedFeature{nsf1, nsf2}, p))
	// replace valid case
	assert.Nil(t, tx.UpsertAncestry(a4, []database.NamespacedFeature{nsf1}, p))
	// validate
	ancestry, ok, err := tx.FindAncestryFeatures("a")
	assert.Nil(t, err)
	assert.True(t, ok)
	assert.Equal(t, a4, ancestry.Ancestry)
}
|
||||
|
||||
func assertProcessorsEqual(t *testing.T, expected database.Processors, actual database.Processors) bool {
|
||||
sort.Strings(expected.Detectors)
|
||||
sort.Strings(actual.Detectors)
|
||||
sort.Strings(expected.Listers)
|
||||
sort.Strings(actual.Listers)
|
||||
return assert.Equal(t, expected.Detectors, actual.Detectors) && assert.Equal(t, expected.Listers, actual.Listers)
|
||||
}
|
||||
|
||||
// TestFindAncestry verifies the legacy FindAncestry for both a missing
// ancestry and the fixture ancestry "ancestry-1", checking the returned
// layers and the processors reported for it.
func TestFindAncestry(t *testing.T) {
	store, tx := openSessionForTest(t, "FindAncestry", true)
	defer closeTest(t, store, tx)

	// not found
	_, _, ok, err := tx.FindAncestry("ancestry-non")
	assert.Nil(t, err)
	assert.False(t, ok)

	// Expected layers for "ancestry-1" as defined in the fixtures.
	expected := database.Ancestry{
		Name: "ancestry-1",
		Layers: []database.Layer{
			{Hash: "layer-0"},
			{Hash: "layer-1"},
			{Hash: "layer-2"},
			{Hash: "layer-3a"},
		},
	}

	expectedProcessors := database.Processors{
		Detectors: []string{"os-release"},
		Listers:   []string{"dpkg"},
	}

	// found
	a, p, ok2, err := tx.FindAncestry("ancestry-1")
	if assert.Nil(t, err) && assert.True(t, ok2) {
		assertAncestryEqual(t, expected, a)
		assertProcessorsEqual(t, expectedProcessors, p)
	}
}
|
||||
|
||||
// assertAncestryWithFeatureEqual compares two AncestryWithFeatures values:
// the ancestry itself, its namespaced features, and the processors that
// produced it. Comparison short-circuits on the first mismatch.
func assertAncestryWithFeatureEqual(t *testing.T, expected database.AncestryWithFeatures, actual database.AncestryWithFeatures) bool {
	return assertAncestryEqual(t, expected.Ancestry, actual.Ancestry) &&
		assertNamespacedFeatureEqual(t, expected.Features, actual.Features) &&
		assertProcessorsEqual(t, expected.ProcessedBy, actual.ProcessedBy)
}
|
||||
func assertAncestryEqual(t *testing.T, expected database.Ancestry, actual database.Ancestry) bool {
|
||||
return assert.Equal(t, expected.Name, actual.Name) && assert.Equal(t, expected.Layers, actual.Layers)
|
||||
}
|
||||
|
||||
// TestFindAncestryFeatures checks the legacy FindAncestryFeatures: a missing
// ancestry reports ok == false without error, and the fixture ancestry
// "ancestry-2" resolves to the expected layers, features, and processors.
func TestFindAncestryFeatures(t *testing.T) {
	store, tx := openSessionForTest(t, "FindAncestryFeatures", true)
	defer closeTest(t, store, tx)

	// invalid
	_, ok, err := tx.FindAncestryFeatures("ancestry-non")
	if assert.Nil(t, err) {
		assert.False(t, ok)
	}

	// Expected value for "ancestry-2" as defined in the fixtures.
	expected := database.AncestryWithFeatures{
		Ancestry: database.Ancestry{
			Name: "ancestry-2",
			Layers: []database.Layer{
				{Hash: "layer-0"},
				{Hash: "layer-1"},
				{Hash: "layer-2"},
				{Hash: "layer-3b"},
			},
		},
		ProcessedBy: database.Processors{
			Detectors: []string{"os-release"},
			Listers:   []string{"dpkg"},
		},
		Features: []database.NamespacedFeature{
			{
				Namespace: database.Namespace{
					Name:          "debian:7",
					VersionFormat: "dpkg",
				},
				Feature: database.Feature{
					Name:          "wechat",
					Version:       "0.5",
					VersionFormat: "dpkg",
				},
			},
			{
				Namespace: database.Namespace{
					Name:          "debian:8",
					VersionFormat: "dpkg",
				},
				Feature: database.Feature{
					Name:          "openssl",
					Version:       "1.0",
					VersionFormat: "dpkg",
				},
			},
		},
	}
	// valid
	ancestry, ok, err := tx.FindAncestryFeatures("ancestry-2")
	if assert.Nil(t, err) && assert.True(t, ok) {
		assertAncestryEqual(t, expected.Ancestry, ancestry.Ancestry)
		assertNamespacedFeatureEqual(t, expected.Features, ancestry.Features)
		assertProcessorsEqual(t, expected.ProcessedBy, ancestry.ProcessedBy)
	}
}
|
@ -1,226 +0,0 @@
|
||||
// Copyright 2017 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pborman/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/ext/versionfmt"
|
||||
"github.com/coreos/clair/ext/versionfmt/dpkg"
|
||||
"github.com/coreos/clair/pkg/strutil"
|
||||
)
|
||||
|
||||
const (
|
||||
numVulnerabilities = 100
|
||||
numFeatures = 100
|
||||
)
|
||||
|
||||
// testGenRandomVulnerabilityAndNamespacedFeature seeds the store with one
// namespace and numFeatures randomly-versioned features in it, and returns
// those namespaced features along with numVulnerabilities generated (but not
// yet inserted) vulnerabilities that affect the feature name.
func testGenRandomVulnerabilityAndNamespacedFeature(t *testing.T, store database.Datastore) ([]database.NamespacedFeature, []database.VulnerabilityWithAffected) {
	tx, err := store.Begin()
	if !assert.Nil(t, err) {
		t.FailNow()
	}

	featureName := "TestFeature"
	featureVersionFormat := dpkg.ParserName
	// Insert the namespace on which we'll work.
	namespace := database.Namespace{
		Name:          "TestRaceAffectsFeatureNamespace1",
		VersionFormat: dpkg.ParserName,
	}

	if !assert.Nil(t, tx.PersistNamespaces([]database.Namespace{namespace})) {
		t.FailNow()
	}

	// Initialize random generator and enforce max procs.
	rand.Seed(time.Now().UnixNano())
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Generate Distinct random features
	features := make([]database.Feature, numFeatures)
	nsFeatures := make([]database.NamespacedFeature, numFeatures)
	for i := 0; i < numFeatures; i++ {
		version := rand.Intn(numFeatures)

		features[i] = database.Feature{
			Name:          featureName,
			VersionFormat: featureVersionFormat,
			Version:       strconv.Itoa(version),
		}

		nsFeatures[i] = database.NamespacedFeature{
			Namespace: namespace,
			Feature:   features[i],
		}
	}

	// insert features
	if !assert.Nil(t, tx.PersistFeatures(features)) {
		t.FailNow()
	}

	// Generate vulnerabilities.
	vulnerabilities := []database.VulnerabilityWithAffected{}
	for i := 0; i < numVulnerabilities; i++ {
		// any version less than this is vulnerable
		version := rand.Intn(numFeatures) + 1

		vulnerability := database.VulnerabilityWithAffected{
			Vulnerability: database.Vulnerability{
				Name:      uuid.New(),
				Namespace: namespace,
				Severity:  database.UnknownSeverity,
			},
			Affected: []database.AffectedFeature{
				{
					Namespace:       namespace,
					FeatureName:     featureName,
					AffectedVersion: strconv.Itoa(version),
					FixedInVersion:  strconv.Itoa(version),
				},
			},
		}

		vulnerabilities = append(vulnerabilities, vulnerability)
	}
	// NOTE(review): the Commit error is silently ignored — consider
	// asserting on it.
	tx.Commit()

	return nsFeatures, vulnerabilities
}
|
||||
|
||||
func TestConcurrency(t *testing.T) {
|
||||
store, err := openDatabaseForTest("Concurrency", false)
|
||||
if !assert.Nil(t, err) {
|
||||
t.FailNow()
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
start := time.Now()
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(100)
|
||||
for i := 0; i < 100; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
nsNamespaces := genRandomNamespaces(t, 100)
|
||||
tx, err := store.Begin()
|
||||
if !assert.Nil(t, err) {
|
||||
t.FailNow()
|
||||
}
|
||||
assert.Nil(t, tx.PersistNamespaces(nsNamespaces))
|
||||
tx.Commit()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
fmt.Println("total", time.Since(start))
|
||||
}
|
||||
|
||||
func genRandomNamespaces(t *testing.T, count int) []database.Namespace {
|
||||
r := make([]database.Namespace, count)
|
||||
for i := 0; i < count; i++ {
|
||||
r[i] = database.Namespace{
|
||||
Name: uuid.New(),
|
||||
VersionFormat: "dpkg",
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// TestCaching concurrently inserts namespaced features and vulnerabilities,
// then verifies that FindAffectedNamespacedFeatures reports, for each
// feature, exactly the vulnerabilities whose affected version range covers
// the feature's version.
func TestCaching(t *testing.T) {
	store, err := openDatabaseForTest("Caching", false)
	if !assert.Nil(t, err) {
		t.FailNow()
	}
	defer store.Close()

	nsFeatures, vulnerabilities := testGenRandomVulnerabilityAndNamespacedFeature(t, store)

	fmt.Printf("%d features, %d vulnerabilities are generated", len(nsFeatures), len(vulnerabilities))

	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		tx, err := store.Begin()
		// NOTE(review): t.FailNow must not be called from a goroutine other
		// than the one running the test function; this should return after
		// the assert instead — confirm and fix.
		if !assert.Nil(t, err) {
			t.FailNow()
		}

		assert.Nil(t, tx.PersistNamespacedFeatures(nsFeatures))
		fmt.Println("finished to insert namespaced features")

		tx.Commit()
	}()

	go func() {
		defer wg.Done()
		tx, err := store.Begin()
		// NOTE(review): same FailNow-from-goroutine concern as above.
		if !assert.Nil(t, err) {
			t.FailNow()
		}

		assert.Nil(t, tx.InsertVulnerabilities(vulnerabilities))
		fmt.Println("finished to insert vulnerabilities")
		tx.Commit()

	}()

	wg.Wait()

	tx, err := store.Begin()
	if !assert.Nil(t, err) {
		t.FailNow()
	}
	defer tx.Rollback()

	// Verify consistency now.
	affected, err := tx.FindAffectedNamespacedFeatures(nsFeatures)
	if !assert.Nil(t, err) {
		t.FailNow()
	}

	for _, ansf := range affected {
		if !assert.True(t, ansf.Valid) {
			t.FailNow()
		}

		// Recompute, from the generated vulnerabilities, which ones should
		// affect this feature's version.
		expectedAffectedNames := []string{}
		for _, vuln := range vulnerabilities {
			if ok, err := versionfmt.InRange(dpkg.ParserName, ansf.Version, vuln.Affected[0].AffectedVersion); err == nil {
				if ok {
					expectedAffectedNames = append(expectedAffectedNames, vuln.Name)
				}
			}
		}

		actualAffectedNames := []string{}
		for _, s := range ansf.AffectedBy {
			actualAffectedNames = append(actualAffectedNames, s.Name)
		}

		// Both set differences must be empty, i.e. the name sets match.
		assert.Len(t, strutil.CompareStringLists(expectedAffectedNames, actualAffectedNames), 0)
		assert.Len(t, strutil.CompareStringLists(actualAffectedNames, expectedAffectedNames), 0)
	}
}
|
@ -0,0 +1,132 @@
|
||||
// Copyright 2018 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package detector
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
)
|
||||
|
||||
const (
|
||||
soiDetector = `
|
||||
INSERT INTO detector (name, version, dtype)
|
||||
SELECT CAST ($1 AS TEXT), CAST ($2 AS TEXT), CAST ($3 AS detector_type )
|
||||
WHERE NOT EXISTS (SELECT id FROM detector WHERE name = $1 AND version = $2 AND dtype = $3);`
|
||||
|
||||
findDetectorID = `SELECT id FROM detector WHERE name = $1 AND version = $2 AND dtype = $3`
|
||||
findAllDetectors = `SELECT id, name, version, dtype FROM detector`
|
||||
)
|
||||
|
||||
// DetectorMap indexes the detectors known to the database in both
// directions: by their database row ID and by their value.
type DetectorMap struct {
	// ByID maps a detector row ID to the detector value.
	ByID map[int64]database.Detector
	// ByValue maps a detector value back to its row ID.
	ByValue map[database.Detector]int64
}
|
||||
|
||||
func PersistDetectors(tx *sql.Tx, detectors []database.Detector) error {
|
||||
for _, d := range detectors {
|
||||
if !d.Valid() {
|
||||
log.WithField("detector", d).Debug("Invalid Detector")
|
||||
return database.ErrInvalidParameters
|
||||
}
|
||||
|
||||
r, err := tx.Exec(soiDetector, d.Name, d.Version, d.DType)
|
||||
if err != nil {
|
||||
return util.HandleError("soiDetector", err)
|
||||
}
|
||||
|
||||
count, err := r.RowsAffected()
|
||||
if err != nil {
|
||||
return util.HandleError("soiDetector", err)
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
log.Debug("detector already exists: ", d)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// findDetectorIDs retrieve ids of the detectors from the database, if any is not
|
||||
// found, return the error.
|
||||
func FindDetectorIDs(tx *sql.Tx, detectors []database.Detector) ([]int64, error) {
|
||||
ids := []int64{}
|
||||
for _, d := range detectors {
|
||||
id := sql.NullInt64{}
|
||||
err := tx.QueryRow(findDetectorID, d.Name, d.Version, d.DType).Scan(&id)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("findDetectorID", err)
|
||||
}
|
||||
|
||||
if !id.Valid {
|
||||
return nil, database.ErrInconsistent
|
||||
}
|
||||
|
||||
ids = append(ids, id.Int64)
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
func GetDetectors(tx *sql.Tx, query string, id int64) ([]database.Detector, error) {
|
||||
rows, err := tx.Query(query, id)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("getDetectors", err)
|
||||
}
|
||||
|
||||
detectors := []database.Detector{}
|
||||
for rows.Next() {
|
||||
d := database.Detector{}
|
||||
err := rows.Scan(&d.Name, &d.Version, &d.DType)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("getDetectors", err)
|
||||
}
|
||||
|
||||
if !d.Valid() {
|
||||
return nil, database.ErrInvalidDetector
|
||||
}
|
||||
|
||||
detectors = append(detectors, d)
|
||||
}
|
||||
|
||||
return detectors, nil
|
||||
}
|
||||
|
||||
func FindAllDetectors(tx *sql.Tx) (DetectorMap, error) {
|
||||
rows, err := tx.Query(findAllDetectors)
|
||||
if err != nil {
|
||||
return DetectorMap{}, util.HandleError("searchAllDetectors", err)
|
||||
}
|
||||
|
||||
detectors := DetectorMap{ByID: make(map[int64]database.Detector), ByValue: make(map[database.Detector]int64)}
|
||||
for rows.Next() {
|
||||
var (
|
||||
id int64
|
||||
d database.Detector
|
||||
)
|
||||
if err := rows.Scan(&id, &d.Name, &d.Version, &d.DType); err != nil {
|
||||
return DetectorMap{}, util.HandleError("searchAllDetectors", err)
|
||||
}
|
||||
|
||||
detectors.ByID[id] = d
|
||||
detectors.ByValue[d] = id
|
||||
}
|
||||
|
||||
return detectors, nil
|
||||
}
|
@ -0,0 +1,121 @@
|
||||
// Copyright 2018 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package detector
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/deckarep/golang-set"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/testutil"
|
||||
)
|
||||
|
||||
func testGetAllDetectors(tx *sql.Tx) []database.Detector {
|
||||
query := `SELECT name, version, dtype FROM detector`
|
||||
rows, err := tx.Query(query)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
detectors := []database.Detector{}
|
||||
for rows.Next() {
|
||||
d := database.Detector{}
|
||||
if err := rows.Scan(&d.Name, &d.Version, &d.DType); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
detectors = append(detectors, d)
|
||||
}
|
||||
|
||||
return detectors
|
||||
}
|
||||
|
||||
// persistDetectorTests is the table for TestPersistDetector. A non-empty
// err means PersistDetectors is expected to fail with exactly that error
// message; otherwise the persisted rows are verified.
var persistDetectorTests = []struct {
	title string
	in    []database.Detector
	err   string
}{
	{
		// A zero-value detector is invalid and must be rejected.
		title: "invalid detector",
		in: []database.Detector{
			{},
			database.NewFeatureDetector("name", "2.0"),
		},
		err: database.ErrInvalidParameters.Error(),
	},
	{
		// An unrecognized detector type must be rejected.
		title: "invalid detector 2",
		in: []database.Detector{
			database.NewFeatureDetector("name", "2.0"),
			{"name", "1.0", "random not valid dtype"},
		},
		err: database.ErrInvalidParameters.Error(),
	},
	{
		// Detectors differing in version or type are distinct rows.
		title: "detectors with some different fields",
		in: []database.Detector{
			database.NewFeatureDetector("name", "2.0"),
			database.NewFeatureDetector("name", "1.0"),
			database.NewNamespaceDetector("name", "1.0"),
		},
	},
	{
		// Duplicates within a single call must be collapsed.
		title: "duplicated detectors (parameter level)",
		in: []database.Detector{
			database.NewFeatureDetector("name", "1.0"),
			database.NewFeatureDetector("name", "1.0"),
		},
	},
	{
		// Duplicates against rows already in the database must be no-ops.
		title: "duplicated detectors (db level)",
		in: []database.Detector{
			database.NewNamespaceDetector("os-release", "1.0"),
			database.NewNamespaceDetector("os-release", "1.0"),
			database.NewFeatureDetector("dpkg", "1.0"),
		},
	},
}
|
||||
|
||||
func TestPersistDetector(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTxWithFixtures(t, "PersistDetector")
|
||||
defer cleanup()
|
||||
|
||||
for _, test := range persistDetectorTests {
|
||||
t.Run(test.title, func(t *testing.T) {
|
||||
err := PersistDetectors(tx, test.in)
|
||||
if test.err != "" {
|
||||
require.EqualError(t, err, test.err)
|
||||
return
|
||||
}
|
||||
|
||||
detectors := testGetAllDetectors(tx)
|
||||
|
||||
// ensure no duplicated detectors
|
||||
detectorSet := mapset.NewSet()
|
||||
for _, d := range detectors {
|
||||
require.False(t, detectorSet.Contains(d), "duplicated: %v", d)
|
||||
detectorSet.Add(d)
|
||||
}
|
||||
|
||||
// ensure all persisted detectors are actually saved
|
||||
for _, d := range test.in {
|
||||
require.True(t, detectorSet.Contains(d), "detector: %v, detectors: %v", d, detectorSet)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -1,381 +0,0 @@
|
||||
// Copyright 2017 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"sort"
|
||||
|
||||
"github.com/lib/pq"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/ext/versionfmt"
|
||||
"github.com/coreos/clair/pkg/commonerr"
|
||||
)
|
||||
|
||||
var (
	// errFeatureNotFound is returned when a requested feature does not
	// exist in the database.
	errFeatureNotFound = errors.New("Feature not found")
)
|
||||
|
||||
// vulnerabilityAffecting links a vulnerability to the affected-feature
// row that introduced it.
type vulnerabilityAffecting struct {
	vulnerabilityID int64
	addedByID       int64
}
|
||||
|
||||
func (tx *pgSession) PersistFeatures(features []database.Feature) error {
|
||||
if len(features) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sorting is needed before inserting into database to prevent deadlock.
|
||||
sort.Slice(features, func(i, j int) bool {
|
||||
return features[i].Name < features[j].Name ||
|
||||
features[i].Version < features[j].Version ||
|
||||
features[i].VersionFormat < features[j].VersionFormat
|
||||
})
|
||||
|
||||
// TODO(Sida): A better interface for bulk insertion is needed.
|
||||
keys := make([]interface{}, len(features)*3)
|
||||
for i, f := range features {
|
||||
keys[i*3] = f.Name
|
||||
keys[i*3+1] = f.Version
|
||||
keys[i*3+2] = f.VersionFormat
|
||||
if f.Name == "" || f.Version == "" || f.VersionFormat == "" {
|
||||
return commonerr.NewBadRequestError("Empty feature name, version or version format is not allowed")
|
||||
}
|
||||
}
|
||||
|
||||
_, err := tx.Exec(queryPersistFeature(len(features)), keys...)
|
||||
return handleError("queryPersistFeature", err)
|
||||
}
|
||||
|
||||
// namespacedFeatureWithID pairs a namespaced feature with its database ID.
type namespacedFeatureWithID struct {
	database.NamespacedFeature

	ID int64
}
|
||||
|
||||
// vulnerabilityCache is one row of the vulnerability / namespaced-feature
// cache: the feature, the vulnerability, and the affected-version entry
// that links them.
type vulnerabilityCache struct {
	nsFeatureID     int64
	vulnID          int64
	vulnAffectingID int64
}
|
||||
|
||||
// searchAffectingVulnerabilities finds the vulnerabilities that potentially
// affect the given namespaced features, keeping only those whose
// affected-version expression actually matches the feature's version.
// Every feature must already exist in the database.
func (tx *pgSession) searchAffectingVulnerabilities(features []database.NamespacedFeature) ([]vulnerabilityCache, error) {
	if len(features) == 0 {
		return nil, nil
	}

	ids, err := tx.findNamespacedFeatureIDs(features)
	if err != nil {
		return nil, err
	}

	// fMap maps a feature's database ID back to the input feature so the
	// version check below can read its version and format.
	fMap := map[int64]database.NamespacedFeature{}
	for i, f := range features {
		if !ids[i].Valid {
			// A requested feature is missing from the database.
			return nil, errFeatureNotFound
		}
		fMap[ids[i].Int64] = f
	}

	cacheTable := []vulnerabilityCache{}
	rows, err := tx.Query(searchPotentialAffectingVulneraibilities, pq.Array(ids))
	if err != nil {
		return nil, handleError("searchPotentialAffectingVulneraibilities", err)
	}

	defer rows.Close()
	for rows.Next() {
		var (
			cache    vulnerabilityCache
			affected string
		)

		err := rows.Scan(&cache.nsFeatureID, &cache.vulnID, &affected, &cache.vulnAffectingID)
		if err != nil {
			return nil, err
		}

		// Keep only candidates whose affected-version expression matches
		// the feature's version under its version format.
		if ok, err := versionfmt.InRange(fMap[cache.nsFeatureID].VersionFormat, fMap[cache.nsFeatureID].Version, affected); err != nil {
			return nil, err
		} else if ok {
			cacheTable = append(cacheTable, cache)
		}
	}

	return cacheTable, nil
}
|
||||
|
||||
func (tx *pgSession) CacheAffectedNamespacedFeatures(features []database.NamespacedFeature) error {
|
||||
if len(features) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := tx.Exec(lockVulnerabilityAffects)
|
||||
if err != nil {
|
||||
return handleError("lockVulnerabilityAffects", err)
|
||||
}
|
||||
|
||||
cache, err := tx.searchAffectingVulnerabilities(features)
|
||||
|
||||
keys := make([]interface{}, len(cache)*3)
|
||||
for i, c := range cache {
|
||||
keys[i*3] = c.vulnID
|
||||
keys[i*3+1] = c.nsFeatureID
|
||||
keys[i*3+2] = c.vulnAffectingID
|
||||
}
|
||||
|
||||
if len(cache) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
affected, err := tx.Exec(queryPersistVulnerabilityAffectedNamespacedFeature(len(cache)), keys...)
|
||||
if err != nil {
|
||||
return handleError("persistVulnerabilityAffectedNamespacedFeature", err)
|
||||
}
|
||||
if count, err := affected.RowsAffected(); err != nil {
|
||||
log.Debugf("Cached %d features in vulnerability_affected_namespaced_feature", count)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tx *pgSession) PersistNamespacedFeatures(features []database.NamespacedFeature) error {
|
||||
if len(features) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
nsIDs := map[database.Namespace]sql.NullInt64{}
|
||||
fIDs := map[database.Feature]sql.NullInt64{}
|
||||
for _, f := range features {
|
||||
nsIDs[f.Namespace] = sql.NullInt64{}
|
||||
fIDs[f.Feature] = sql.NullInt64{}
|
||||
}
|
||||
|
||||
fToFind := []database.Feature{}
|
||||
for f := range fIDs {
|
||||
fToFind = append(fToFind, f)
|
||||
}
|
||||
|
||||
sort.Slice(fToFind, func(i, j int) bool {
|
||||
return fToFind[i].Name < fToFind[j].Name ||
|
||||
fToFind[i].Version < fToFind[j].Version ||
|
||||
fToFind[i].VersionFormat < fToFind[j].VersionFormat
|
||||
})
|
||||
|
||||
if ids, err := tx.findFeatureIDs(fToFind); err == nil {
|
||||
for i, id := range ids {
|
||||
if !id.Valid {
|
||||
return errFeatureNotFound
|
||||
}
|
||||
fIDs[fToFind[i]] = id
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
nsToFind := []database.Namespace{}
|
||||
for ns := range nsIDs {
|
||||
nsToFind = append(nsToFind, ns)
|
||||
}
|
||||
|
||||
if ids, err := tx.findNamespaceIDs(nsToFind); err == nil {
|
||||
for i, id := range ids {
|
||||
if !id.Valid {
|
||||
return errNamespaceNotFound
|
||||
}
|
||||
nsIDs[nsToFind[i]] = id
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
keys := make([]interface{}, len(features)*2)
|
||||
for i, f := range features {
|
||||
keys[i*2] = fIDs[f.Feature]
|
||||
keys[i*2+1] = nsIDs[f.Namespace]
|
||||
}
|
||||
|
||||
_, err := tx.Exec(queryPersistNamespacedFeature(len(features)), keys...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FindAffectedNamespacedFeatures looks up cache table and retrieves all
// vulnerabilities associated with the features. The returned slice is
// parallel to the input; entries for features absent from the database
// have Valid == false.
func (tx *pgSession) FindAffectedNamespacedFeatures(features []database.NamespacedFeature) ([]database.NullableAffectedNamespacedFeature, error) {
	if len(features) == 0 {
		return nil, nil
	}

	returnFeatures := make([]database.NullableAffectedNamespacedFeature, len(features))

	// featureMap is used to keep track of duplicated features.
	featureMap := map[database.NamespacedFeature][]*database.NullableAffectedNamespacedFeature{}
	// initialize return value and generate unique feature request queries.
	for i, f := range features {
		returnFeatures[i] = database.NullableAffectedNamespacedFeature{
			AffectedNamespacedFeature: database.AffectedNamespacedFeature{
				NamespacedFeature: f,
			},
		}

		featureMap[f] = append(featureMap[f], &returnFeatures[i])
	}

	// query unique namespaced features
	distinctFeatures := []database.NamespacedFeature{}
	for f := range featureMap {
		distinctFeatures = append(distinctFeatures, f)
	}

	nsFeatureIDs, err := tx.findNamespacedFeatureIDs(distinctFeatures)
	if err != nil {
		return nil, err
	}

	// Only features that resolved to a database ID are queried; unknown
	// features keep their zero Valid flag in the result.
	toQuery := []int64{}
	featureIDMap := map[int64][]*database.NullableAffectedNamespacedFeature{}
	for i, id := range nsFeatureIDs {
		if id.Valid {
			toQuery = append(toQuery, id.Int64)
			for _, f := range featureMap[distinctFeatures[i]] {
				f.Valid = id.Valid
				featureIDMap[id.Int64] = append(featureIDMap[id.Int64], f)
			}
		}
	}

	rows, err := tx.Query(searchNamespacedFeaturesVulnerabilities, pq.Array(toQuery))
	if err != nil {
		return nil, handleError("searchNamespacedFeaturesVulnerabilities", err)
	}
	defer rows.Close()

	for rows.Next() {
		var (
			featureID int64
			vuln      database.VulnerabilityWithFixedIn
		)
		err := rows.Scan(&featureID,
			&vuln.Name,
			&vuln.Description,
			&vuln.Link,
			&vuln.Severity,
			&vuln.Metadata,
			&vuln.FixedInVersion,
			&vuln.Namespace.Name,
			&vuln.Namespace.VersionFormat,
		)
		if err != nil {
			return nil, handleError("searchNamespacedFeaturesVulnerabilities", err)
		}

		// Fan the vulnerability out to every (possibly duplicated) input
		// entry that maps to this database ID.
		for _, f := range featureIDMap[featureID] {
			f.AffectedBy = append(f.AffectedBy, vuln)
		}
	}

	return returnFeatures, nil
}
|
||||
|
||||
// findNamespacedFeatureIDs resolves the database ID of each namespaced
// feature in nfs. The returned slice is parallel to nfs; entries for
// features not present in the database are invalid (NULL).
func (tx *pgSession) findNamespacedFeatureIDs(nfs []database.NamespacedFeature) ([]sql.NullInt64, error) {
	if len(nfs) == 0 {
		return nil, nil
	}

	nfsMap := map[database.NamespacedFeature]sql.NullInt64{}
	// Four bind values per feature: name, version, version format, and
	// namespace name.
	keys := make([]interface{}, len(nfs)*4)
	for i, nf := range nfs {
		keys[i*4] = nfs[i].Name
		keys[i*4+1] = nfs[i].Version
		keys[i*4+2] = nfs[i].VersionFormat
		keys[i*4+3] = nfs[i].Namespace.Name
		nfsMap[nf] = sql.NullInt64{}
	}

	rows, err := tx.Query(querySearchNamespacedFeature(len(nfs)), keys...)
	if err != nil {
		return nil, handleError("searchNamespacedFeature", err)
	}

	defer rows.Close()
	var (
		id sql.NullInt64
		nf database.NamespacedFeature
	)

	for rows.Next() {
		err := rows.Scan(&id, &nf.Name, &nf.Version, &nf.VersionFormat, &nf.Namespace.Name)
		// The namespace's version format is not selected; mirror the
		// feature's format so nf compares equal to the map keys built above.
		nf.Namespace.VersionFormat = nf.VersionFormat
		if err != nil {
			return nil, handleError("searchNamespacedFeature", err)
		}
		nfsMap[nf] = id
	}

	// Re-emit IDs in input order; unmatched features keep the zero
	// (invalid) NullInt64 seeded above.
	ids := make([]sql.NullInt64, len(nfs))
	for i, nf := range nfs {
		ids[i] = nfsMap[nf]
	}

	return ids, nil
}
|
||||
|
||||
func (tx *pgSession) findFeatureIDs(fs []database.Feature) ([]sql.NullInt64, error) {
|
||||
if len(fs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
fMap := map[database.Feature]sql.NullInt64{}
|
||||
|
||||
keys := make([]interface{}, len(fs)*3)
|
||||
for i, f := range fs {
|
||||
keys[i*3] = f.Name
|
||||
keys[i*3+1] = f.Version
|
||||
keys[i*3+2] = f.VersionFormat
|
||||
fMap[f] = sql.NullInt64{}
|
||||
}
|
||||
|
||||
rows, err := tx.Query(querySearchFeatureID(len(fs)), keys...)
|
||||
if err != nil {
|
||||
return nil, handleError("querySearchFeatureID", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var (
|
||||
id sql.NullInt64
|
||||
f database.Feature
|
||||
)
|
||||
for rows.Next() {
|
||||
err := rows.Scan(&id, &f.Name, &f.Version, &f.VersionFormat)
|
||||
if err != nil {
|
||||
return nil, handleError("querySearchFeatureID", err)
|
||||
}
|
||||
fMap[f] = id
|
||||
}
|
||||
|
||||
ids := make([]sql.NullInt64, len(fs))
|
||||
for i, f := range fs {
|
||||
ids[i] = fMap[f]
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
@ -0,0 +1,121 @@
|
||||
// Copyright 2017 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package feature
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
"github.com/coreos/clair/pkg/commonerr"
|
||||
)
|
||||
|
||||
func queryPersistFeature(count int) string {
|
||||
return util.QueryPersist(count,
|
||||
"feature",
|
||||
"feature_name_version_version_format_type_key",
|
||||
"name",
|
||||
"version",
|
||||
"version_format",
|
||||
"type")
|
||||
}
|
||||
|
||||
func querySearchFeatureID(featureCount int) string {
|
||||
return fmt.Sprintf(`
|
||||
SELECT id, name, version, version_format, type
|
||||
FROM Feature WHERE (name, version, version_format, type) IN (%s)`,
|
||||
util.QueryString(4, featureCount),
|
||||
)
|
||||
}
|
||||
|
||||
func PersistFeatures(tx *sql.Tx, features []database.Feature) error {
|
||||
if len(features) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
types, err := GetFeatureTypeMap(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sorting is needed before inserting into database to prevent deadlock.
|
||||
sort.Slice(features, func(i, j int) bool {
|
||||
return features[i].Name < features[j].Name ||
|
||||
features[i].Version < features[j].Version ||
|
||||
features[i].VersionFormat < features[j].VersionFormat
|
||||
})
|
||||
|
||||
// TODO(Sida): A better interface for bulk insertion is needed.
|
||||
keys := make([]interface{}, 0, len(features)*3)
|
||||
for _, f := range features {
|
||||
keys = append(keys, f.Name, f.Version, f.VersionFormat, types.ByName[f.Type])
|
||||
if f.Name == "" || f.Version == "" || f.VersionFormat == "" {
|
||||
return commonerr.NewBadRequestError("Empty feature name, version or version format is not allowed")
|
||||
}
|
||||
}
|
||||
|
||||
_, err = tx.Exec(queryPersistFeature(len(features)), keys...)
|
||||
return util.HandleError("queryPersistFeature", err)
|
||||
}
|
||||
|
||||
func FindFeatureIDs(tx *sql.Tx, fs []database.Feature) ([]sql.NullInt64, error) {
|
||||
if len(fs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
types, err := GetFeatureTypeMap(tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fMap := map[database.Feature]sql.NullInt64{}
|
||||
|
||||
keys := make([]interface{}, 0, len(fs)*4)
|
||||
for _, f := range fs {
|
||||
typeID := types.ByName[f.Type]
|
||||
keys = append(keys, f.Name, f.Version, f.VersionFormat, typeID)
|
||||
fMap[f] = sql.NullInt64{}
|
||||
}
|
||||
|
||||
rows, err := tx.Query(querySearchFeatureID(len(fs)), keys...)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("querySearchFeatureID", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var (
|
||||
id sql.NullInt64
|
||||
f database.Feature
|
||||
)
|
||||
for rows.Next() {
|
||||
var typeID int
|
||||
err := rows.Scan(&id, &f.Name, &f.Version, &f.VersionFormat, &typeID)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("querySearchFeatureID", err)
|
||||
}
|
||||
|
||||
f.Type = types.ByID[typeID]
|
||||
fMap[f] = id
|
||||
}
|
||||
|
||||
ids := make([]sql.NullInt64, len(fs))
|
||||
for i, f := range fs {
|
||||
ids[i] = fMap[f]
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
@ -0,0 +1,154 @@
|
||||
// Copyright 2016 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package feature
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/testutil"
|
||||
)
|
||||
|
||||
// TestPersistFeatures checks that an invalid (zero-value) feature is
// rejected and that persisting the same valid feature twice does not
// create duplicate rows.
func TestPersistFeatures(t *testing.T) {
	tx, cleanup := testutil.CreateTestTx(t, "TestPersistFeatures")
	defer cleanup()

	invalid := database.Feature{}
	valid := *database.NewBinaryPackage("mount", "2.31.1-0.4ubuntu3.1", "dpkg")

	// invalid
	require.NotNil(t, PersistFeatures(tx, []database.Feature{invalid}))
	// existing
	require.Nil(t, PersistFeatures(tx, []database.Feature{valid}))
	require.Nil(t, PersistFeatures(tx, []database.Feature{valid}))

	// Exactly one row should exist despite the double insert.
	features := selectAllFeatures(t, tx)
	assert.Equal(t, []database.Feature{valid}, features)
}
|
||||
|
||||
// TestPersistNamespacedFeatures checks that namespaced features referring
// to unknown features or namespaces are rejected, and that valid ones are
// persisted and retrievable.
func TestPersistNamespacedFeatures(t *testing.T) {
	tx, cleanup := testutil.CreateTestTxWithFixtures(t, "TestPersistNamespacedFeatures")
	defer cleanup()

	// existing features
	f1 := database.NewSourcePackage("ourchat", "0.5", "dpkg")
	// non-existing features
	f2 := database.NewSourcePackage("fake!", "", "")
	// existing namespace
	n1 := database.NewNamespace("debian:7", "dpkg")
	// non-existing namespace
	n2 := database.NewNamespace("debian:non", "dpkg")
	// existing namespaced feature
	nf1 := database.NewNamespacedFeature(n1, f1)
	// invalid namespaced feature
	nf2 := database.NewNamespacedFeature(n2, f2)
	// namespaced features with namespaces or features not in the database will
	// generate error.
	assert.Nil(t, PersistNamespacedFeatures(tx, []database.NamespacedFeature{}))
	assert.NotNil(t, PersistNamespacedFeatures(tx, []database.NamespacedFeature{*nf1, *nf2}))
	// valid case: insert nf3
	assert.Nil(t, PersistNamespacedFeatures(tx, []database.NamespacedFeature{*nf1}))

	all := listNamespacedFeatures(t, tx)
	assert.Contains(t, all, *nf1)
}
|
||||
|
||||
func listNamespacedFeatures(t *testing.T, tx *sql.Tx) []database.NamespacedFeature {
|
||||
types, err := GetFeatureTypeMap(tx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
rows, err := tx.Query(`SELECT f.name, f.version, f.version_format, f.type, n.name, n.version_format
|
||||
FROM feature AS f, namespace AS n, namespaced_feature AS nf
|
||||
WHERE nf.feature_id = f.id AND nf.namespace_id = n.id`)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
nf := []database.NamespacedFeature{}
|
||||
for rows.Next() {
|
||||
f := database.NamespacedFeature{}
|
||||
var typeID int
|
||||
err := rows.Scan(&f.Name, &f.Version, &f.VersionFormat, &typeID, &f.Namespace.Name, &f.Namespace.VersionFormat)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
f.Type = types.ByID[typeID]
|
||||
nf = append(nf, f)
|
||||
}
|
||||
|
||||
return nf
|
||||
}
|
||||
|
||||
func selectAllFeatures(t *testing.T, tx *sql.Tx) []database.Feature {
|
||||
types, err := GetFeatureTypeMap(tx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
rows, err := tx.Query("SELECT name, version, version_format, type FROM feature")
|
||||
if err != nil {
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
fs := []database.Feature{}
|
||||
for rows.Next() {
|
||||
f := database.Feature{}
|
||||
var typeID int
|
||||
err := rows.Scan(&f.Name, &f.Version, &f.VersionFormat, &typeID)
|
||||
f.Type = types.ByID[typeID]
|
||||
if err != nil {
|
||||
t.FailNow()
|
||||
}
|
||||
fs = append(fs, f)
|
||||
}
|
||||
return fs
|
||||
}
|
||||
|
||||
// TestFindNamespacedFeatureIDs verifies that FindNamespacedFeatureIDs
// resolves IDs for existing and duplicated features, and returns an
// invalid (NULL) ID for a feature that is not in the database.
func TestFindNamespacedFeatureIDs(t *testing.T) {
	tx, cleanup := testutil.CreateTestTxWithFixtures(t, "TestFindNamespacedFeatureIDs")
	defer cleanup()

	features := []database.NamespacedFeature{}
	expectedIDs := []int{}
	// Map iteration order is random, but features and expectedIDs are
	// appended in the same iteration, so they stay index-aligned.
	for id, feature := range testutil.RealNamespacedFeatures {
		features = append(features, feature)
		expectedIDs = append(expectedIDs, id)
	}

	features = append(features, testutil.RealNamespacedFeatures[1]) // test duplicated
	expectedIDs = append(expectedIDs, 1)

	namespace := testutil.RealNamespaces[1]
	features = append(features, *database.NewNamespacedFeature(&namespace, database.NewBinaryPackage("not-found", "1.0", "dpkg"))) // test not found feature

	ids, err := FindNamespacedFeatureIDs(tx, features)
	require.Nil(t, err)
	require.Len(t, ids, len(expectedIDs)+1)
	// The last entry is the not-found feature; every other entry must
	// resolve to its expected database ID.
	for i, id := range ids {
		if i == len(ids)-1 {
			require.False(t, id.Valid)
		} else {
			require.True(t, id.Valid)
			require.Equal(t, expectedIDs[i], int(id.Int64))
		}
	}
}
|
@ -0,0 +1,57 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package feature
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
)
|
||||
|
||||
const (
|
||||
selectAllFeatureTypes = `SELECT id, name FROM feature_type`
|
||||
)
|
||||
|
||||
// FeatureTypes is a bidirectional mapping between feature-type database
// IDs and their database.FeatureType names.
type FeatureTypes struct {
	ByID   map[int]database.FeatureType
	ByName map[database.FeatureType]int
}
|
||||
|
||||
func newFeatureTypes() *FeatureTypes {
|
||||
return &FeatureTypes{make(map[int]database.FeatureType), make(map[database.FeatureType]int)}
|
||||
}
|
||||
|
||||
func GetFeatureTypeMap(tx *sql.Tx) (*FeatureTypes, error) {
|
||||
rows, err := tx.Query(selectAllFeatureTypes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
types := newFeatureTypes()
|
||||
for rows.Next() {
|
||||
var (
|
||||
id int
|
||||
name database.FeatureType
|
||||
)
|
||||
if err := rows.Scan(&id, &name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
types.ByID[id] = name
|
||||
types.ByName[name] = id
|
||||
}
|
||||
|
||||
return types, nil
|
||||
}
|
@ -0,0 +1,39 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package feature
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/testutil"
|
||||
)
|
||||
|
||||
func TestGetFeatureTypeMap(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTx(t, "TestGetFeatureTypeMap")
|
||||
defer cleanup()
|
||||
|
||||
types, err := GetFeatureTypeMap(tx)
|
||||
if err != nil {
|
||||
require.Nil(t, err, err.Error())
|
||||
}
|
||||
|
||||
require.Equal(t, database.SourcePackage, types.ByID[1])
|
||||
require.Equal(t, database.BinaryPackage, types.ByID[2])
|
||||
require.Equal(t, 1, types.ByName[database.SourcePackage])
|
||||
require.Equal(t, 2, types.ByName[database.BinaryPackage])
|
||||
}
|
@ -0,0 +1,168 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package feature
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/namespace"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
)
|
||||
|
||||
// soiNamespacedFeature is a "select or insert" statement: it inserts a
// (feature_id, namespace_id) pair if not already present and returns the
// row's ID in either case.
var soiNamespacedFeature = `
WITH new_feature_ns AS (
	INSERT INTO namespaced_feature(feature_id, namespace_id)
	SELECT CAST ($1 AS INTEGER), CAST ($2 AS INTEGER)
	WHERE NOT EXISTS ( SELECT id FROM namespaced_feature WHERE namespaced_feature.feature_id = $1 AND namespaced_feature.namespace_id = $2)
	RETURNING id
)
SELECT id FROM namespaced_feature WHERE namespaced_feature.feature_id = $1 AND namespaced_feature.namespace_id = $2
UNION
SELECT id FROM new_feature_ns`
|
||||
|
||||
func queryPersistNamespacedFeature(count int) string {
|
||||
return util.QueryPersist(count, "namespaced_feature",
|
||||
"namespaced_feature_namespace_id_feature_id_key",
|
||||
"feature_id",
|
||||
"namespace_id")
|
||||
}
|
||||
|
||||
// querySearchNamespacedFeature builds a query resolving nsfCount
// namespaced-feature IDs keyed by (feature name, version, version format,
// type name, namespace name) tuples.
func querySearchNamespacedFeature(nsfCount int) string {
	return fmt.Sprintf(`
	SELECT nf.id, f.name, f.version, f.version_format, t.name, n.name
	FROM namespaced_feature AS nf, feature AS f, namespace AS n, feature_type AS t
	WHERE nf.feature_id = f.id
		AND nf.namespace_id = n.id
		AND n.version_format = f.version_format
		AND f.type = t.id
		AND (f.name, f.version, f.version_format, t.name, n.name) IN (%s)`,
		util.QueryString(5, nsfCount),
	)
}
|
||||
|
||||
// namespacedFeatureWithID pairs a namespaced feature with its database ID.
type namespacedFeatureWithID struct {
	database.NamespacedFeature

	ID int64
}
|
||||
|
||||
func PersistNamespacedFeatures(tx *sql.Tx, features []database.NamespacedFeature) error {
|
||||
if len(features) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
nsIDs := map[database.Namespace]sql.NullInt64{}
|
||||
fIDs := map[database.Feature]sql.NullInt64{}
|
||||
for _, f := range features {
|
||||
nsIDs[f.Namespace] = sql.NullInt64{}
|
||||
fIDs[f.Feature] = sql.NullInt64{}
|
||||
}
|
||||
|
||||
fToFind := []database.Feature{}
|
||||
for f := range fIDs {
|
||||
fToFind = append(fToFind, f)
|
||||
}
|
||||
|
||||
sort.Slice(fToFind, func(i, j int) bool {
|
||||
return fToFind[i].Name < fToFind[j].Name ||
|
||||
fToFind[i].Version < fToFind[j].Version ||
|
||||
fToFind[i].VersionFormat < fToFind[j].VersionFormat
|
||||
})
|
||||
|
||||
if ids, err := FindFeatureIDs(tx, fToFind); err == nil {
|
||||
for i, id := range ids {
|
||||
if !id.Valid {
|
||||
return database.ErrMissingEntities
|
||||
}
|
||||
fIDs[fToFind[i]] = id
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
nsToFind := []database.Namespace{}
|
||||
for ns := range nsIDs {
|
||||
nsToFind = append(nsToFind, ns)
|
||||
}
|
||||
|
||||
if ids, err := namespace.FindNamespaceIDs(tx, nsToFind); err == nil {
|
||||
for i, id := range ids {
|
||||
if !id.Valid {
|
||||
return database.ErrMissingEntities
|
||||
}
|
||||
nsIDs[nsToFind[i]] = id
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
keys := make([]interface{}, 0, len(features)*2)
|
||||
for _, f := range features {
|
||||
keys = append(keys, fIDs[f.Feature], nsIDs[f.Namespace])
|
||||
}
|
||||
|
||||
_, err := tx.Exec(queryPersistNamespacedFeature(len(features)), keys...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func FindNamespacedFeatureIDs(tx *sql.Tx, nfs []database.NamespacedFeature) ([]sql.NullInt64, error) {
|
||||
if len(nfs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
nfsMap := map[database.NamespacedFeature]int64{}
|
||||
keys := make([]interface{}, 0, len(nfs)*5)
|
||||
for _, nf := range nfs {
|
||||
keys = append(keys, nf.Name, nf.Version, nf.VersionFormat, nf.Type, nf.Namespace.Name)
|
||||
}
|
||||
|
||||
rows, err := tx.Query(querySearchNamespacedFeature(len(nfs)), keys...)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("searchNamespacedFeature", err)
|
||||
}
|
||||
|
||||
defer rows.Close()
|
||||
var (
|
||||
id int64
|
||||
nf database.NamespacedFeature
|
||||
)
|
||||
|
||||
for rows.Next() {
|
||||
err := rows.Scan(&id, &nf.Name, &nf.Version, &nf.VersionFormat, &nf.Type, &nf.Namespace.Name)
|
||||
nf.Namespace.VersionFormat = nf.VersionFormat
|
||||
if err != nil {
|
||||
return nil, util.HandleError("searchNamespacedFeature", err)
|
||||
}
|
||||
nfsMap[nf] = id
|
||||
}
|
||||
|
||||
ids := make([]sql.NullInt64, len(nfs))
|
||||
for i, nf := range nfs {
|
||||
if id, ok := nfsMap[nf]; ok {
|
||||
ids[i] = sql.NullInt64{id, true}
|
||||
} else {
|
||||
ids[i] = sql.NullInt64{}
|
||||
}
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
@ -1,256 +0,0 @@
|
||||
// Copyright 2016 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
|
||||
// register dpkg feature lister for testing
|
||||
_ "github.com/coreos/clair/ext/featurefmt/dpkg"
|
||||
)
|
||||
|
||||
// TestPersistFeatures exercises PersistFeatures with empty, invalid,
// duplicated and already-persisted feature inputs.
func TestPersistFeatures(t *testing.T) {
	datastore, tx := openSessionForTest(t, "PersistFeatures", false)
	defer closeTest(t, datastore, tx)

	f1 := database.Feature{}
	f2 := database.Feature{Name: "n", Version: "v", VersionFormat: "vf"}

	// empty
	assert.Nil(t, tx.PersistFeatures([]database.Feature{}))
	// invalid
	assert.NotNil(t, tx.PersistFeatures([]database.Feature{f1}))
	// duplicated
	assert.Nil(t, tx.PersistFeatures([]database.Feature{f2, f2}))
	// existing
	assert.Nil(t, tx.PersistFeatures([]database.Feature{f2}))

	// Only the single valid feature should have been stored.
	fs := listFeatures(t, tx)
	assert.Len(t, fs, 1)
	assert.Equal(t, f2, fs[0])
}
|
||||
|
||||
// TestPersistNamespacedFeatures verifies that namespaced features can only
// be persisted when both their namespace and feature already exist in the
// database.
func TestPersistNamespacedFeatures(t *testing.T) {
	datastore, tx := openSessionForTest(t, "PersistNamespacedFeatures", true)
	defer closeTest(t, datastore, tx)

	// existing features
	f1 := database.Feature{
		Name:          "wechat",
		Version:       "0.5",
		VersionFormat: "dpkg",
	}

	// non-existing features
	f2 := database.Feature{
		Name: "fake!",
	}

	f3 := database.Feature{
		Name:          "openssl",
		Version:       "2.0",
		VersionFormat: "dpkg",
	}

	// existing namespace
	n1 := database.Namespace{
		Name:          "debian:7",
		VersionFormat: "dpkg",
	}

	n3 := database.Namespace{
		Name:          "debian:8",
		VersionFormat: "dpkg",
	}

	// non-existing namespace
	n2 := database.Namespace{
		Name:          "debian:non",
		VersionFormat: "dpkg",
	}

	// existing namespaced feature
	nf1 := database.NamespacedFeature{
		Namespace: n1,
		Feature:   f1,
	}

	// invalid namespaced feature: neither the namespace nor the feature
	// exist in the database
	nf2 := database.NamespacedFeature{
		Namespace: n2,
		Feature:   f2,
	}

	// new namespaced feature affected by vulnerability
	nf3 := database.NamespacedFeature{
		Namespace: n3,
		Feature:   f3,
	}

	// namespaced features with namespaces or features not in the database will
	// generate error.
	assert.Nil(t, tx.PersistNamespacedFeatures([]database.NamespacedFeature{}))

	assert.NotNil(t, tx.PersistNamespacedFeatures([]database.NamespacedFeature{nf1, nf2}))
	// valid case: insert nf3
	assert.Nil(t, tx.PersistNamespacedFeatures([]database.NamespacedFeature{nf1, nf3}))

	all := listNamespacedFeatures(t, tx)
	assert.Contains(t, all, nf1)
	assert.Contains(t, all, nf3)
}
|
||||
|
||||
// TestVulnerableFeature checks that a namespaced feature cached as affected
// is reported back with the expected vulnerability.
func TestVulnerableFeature(t *testing.T) {
	datastore, tx := openSessionForTest(t, "VulnerableFeature", true)
	defer closeTest(t, datastore, tx)

	f1 := database.Feature{
		Name:          "openssl",
		Version:       "1.3",
		VersionFormat: "dpkg",
	}

	n1 := database.Namespace{
		Name:          "debian:7",
		VersionFormat: "dpkg",
	}

	nf1 := database.NamespacedFeature{
		Namespace: n1,
		Feature:   f1,
	}
	assert.Nil(t, tx.PersistFeatures([]database.Feature{f1}))
	assert.Nil(t, tx.PersistNamespacedFeatures([]database.NamespacedFeature{nf1}))
	assert.Nil(t, tx.CacheAffectedNamespacedFeatures([]database.NamespacedFeature{nf1}))
	// ensure the namespaced feature is affected correctly
	anf, err := tx.FindAffectedNamespacedFeatures([]database.NamespacedFeature{nf1})
	if assert.Nil(t, err) &&
		assert.Len(t, anf, 1) &&
		assert.True(t, anf[0].Valid) &&
		assert.Len(t, anf[0].AffectedBy, 1) {
		assert.Equal(t, "CVE-OPENSSL-1-DEB7", anf[0].AffectedBy[0].Name)
	}
}
|
||||
|
||||
// TestFindAffectedNamespacedFeatures verifies the affected-feature lookup
// against the fixture data loaded by openSessionForTest.
func TestFindAffectedNamespacedFeatures(t *testing.T) {
	datastore, tx := openSessionForTest(t, "FindAffectedNamespacedFeatures", true)
	defer closeTest(t, datastore, tx)
	ns := database.NamespacedFeature{
		Feature: database.Feature{
			Name:          "openssl",
			Version:       "1.0",
			VersionFormat: "dpkg",
		},
		Namespace: database.Namespace{
			Name:          "debian:7",
			VersionFormat: "dpkg",
		},
	}

	ans, err := tx.FindAffectedNamespacedFeatures([]database.NamespacedFeature{ns})
	if assert.Nil(t, err) &&
		assert.Len(t, ans, 1) &&
		assert.True(t, ans[0].Valid) &&
		assert.Len(t, ans[0].AffectedBy, 1) {
		assert.Equal(t, "CVE-OPENSSL-1-DEB7", ans[0].AffectedBy[0].Name)
	}
}
|
||||
|
||||
func listNamespacedFeatures(t *testing.T, tx *pgSession) []database.NamespacedFeature {
|
||||
rows, err := tx.Query(`SELECT f.name, f.version, f.version_format, n.name, n.version_format
|
||||
FROM feature AS f, namespace AS n, namespaced_feature AS nf
|
||||
WHERE nf.feature_id = f.id AND nf.namespace_id = n.id`)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
nf := []database.NamespacedFeature{}
|
||||
for rows.Next() {
|
||||
f := database.NamespacedFeature{}
|
||||
err := rows.Scan(&f.Name, &f.Version, &f.VersionFormat, &f.Namespace.Name, &f.Namespace.VersionFormat)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.FailNow()
|
||||
}
|
||||
nf = append(nf, f)
|
||||
}
|
||||
|
||||
return nf
|
||||
}
|
||||
|
||||
func listFeatures(t *testing.T, tx *pgSession) []database.Feature {
|
||||
rows, err := tx.Query("SELECT name, version, version_format FROM feature")
|
||||
if err != nil {
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
fs := []database.Feature{}
|
||||
for rows.Next() {
|
||||
f := database.Feature{}
|
||||
err := rows.Scan(&f.Name, &f.Version, &f.VersionFormat)
|
||||
if err != nil {
|
||||
t.FailNow()
|
||||
}
|
||||
fs = append(fs, f)
|
||||
}
|
||||
return fs
|
||||
}
|
||||
|
||||
func assertFeaturesEqual(t *testing.T, expected []database.Feature, actual []database.Feature) bool {
|
||||
if assert.Len(t, actual, len(expected)) {
|
||||
has := map[database.Feature]bool{}
|
||||
for _, nf := range expected {
|
||||
has[nf] = false
|
||||
}
|
||||
|
||||
for _, nf := range actual {
|
||||
has[nf] = true
|
||||
}
|
||||
|
||||
for nf, visited := range has {
|
||||
if !assert.True(t, visited, nf.Name+" is expected") {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func assertNamespacedFeatureEqual(t *testing.T, expected []database.NamespacedFeature, actual []database.NamespacedFeature) bool {
|
||||
if assert.Len(t, actual, len(expected)) {
|
||||
has := map[database.NamespacedFeature]bool{}
|
||||
for _, nf := range expected {
|
||||
has[nf] = false
|
||||
}
|
||||
|
||||
for _, nf := range actual {
|
||||
has[nf] = true
|
||||
}
|
||||
|
||||
for nf, visited := range has {
|
||||
if !assert.True(t, visited, nf.Namespace.Name+":"+nf.Name+" is expected") {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
@ -1,308 +0,0 @@
|
||||
// Copyright 2017 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"sort"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/pkg/commonerr"
|
||||
)
|
||||
|
||||
// FindLayer returns the layer with the given hash, the processors that have
// analyzed it, and whether it exists; the internal layer id is discarded.
func (tx *pgSession) FindLayer(hash string) (database.Layer, database.Processors, bool, error) {
	l, p, _, ok, err := tx.findLayer(hash)
	return l, p, ok, err
}
|
||||
|
||||
func (tx *pgSession) FindLayerWithContent(hash string) (database.LayerWithContent, bool, error) {
|
||||
var (
|
||||
layer database.LayerWithContent
|
||||
layerID int64
|
||||
ok bool
|
||||
err error
|
||||
)
|
||||
|
||||
layer.Layer, layer.ProcessedBy, layerID, ok, err = tx.findLayer(hash)
|
||||
if err != nil {
|
||||
return layer, false, err
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return layer, false, nil
|
||||
}
|
||||
|
||||
layer.Features, err = tx.findLayerFeatures(layerID)
|
||||
layer.Namespaces, err = tx.findLayerNamespaces(layerID)
|
||||
return layer, true, nil
|
||||
}
|
||||
|
||||
// PersistLayer stores the layer's hash in the database if it is not already
// present. The hash must be non-empty.
func (tx *pgSession) PersistLayer(layer database.Layer) error {
	if layer.Hash == "" {
		return commonerr.NewBadRequestError("Empty Layer Hash is not allowed")
	}

	_, err := tx.Exec(queryPersistLayer(1), layer.Hash)
	if err != nil {
		return handleError("queryPersistLayer", err)
	}

	return nil
}
|
||||
|
||||
// PersistLayerContent relates layer identified by hash with namespaces,
// features and processors provided. If the layer, namespaces, features are not
// in database, the function returns an error.
func (tx *pgSession) PersistLayerContent(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
	if hash == "" {
		return commonerr.NewBadRequestError("Empty layer hash is not allowed")
	}

	// The layer row itself must already exist; only its id is needed here.
	var layerID int64
	err := tx.QueryRow(searchLayer, hash).Scan(&layerID)
	if err != nil {
		return err
	}

	if err = tx.persistLayerNamespace(layerID, namespaces); err != nil {
		return err
	}

	if err = tx.persistLayerFeatures(layerID, features); err != nil {
		return err
	}

	if err = tx.persistLayerDetectors(layerID, processedBy.Detectors); err != nil {
		return err
	}

	if err = tx.persistLayerListers(layerID, processedBy.Listers); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
func (tx *pgSession) persistLayerDetectors(id int64, detectors []string) error {
|
||||
if len(detectors) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sorting is needed before inserting into database to prevent deadlock.
|
||||
sort.Strings(detectors)
|
||||
keys := make([]interface{}, len(detectors)*2)
|
||||
for i, d := range detectors {
|
||||
keys[i*2] = id
|
||||
keys[i*2+1] = d
|
||||
}
|
||||
_, err := tx.Exec(queryPersistLayerDetectors(len(detectors)), keys...)
|
||||
if err != nil {
|
||||
return handleError("queryPersistLayerDetectors", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tx *pgSession) persistLayerListers(id int64, listers []string) error {
|
||||
if len(listers) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sort.Strings(listers)
|
||||
keys := make([]interface{}, len(listers)*2)
|
||||
for i, d := range listers {
|
||||
keys[i*2] = id
|
||||
keys[i*2+1] = d
|
||||
}
|
||||
|
||||
_, err := tx.Exec(queryPersistLayerListers(len(listers)), keys...)
|
||||
if err != nil {
|
||||
return handleError("queryPersistLayerDetectors", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// persistLayerFeatures associates the given features with a layer. All
// features must already exist in the database.
func (tx *pgSession) persistLayerFeatures(id int64, features []database.Feature) error {
	if len(features) == 0 {
		return nil
	}

	fIDs, err := tx.findFeatureIDs(features)
	if err != nil {
		return err
	}

	ids := make([]int, len(fIDs))
	for i, fID := range fIDs {
		if !fID.Valid {
			// NOTE(review): this reports errNamespaceNotFound even though a
			// feature id is what is missing — confirm this is intentional.
			return errNamespaceNotFound
		}
		ids[i] = int(fID.Int64)
	}

	// Insert in sorted id order to keep lock acquisition deterministic
	// across concurrent bulk inserts.
	sort.IntSlice(ids).Sort()
	keys := make([]interface{}, len(features)*2)
	for i, fID := range ids {
		keys[i*2] = id
		keys[i*2+1] = fID
	}

	_, err = tx.Exec(queryPersistLayerFeature(len(features)), keys...)
	if err != nil {
		return handleError("queryPersistLayerFeature", err)
	}
	return nil
}
|
||||
|
||||
func (tx *pgSession) persistLayerNamespace(id int64, namespaces []database.Namespace) error {
|
||||
if len(namespaces) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
nsIDs, err := tx.findNamespaceIDs(namespaces)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// for every bulk persist operation, the input data should be sorted.
|
||||
ids := make([]int, len(nsIDs))
|
||||
for i, nsID := range nsIDs {
|
||||
if !nsID.Valid {
|
||||
panic(errNamespaceNotFound)
|
||||
}
|
||||
ids[i] = int(nsID.Int64)
|
||||
}
|
||||
|
||||
sort.IntSlice(ids).Sort()
|
||||
|
||||
keys := make([]interface{}, len(namespaces)*2)
|
||||
for i, nsID := range ids {
|
||||
keys[i*2] = id
|
||||
keys[i*2+1] = nsID
|
||||
}
|
||||
|
||||
_, err = tx.Exec(queryPersistLayerNamespace(len(namespaces)), keys...)
|
||||
if err != nil {
|
||||
return handleError("queryPersistLayerNamespace", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// persistProcessors records the listers and detectors that processed the
// entity identified by id, using the caller-supplied insert statements.
// The query-name arguments are used only for error reporting.
func (tx *pgSession) persistProcessors(listerQuery, listerQueryName, detectorQuery, detectorQueryName string, id int64, processors database.Processors) error {
	stmt, err := tx.Prepare(listerQuery)
	if err != nil {
		return handleError(listerQueryName, err)
	}

	for _, l := range processors.Listers {
		_, err := stmt.Exec(id, l)
		if err != nil {
			// Close the statement before bailing out mid-loop.
			stmt.Close()
			return handleError(listerQueryName, err)
		}
	}

	if err := stmt.Close(); err != nil {
		return handleError(listerQueryName, err)
	}

	// Same pattern for the detectors, with the second prepared statement.
	stmt, err = tx.Prepare(detectorQuery)
	if err != nil {
		return handleError(detectorQueryName, err)
	}

	for _, d := range processors.Detectors {
		_, err := stmt.Exec(id, d)
		if err != nil {
			stmt.Close()
			return handleError(detectorQueryName, err)
		}
	}

	if err := stmt.Close(); err != nil {
		return handleError(detectorQueryName, err)
	}

	return nil
}
|
||||
|
||||
func (tx *pgSession) findLayerNamespaces(layerID int64) ([]database.Namespace, error) {
|
||||
var namespaces []database.Namespace
|
||||
|
||||
rows, err := tx.Query(searchLayerNamespaces, layerID)
|
||||
if err != nil {
|
||||
return nil, handleError("searchLayerFeatures", err)
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
ns := database.Namespace{}
|
||||
err := rows.Scan(&ns.Name, &ns.VersionFormat)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
namespaces = append(namespaces, ns)
|
||||
}
|
||||
return namespaces, nil
|
||||
}
|
||||
|
||||
func (tx *pgSession) findLayerFeatures(layerID int64) ([]database.Feature, error) {
|
||||
var features []database.Feature
|
||||
|
||||
rows, err := tx.Query(searchLayerFeatures, layerID)
|
||||
if err != nil {
|
||||
return nil, handleError("searchLayerFeatures", err)
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
f := database.Feature{}
|
||||
err := rows.Scan(&f.Name, &f.Version, &f.VersionFormat)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
features = append(features, f)
|
||||
}
|
||||
return features, nil
|
||||
}
|
||||
|
||||
// findLayer looks up a layer by hash and returns the layer, the processors
// that analyzed it, its database id, and whether it was found.
func (tx *pgSession) findLayer(hash string) (database.Layer, database.Processors, int64, bool, error) {
	var (
		layerID    int64
		layer      = database.Layer{Hash: hash}
		processors database.Processors
	)

	if hash == "" {
		return layer, processors, layerID, false, commonerr.NewBadRequestError("Empty Layer Hash is not allowed")
	}

	err := tx.QueryRow(searchLayer, hash).Scan(&layerID)
	if err != nil {
		if err == sql.ErrNoRows {
			// A missing layer is not an error; report "not found".
			return layer, processors, layerID, false, nil
		}
		return layer, processors, layerID, false, err
	}

	processors.Detectors, err = tx.findProcessors(searchLayerDetectors, "searchLayerDetectors", "detector", layerID)
	if err != nil {
		return layer, processors, layerID, false, err
	}

	processors.Listers, err = tx.findProcessors(searchLayerListers, "searchLayerListers", "lister", layerID)
	if err != nil {
		return layer, processors, layerID, false, err
	}

	return layer, processors, layerID, true, nil
}
|
@ -0,0 +1,177 @@
|
||||
// Copyright 2017 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package layer
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/deckarep/golang-set"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/detector"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
"github.com/coreos/clair/pkg/commonerr"
|
||||
)
|
||||
|
||||
const (
	// soiLayer inserts the layer hash if absent and returns the layer id in
	// either case ("select or insert").
	soiLayer = `
	WITH new_layer AS (
		INSERT INTO layer (hash)
		SELECT CAST ($1 AS VARCHAR)
		WHERE NOT EXISTS (SELECT id FROM layer WHERE hash = $1)
		RETURNING id
	)
	SELECT id FROM new_Layer
	UNION
	SELECT id FROM layer WHERE hash = $1`

	// findLayerID resolves a layer hash to its database id.
	findLayerID = `SELECT id FROM layer WHERE hash = $1`
)
|
||||
|
||||
// FindLayer returns the layer with the given hash together with the
// detectors that processed it and its features and namespaces. The boolean
// reports whether the layer exists.
func FindLayer(tx *sql.Tx, hash string) (database.Layer, bool, error) {
	layer := database.Layer{Hash: hash}
	if hash == "" {
		return layer, false, commonerr.NewBadRequestError("non empty layer hash is expected.")
	}

	layerID, ok, err := FindLayerID(tx, hash)
	if err != nil || !ok {
		return layer, ok, err
	}

	// The detector map translates stored detector ids back into
	// database.Detector values for the feature/namespace lookups.
	detectorMap, err := detector.FindAllDetectors(tx)
	if err != nil {
		return layer, false, err
	}

	if layer.By, err = FindLayerDetectors(tx, layerID); err != nil {
		return layer, false, err
	}

	if layer.Features, err = FindLayerFeatures(tx, layerID, detectorMap); err != nil {
		return layer, false, err
	}

	if layer.Namespaces, err = FindLayerNamespaces(tx, layerID, detectorMap); err != nil {
		return layer, false, err
	}

	return layer, true, nil
}
|
||||
|
||||
func sanitizePersistLayerInput(hash string, features []database.LayerFeature, namespaces []database.LayerNamespace, detectedBy []database.Detector) error {
|
||||
if hash == "" {
|
||||
return commonerr.NewBadRequestError("expected non-empty layer hash")
|
||||
}
|
||||
|
||||
detectedBySet := mapset.NewSet()
|
||||
for _, d := range detectedBy {
|
||||
detectedBySet.Add(d)
|
||||
}
|
||||
|
||||
for _, f := range features {
|
||||
if !detectedBySet.Contains(f.By) {
|
||||
return database.ErrInvalidParameters
|
||||
}
|
||||
}
|
||||
|
||||
for _, n := range namespaces {
|
||||
if !detectedBySet.Contains(n.By) {
|
||||
return database.ErrInvalidParameters
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PersistLayer saves the content of a layer to the database. The referenced
// detectors must already exist; otherwise database.ErrMissingEntities is
// returned.
func PersistLayer(tx *sql.Tx, hash string, features []database.LayerFeature, namespaces []database.LayerNamespace, detectedBy []database.Detector) error {
	var (
		err         error
		id          int64
		detectorIDs []int64
	)

	// Reject content whose findings reference a detector not in detectedBy.
	if err = sanitizePersistLayerInput(hash, features, namespaces, detectedBy); err != nil {
		return err
	}

	// Select-or-insert the layer row itself.
	if id, err = SoiLayer(tx, hash); err != nil {
		return err
	}

	if detectorIDs, err = detector.FindDetectorIDs(tx, detectedBy); err != nil {
		// Unknown detectors are surfaced as missing entities.
		if err == commonerr.ErrNotFound {
			return database.ErrMissingEntities
		}

		return err
	}

	if err = PersistLayerDetectors(tx, id, detectorIDs); err != nil {
		return err
	}

	if err = PersistAllLayerFeatures(tx, id, features); err != nil {
		return err
	}

	if err = PersistAllLayerNamespaces(tx, id, namespaces); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
func FindLayerID(tx *sql.Tx, hash string) (int64, bool, error) {
|
||||
var layerID int64
|
||||
err := tx.QueryRow(findLayerID, hash).Scan(&layerID)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return layerID, false, nil
|
||||
}
|
||||
|
||||
return layerID, false, util.HandleError("findLayerID", err)
|
||||
}
|
||||
|
||||
return layerID, true, nil
|
||||
}
|
||||
|
||||
func FindLayerIDs(tx *sql.Tx, hashes []string) ([]int64, bool, error) {
|
||||
layerIDs := make([]int64, 0, len(hashes))
|
||||
for _, hash := range hashes {
|
||||
id, ok, err := FindLayerID(tx, hash)
|
||||
if !ok {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
layerIDs = append(layerIDs, id)
|
||||
}
|
||||
|
||||
return layerIDs, true, nil
|
||||
}
|
||||
|
||||
func SoiLayer(tx *sql.Tx, hash string) (int64, error) {
|
||||
var id int64
|
||||
if err := tx.QueryRow(soiLayer, hash).Scan(&id); err != nil {
|
||||
return 0, util.HandleError("soiLayer", err)
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
@ -0,0 +1,66 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package layer
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/deckarep/golang-set"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/detector"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
)
|
||||
|
||||
const (
	// selectLayerDetectors lists the detectors that processed a layer.
	selectLayerDetectors = `
	SELECT d.name, d.version, d.dtype
	FROM layer_detector, detector AS d
	WHERE layer_detector.detector_id = d.id AND layer_detector.layer_id = $1;`

	// persistLayerDetector inserts a layer/detector association unless it
	// already exists.
	persistLayerDetector = `
	INSERT INTO layer_detector (layer_id, detector_id)
	SELECT $1, $2
	WHERE NOT EXISTS (SELECT id FROM layer_detector WHERE layer_id = $1 AND detector_id = $2)`
)
|
||||
|
||||
func PersistLayerDetector(tx *sql.Tx, layerID int64, detectorID int64) error {
|
||||
if _, err := tx.Exec(persistLayerDetector, layerID, detectorID); err != nil {
|
||||
return util.HandleError("persistLayerDetector", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func PersistLayerDetectors(tx *sql.Tx, layerID int64, detectorIDs []int64) error {
|
||||
alreadySaved := mapset.NewSet()
|
||||
for _, id := range detectorIDs {
|
||||
if alreadySaved.Contains(id) {
|
||||
continue
|
||||
}
|
||||
|
||||
alreadySaved.Add(id)
|
||||
if err := PersistLayerDetector(tx, layerID, id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func FindLayerDetectors(tx *sql.Tx, id int64) ([]database.Detector, error) {
|
||||
detectors, err := detector.GetDetectors(tx, selectLayerDetectors, id)
|
||||
return detectors, err
|
||||
}
|
@ -0,0 +1,147 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package layer
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"sort"
|
||||
|
||||
"github.com/coreos/clair/database/pgsql/namespace"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/detector"
|
||||
"github.com/coreos/clair/database/pgsql/feature"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
)
|
||||
|
||||
// findLayerFeatures joins the layer_feature rows of one layer with their
// feature, feature-type and (possibly NULL) potential-namespace columns.
const findLayerFeatures = `
SELECT
	f.name, f.version, f.version_format, ft.name, lf.detector_id, ns.name, ns.version_format
FROM
	layer_feature AS lf
	LEFT JOIN feature f on f.id = lf.feature_id
	LEFT JOIN feature_type ft on ft.id = f.type
	LEFT JOIN namespace ns ON ns.id = lf.namespace_id

WHERE lf.layer_id = $1`
|
||||
|
||||
// queryPersistLayerFeature builds an upsert statement for `count`
// (layer_id, feature_id, detector_id, namespace_id) rows in layer_feature.
func queryPersistLayerFeature(count int) string {
	return util.QueryPersist(count,
		"layer_feature",
		"layer_feature_layer_id_feature_id_namespace_id_key",
		"layer_id",
		"feature_id",
		"detector_id",
		"namespace_id")
}
|
||||
|
||||
// dbLayerFeature represents the layer_feature table
type dbLayerFeature struct {
	layerID     int64         // layer the feature was found in
	featureID   int64         // feature that was detected
	detectorID  int64         // detector responsible for the finding
	namespaceID sql.NullInt64 // potential namespace; NULL when none detected
}
|
||||
|
||||
func FindLayerFeatures(tx *sql.Tx, layerID int64, detectors detector.DetectorMap) ([]database.LayerFeature, error) {
|
||||
rows, err := tx.Query(findLayerFeatures, layerID)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("findLayerFeatures", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
features := []database.LayerFeature{}
|
||||
for rows.Next() {
|
||||
var (
|
||||
detectorID int64
|
||||
feature database.LayerFeature
|
||||
)
|
||||
var namespaceName, namespaceVersion sql.NullString
|
||||
if err := rows.Scan(&feature.Name, &feature.Version, &feature.VersionFormat, &feature.Type, &detectorID, &namespaceName, &namespaceVersion); err != nil {
|
||||
return nil, util.HandleError("findLayerFeatures", err)
|
||||
}
|
||||
feature.PotentialNamespace.Name = namespaceName.String
|
||||
feature.PotentialNamespace.VersionFormat = namespaceVersion.String
|
||||
|
||||
feature.By = detectors.ByID[detectorID]
|
||||
features = append(features, feature)
|
||||
}
|
||||
|
||||
return features, nil
|
||||
}
|
||||
|
||||
func PersistAllLayerFeatures(tx *sql.Tx, layerID int64, features []database.LayerFeature) error {
|
||||
detectorMap, err := detector.FindAllDetectors(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var namespaces []database.Namespace
|
||||
for _, feature := range features {
|
||||
namespaces = append(namespaces, feature.PotentialNamespace)
|
||||
}
|
||||
nameSpaceIDs, _ := namespace.FindNamespaceIDs(tx, namespaces)
|
||||
featureNamespaceMap := map[database.Namespace]sql.NullInt64{}
|
||||
rawFeatures := make([]database.Feature, 0, len(features))
|
||||
for i, f := range features {
|
||||
rawFeatures = append(rawFeatures, f.Feature)
|
||||
if f.PotentialNamespace.Valid() {
|
||||
featureNamespaceMap[f.PotentialNamespace] = nameSpaceIDs[i]
|
||||
}
|
||||
}
|
||||
|
||||
featureIDs, err := feature.FindFeatureIDs(tx, rawFeatures)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var namespaceID sql.NullInt64
|
||||
dbFeatures := make([]dbLayerFeature, 0, len(features))
|
||||
for i, f := range features {
|
||||
detectorID := detectorMap.ByValue[f.By]
|
||||
featureID := featureIDs[i].Int64
|
||||
if !featureIDs[i].Valid {
|
||||
return database.ErrMissingEntities
|
||||
}
|
||||
namespaceID = featureNamespaceMap[f.PotentialNamespace]
|
||||
|
||||
dbFeatures = append(dbFeatures, dbLayerFeature{layerID, featureID, detectorID, namespaceID})
|
||||
}
|
||||
|
||||
if err := PersistLayerFeatures(tx, dbFeatures); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func PersistLayerFeatures(tx *sql.Tx, features []dbLayerFeature) error {
|
||||
if len(features) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sort.Slice(features, func(i, j int) bool {
|
||||
return features[i].featureID < features[j].featureID
|
||||
})
|
||||
keys := make([]interface{}, 0, len(features)*4)
|
||||
|
||||
for _, f := range features {
|
||||
keys = append(keys, f.layerID, f.featureID, f.detectorID, f.namespaceID)
|
||||
}
|
||||
|
||||
_, err := tx.Exec(queryPersistLayerFeature(len(features)), keys...)
|
||||
if err != nil {
|
||||
return util.HandleError("queryPersistLayerFeature", err)
|
||||
}
|
||||
return nil
|
||||
}
|
@ -0,0 +1,127 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package layer
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"sort"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/detector"
|
||||
"github.com/coreos/clair/database/pgsql/namespace"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
)
|
||||
|
||||
const findLayerNamespaces = `
|
||||
SELECT ns.name, ns.version_format, ln.detector_id
|
||||
FROM layer_namespace AS ln, namespace AS ns
|
||||
WHERE ln.namespace_id = ns.id
|
||||
AND ln.layer_id = $1`
|
||||
|
||||
func queryPersistLayerNamespace(count int) string {
|
||||
return util.QueryPersist(count,
|
||||
"layer_namespace",
|
||||
"layer_namespace_layer_id_namespace_id_key",
|
||||
"layer_id",
|
||||
"namespace_id",
|
||||
"detector_id")
|
||||
}
|
||||
|
||||
// dbLayerNamespace represents the layer_namespace table.
|
||||
type dbLayerNamespace struct {
|
||||
layerID int64
|
||||
namespaceID int64
|
||||
detectorID int64
|
||||
}
|
||||
|
||||
func FindLayerNamespaces(tx *sql.Tx, layerID int64, detectors detector.DetectorMap) ([]database.LayerNamespace, error) {
|
||||
rows, err := tx.Query(findLayerNamespaces, layerID)
|
||||
if err != nil {
|
||||
return nil, util.HandleError("findLayerNamespaces", err)
|
||||
}
|
||||
|
||||
namespaces := []database.LayerNamespace{}
|
||||
for rows.Next() {
|
||||
var (
|
||||
namespace database.LayerNamespace
|
||||
detectorID int64
|
||||
)
|
||||
|
||||
if err := rows.Scan(&namespace.Name, &namespace.VersionFormat, &detectorID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
namespace.By = detectors.ByID[detectorID]
|
||||
namespaces = append(namespaces, namespace)
|
||||
}
|
||||
|
||||
return namespaces, nil
|
||||
}
|
||||
|
||||
func PersistAllLayerNamespaces(tx *sql.Tx, layerID int64, namespaces []database.LayerNamespace) error {
|
||||
detectorMap, err := detector.FindAllDetectors(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(sidac): This kind of type conversion is very useless and wasteful,
|
||||
// we need interfaces around the database models to reduce these kind of
|
||||
// operations.
|
||||
rawNamespaces := make([]database.Namespace, 0, len(namespaces))
|
||||
for _, ns := range namespaces {
|
||||
rawNamespaces = append(rawNamespaces, ns.Namespace)
|
||||
}
|
||||
|
||||
rawNamespaceIDs, err := namespace.FindNamespaceIDs(tx, rawNamespaces)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dbLayerNamespaces := make([]dbLayerNamespace, 0, len(namespaces))
|
||||
for i, ns := range namespaces {
|
||||
detectorID := detectorMap.ByValue[ns.By]
|
||||
namespaceID := rawNamespaceIDs[i].Int64
|
||||
if !rawNamespaceIDs[i].Valid {
|
||||
return database.ErrMissingEntities
|
||||
}
|
||||
|
||||
dbLayerNamespaces = append(dbLayerNamespaces, dbLayerNamespace{layerID, namespaceID, detectorID})
|
||||
}
|
||||
|
||||
return PersistLayerNamespaces(tx, dbLayerNamespaces)
|
||||
}
|
||||
|
||||
func PersistLayerNamespaces(tx *sql.Tx, namespaces []dbLayerNamespace) error {
|
||||
if len(namespaces) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// for every bulk persist operation, the input data should be sorted.
|
||||
sort.Slice(namespaces, func(i, j int) bool {
|
||||
return namespaces[i].namespaceID < namespaces[j].namespaceID
|
||||
})
|
||||
|
||||
keys := make([]interface{}, 0, len(namespaces)*3)
|
||||
for _, row := range namespaces {
|
||||
keys = append(keys, row.layerID, row.namespaceID, row.detectorID)
|
||||
}
|
||||
|
||||
_, err := tx.Exec(queryPersistLayerNamespace(len(namespaces)), keys...)
|
||||
if err != nil {
|
||||
return util.HandleError("queryPersistLayerNamespace", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -0,0 +1,214 @@
|
||||
// Copyright 2017 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package layer
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/testutil"
|
||||
)
|
||||
|
||||
var persistLayerTests = []struct {
|
||||
title string
|
||||
name string
|
||||
by []database.Detector
|
||||
features []database.LayerFeature
|
||||
namespaces []database.LayerNamespace
|
||||
layer *database.Layer
|
||||
err string
|
||||
}{
|
||||
{
|
||||
title: "invalid layer name",
|
||||
name: "",
|
||||
err: "expected non-empty layer hash",
|
||||
},
|
||||
{
|
||||
title: "layer with inconsistent feature and detectors",
|
||||
name: "random-forest",
|
||||
by: []database.Detector{testutil.RealDetectors[2]},
|
||||
features: []database.LayerFeature{
|
||||
{testutil.RealFeatures[1], testutil.RealDetectors[1], database.Namespace{}},
|
||||
},
|
||||
err: "parameters are not valid",
|
||||
},
|
||||
{
|
||||
title: "layer with non-existing feature",
|
||||
name: "random-forest",
|
||||
err: "associated immutable entities are missing in the database",
|
||||
by: []database.Detector{testutil.RealDetectors[2]},
|
||||
features: []database.LayerFeature{
|
||||
{testutil.FakeFeatures[1], testutil.RealDetectors[2], database.Namespace{}},
|
||||
},
|
||||
},
|
||||
{
|
||||
title: "layer with non-existing namespace",
|
||||
name: "random-forest2",
|
||||
err: "associated immutable entities are missing in the database",
|
||||
by: []database.Detector{testutil.RealDetectors[1]},
|
||||
namespaces: []database.LayerNamespace{
|
||||
{testutil.FakeNamespaces[1], testutil.RealDetectors[1]},
|
||||
},
|
||||
},
|
||||
{
|
||||
title: "layer with non-existing detector",
|
||||
name: "random-forest3",
|
||||
err: "associated immutable entities are missing in the database",
|
||||
by: []database.Detector{testutil.FakeDetector[1]},
|
||||
},
|
||||
{
|
||||
|
||||
title: "valid layer",
|
||||
name: "hamsterhouse",
|
||||
by: []database.Detector{testutil.RealDetectors[1], testutil.RealDetectors[2]},
|
||||
features: []database.LayerFeature{
|
||||
{testutil.RealFeatures[1], testutil.RealDetectors[2], database.Namespace{}},
|
||||
{testutil.RealFeatures[2], testutil.RealDetectors[2], database.Namespace{}},
|
||||
},
|
||||
namespaces: []database.LayerNamespace{
|
||||
{testutil.RealNamespaces[1], testutil.RealDetectors[1]},
|
||||
},
|
||||
layer: &database.Layer{
|
||||
Hash: "hamsterhouse",
|
||||
By: []database.Detector{testutil.RealDetectors[1], testutil.RealDetectors[2]},
|
||||
Features: []database.LayerFeature{
|
||||
{testutil.RealFeatures[1], testutil.RealDetectors[2], database.Namespace{}},
|
||||
{testutil.RealFeatures[2], testutil.RealDetectors[2], database.Namespace{}},
|
||||
},
|
||||
Namespaces: []database.LayerNamespace{
|
||||
{testutil.RealNamespaces[1], testutil.RealDetectors[1]},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
title: "update existing layer",
|
||||
name: "layer-1",
|
||||
by: []database.Detector{testutil.RealDetectors[3], testutil.RealDetectors[4]},
|
||||
features: []database.LayerFeature{
|
||||
{testutil.RealFeatures[4], testutil.RealDetectors[3], database.Namespace{}},
|
||||
},
|
||||
namespaces: []database.LayerNamespace{
|
||||
{testutil.RealNamespaces[3], testutil.RealDetectors[4]},
|
||||
},
|
||||
layer: &database.Layer{
|
||||
Hash: "layer-1",
|
||||
By: []database.Detector{testutil.RealDetectors[1], testutil.RealDetectors[2], testutil.RealDetectors[3], testutil.RealDetectors[4]},
|
||||
Features: []database.LayerFeature{
|
||||
{testutil.RealFeatures[1], testutil.RealDetectors[2], database.Namespace{}},
|
||||
{testutil.RealFeatures[2], testutil.RealDetectors[2], database.Namespace{}},
|
||||
{testutil.RealFeatures[4], testutil.RealDetectors[3], database.Namespace{}},
|
||||
},
|
||||
Namespaces: []database.LayerNamespace{
|
||||
{testutil.RealNamespaces[1], testutil.RealDetectors[1]},
|
||||
{testutil.RealNamespaces[3], testutil.RealDetectors[4]},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
title: "layer with potential namespace",
|
||||
name: "layer-potential-namespace",
|
||||
by: []database.Detector{testutil.RealDetectors[3]},
|
||||
features: []database.LayerFeature{
|
||||
{testutil.RealFeatures[4], testutil.RealDetectors[3], testutil.RealNamespaces[4]},
|
||||
},
|
||||
namespaces: []database.LayerNamespace{
|
||||
{testutil.RealNamespaces[3], testutil.RealDetectors[3]},
|
||||
},
|
||||
layer: &database.Layer{
|
||||
Hash: "layer-potential-namespace",
|
||||
By: []database.Detector{testutil.RealDetectors[3]},
|
||||
Features: []database.LayerFeature{
|
||||
{testutil.RealFeatures[4], testutil.RealDetectors[3], testutil.RealNamespaces[4]},
|
||||
},
|
||||
Namespaces: []database.LayerNamespace{
|
||||
{testutil.RealNamespaces[3], testutil.RealDetectors[3]},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestPersistLayer(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTxWithFixtures(t, "PersistLayer")
|
||||
defer cleanup()
|
||||
|
||||
for _, test := range persistLayerTests {
|
||||
t.Run(test.title, func(t *testing.T) {
|
||||
err := PersistLayer(tx, test.name, test.features, test.namespaces, test.by)
|
||||
if test.err != "" {
|
||||
assert.EqualError(t, err, test.err, "unexpected error")
|
||||
return
|
||||
}
|
||||
|
||||
assert.Nil(t, err)
|
||||
if test.layer != nil {
|
||||
layer, ok, err := FindLayer(tx, test.name)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, ok)
|
||||
database.AssertLayerEqual(t, test.layer, &layer)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var findLayerTests = []struct {
|
||||
title string
|
||||
in string
|
||||
|
||||
out *database.Layer
|
||||
err string
|
||||
ok bool
|
||||
}{
|
||||
{
|
||||
title: "invalid layer name",
|
||||
in: "",
|
||||
err: "non empty layer hash is expected.",
|
||||
},
|
||||
{
|
||||
title: "non-existing layer",
|
||||
in: "layer-non-existing",
|
||||
ok: false,
|
||||
out: nil,
|
||||
},
|
||||
{
|
||||
title: "existing layer",
|
||||
in: "layer-4",
|
||||
ok: true,
|
||||
out: testutil.TakeLayerPointerFromMap(testutil.RealLayers, 6),
|
||||
},
|
||||
}
|
||||
|
||||
func TestFindLayer(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTxWithFixtures(t, "FindLayer")
|
||||
defer cleanup()
|
||||
|
||||
for _, test := range findLayerTests {
|
||||
t.Run(test.title, func(t *testing.T) {
|
||||
layer, ok, err := FindLayer(tx, test.in)
|
||||
if test.err != "" {
|
||||
assert.EqualError(t, err, test.err, "unexpected error")
|
||||
return
|
||||
}
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, test.ok, ok)
|
||||
if test.ok {
|
||||
database.AssertLayerEqual(t, test.out, &layer)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -1,114 +0,0 @@
|
||||
// Copyright 2017 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
)
|
||||
|
||||
func TestPersistLayer(t *testing.T) {
|
||||
datastore, tx := openSessionForTest(t, "PersistLayer", false)
|
||||
defer closeTest(t, datastore, tx)
|
||||
|
||||
l1 := database.Layer{}
|
||||
l2 := database.Layer{Hash: "HESOYAM"}
|
||||
|
||||
// invalid
|
||||
assert.NotNil(t, tx.PersistLayer(l1))
|
||||
// valid
|
||||
assert.Nil(t, tx.PersistLayer(l2))
|
||||
// duplicated
|
||||
assert.Nil(t, tx.PersistLayer(l2))
|
||||
}
|
||||
|
||||
func TestPersistLayerProcessors(t *testing.T) {
|
||||
datastore, tx := openSessionForTest(t, "PersistLayerProcessors", true)
|
||||
defer closeTest(t, datastore, tx)
|
||||
|
||||
// invalid
|
||||
assert.NotNil(t, tx.PersistLayerContent("hash", []database.Namespace{}, []database.Feature{}, database.Processors{}))
|
||||
// valid
|
||||
assert.Nil(t, tx.PersistLayerContent("layer-4", []database.Namespace{}, []database.Feature{}, database.Processors{Detectors: []string{"new detector!"}}))
|
||||
}
|
||||
|
||||
func TestFindLayer(t *testing.T) {
|
||||
datastore, tx := openSessionForTest(t, "FindLayer", true)
|
||||
defer closeTest(t, datastore, tx)
|
||||
|
||||
expected := database.Layer{Hash: "layer-4"}
|
||||
expectedProcessors := database.Processors{
|
||||
Detectors: []string{"os-release", "apt-sources"},
|
||||
Listers: []string{"dpkg", "rpm"},
|
||||
}
|
||||
|
||||
// invalid
|
||||
_, _, _, err := tx.FindLayer("")
|
||||
assert.NotNil(t, err)
|
||||
_, _, ok, err := tx.FindLayer("layer-non")
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, ok)
|
||||
|
||||
// valid
|
||||
layer, processors, ok2, err := tx.FindLayer("layer-4")
|
||||
if assert.Nil(t, err) && assert.True(t, ok2) {
|
||||
assert.Equal(t, expected, layer)
|
||||
assertProcessorsEqual(t, expectedProcessors, processors)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindLayerWithContent(t *testing.T) {
|
||||
datastore, tx := openSessionForTest(t, "FindLayerWithContent", true)
|
||||
defer closeTest(t, datastore, tx)
|
||||
|
||||
_, _, err := tx.FindLayerWithContent("")
|
||||
assert.NotNil(t, err)
|
||||
_, ok, err := tx.FindLayerWithContent("layer-non")
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, ok)
|
||||
|
||||
expectedL := database.LayerWithContent{
|
||||
Layer: database.Layer{
|
||||
Hash: "layer-4",
|
||||
},
|
||||
Features: []database.Feature{
|
||||
{Name: "fake", Version: "2.0", VersionFormat: "rpm"},
|
||||
{Name: "openssl", Version: "2.0", VersionFormat: "dpkg"},
|
||||
},
|
||||
Namespaces: []database.Namespace{
|
||||
{Name: "debian:7", VersionFormat: "dpkg"},
|
||||
{Name: "fake:1.0", VersionFormat: "rpm"},
|
||||
},
|
||||
ProcessedBy: database.Processors{
|
||||
Detectors: []string{"os-release", "apt-sources"},
|
||||
Listers: []string{"dpkg", "rpm"},
|
||||
},
|
||||
}
|
||||
|
||||
layer, ok2, err := tx.FindLayerWithContent("layer-4")
|
||||
if assert.Nil(t, err) && assert.True(t, ok2) {
|
||||
assertLayerWithContentEqual(t, expectedL, layer)
|
||||
}
|
||||
}
|
||||
|
||||
func assertLayerWithContentEqual(t *testing.T, expected database.LayerWithContent, actual database.LayerWithContent) bool {
|
||||
return assert.Equal(t, expected.Layer, actual.Layer) &&
|
||||
assertFeaturesEqual(t, expected.Features, actual.Features) &&
|
||||
assertProcessorsEqual(t, expected.ProcessedBy, actual.ProcessedBy) &&
|
||||
assertNamespacesEqual(t, expected.Namespaces, actual.Namespaces)
|
||||
}
|
@ -1,113 +0,0 @@
|
||||
// Copyright 2017 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/coreos/clair/pkg/commonerr"
|
||||
)
|
||||
|
||||
var (
|
||||
errLockNotFound = errors.New("lock is not in database")
|
||||
)
|
||||
|
||||
// Lock tries to set a temporary lock in the database.
|
||||
//
|
||||
// Lock does not block, instead, it returns true and its expiration time
|
||||
// is the lock has been successfully acquired or false otherwise.
|
||||
func (tx *pgSession) Lock(name string, owner string, duration time.Duration, renew bool) (bool, time.Time, error) {
|
||||
if name == "" || owner == "" || duration == 0 {
|
||||
log.Warning("could not create an invalid lock")
|
||||
return false, time.Time{}, commonerr.NewBadRequestError("Invalid Lock Parameters")
|
||||
}
|
||||
|
||||
until := time.Now().Add(duration)
|
||||
if renew {
|
||||
defer observeQueryTime("Lock", "update", time.Now())
|
||||
// Renew lock.
|
||||
r, err := tx.Exec(updateLock, name, owner, until)
|
||||
if err != nil {
|
||||
return false, until, handleError("updateLock", err)
|
||||
}
|
||||
|
||||
if n, err := r.RowsAffected(); err == nil {
|
||||
return n > 0, until, nil
|
||||
}
|
||||
return false, until, handleError("updateLock", err)
|
||||
} else if err := tx.pruneLocks(); err != nil {
|
||||
return false, until, err
|
||||
}
|
||||
|
||||
// Lock.
|
||||
defer observeQueryTime("Lock", "soiLock", time.Now())
|
||||
_, err := tx.Exec(soiLock, name, owner, until)
|
||||
if err != nil {
|
||||
if isErrUniqueViolation(err) {
|
||||
return false, until, nil
|
||||
}
|
||||
return false, until, handleError("insertLock", err)
|
||||
}
|
||||
return true, until, nil
|
||||
}
|
||||
|
||||
// Unlock unlocks a lock specified by its name if I own it
|
||||
func (tx *pgSession) Unlock(name, owner string) error {
|
||||
if name == "" || owner == "" {
|
||||
return commonerr.NewBadRequestError("Invalid Lock Parameters")
|
||||
}
|
||||
|
||||
defer observeQueryTime("Unlock", "all", time.Now())
|
||||
|
||||
_, err := tx.Exec(removeLock, name, owner)
|
||||
return err
|
||||
}
|
||||
|
||||
// FindLock returns the owner of a lock specified by its name and its
|
||||
// expiration time.
|
||||
func (tx *pgSession) FindLock(name string) (string, time.Time, bool, error) {
|
||||
if name == "" {
|
||||
return "", time.Time{}, false, commonerr.NewBadRequestError("could not find an invalid lock")
|
||||
}
|
||||
|
||||
defer observeQueryTime("FindLock", "all", time.Now())
|
||||
|
||||
var owner string
|
||||
var until time.Time
|
||||
err := tx.QueryRow(searchLock, name).Scan(&owner, &until)
|
||||
if err != nil {
|
||||
return owner, until, false, handleError("searchLock", err)
|
||||
}
|
||||
|
||||
return owner, until, true, nil
|
||||
}
|
||||
|
||||
// pruneLocks removes every expired locks from the database
|
||||
func (tx *pgSession) pruneLocks() error {
|
||||
defer observeQueryTime("pruneLocks", "all", time.Now())
|
||||
|
||||
if r, err := tx.Exec(removeLockExpired); err != nil {
|
||||
return handleError("removeLockExpired", err)
|
||||
} else if affected, err := r.RowsAffected(); err != nil {
|
||||
return handleError("removeLockExpired", err)
|
||||
} else {
|
||||
log.Debugf("Pruned %d Locks", affected)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -0,0 +1,109 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database/pgsql/monitoring"
|
||||
"github.com/coreos/clair/database/pgsql/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
searchLock = `SELECT until FROM Lock WHERE name = $1`
|
||||
updateLock = `UPDATE Lock SET until = $3 WHERE name = $1 AND owner = $2`
|
||||
removeLock = `DELETE FROM Lock WHERE name = $1 AND owner = $2`
|
||||
removeLockExpired = `DELETE FROM LOCK WHERE until < $1`
|
||||
|
||||
soiLock = `
|
||||
WITH new_lock AS (
|
||||
INSERT INTO lock (name, owner, until)
|
||||
SELECT CAST ($1 AS TEXT), CAST ($2 AS TEXT), CAST ($3 AS TIMESTAMP)
|
||||
WHERE NOT EXISTS (SELECT id FROM lock WHERE name = $1)
|
||||
RETURNING owner, until
|
||||
)
|
||||
SELECT * FROM new_lock
|
||||
UNION
|
||||
SELECT owner, until FROM lock WHERE name = $1`
|
||||
)
|
||||
|
||||
func AcquireLock(tx *sql.Tx, lockName, whoami string, desiredDuration time.Duration) (bool, time.Time, error) {
|
||||
if lockName == "" || whoami == "" || desiredDuration == 0 {
|
||||
panic("invalid lock parameters")
|
||||
}
|
||||
|
||||
if err := PruneLocks(tx); err != nil {
|
||||
return false, time.Time{}, err
|
||||
}
|
||||
|
||||
var (
|
||||
desiredLockedUntil = time.Now().UTC().Add(desiredDuration)
|
||||
|
||||
lockedUntil time.Time
|
||||
lockOwner string
|
||||
)
|
||||
|
||||
defer monitoring.ObserveQueryTime("Lock", "soiLock", time.Now())
|
||||
err := tx.QueryRow(soiLock, lockName, whoami, desiredLockedUntil).Scan(&lockOwner, &lockedUntil)
|
||||
return lockOwner == whoami, lockedUntil, util.HandleError("AcquireLock", err)
|
||||
}
|
||||
|
||||
func ExtendLock(tx *sql.Tx, lockName, whoami string, desiredDuration time.Duration) (bool, time.Time, error) {
|
||||
if lockName == "" || whoami == "" || desiredDuration == 0 {
|
||||
panic("invalid lock parameters")
|
||||
}
|
||||
|
||||
desiredLockedUntil := time.Now().Add(desiredDuration)
|
||||
|
||||
defer monitoring.ObserveQueryTime("Lock", "update", time.Now())
|
||||
result, err := tx.Exec(updateLock, lockName, whoami, desiredLockedUntil)
|
||||
if err != nil {
|
||||
return false, time.Time{}, util.HandleError("updateLock", err)
|
||||
}
|
||||
|
||||
if numRows, err := result.RowsAffected(); err == nil {
|
||||
// This is the only happy path.
|
||||
return numRows > 0, desiredLockedUntil, nil
|
||||
}
|
||||
|
||||
return false, time.Time{}, util.HandleError("updateLock", err)
|
||||
}
|
||||
|
||||
func ReleaseLock(tx *sql.Tx, name, owner string) error {
|
||||
if name == "" || owner == "" {
|
||||
panic("invalid lock parameters")
|
||||
}
|
||||
|
||||
defer monitoring.ObserveQueryTime("Unlock", "all", time.Now())
|
||||
_, err := tx.Exec(removeLock, name, owner)
|
||||
return err
|
||||
}
|
||||
|
||||
// pruneLocks removes every expired locks from the database
|
||||
func PruneLocks(tx *sql.Tx) error {
|
||||
defer monitoring.ObserveQueryTime("pruneLocks", "all", time.Now())
|
||||
|
||||
if r, err := tx.Exec(removeLockExpired, time.Now().UTC()); err != nil {
|
||||
return util.HandleError("removeLockExpired", err)
|
||||
} else if affected, err := r.RowsAffected(); err != nil {
|
||||
return util.HandleError("removeLockExpired", err)
|
||||
} else {
|
||||
log.Debugf("Pruned %d Locks", affected)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -0,0 +1,100 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database/pgsql/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAcquireLockReturnsExistingLockDuration(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTxWithFixtures(t, "Lock")
|
||||
defer cleanup()
|
||||
|
||||
acquired, originalExpiration, err := AcquireLock(tx, "test1", "owner1", time.Minute)
|
||||
require.Nil(t, err)
|
||||
require.True(t, acquired)
|
||||
|
||||
acquired2, expiration, err := AcquireLock(tx, "test1", "owner2", time.Hour)
|
||||
require.Nil(t, err)
|
||||
require.False(t, acquired2)
|
||||
require.Equal(t, expiration, originalExpiration)
|
||||
}
|
||||
|
||||
func TestLock(t *testing.T) {
|
||||
db, cleanup := testutil.CreateTestDBWithFixture(t, "Lock")
|
||||
defer cleanup()
|
||||
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Create a first lock.
|
||||
l, _, err := AcquireLock(tx, "test1", "owner1", time.Minute)
|
||||
require.Nil(t, err)
|
||||
require.True(t, l)
|
||||
tx = testutil.RestartTransaction(db, tx, true)
|
||||
|
||||
// lock again by itself, the previous lock is not expired yet.
|
||||
l, _, err = AcquireLock(tx, "test1", "owner1", time.Minute)
|
||||
require.Nil(t, err)
|
||||
require.True(t, l)
|
||||
tx = testutil.RestartTransaction(db, tx, false)
|
||||
|
||||
// Try to renew the same lock with another owner.
|
||||
l, _, err = ExtendLock(tx, "test1", "owner2", time.Minute)
|
||||
require.Nil(t, err)
|
||||
require.False(t, l)
|
||||
tx = testutil.RestartTransaction(db, tx, false)
|
||||
|
||||
l, _, err = AcquireLock(tx, "test1", "owner2", time.Minute)
|
||||
require.Nil(t, err)
|
||||
require.False(t, l)
|
||||
tx = testutil.RestartTransaction(db, tx, false)
|
||||
|
||||
// Renew the lock.
|
||||
l, _, err = ExtendLock(tx, "test1", "owner1", 2*time.Minute)
|
||||
require.Nil(t, err)
|
||||
require.True(t, l)
|
||||
tx = testutil.RestartTransaction(db, tx, true)
|
||||
|
||||
// Unlock and then relock by someone else.
|
||||
err = ReleaseLock(tx, "test1", "owner1")
|
||||
require.Nil(t, err)
|
||||
tx = testutil.RestartTransaction(db, tx, true)
|
||||
|
||||
l, _, err = AcquireLock(tx, "test1", "owner2", time.Minute)
|
||||
require.Nil(t, err)
|
||||
require.True(t, l)
|
||||
tx = testutil.RestartTransaction(db, tx, true)
|
||||
|
||||
// Create a second lock which is actually already expired ...
|
||||
l, _, err = AcquireLock(tx, "test2", "owner1", -time.Minute)
|
||||
require.Nil(t, err)
|
||||
require.True(t, l)
|
||||
tx = testutil.RestartTransaction(db, tx, true)
|
||||
|
||||
// Take over the lock
|
||||
l, _, err = AcquireLock(tx, "test2", "owner2", time.Minute)
|
||||
require.Nil(t, err)
|
||||
require.True(t, l)
|
||||
tx = testutil.RestartTransaction(db, tx, true)
|
||||
|
||||
require.Nil(t, tx.Rollback())
|
||||
}
|
@ -1,93 +0,0 @@
|
||||
// Copyright 2016 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLock(t *testing.T) {
|
||||
datastore, tx := openSessionForTest(t, "Lock", true)
|
||||
defer datastore.Close()
|
||||
|
||||
var l bool
|
||||
var et time.Time
|
||||
|
||||
// Create a first lock.
|
||||
l, _, err := tx.Lock("test1", "owner1", time.Minute, false)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, l)
|
||||
tx = restartSession(t, datastore, tx, true)
|
||||
|
||||
// lock again by itself, the previous lock is not expired yet.
|
||||
l, _, err = tx.Lock("test1", "owner1", time.Minute, false)
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, l)
|
||||
tx = restartSession(t, datastore, tx, false)
|
||||
|
||||
// Try to renew the same lock with another owner.
|
||||
l, _, err = tx.Lock("test1", "owner2", time.Minute, true)
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, l)
|
||||
tx = restartSession(t, datastore, tx, false)
|
||||
|
||||
l, _, err = tx.Lock("test1", "owner2", time.Minute, false)
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, l)
|
||||
tx = restartSession(t, datastore, tx, false)
|
||||
|
||||
// Renew the lock.
|
||||
l, _, err = tx.Lock("test1", "owner1", 2*time.Minute, true)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, l)
|
||||
tx = restartSession(t, datastore, tx, true)
|
||||
|
||||
// Unlock and then relock by someone else.
|
||||
err = tx.Unlock("test1", "owner1")
|
||||
assert.Nil(t, err)
|
||||
tx = restartSession(t, datastore, tx, true)
|
||||
|
||||
l, et, err = tx.Lock("test1", "owner2", time.Minute, false)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, l)
|
||||
tx = restartSession(t, datastore, tx, true)
|
||||
|
||||
// LockInfo
|
||||
o, et2, ok, err := tx.FindLock("test1")
|
||||
assert.True(t, ok)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "owner2", o)
|
||||
assert.Equal(t, et.Second(), et2.Second())
|
||||
tx = restartSession(t, datastore, tx, true)
|
||||
|
||||
// Create a second lock which is actually already expired ...
|
||||
l, _, err = tx.Lock("test2", "owner1", -time.Minute, false)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, l)
|
||||
tx = restartSession(t, datastore, tx, true)
|
||||
|
||||
// Take over the lock
|
||||
l, _, err = tx.Lock("test2", "owner2", time.Minute, false)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, l)
|
||||
tx = restartSession(t, datastore, tx, true)
|
||||
|
||||
if !assert.Nil(t, tx.Rollback()) {
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
@ -0,0 +1,60 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package migrations_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database/pgsql/migrations"
|
||||
"github.com/coreos/clair/database/pgsql/testutil"
|
||||
_ "github.com/lib/pq"
|
||||
"github.com/remind101/migrate"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var userTableCount = `SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname='public'`
|
||||
|
||||
func TestMigration(t *testing.T) {
|
||||
db, cleanup := testutil.CreateAndConnectTestDB(t, "TestMigration")
|
||||
defer cleanup()
|
||||
|
||||
err := migrate.NewPostgresMigrator(db).Exec(migrate.Up, migrations.Migrations...)
|
||||
if err != nil {
|
||||
require.Nil(t, err, err.Error())
|
||||
}
|
||||
|
||||
err = migrate.NewPostgresMigrator(db).Exec(migrate.Down, migrations.Migrations...)
|
||||
if err != nil {
|
||||
require.Nil(t, err, err.Error())
|
||||
}
|
||||
|
||||
rows, err := db.Query(userTableCount)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
var (
|
||||
tables []string
|
||||
table string
|
||||
)
|
||||
for rows.Next() {
|
||||
if err = rows.Scan(&table); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
tables = append(tables, table)
|
||||
}
|
||||
|
||||
require.True(t, len(tables) == 1 && tables[0] == "schema_migrations", "Only `schema_migrations` should be left")
|
||||
}
|
@ -0,0 +1,44 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package migrations
|
||||
|
||||
import "github.com/remind101/migrate"
|
||||
|
||||
// MigrationQuery contains the Up migration and Down migration in plain
// strings.
type MigrationQuery struct {
	Up   []string // up-migration statements
	Down []string // down-migration statements
}
|
||||
|
||||
// ConcatMigrationQueries concats migration queries in the give order.
|
||||
func ConcatMigrationQueries(qs []MigrationQuery) MigrationQuery {
|
||||
r := MigrationQuery{}
|
||||
for _, q := range qs {
|
||||
r.Up = append(r.Up, q.Up...)
|
||||
r.Down = append(r.Down, q.Down...)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// NewSimpleMigration returns a simple migration plan with all provided
|
||||
// migration queries concatted in order.
|
||||
func NewSimpleMigration(id int, qs []MigrationQuery) migrate.Migration {
|
||||
q := ConcatMigrationQueries(qs)
|
||||
return migrate.Migration{
|
||||
ID: id,
|
||||
Up: migrate.Queries(q.Up),
|
||||
Down: migrate.Queries(q.Down),
|
||||
}
|
||||
}
|
@ -0,0 +1,67 @@
|
||||
// Copyright 2019 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// Prometheus collectors exported by the pgsql backend; they are registered
// with the default registry in this package's init.
var (
	// PromErrorsTotal counts errors generated by PostgreSQL requests,
	// labeled by request name.
	PromErrorsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "clair_pgsql_errors_total",
		Help: "Number of errors that PostgreSQL requests generated.",
	}, []string{"request"})

	// PromCacheHitsTotal counts cache hits, labeled by object type.
	PromCacheHitsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "clair_pgsql_cache_hits_total",
		Help: "Number of cache hits that the PostgreSQL backend did.",
	}, []string{"object"})

	// PromCacheQueriesTotal counts cache lookups, labeled by object type.
	PromCacheQueriesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "clair_pgsql_cache_queries_total",
		Help: "Number of cache queries that the PostgreSQL backend did.",
	}, []string{"object"})

	// PromQueryDurationMilliseconds records query latency in milliseconds,
	// labeled by query and subquery (fed by ObserveQueryTime).
	PromQueryDurationMilliseconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "clair_pgsql_query_duration_milliseconds",
		Help: "Time it takes to execute the database query.",
	}, []string{"query", "subquery"})

	// PromConcurrentLockVAFV gauges how many transactions are trying to
	// hold the exclusive Vulnerability_Affects_Feature lock.
	PromConcurrentLockVAFV = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "clair_pgsql_concurrent_lock_vafv_total",
		Help: "Number of transactions trying to hold the exclusive Vulnerability_Affects_Feature lock.",
	})
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(PromErrorsTotal)
|
||||
prometheus.MustRegister(PromCacheHitsTotal)
|
||||
prometheus.MustRegister(PromCacheQueriesTotal)
|
||||
prometheus.MustRegister(PromQueryDurationMilliseconds)
|
||||
prometheus.MustRegister(PromConcurrentLockVAFV)
|
||||
}
|
||||
|
||||
// ObserveQueryTime computes the time elapsed since `start` to represent the
// query time, and records it in PromQueryDurationMilliseconds.
// 1. `query` is a pgSession function name.
// 2. `subquery` is a specific query or a batched query.
// 3. `start` is the time right before query is executed.
func ObserveQueryTime(query, subquery string, start time.Time) {
	// Convert the elapsed nanoseconds to milliseconds to match the
	// histogram's unit.
	PromQueryDurationMilliseconds.
		WithLabelValues(query, subquery).
		Observe(float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond))
}
|
@ -0,0 +1,45 @@
|
||||
// Copyright 2016 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package namespace
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/testutil"
|
||||
)
|
||||
|
||||
func TestPersistNamespaces(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTx(t, "PersistNamespaces")
|
||||
defer cleanup()
|
||||
|
||||
ns1 := database.Namespace{}
|
||||
ns2 := database.Namespace{Name: "t", VersionFormat: "b"}
|
||||
|
||||
// Empty Case
|
||||
assert.Nil(t, PersistNamespaces(tx, []database.Namespace{}))
|
||||
// Invalid Case
|
||||
assert.NotNil(t, PersistNamespaces(tx, []database.Namespace{ns1}))
|
||||
// Duplicated Case
|
||||
assert.Nil(t, PersistNamespaces(tx, []database.Namespace{ns2, ns2}))
|
||||
// Existing Case
|
||||
assert.Nil(t, PersistNamespaces(tx, []database.Namespace{ns2}))
|
||||
|
||||
nsList := testutil.ListNamespaces(t, tx)
|
||||
assert.Len(t, nsList, 1)
|
||||
assert.Equal(t, ns2, nsList[0])
|
||||
}
|
@ -1,83 +0,0 @@
|
||||
// Copyright 2016 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
)
|
||||
|
||||
// TestPersistNamespaces exercises pgSession.PersistNamespaces with empty,
// invalid, duplicated, and already-persisted inputs, then verifies that
// exactly one namespace row ends up stored.
func TestPersistNamespaces(t *testing.T) {
	datastore, tx := openSessionForTest(t, "PersistNamespaces", false)
	defer closeTest(t, datastore, tx)

	// ns1 is a zero-value (invalid) namespace; ns2 is a minimal valid one.
	ns1 := database.Namespace{}
	ns2 := database.Namespace{Name: "t", VersionFormat: "b"}

	// Empty Case
	assert.Nil(t, tx.PersistNamespaces([]database.Namespace{}))
	// Invalid Case
	assert.NotNil(t, tx.PersistNamespaces([]database.Namespace{ns1}))
	// Duplicated Case
	assert.Nil(t, tx.PersistNamespaces([]database.Namespace{ns2, ns2}))
	// Existing Case
	assert.Nil(t, tx.PersistNamespaces([]database.Namespace{ns2}))

	// Despite duplicates and re-insertion, only one row should exist.
	nsList := listNamespaces(t, tx)
	assert.Len(t, nsList, 1)
	assert.Equal(t, ns2, nsList[0])
}
|
||||
|
||||
func assertNamespacesEqual(t *testing.T, expected []database.Namespace, actual []database.Namespace) bool {
|
||||
if assert.Len(t, actual, len(expected)) {
|
||||
has := map[database.Namespace]bool{}
|
||||
for _, i := range expected {
|
||||
has[i] = false
|
||||
}
|
||||
for _, i := range actual {
|
||||
has[i] = true
|
||||
}
|
||||
for key, v := range has {
|
||||
if !assert.True(t, v, key.Name+"is expected") {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func listNamespaces(t *testing.T, tx *pgSession) []database.Namespace {
|
||||
rows, err := tx.Query("SELECT name, version_format FROM namespace")
|
||||
if err != nil {
|
||||
t.FailNow()
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
namespaces := []database.Namespace{}
|
||||
for rows.Next() {
|
||||
var ns database.Namespace
|
||||
err := rows.Scan(&ns.Name, &ns.VersionFormat)
|
||||
if err != nil {
|
||||
t.FailNow()
|
||||
}
|
||||
namespaces = append(namespaces, ns)
|
||||
}
|
||||
|
||||
return namespaces
|
||||
}
|
@ -0,0 +1,280 @@
|
||||
// Copyright 2018 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package notification
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql/page"
|
||||
"github.com/coreos/clair/database/pgsql/testutil"
|
||||
"github.com/coreos/clair/pkg/pagination"
|
||||
)
|
||||
|
||||
// findVulnerabilityNotificationIn bundles the arguments passed to
// FindVulnerabilityNotification in a table-driven test case.
type findVulnerabilityNotificationIn struct {
	notificationName        string           // name of the notification to look up
	pageSize                int              // page size for affected-ancestry pagination
	oldAffectedAncestryPage pagination.Token // page token for the old vulnerability's ancestries
	newAffectedAncestryPage pagination.Token // page token for the new vulnerability's ancestries
}
|
||||
|
||||
// findVulnerabilityNotificationOut is the expected result of a
// FindVulnerabilityNotification test case: the notification (nil when none
// is expected), the found flag, and the expected error message ("" when no
// error is expected).
type findVulnerabilityNotificationOut struct {
	notification *database.VulnerabilityNotificationWithVulnerable
	ok           bool
	err          string
}
|
||||
|
||||
// testPaginationKey is a randomly generated pagination key.
// NOTE(review): appears unused in this file — the tests below use
// testutil.TestPaginationKey instead; confirm before removing.
var testPaginationKey = pagination.Must(pagination.NewKey())
|
||||
|
||||
// findVulnerabilityNotificationTests is the table for
// TestFindVulnerabilityNotification. Each case names a notification, a page
// size, and pagination tokens for the old/new affected-ancestry pages, and
// states the expected notification, found flag, and error message.
var findVulnerabilityNotificationTests = []struct {
	title string
	in    findVulnerabilityNotificationIn
	out   findVulnerabilityNotificationOut
}{
	{
		// A malformed pagination token must surface ErrInvalidToken.
		title: "find notification with invalid page",
		in: findVulnerabilityNotificationIn{
			notificationName:        "test",
			pageSize:                1,
			oldAffectedAncestryPage: pagination.FirstPageToken,
			newAffectedAncestryPage: pagination.Token("random non sense"),
		},
		out: findVulnerabilityNotificationOut{
			err: pagination.ErrInvalidToken.Error(),
		},
	},
	{
		// An unknown notification name yields ok == false with no error.
		title: "find non-existing notification",
		in: findVulnerabilityNotificationIn{
			notificationName:        "non-existing",
			pageSize:                1,
			oldAffectedAncestryPage: pagination.FirstPageToken,
			newAffectedAncestryPage: pagination.FirstPageToken,
		},
		out: findVulnerabilityNotificationOut{
			ok: false,
		},
	},
	{
		title: "find existing notification first page",
		in: findVulnerabilityNotificationIn{
			notificationName:        "test",
			pageSize:                1,
			oldAffectedAncestryPage: pagination.FirstPageToken,
			newAffectedAncestryPage: pagination.FirstPageToken,
		},
		// Positional fields: notification, ok, err.
		out: findVulnerabilityNotificationOut{
			&database.VulnerabilityNotificationWithVulnerable{
				NotificationHook: testutil.RealNotification[1].NotificationHook,
				// The old vulnerability has no affected ancestries left.
				Old: &database.PagedVulnerableAncestries{
					Vulnerability: testutil.RealVulnerability[2],
					Limit:         1,
					Affected:      make(map[int]string),
					Current:       testutil.MustMarshalToken(testutil.TestPaginationKey, page.Page{0}),
					Next:          testutil.MustMarshalToken(testutil.TestPaginationKey, page.Page{0}),
					End:           true,
				},
				// The new vulnerability has one ancestry on this page and
				// more on the next one.
				New: &database.PagedVulnerableAncestries{
					Vulnerability: testutil.RealVulnerability[1],
					Limit:         1,
					Affected:      map[int]string{3: "ancestry-3"},
					Current:       testutil.MustMarshalToken(testutil.TestPaginationKey, page.Page{0}),
					Next:          testutil.MustMarshalToken(testutil.TestPaginationKey, page.Page{4}),
					End:           false,
				},
			},

			true,
			"",
		},
	},
	{
		title: "find existing notification of second page of new affected ancestry",
		in: findVulnerabilityNotificationIn{
			notificationName:        "test",
			pageSize:                1,
			oldAffectedAncestryPage: pagination.FirstPageToken,
			newAffectedAncestryPage: testutil.MustMarshalToken(testutil.TestPaginationKey, page.Page{4}),
		},
		out: findVulnerabilityNotificationOut{
			&database.VulnerabilityNotificationWithVulnerable{
				NotificationHook: testutil.RealNotification[1].NotificationHook,
				Old: &database.PagedVulnerableAncestries{
					Vulnerability: testutil.RealVulnerability[2],
					Limit:         1,
					Affected:      make(map[int]string),
					Current:       testutil.MustMarshalToken(testutil.TestPaginationKey, page.Page{0}),
					Next:          testutil.MustMarshalToken(testutil.TestPaginationKey, page.Page{0}),
					End:           true,
				},
				// Second (final) page of the new affected ancestries.
				New: &database.PagedVulnerableAncestries{
					Vulnerability: testutil.RealVulnerability[1],
					Limit:         1,
					Affected:      map[int]string{4: "ancestry-4"},
					Current:       testutil.MustMarshalToken(testutil.TestPaginationKey, page.Page{4}),
					Next:          testutil.MustMarshalToken(testutil.TestPaginationKey, page.Page{0}),
					End:           true,
				},
			},

			true,
			"",
		},
	},
}
|
||||
|
||||
func TestFindVulnerabilityNotification(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTxWithFixtures(t, "pagination")
|
||||
defer cleanup()
|
||||
|
||||
for _, test := range findVulnerabilityNotificationTests {
|
||||
t.Run(test.title, func(t *testing.T) {
|
||||
notification, ok, err := FindVulnerabilityNotification(tx, test.in.notificationName, test.in.pageSize, test.in.oldAffectedAncestryPage, test.in.newAffectedAncestryPage, testutil.TestPaginationKey)
|
||||
if test.out.err != "" {
|
||||
require.EqualError(t, err, test.out.err)
|
||||
return
|
||||
}
|
||||
|
||||
require.Nil(t, err)
|
||||
if !test.out.ok {
|
||||
require.Equal(t, test.out.ok, ok)
|
||||
return
|
||||
}
|
||||
|
||||
require.True(t, ok)
|
||||
testutil.AssertVulnerabilityNotificationWithVulnerableEqual(t, testutil.TestPaginationKey, test.out.notification, ¬ification)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestInsertVulnerabilityNotifications checks input validation and
// uniqueness constraints of InsertVulnerabilityNotifications against a
// fixture-loaded database.
func TestInsertVulnerabilityNotifications(t *testing.T) {
	datastore, cleanup := testutil.CreateTestDBWithFixture(t, "InsertVulnerabilityNotifications")
	defer cleanup()

	// n1: zero-value notification (no hook) — invalid.
	n1 := database.VulnerabilityNotification{}
	// n3: valid hook, but references a zero-value (unknown) vulnerability.
	n3 := database.VulnerabilityNotification{
		NotificationHook: database.NotificationHook{
			Name:    "random name",
			Created: time.Now(),
		},
		Old: nil,
		New: &database.Vulnerability{},
	}
	// n4: valid hook referencing a vulnerability present in the fixture.
	n4 := database.VulnerabilityNotification{
		NotificationHook: database.NotificationHook{
			Name:    "random name",
			Created: time.Now(),
		},
		Old: nil,
		New: &database.Vulnerability{
			Name: "CVE-OPENSSL-1-DEB7",
			Namespace: database.Namespace{
				Name:          "debian:7",
				VersionFormat: "dpkg",
			},
		},
	}

	tx, err := datastore.Begin()
	require.Nil(t, err)

	// invalid case
	err = InsertVulnerabilityNotifications(tx, []database.VulnerabilityNotification{n1})
	require.NotNil(t, err)

	// invalid case: unknown vulnerability
	err = InsertVulnerabilityNotifications(tx, []database.VulnerabilityNotification{n3})
	require.NotNil(t, err)

	// invalid case: duplicated input notification
	err = InsertVulnerabilityNotifications(tx, []database.VulnerabilityNotification{n4, n4})
	require.NotNil(t, err)
	// NOTE(review): the transaction is restarted only after the duplicate
	// failure — presumably the earlier validation failures leave tx usable
	// while this one aborts it; confirm against the implementation.
	tx = testutil.RestartTransaction(datastore, tx, false)

	// valid case
	err = InsertVulnerabilityNotifications(tx, []database.VulnerabilityNotification{n4})
	require.Nil(t, err)
	// invalid case: notification is already in database
	err = InsertVulnerabilityNotifications(tx, []database.VulnerabilityNotification{n4})
	require.NotNil(t, err)

	require.Nil(t, tx.Rollback())
}
|
||||
|
||||
// TestFindNewNotification verifies how FindNewNotification treats the
// notified timestamp: a never-notified fixture notification is returned,
// a just-notified one is hidden until the renotify cutoff passes, and a
// deleted one is never returned.
func TestFindNewNotification(t *testing.T) {
	tx, cleanup := testutil.CreateTestTxWithFixtures(t, "TestFindNewNotification")
	defer cleanup()

	// The fixture notification has never been notified or deleted, so all
	// of its timestamps are zero values.
	noti, ok, err := FindNewNotification(tx, time.Now())
	if assert.Nil(t, err) && assert.True(t, ok) {
		assert.Equal(t, "test", noti.Name)
		assert.Equal(t, time.Time{}, noti.Notified)
		assert.Equal(t, time.Time{}, noti.Created)
		assert.Equal(t, time.Time{}, noti.Deleted)
	}

	// can't find the notified
	assert.Nil(t, MarkNotificationAsRead(tx, "test"))
	// if the notified time is before
	noti, ok, err = FindNewNotification(tx, time.Now().Add(-time.Duration(10*time.Second)))
	assert.Nil(t, err)
	assert.False(t, ok)
	// can find the notified after a period of time
	noti, ok, err = FindNewNotification(tx, time.Now().Add(time.Duration(10*time.Second)))
	if assert.Nil(t, err) && assert.True(t, ok) {
		assert.Equal(t, "test", noti.Name)
		// Notified was set by MarkNotificationAsRead above.
		assert.NotEqual(t, time.Time{}, noti.Notified)
		assert.Equal(t, time.Time{}, noti.Created)
		assert.Equal(t, time.Time{}, noti.Deleted)
	}

	// Once deleted, the notification is never returned again.
	assert.Nil(t, DeleteNotification(tx, "test"))
	// can't find in any time
	noti, ok, err = FindNewNotification(tx, time.Now().Add(-time.Duration(1000)))
	assert.Nil(t, err)
	assert.False(t, ok)

	noti, ok, err = FindNewNotification(tx, time.Now().Add(time.Duration(1000)))
	assert.Nil(t, err)
	assert.False(t, ok)
}
|
||||
|
||||
func TestMarkNotificationAsRead(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTxWithFixtures(t, "MarkNotificationAsRead")
|
||||
defer cleanup()
|
||||
|
||||
// invalid case: notification doesn't exist
|
||||
assert.NotNil(t, MarkNotificationAsRead(tx, "non-existing"))
|
||||
// valid case
|
||||
assert.Nil(t, MarkNotificationAsRead(tx, "test"))
|
||||
// valid case
|
||||
assert.Nil(t, MarkNotificationAsRead(tx, "test"))
|
||||
}
|
||||
|
||||
func TestDeleteNotification(t *testing.T) {
|
||||
tx, cleanup := testutil.CreateTestTxWithFixtures(t, "DeleteNotification")
|
||||
defer cleanup()
|
||||
|
||||
// invalid case: notification doesn't exist
|
||||
assert.NotNil(t, DeleteNotification(tx, "non-existing"))
|
||||
// valid case
|
||||
assert.Nil(t, DeleteNotification(tx, "test"))
|
||||
// invalid case: notification is already deleted
|
||||
assert.NotNil(t, DeleteNotification(tx, "test"))
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue