*: refactor & do initial work towards PostgreSQL implementation

pull/71/head
Quentin Machu authored 9 years ago · committed by Jimmy Zelinskie
parent 1a0f4a0f75
commit 2c150b015e

Godeps/Godeps.json (generated, 92 deletions)

@@ -1,92 +0,0 @@
{
"ImportPath": "github.com/coreos/clair",
"GoVersion": "go1.5.1",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/alecthomas/template",
"Rev": "b867cc6ab45cece8143cfcc6fc9c77cf3f2c23c0"
},
{
"ImportPath": "github.com/alecthomas/units",
"Rev": "6b4e7dc5e3143b85ea77909c72caf89416fc2915"
},
{
"ImportPath": "github.com/barakmich/glog",
"Rev": "fafcb6128a8a2e6360ff034091434d547397d54a"
},
{
"ImportPath": "github.com/boltdb/bolt",
"Comment": "v1.0-98-gafceb31",
"Rev": "afceb316b96ea97cbac6d23afbdf69543d80748a"
},
{
"ImportPath": "github.com/coreos/go-systemd/journal",
"Comment": "v3-15-gcfa48f3",
"Rev": "cfa48f34d8dc4ff58f9b48725181a09f9092dc3c"
},
{
"ImportPath": "github.com/coreos/pkg/capnslog",
"Rev": "42a8c3b1a6f917bb8346ef738f32712a7ca0ede7"
},
{
"ImportPath": "github.com/coreos/pkg/timeutil",
"Rev": "42a8c3b1a6f917bb8346ef738f32712a7ca0ede7"
},
{
"ImportPath": "github.com/gogo/protobuf/proto",
"Rev": "58bbd41c1a2d1b7154f5d99a8d0d839b3093301a"
},
{
"ImportPath": "github.com/google/cayley",
"Rev": "582c4e1ca46943f2cf09c73bd12a83a6959057c9"
},
{
"ImportPath": "github.com/julienschmidt/httprouter",
"Comment": "v1.1",
"Rev": "8c199fb6259ffc1af525cc3ad52ee60ba8359669"
},
{
"ImportPath": "github.com/lib/pq",
"Comment": "go1.0-cutoff-56-gdc50b6a",
"Rev": "dc50b6ad2d3ee836442cf3389009c7cd1e64bb43"
},
{
"ImportPath": "github.com/pborman/uuid",
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
},
{
"ImportPath": "github.com/stretchr/testify/assert",
"Comment": "v1.0-17-g089c718",
"Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "315fcfb05d4d46d4354b313d146ef688dda272a9"
},
{
"ImportPath": "github.com/syndtr/gosnappy/snappy",
"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
},
{
"ImportPath": "github.com/tylerb/graceful",
"Comment": "v1.2.3",
"Rev": "48afeb21e2fcbcff0f30bd5ad6b97747b0fae38e"
},
{
"ImportPath": "golang.org/x/net/netutil",
"Rev": "7654728e381988afd88e58cabfd6363a5ea91810"
},
{
"ImportPath": "gopkg.in/mgo.v2",
"Comment": "r2015.05.29",
"Rev": "01ee097136da162d1dd3c9b44fbdf3abf4fd6552"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "f7716cbe52baa25d2e9b0d0da546fcf909fc16b4"
}
]
}

Godeps/Readme (generated, 5 deletions)

@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

@@ -26,17 +26,36 @@ import (
"time"
"github.com/coreos/pkg/capnslog"
"github.com/julienschmidt/httprouter"
"github.com/tylerb/graceful"
"github.com/coreos/clair/config"
"github.com/coreos/clair/database"
"github.com/coreos/clair/utils"
)
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")
// Env stores the environment used by the API.
type Env struct {
Datastore database.Datastore
}
// Handle adds a fourth parameter to httprouter.Handle: a pointer to *Env,
// allowing us to pass our environment to the handler.
type Handle func(http.ResponseWriter, *http.Request, httprouter.Params, *Env)
// WrapHandle encloses a Handle into a httprouter.Handle to make it usable by
// httprouter.
func WrapHandle(fn Handle, e *Env) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
fn(w, r, p, e)
}
}
// Run launches the main API, which exposes every possible interaction
// with clair.
func Run(config *config.APIConfig, st *utils.Stopper) {
func Run(config *config.APIConfig, env *Env, st *utils.Stopper) {
defer st.End()
// Do not run the API service if there is no config.
@@ -60,7 +79,7 @@ func Run(config *config.APIConfig, st *utils.Stopper) {
Server: &http.Server{
Addr: ":" + strconv.Itoa(config.Port),
TLSConfig: tlsConfig,
Handler: NewVersionRouter(config.Timeout),
Handler: NewVersionRouter(config.Timeout, env),
},
}
listenAndServeWithStopper(srv, st, config.CertFile, config.KeyFile)
@@ -69,7 +88,7 @@ func Run(config *config.APIConfig, st *utils.Stopper) {
// RunHealth launches the Health API, which only exposes a method to fetch
// Clair's health without any security or authentication mechanism.
func RunHealth(config *config.APIConfig, st *utils.Stopper) {
func RunHealth(config *config.APIConfig, env *Env, st *utils.Stopper) {
defer st.End()
// Do not run the API service if there is no config.
@@ -84,7 +103,7 @@ func RunHealth(config *config.APIConfig, st *utils.Stopper) {
NoSignalHandling: true, // We want to use our own Stopper
Server: &http.Server{
Addr: ":" + strconv.Itoa(config.HealthPort),
Handler: NewHealthRouter(),
Handler: NewHealthRouter(env),
},
}
listenAndServeWithStopper(srv, st, "", "")
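
For orientation, here is a minimal sketch (not part of this commit) of how the refactored API is meant to be wired from a caller: the datastore is no longer a package-level global but travels inside *api.Env. The api.Env, api.Run, api.RunHealth, database.Datastore and utils.Stopper names come from the diff; the helper itself is hypothetical.

package main

import (
    "github.com/coreos/clair/api"
    "github.com/coreos/clair/config"
    "github.com/coreos/clair/database"
    "github.com/coreos/clair/utils"
)

// startAPI is a hypothetical helper: it shows the Env carrying the Datastore
// into both the main API and the health API, mirroring what Boot does in this commit.
func startAPI(cfg *config.APIConfig, ds database.Datastore) *utils.Stopper {
    st := utils.NewStopper()
    env := &api.Env{Datastore: ds}

    st.Begin()
    go api.Run(cfg, env, st)

    st.Begin()
    go api.RunHealth(cfg, env, st)

    return st // the caller coordinates shutdown through the Stopper
}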

@@ -0,0 +1,109 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"net/http"
"strconv"
"github.com/julienschmidt/httprouter"
"github.com/coreos/clair/database"
"github.com/coreos/clair/health"
httputils "github.com/coreos/clair/utils/http"
"github.com/coreos/clair/worker"
)
// Version is an integer representing the API version.
const Version = 1
// POSTLayersParameters represents the expected parameters for POSTLayers.
type POSTLayersParameters struct {
Name, Path, ParentName string
}
// GETVersions returns API and Engine versions.
func GETVersions(w http.ResponseWriter, r *http.Request, _ httprouter.Params, _ *Env) {
httputils.WriteHTTP(w, http.StatusOK, struct {
APIVersion string
EngineVersion string
}{
APIVersion: strconv.Itoa(Version),
EngineVersion: strconv.Itoa(worker.Version),
})
}
// GETHealth sums up the health of all the registered services.
func GETHealth(w http.ResponseWriter, r *http.Request, _ httprouter.Params, _ *Env) {
globalHealth, statuses := health.Healthcheck()
httpStatus := http.StatusOK
if !globalHealth {
httpStatus = http.StatusServiceUnavailable
}
httputils.WriteHTTP(w, httpStatus, statuses)
return
}
// POSTLayers analyzes a layer and returns the engine version that has been used
// for the analysis.
func POSTLayers(w http.ResponseWriter, r *http.Request, _ httprouter.Params, e *Env) {
var parameters POSTLayersParameters
if s, err := httputils.ParseHTTPBody(r, &parameters); err != nil {
httputils.WriteHTTPError(w, s, err)
return
}
// Process data.
if err := worker.Process(e.Datastore, parameters.Name, parameters.ParentName, parameters.Path); err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Get engine version and return.
httputils.WriteHTTP(w, http.StatusCreated, struct{ Version string }{Version: strconv.Itoa(worker.Version)})
}
// DELETELayers deletes the specified layer and any child layers that are
// dependent on the specified layer.
func DELETELayers(w http.ResponseWriter, r *http.Request, p httprouter.Params, e *Env) {
if err := e.Datastore.DeleteLayer(p.ByName("id")); err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
httputils.WriteHTTP(w, http.StatusNoContent, nil)
}
// GETLayers returns information about an existing layer, optionally with its features
// and vulnerabilities.
func GETLayers(w http.ResponseWriter, r *http.Request, p httprouter.Params, e *Env) {
withFeatures := false
withVulnerabilities := false
if r.URL.Query().Get("withFeatures") == "true" {
withFeatures = true
}
if r.URL.Query().Get("withVulnerabilities") == "true" {
withFeatures = true
withVulnerabilities = true
}
layer, err := e.Datastore.FindLayer(p.ByName("id"), withFeatures, withVulnerabilities)
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
httputils.WriteHTTP(w, http.StatusOK, struct{ Layer database.Layer }{Layer: layer})
}
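
The old per-resource layer endpoints collapse into three routes: POST, GET with query flags, and DELETE. Below is a hedged client-side sketch, assuming the request body is JSON as the ParseHTTPBody call and exported struct fields suggest; the helper and its URL handling are illustrative only.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

// analyzeLayer is a hypothetical client helper: it pushes a layer through
// POST /v1/layers and then reads it back with features and vulnerabilities expanded.
func analyzeLayer(apiURL, name, path, parentName string) error {
    body, err := json.Marshal(map[string]string{
        "Name":       name,
        "Path":       path,
        "ParentName": parentName,
    })
    if err != nil {
        return err
    }

    resp, err := http.Post(apiURL+"/v1/layers", "application/json", bytes.NewReader(body))
    if err != nil {
        return err
    }
    resp.Body.Close()
    if resp.StatusCode != http.StatusCreated {
        return fmt.Errorf("unexpected status: %s", resp.Status)
    }

    resp, err = http.Get(apiURL + "/v1/layers/" + name + "?withFeatures=true&withVulnerabilities=true")
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    fmt.Println("layer fetched:", resp.Status)
    return nil
}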

@@ -13,18 +13,15 @@
// limitations under the License.
// Package wrappers contains httprouter.Handle wrappers that are used in the API.
package wrappers
package api
import (
"net/http"
"time"
"github.com/coreos/pkg/capnslog"
"github.com/julienschmidt/httprouter"
)
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")
type logWriter struct {
http.ResponseWriter
status int
@@ -61,8 +58,8 @@ func (lw *logWriter) Status() int {
return lw.status
}
// Log wraps a http.HandlerFunc and logs the API call
func Log(fn httprouter.Handle) httprouter.Handle {
// Logger wraps a Handler and logs the API call
func Logger(fn httprouter.Handle) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
lw := &logWriter{ResponseWriter: w}
start := time.Now()
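
The body of Logger is truncated above. Purely as an illustration of the pattern (the names and exact log line are assumptions, not the commit's code), a status-capturing wrapper of this shape typically looks like:

package middleware

import (
    "log"
    "net/http"
    "time"

    "github.com/julienschmidt/httprouter"
)

// statusWriter records the status code written by the wrapped handler.
type statusWriter struct {
    http.ResponseWriter
    status int
}

func (w *statusWriter) WriteHeader(code int) {
    w.status = code
    w.ResponseWriter.WriteHeader(code)
}

// LogRequests is a hypothetical stand-in for Logger: it wraps a
// httprouter.Handle and logs method, path, status and latency.
func LogRequests(fn httprouter.Handle) httprouter.Handle {
    return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
        sw := &statusWriter{ResponseWriter: w, status: http.StatusOK}
        start := time.Now()
        fn(sw, r, p)
        log.Printf("%s %s %d %s", r.Method, r.URL.Path, sw.status, time.Since(start))
    }
}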

@@ -1,55 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package logic implements all the available API methods.
// Every methods are documented in docs/API.md.
package logic
import (
"net/http"
"strconv"
"github.com/julienschmidt/httprouter"
"github.com/coreos/clair/health"
httputils "github.com/coreos/clair/utils/http"
"github.com/coreos/clair/worker"
)
// Version is an integer representing the API version.
const Version = 1
// GETVersions returns API and Engine versions.
func GETVersions(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
httputils.WriteHTTP(w, http.StatusOK, struct {
APIVersion string
EngineVersion string
}{
APIVersion: strconv.Itoa(Version),
EngineVersion: strconv.Itoa(worker.Version),
})
}
// GETHealth sums up the health of all the registered services.
func GETHealth(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
globalHealth, statuses := health.Healthcheck()
httpStatus := http.StatusOK
if !globalHealth {
httpStatus = http.StatusServiceUnavailable
}
httputils.WriteHTTP(w, httpStatus, statuses)
return
}

@@ -1,378 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logic
import (
"errors"
"net/http"
"strconv"
"github.com/julienschmidt/httprouter"
"github.com/coreos/clair/database"
cerrors "github.com/coreos/clair/utils/errors"
httputils "github.com/coreos/clair/utils/http"
"github.com/coreos/clair/utils/types"
"github.com/coreos/clair/worker"
)
// POSTLayersParameters represents the expected parameters for POSTLayers.
type POSTLayersParameters struct {
ID, Path, ParentID, ImageFormat string
}
// POSTLayers analyzes a layer and returns the engine version that has been used
// for the analysis.
func POSTLayers(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
var parameters POSTLayersParameters
if s, err := httputils.ParseHTTPBody(r, &parameters); err != nil {
httputils.WriteHTTPError(w, s, err)
return
}
// Process data.
if err := worker.Process(parameters.ID, parameters.ParentID, parameters.Path, parameters.ImageFormat); err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Get engine version and return.
httputils.WriteHTTP(w, http.StatusCreated, struct{ Version string }{Version: strconv.Itoa(worker.Version)})
}
// DELETELayers deletes the specified layer and any child layers that are
// dependent on the specified layer.
func DELETELayers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
err := database.DeleteLayer(p.ByName("id"))
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
httputils.WriteHTTP(w, http.StatusNoContent, nil)
}
// GETLayersOS returns the operating system of a layer if it exists.
// It uses not only the specified layer but also its parent layers if necessary.
// An empty OS string is returned if no OS has been detected.
func GETLayersOS(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// Find layer.
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent, database.FieldLayerOS})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Get OS.
os, err := layer.OperatingSystem()
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
httputils.WriteHTTP(w, http.StatusOK, struct{ OS string }{OS: os})
}
// GETLayersParent returns the parent ID of a layer if it exists.
// An empty ID string is returned if the layer has no parent.
func GETLayersParent(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// Find layer
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Get layer's parent.
parent, err := layer.Parent([]string{database.FieldLayerID})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
ID := ""
if parent != nil {
ID = parent.ID
}
httputils.WriteHTTP(w, http.StatusOK, struct{ ID string }{ID: ID})
}
// GETLayersPackages returns the complete list of packages that a layer has
// if it exists.
func GETLayersPackages(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// Find layer
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent, database.FieldLayerPackages})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Find layer's packages.
packagesNodes, err := layer.AllPackages()
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
packages := []*database.Package{}
if len(packagesNodes) > 0 {
packages, err = database.FindAllPackagesByNodes(packagesNodes, []string{database.FieldPackageOS, database.FieldPackageName, database.FieldPackageVersion})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
}
httputils.WriteHTTP(w, http.StatusOK, struct{ Packages []*database.Package }{Packages: packages})
}
// GETLayersPackagesDiff returns the list of packages that a layer installs and
// removes if it exists.
func GETLayersPackagesDiff(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// Find layer.
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerPackages})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Find layer's packages.
installedPackages, removedPackages := make([]*database.Package, 0), make([]*database.Package, 0)
if len(layer.InstalledPackagesNodes) > 0 {
installedPackages, err = database.FindAllPackagesByNodes(layer.InstalledPackagesNodes, []string{database.FieldPackageOS, database.FieldPackageName, database.FieldPackageVersion})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
}
if len(layer.RemovedPackagesNodes) > 0 {
removedPackages, err = database.FindAllPackagesByNodes(layer.RemovedPackagesNodes, []string{database.FieldPackageOS, database.FieldPackageName, database.FieldPackageVersion})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
}
httputils.WriteHTTP(w, http.StatusOK, struct{ InstalledPackages, RemovedPackages []*database.Package }{InstalledPackages: installedPackages, RemovedPackages: removedPackages})
}
// GETLayersVulnerabilities returns the complete list of vulnerabilities that
// a layer has if it exists.
func GETLayersVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// Get minumum priority parameter.
minimumPriority := types.Priority(r.URL.Query().Get("minimumPriority"))
if minimumPriority == "" {
minimumPriority = "High" // Set default priority to High
} else if !minimumPriority.IsValid() {
httputils.WriteHTTPError(w, 0, cerrors.NewBadRequestError("invalid priority"))
return
}
// Find layer
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent, database.FieldLayerPackages})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Find layer's packages.
packagesNodes, err := layer.AllPackages()
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Find vulnerabilities.
vulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(packagesNodes, minimumPriority, []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription, database.FieldVulnerabilityCausedByPackage})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
httputils.WriteHTTP(w, http.StatusOK, struct{ Vulnerabilities []*database.Vulnerability }{Vulnerabilities: vulnerabilities})
}
// GETLayersVulnerabilitiesDiff returns the list of vulnerabilities that a layer
// adds and removes if it exists.
func GETLayersVulnerabilitiesDiff(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// Get minumum priority parameter.
minimumPriority := types.Priority(r.URL.Query().Get("minimumPriority"))
if minimumPriority == "" {
minimumPriority = "High" // Set default priority to High
} else if !minimumPriority.IsValid() {
httputils.WriteHTTPError(w, 0, cerrors.NewBadRequestError("invalid priority"))
return
}
// Find layer.
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerPackages})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Selected fields for vulnerabilities.
selectedFields := []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription, database.FieldVulnerabilityCausedByPackage}
// Find vulnerabilities for installed packages.
addedVulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(layer.InstalledPackagesNodes, minimumPriority, selectedFields)
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Find vulnerabilities for removed packages.
removedVulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(layer.RemovedPackagesNodes, minimumPriority, selectedFields)
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Remove vulnerabilities which appears both in added and removed lists (eg. case of updated packages but still vulnerable).
for ia, a := range addedVulnerabilities {
for ir, r := range removedVulnerabilities {
if a.ID == r.ID {
addedVulnerabilities = append(addedVulnerabilities[:ia], addedVulnerabilities[ia+1:]...)
removedVulnerabilities = append(removedVulnerabilities[:ir], removedVulnerabilities[ir+1:]...)
}
}
}
httputils.WriteHTTP(w, http.StatusOK, struct{ Adds, Removes []*database.Vulnerability }{Adds: addedVulnerabilities, Removes: removedVulnerabilities})
}
// POSTBatchLayersVulnerabilitiesParameters represents the expected parameters
// for POSTBatchLayersVulnerabilities.
type POSTBatchLayersVulnerabilitiesParameters struct {
LayersIDs []string
}
// POSTBatchLayersVulnerabilities returns the complete list of vulnerabilities
// that the provided layers have, if they all exist.
func POSTBatchLayersVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// Parse body
var parameters POSTBatchLayersVulnerabilitiesParameters
if s, err := httputils.ParseHTTPBody(r, &parameters); err != nil {
httputils.WriteHTTPError(w, s, err)
return
}
if len(parameters.LayersIDs) == 0 {
httputils.WriteHTTPError(w, http.StatusBadRequest, errors.New("at least one LayerID query parameter must be provided"))
return
}
// Get minumum priority parameter.
minimumPriority := types.Priority(r.URL.Query().Get("minimumPriority"))
if minimumPriority == "" {
minimumPriority = "High" // Set default priority to High
} else if !minimumPriority.IsValid() {
httputils.WriteHTTPError(w, 0, cerrors.NewBadRequestError("invalid priority"))
return
}
response := make(map[string]interface{})
// For each LayerID parameter
for _, layerID := range parameters.LayersIDs {
// Find layer
layer, err := database.FindOneLayerByID(layerID, []string{database.FieldLayerParent, database.FieldLayerPackages})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Find layer's packages.
packagesNodes, err := layer.AllPackages()
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Find vulnerabilities.
vulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(packagesNodes, minimumPriority, []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription, database.FieldVulnerabilityCausedByPackage})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
response[layerID] = struct{ Vulnerabilities []*database.Vulnerability }{Vulnerabilities: vulnerabilities}
}
httputils.WriteHTTP(w, http.StatusOK, response)
}
// getSuccessorsFromPackagesNodes returns the node list of packages that have
// versions following the versions of the provided packages.
func getSuccessorsFromPackagesNodes(packagesNodes []string) ([]string, error) {
if len(packagesNodes) == 0 {
return []string{}, nil
}
// Get packages.
packages, err := database.FindAllPackagesByNodes(packagesNodes, []string{database.FieldPackageNextVersion})
if err != nil {
return []string{}, err
}
// Find all packages' successors.
var packagesNextVersions []string
for _, pkg := range packages {
nextVersions, err := pkg.NextVersions([]string{})
if err != nil {
return []string{}, err
}
for _, version := range nextVersions {
packagesNextVersions = append(packagesNextVersions, version.Node)
}
}
return packagesNextVersions, nil
}
// getVulnerabilitiesFromLayerPackagesNodes returns the list of vulnerabilities
// affecting the provided package nodes, filtered by Priority.
func getVulnerabilitiesFromLayerPackagesNodes(packagesNodes []string, minimumPriority types.Priority, selectedFields []string) ([]*database.Vulnerability, error) {
if len(packagesNodes) == 0 {
return []*database.Vulnerability{}, nil
}
// Get successors of the packages.
packagesNextVersions, err := getSuccessorsFromPackagesNodes(packagesNodes)
if err != nil {
return []*database.Vulnerability{}, err
}
if len(packagesNextVersions) == 0 {
return []*database.Vulnerability{}, nil
}
// Find vulnerabilities fixed in these successors.
vulnerabilities, err := database.FindAllVulnerabilitiesByFixedIn(packagesNextVersions, selectedFields)
if err != nil {
return []*database.Vulnerability{}, err
}
// Filter vulnerabilities depending on their priority and remove duplicates.
filteredVulnerabilities := []*database.Vulnerability{}
seen := map[string]struct{}{}
for _, v := range vulnerabilities {
if minimumPriority.Compare(v.Priority) <= 0 {
if _, alreadySeen := seen[v.ID]; !alreadySeen {
filteredVulnerabilities = append(filteredVulnerabilities, v)
seen[v.ID] = struct{}{}
}
}
}
return filteredVulnerabilities, nil
}

@@ -1,248 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logic
import (
"errors"
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/coreos/clair/database"
cerrors "github.com/coreos/clair/utils/errors"
httputils "github.com/coreos/clair/utils/http"
)
// GETVulnerabilities returns a vulnerability identified by an ID if it exists.
func GETVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// Find vulnerability.
vulnerability, err := database.FindOneVulnerability(p.ByName("id"), []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription, database.FieldVulnerabilityFixedIn})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
httputils.WriteHTTP(w, http.StatusOK, abstractVulnerability)
}
// POSTVulnerabilities manually inserts a vulnerability into the database if it
// does not exist yet.
func POSTVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
var parameters *database.AbstractVulnerability
if s, err := httputils.ParseHTTPBody(r, &parameters); err != nil {
httputils.WriteHTTPError(w, s, err)
return
}
// Ensure that the vulnerability does not exist.
vulnerability, err := database.FindOneVulnerability(parameters.ID, []string{})
if err != nil && err != cerrors.ErrNotFound {
httputils.WriteHTTPError(w, 0, err)
return
}
if vulnerability != nil {
httputils.WriteHTTPError(w, 0, cerrors.NewBadRequestError("vulnerability already exists"))
return
}
// Insert packages.
packages := database.AbstractPackagesToPackages(parameters.AffectedPackages)
err = database.InsertPackages(packages)
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
var pkgNodes []string
for _, p := range packages {
pkgNodes = append(pkgNodes, p.Node)
}
// Insert vulnerability.
notifications, err := database.InsertVulnerabilities([]*database.Vulnerability{parameters.ToVulnerability(pkgNodes)})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Insert notifications.
err = database.InsertNotifications(notifications, database.GetDefaultNotificationWrapper())
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
httputils.WriteHTTP(w, http.StatusCreated, nil)
}
// PUTVulnerabilities updates a vulnerability if it exists.
func PUTVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
var parameters *database.AbstractVulnerability
if s, err := httputils.ParseHTTPBody(r, &parameters); err != nil {
httputils.WriteHTTPError(w, s, err)
return
}
parameters.ID = p.ByName("id")
// Ensure that the vulnerability exists.
_, err := database.FindOneVulnerability(parameters.ID, []string{})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Insert packages.
packages := database.AbstractPackagesToPackages(parameters.AffectedPackages)
err = database.InsertPackages(packages)
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
var pkgNodes []string
for _, p := range packages {
pkgNodes = append(pkgNodes, p.Node)
}
// Insert vulnerability.
notifications, err := database.InsertVulnerabilities([]*database.Vulnerability{parameters.ToVulnerability(pkgNodes)})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Insert notifications.
err = database.InsertNotifications(notifications, database.GetDefaultNotificationWrapper())
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
httputils.WriteHTTP(w, http.StatusCreated, nil)
}
// DELVulnerabilities deletes a vulnerability if it exists.
func DELVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
err := database.DeleteVulnerability(p.ByName("id"))
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
httputils.WriteHTTP(w, http.StatusNoContent, nil)
}
// GETVulnerabilitiesIntroducingLayers returns the list of layers that
// introduces a given vulnerability, if it exists.
// To clarify, it does not return the list of every layers that have
// the vulnerability.
func GETVulnerabilitiesIntroducingLayers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// Find vulnerability to verify that it exists.
_, err := database.FindOneVulnerability(p.ByName("id"), []string{})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
layers, err := database.FindAllLayersIntroducingVulnerability(p.ByName("id"), []string{database.FieldLayerID})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
layersIDs := []string{}
for _, l := range layers {
layersIDs = append(layersIDs, l.ID)
}
httputils.WriteHTTP(w, http.StatusOK, struct{ IntroducingLayersIDs []string }{IntroducingLayersIDs: layersIDs})
}
// POSTVulnerabilitiesAffectedLayersParameters represents the expected
// parameters for POSTVulnerabilitiesAffectedLayers.
type POSTVulnerabilitiesAffectedLayersParameters struct {
LayersIDs []string
}
// POSTVulnerabilitiesAffectedLayers returns whether the specified layers
// (by their IDs) are vulnerable to the given Vulnerability or not.
func POSTVulnerabilitiesAffectedLayers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// Parse body.
var parameters POSTBatchLayersVulnerabilitiesParameters
if s, err := httputils.ParseHTTPBody(r, &parameters); err != nil {
httputils.WriteHTTPError(w, s, err)
return
}
if len(parameters.LayersIDs) == 0 {
httputils.WriteHTTPError(w, http.StatusBadRequest, errors.New("getting the entire list of affected layers is not supported yet: at least one LayerID query parameter must be provided"))
return
}
// Find vulnerability.
vulnerability, err := database.FindOneVulnerability(p.ByName("id"), []string{database.FieldVulnerabilityFixedIn})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Save the fixed in nodes into a map for fast check.
fixedInPackagesMap := make(map[string]struct{})
for _, fixedInNode := range vulnerability.FixedInNodes {
fixedInPackagesMap[fixedInNode] = struct{}{}
}
response := make(map[string]interface{})
// For each LayerID parameter.
for _, layerID := range parameters.LayersIDs {
// Find layer
layer, err := database.FindOneLayerByID(layerID, []string{database.FieldLayerParent, database.FieldLayerPackages, database.FieldLayerPackages})
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Find layer's packages.
packagesNodes, err := layer.AllPackages()
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Get successors packages of layer' packages.
successors, err := getSuccessorsFromPackagesNodes(packagesNodes)
if err != nil {
httputils.WriteHTTPError(w, 0, err)
return
}
// Determine if the layer is vulnerable by verifying if one of the successors
// of its packages are fixed by the vulnerability.
vulnerable := false
for _, p := range successors {
if _, fixed := fixedInPackagesMap[p]; fixed {
vulnerable = true
break
}
}
response[layerID] = struct{ Vulnerable bool }{Vulnerable: vulnerable}
}
httputils.WriteHTTP(w, http.StatusOK, response)
}

@@ -19,8 +19,6 @@ import (
"strings"
"time"
"github.com/coreos/clair/api/logic"
"github.com/coreos/clair/api/wrappers"
"github.com/julienschmidt/httprouter"
)
@@ -30,9 +28,9 @@ type VersionRouter map[string]*httprouter.Router
// NewVersionRouter instantiates a VersionRouter and every sub-router that is
// necessary to handle supported API versions.
func NewVersionRouter(to time.Duration) *VersionRouter {
func NewVersionRouter(to time.Duration, env *Env) *VersionRouter {
return &VersionRouter{
"/v1": NewRouterV1(to),
"/v1": NewRouterV1(to, env),
}
}
@@ -56,42 +54,38 @@ func (vs VersionRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// NewRouterV1 creates a new router for the API (Version 1)
func NewRouterV1(to time.Duration) *httprouter.Router {
func NewRouterV1(to time.Duration, env *Env) *httprouter.Router {
router := httprouter.New()
wrap := func(fn httprouter.Handle) httprouter.Handle {
return wrappers.Log(wrappers.TimeOut(to, fn))
// Create a wrapper that turns a Handle into a httprouter.Handle and adds
// logging and time-out capabilities.
wrap := func(fn Handle, e *Env) httprouter.Handle {
return Logger(TimeOut(to, WrapHandle(fn, e)))
}
// General
router.GET("/versions", wrap(logic.GETVersions))
router.GET("/health", wrap(logic.GETHealth))
router.GET("/versions", wrap(GETVersions, env))
router.GET("/health", wrap(GETHealth, env))
// Layers
router.POST("/layers", wrap(logic.POSTLayers))
router.DELETE("/layers/:id", wrap(logic.DELETELayers))
router.GET("/layers/:id/os", wrap(logic.GETLayersOS))
router.GET("/layers/:id/parent", wrap(logic.GETLayersParent))
router.GET("/layers/:id/packages", wrap(logic.GETLayersPackages))
router.GET("/layers/:id/packages/diff", wrap(logic.GETLayersPackagesDiff))
router.GET("/layers/:id/vulnerabilities", wrap(logic.GETLayersVulnerabilities))
router.GET("/layers/:id/vulnerabilities/diff", wrap(logic.GETLayersVulnerabilitiesDiff))
// # Batch version of "/layers/:id/vulnerabilities"
router.POST("/batch/layers/vulnerabilities", wrap(logic.POSTBatchLayersVulnerabilities))
router.POST("/layers", wrap(POSTLayers, env))
router.DELETE("/layers/:id", wrap(DELETELayers, env))
router.GET("/layers/:id", wrap(GETLayers, env))
// Vulnerabilities
router.POST("/vulnerabilities", wrap(logic.POSTVulnerabilities))
router.PUT("/vulnerabilities/:id", wrap(logic.PUTVulnerabilities))
router.GET("/vulnerabilities/:id", wrap(logic.GETVulnerabilities))
router.DELETE("/vulnerabilities/:id", wrap(logic.DELVulnerabilities))
router.GET("/vulnerabilities/:id/introducing-layers", wrap(logic.GETVulnerabilitiesIntroducingLayers))
router.POST("/vulnerabilities/:id/affected-layers", wrap(logic.POSTVulnerabilitiesAffectedLayers))
// router.POST("/vulnerabilities", wrap(logic.POSTVulnerabilities))
// router.PUT("/vulnerabilities/:id", wrap(logic.PUTVulnerabilities))
// router.GET("/vulnerabilities/:id", wrap(logic.GETVulnerabilities))
// router.DELETE("/vulnerabilities/:id", wrap(logic.DELVulnerabilities))
// router.GET("/vulnerabilities/:id/introducing-layers", wrap(logic.GETVulnerabilitiesIntroducingLayers))
// router.POST("/vulnerabilities/:id/affected-layers", wrap(logic.POSTVulnerabilitiesAffectedLayers))
return router
}
// NewHealthRouter creates a new router that only serves the Health function on /
func NewHealthRouter() *httprouter.Router {
func NewHealthRouter(env *Env) *httprouter.Router {
router := httprouter.New()
router.GET("/", logic.GETHealth)
router.GET("/", WrapHandle(GETHealth, env))
return router
}
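
A hedged sketch of exercising the rewired router from a test, assuming the version-prefix dispatch performed by VersionRouter.ServeHTTP; only NewVersionRouter's new signature, the Env type and the /versions route come from the diff, the rest is illustrative. The Datastore can stay nil here because GETVersions never touches it.

package api_test

import (
    "net/http"
    "net/http/httptest"
    "testing"
    "time"

    "github.com/coreos/clair/api"
)

func TestVersionsRoute(t *testing.T) {
    // GETVersions does not use the datastore, so an empty Env is enough here.
    env := &api.Env{}

    srv := httptest.NewServer(api.NewVersionRouter(5*time.Second, env))
    defer srv.Close()

    resp, err := http.Get(srv.URL + "/v1/versions")
    if err != nil {
        t.Fatal(err)
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        t.Fatalf("unexpected status: %s", resp.Status)
    }
}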

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package wrappers
package api
import (
"errors"
@@ -61,7 +61,7 @@ func (tw *timeoutWriter) WriteHeader(status int) {
tw.ResponseWriter.WriteHeader(status)
}
// TimeOut wraps a http.HandlerFunc and ensure that a response is given under
// TimeOut wraps a Handler and ensures that a response is given under
// the specified duration.
//
// If the handler takes longer than the time limit, the wrapper responds with

@@ -24,9 +24,7 @@ import (
"github.com/coreos/clair/api"
"github.com/coreos/clair/config"
"github.com/coreos/clair/database"
"github.com/coreos/clair/notifier"
"github.com/coreos/clair/updater"
"github.com/coreos/clair/database/pgsql"
"github.com/coreos/clair/utils"
"github.com/coreos/pkg/capnslog"
)
@@ -40,25 +38,25 @@ func Boot(config *config.Config) {
st := utils.NewStopper()
// Open database
err := database.Open(config.Database)
db, err := pgsql.Open(config.Database)
if err != nil {
log.Fatal(err)
}
defer database.Close()
defer db.Close()
// Start notifier
st.Begin()
go notifier.Run(config.Notifier, st)
// st.Begin()
// go notifier.Run(config.Notifier, st)
// Start API
st.Begin()
go api.Run(config.API, st)
go api.Run(config.API, &api.Env{Datastore: db}, st)
st.Begin()
go api.RunHealth(config.API, st)
go api.RunHealth(config.API, &api.Env{Datastore: db}, st)
// Start updater
st.Begin()
go updater.Run(config.Updater, st)
// st.Begin()
// go updater.Run(config.Updater, st)
// Wait for interruption and shutdown gracefully.
waitForSignals(os.Interrupt)

@@ -26,11 +26,12 @@ import (
"github.com/coreos/pkg/capnslog"
// Register components
_ "github.com/coreos/clair/notifier/notifiers"
_ "github.com/coreos/clair/updater/fetchers"
_ "github.com/coreos/clair/worker/detectors/data"
_ "github.com/coreos/clair/worker/detectors/os"
_ "github.com/coreos/clair/worker/detectors/packages"
_ "github.com/coreos/clair/worker/detectors/feature/dpkg"
_ "github.com/coreos/clair/worker/detectors/feature/rpm"
_ "github.com/coreos/clair/worker/detectors/namespace/aptsources"
_ "github.com/coreos/clair/worker/detectors/namespace/lsbrelease"
_ "github.com/coreos/clair/worker/detectors/namespace/osrelease"
_ "github.com/coreos/clair/worker/detectors/namespace/redhatrelease"
)
var log = capnslog.NewPackageLogger("github.com/coreos/clair/cmd/clair", "main")

@@ -33,8 +33,8 @@ type Config struct {
// DatabaseConfig is the configuration used to specify how Clair connects
// to a database.
type DatabaseConfig struct {
Type string
Path string
Source string
CacheSize int
}
// UpdaterConfig is the configuration for the Updater service.
@@ -59,7 +59,7 @@ type APIConfig struct {
// DefaultConfig is a configuration that can be used as a fallback value.
var DefaultConfig = Config{
Database: &DatabaseConfig{
Type: "memstore",
CacheSize: 16384,
},
Updater: &UpdaterConfig{
Interval: 1 * time.Hour,
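
DatabaseConfig drops the Cayley-era Type/Path pair in favour of a single Source string plus a CacheSize. A hedged sketch of filling it in and opening the new backend; the pgsql.Open call and the Close method come from the Boot diff above, while the DSN value is purely illustrative.

package main

import (
    "log"

    "github.com/coreos/clair/config"
    "github.com/coreos/clair/database/pgsql"
)

func openDatastore() {
    dbConfig := &config.DatabaseConfig{
        // Illustrative lib/pq connection string; not taken from the commit.
        Source:    "postgresql://clair:password@localhost:5432/clair?sslmode=disable",
        CacheSize: 16384,
    }

    db, err := pgsql.Open(dbConfig)
    if err != nil {
        log.Fatal(err)
    }
    // db satisfies database.Datastore and can be handed to api.Env, as Boot does.
    defer db.Close()
}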

@@ -1,50 +1,8 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package database implements every database models and the functions that
// manipulate them.
package database
import (
"errors"
"os"
"github.com/barakmich/glog"
"github.com/coreos/clair/config"
"github.com/coreos/clair/health"
"github.com/coreos/clair/utils"
"github.com/coreos/pkg/capnslog"
"github.com/google/cayley"
"github.com/google/cayley/graph"
"github.com/google/cayley/graph/path"
// Load all supported backends.
_ "github.com/google/cayley/graph/bolt"
_ "github.com/google/cayley/graph/leveldb"
_ "github.com/google/cayley/graph/memstore"
_ "github.com/google/cayley/graph/mongo"
_ "github.com/google/cayley/graph/sql"
)
const (
// fieldIs is the graph predicate defining the type of an entity.
fieldIs = "is"
)
import "errors"
var (
log = capnslog.NewPackageLogger("github.com/coreos/clair", "database")
// ErrTransaction is an error that occurs when a database transaction fails.
ErrTransaction = errors.New("database: transaction failed (concurrent modification?)")
// ErrBackendException is an error that occurs when the database backend does
@@ -55,141 +13,32 @@ var (
ErrInconsistent = errors.New("database: inconsistent database")
// ErrCantOpen is an error that occurs when the database could not be opened
ErrCantOpen = errors.New("database: could not open database")
store *cayley.Handle
)
func init() {
health.RegisterHealthchecker("database", Healthcheck)
}
// Open opens a Cayley database, creating it if necessary and return its handle
func Open(config *config.DatabaseConfig) error {
if store != nil {
log.Errorf("could not open database at %s : a database is already opened", config.Path)
return ErrCantOpen
}
if config.Type != "memstore" && config.Path == "" {
log.Errorf("could not open database : no path provided.")
return ErrCantOpen
}
var err error
options := make(graph.Options)
type Datastore interface {
// Layer
InsertLayer(Layer) error
FindLayer(name string, withFeatures, withVulnerabilities bool) (layer Layer, err error)
DeleteLayer(name string) error
switch config.Type {
case "bolt", "leveldb":
if _, err := os.Stat(config.Path); os.IsNotExist(err) {
log.Infof("database at %s does not exist yet, creating it", config.Path)
// Vulnerability
// InsertVulnerabilities([]*Vulnerability)
// DeleteVulnerability(id string)
err = graph.InitQuadStore(config.Type, config.Path, options)
if err != nil && err != graph.ErrDatabaseExists {
log.Errorf("could not create database at %s : %s", config.Path, err)
return ErrCantOpen
}
}
case "sql":
// Replaces the PostgreSQL's slow COUNT query with a fast estimator.
// Ref: https://wiki.postgresql.org/wiki/Count_estimate
options["use_estimates"] = true
// Notifications
// InsertNotifications([]*Notification) error
// FindNotificationToSend() (*Notification, error)
// CountNotificationsToSend() (int, error)
// MarkNotificationAsSent(id string)
err := graph.InitQuadStore(config.Type, config.Path, options)
if err != nil && err != graph.ErrDatabaseExists {
log.Errorf("could not create database at %s : %s", config.Path, err)
return ErrCantOpen
}
}
store, err = cayley.NewGraph(config.Type, config.Path, options)
if err != nil {
log.Errorf("could not open database at %s : %s", config.Path, err)
return ErrCantOpen
}
return nil
}
// Key/Value
InsertKeyValue(key, value string) error
GetKeyValue(key string) (string, error)
// Close closes a Cayley database
func Close() {
if store != nil {
store.Close()
store = nil
}
}
// Healthcheck simply adds and then remove a quad in Cayley to ensure it is working
// It returns true when everything is ok
func Healthcheck() health.Status {
var err error
if store != nil {
t := cayley.NewTransaction()
q := cayley.Triple("cayley", "is", "healthy")
t.AddQuad(q)
t.RemoveQuad(q)
glog.SetStderrThreshold("FATAL") // TODO REMOVE ME
err = store.ApplyTransaction(t)
glog.SetStderrThreshold("ERROR") // TODO REMOVE ME
}
return health.Status{IsEssential: true, IsHealthy: err == nil, Details: nil}
}
// toValue returns a single value from a path
// If the path does not lead to a value, an empty string is returned
// If the path leads to multiple values or if a database error occurs, an empty string and an error are returned
func toValue(p *path.Path) (string, error) {
var value string
found := false
it, _ := p.BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
if found {
log.Error("failed query in toValue: used on an iterator containing multiple values")
return "", ErrInconsistent
}
if it.Result() != nil {
value = store.NameOf(it.Result())
found = true
}
}
if it.Err() != nil {
log.Errorf("failed query in toValue: %s", it.Err())
return "", ErrBackendException
}
return value, nil
}
// toValues returns multiple values from a path
// If the path does not lead to any value, an empty array is returned
// If a database error occurs, an empty array and an error are returned
func toValues(p *path.Path) ([]string, error) {
var values []string
it, _ := p.BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
if it.Result() != nil {
values = append(values, store.NameOf(it.Result()))
}
}
if it.Err() != nil {
log.Errorf("failed query in toValues: %s", it.Err())
return []string{}, ErrBackendException
}
return values, nil
}
// Lock
// Lock(name string, duration time.Duration, owner string) (bool, time.Time)
// Unlock(name, owner string)
// LockInfo(name string) (string, time.Time, error)
// saveFields appends cayley's Save method to a path for each field in
// selectedFields, except the ones that appears also in exceptFields
func saveFields(p *path.Path, selectedFields []string, exceptFields []string) {
for _, selectedField := range selectedFields {
if utils.Contains(selectedField, exceptFields) {
continue
}
p = p.Save(selectedField, selectedField)
}
Close()
}
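
To make the new seam concrete, here is a hedged, minimal in-memory stub satisfying the Datastore interface exactly as it appears in this diff (the vulnerability, notification and lock methods are still commented out). It is illustrative only, and it assumes the reworked Layer model exposes a Name field, which is not shown in this commit.

package dbstub

import (
    "errors"

    "github.com/coreos/clair/database"
)

// memStub is a throwaway in-memory Datastore, handy for tests while the real
// PostgreSQL implementation is being written.
type memStub struct {
    layers    map[string]database.Layer
    keyValues map[string]string
}

// Compile-time check that memStub implements the new interface.
var _ database.Datastore = (*memStub)(nil)

func NewMemStub() *memStub {
    return &memStub{
        layers:    map[string]database.Layer{},
        keyValues: map[string]string{},
    }
}

func (m *memStub) InsertLayer(l database.Layer) error {
    // Assumes the new Layer model has a Name field (not part of this diff).
    m.layers[l.Name] = l
    return nil
}

func (m *memStub) FindLayer(name string, withFeatures, withVulnerabilities bool) (database.Layer, error) {
    l, ok := m.layers[name]
    if !ok {
        return database.Layer{}, errors.New("layer not found")
    }
    return l, nil
}

func (m *memStub) DeleteLayer(name string) error {
    delete(m.layers, name)
    return nil
}

func (m *memStub) InsertKeyValue(key, value string) error {
    m.keyValues[key] = value
    return nil
}

func (m *memStub) GetKeyValue(key string) (string, error) {
    return m.keyValues[key], nil
}

func (m *memStub) Close() {}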

@@ -1,86 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"testing"
"github.com/coreos/clair/config"
"github.com/google/cayley"
"github.com/stretchr/testify/assert"
)
func TestHealthcheck(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
b := Healthcheck()
assert.True(t, b.IsHealthy, "Healthcheck failed")
}
func TestToValue(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
// toValue()
v, err := toValue(cayley.StartPath(store, "tests").Out("are"))
assert.Nil(t, err, "toValue should work even if the requested path leads to nothing")
assert.Equal(t, "", v, "toValue should return an empty string if the requested path leads to nothing")
store.AddQuad(cayley.Triple("tests", "are", "awesome"))
v, err = toValue(cayley.StartPath(store, "tests").Out("are"))
assert.Nil(t, err, "toValue should have worked")
assert.Equal(t, "awesome", v, "toValue did not return the expected value")
store.AddQuad(cayley.Triple("tests", "are", "running"))
v, err = toValue(cayley.StartPath(store, "tests").Out("are"))
assert.NotNil(t, err, "toValue should return an error and an empty string if the path leads to multiple values")
assert.Equal(t, "", v, "toValue should return an error and an empty string if the path leads to multiple values")
// toValues()
vs, err := toValues(cayley.StartPath(store, "CoreOS").Out(fieldIs))
assert.Nil(t, err, "toValues should work even if the requested path leads to nothing")
assert.Len(t, vs, 0, "toValue should return an empty array if the requested path leads to nothing")
words := []string{"powerful", "lightweight"}
for i, word := range words {
store.AddQuad(cayley.Triple("CoreOS", fieldIs, word))
v, err := toValues(cayley.StartPath(store, "CoreOS").Out(fieldIs))
assert.Nil(t, err, "toValues should have worked")
assert.Len(t, v, i+1, "toValues did not return the right amount of values")
for _, e := range words[:i+1] {
assert.Contains(t, v, e, "toValues did not return the values we expected")
}
}
// toValue(s)() and empty values
store.AddQuad(cayley.Triple("bob", "likes", ""))
v, err = toValue(cayley.StartPath(store, "bob").Out("likes"))
assert.Nil(t, err, "toValue should work even if the requested path leads to nothing")
assert.Equal(t, "", v, "toValue should return an empty string if the requested path leads to nothing")
store.AddQuad(cayley.Triple("bob", "likes", "running"))
v, err = toValue(cayley.StartPath(store, "bob").Out("likes"))
assert.NotNil(t, err, "toValue should return an error and an empty string if the path leads to multiple values")
assert.Equal(t, "", v, "toValue should return an error and an empty string if the path leads to multiple values")
store.AddQuad(cayley.Triple("bob", "likes", "swimming"))
va, err := toValues(cayley.StartPath(store, "bob").Out("likes"))
assert.Nil(t, err, "toValues should have worked")
if assert.Len(t, va, 3, "toValues should have returned 2 values") {
assert.Contains(t, va, "running")
assert.Contains(t, va, "swimming")
assert.Contains(t, va, "")
}
}

@@ -1,63 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
cerrors "github.com/coreos/clair/utils/errors"
"github.com/google/cayley"
)
const (
fieldFlagValue = "value"
flagNodePrefix = "flag"
)
// UpdateFlag creates a flag or update an existing flag's value
func UpdateFlag(name, value string) error {
if name == "" || value == "" {
log.Warning("could not insert a flag which has an empty name or value")
return cerrors.NewBadRequestError("could not insert a flag which has an empty name or value")
}
// Initialize transaction
t := cayley.NewTransaction()
// Get current flag value
currentValue, err := GetFlagValue(name)
if err != nil {
return err
}
// Build transaction
name = flagNodePrefix + ":" + name
if currentValue != "" {
t.RemoveQuad(cayley.Triple(name, fieldFlagValue, currentValue))
}
t.AddQuad(cayley.Triple(name, fieldFlagValue, value))
// Apply transaction
if err = store.ApplyTransaction(t); err != nil {
log.Errorf("failed transaction (UpdateFlag): %s", err)
return ErrTransaction
}
// Return
return nil
}
// GetFlagValue returns the value of the flag given by its name (or an empty string if the flag does not exist)
func GetFlagValue(name string) (string, error) {
return toValue(cayley.StartPath(store, flagNodePrefix+":"+name).Out(fieldFlagValue))
}

@@ -1,49 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"testing"
"github.com/coreos/clair/config"
"github.com/stretchr/testify/assert"
)
func TestFlag(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
// Get non existing flag
f, err := GetFlagValue("test")
assert.Nil(t, err, "GetFlagValue should have worked")
assert.Empty(t, "", f, "Getting a non-existing flag should return an empty string")
// Try to insert invalid flags
assert.Error(t, UpdateFlag("test", ""), "It should not accept a flag with an empty name or value")
assert.Error(t, UpdateFlag("", "test"), "It should not accept a flag with an empty name or value")
assert.Error(t, UpdateFlag("", ""), "It should not accept a flag with an empty name or value")
// Insert a flag and verify its value
assert.Nil(t, UpdateFlag("test", "test1"))
f, err = GetFlagValue("test")
assert.Nil(t, err, "GetFlagValue should have worked")
assert.Equal(t, "test1", f, "GetFlagValue did not return the expected value")
// Update a flag and verify its value
assert.Nil(t, UpdateFlag("test", "test2"))
f, err = GetFlagValue("test")
assert.Nil(t, err, "GetFlagValue should have worked")
assert.Equal(t, "test2", f, "GetFlagValue did not return the expected value")
}

@@ -1,432 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"strconv"
"github.com/coreos/clair/utils"
cerrors "github.com/coreos/clair/utils/errors"
"github.com/google/cayley"
"github.com/google/cayley/graph"
"github.com/google/cayley/graph/path"
)
const (
FieldLayerID = "id"
FieldLayerParent = "parent"
FieldLayerSuccessors = "successors"
FieldLayerOS = "os"
FieldLayerEngineVersion = "engineVersion"
FieldLayerPackages = "adds/removes"
// These fields are not selectable and are for internal use only.
fieldLayerIsValue = "layer"
fieldLayerInstalledPackages = "adds"
fieldLayerRemovedPackages = "removes"
)
var FieldLayerAll = []string{FieldLayerID, FieldLayerParent, FieldLayerSuccessors, FieldLayerOS, FieldLayerPackages, FieldLayerEngineVersion}
// Layer represents an unique container layer
type Layer struct {
Node string `json:"-"`
ID string
ParentNode string `json:"-"`
SuccessorsNodes []string `json:"-"`
OS string
InstalledPackagesNodes []string `json:"-"`
RemovedPackagesNodes []string `json:"-"`
EngineVersion int
}
// GetNode returns the node name of a Layer
// Requires the key field: ID
func (l *Layer) GetNode() string {
return fieldLayerIsValue + ":" + utils.Hash(l.ID)
}
// InsertLayer insert a single layer in the database
//
// ID, and EngineVersion fields are required.
// ParentNode, OS, InstalledPackagesNodes and RemovedPackagesNodes are optional,
// SuccessorsNodes is unnecessary.
//
// The ID MUST be unique for two different layers.
//
//
// If the Layer already exists, nothing is done, except if the provided engine
// version is higher than the existing one, in which case, the OS,
// InstalledPackagesNodes and RemovedPackagesNodes fields will be replaced.
//
// The layer should only contains the newly installed/removed packages
// There is no safeguard that prevents from marking a package as newly installed
// while it has already been installed in one of its parent.
func InsertLayer(layer *Layer) error {
// Verify parameters
if layer.ID == "" {
log.Warning("could not insert a layer which has an empty ID")
return cerrors.NewBadRequestError("could not insert a layer which has an empty ID")
}
// Create required data structures
t := cayley.NewTransaction()
layer.Node = layer.GetNode()
// Try to find an existing layer
existingLayer, err := FindOneLayerByNode(layer.Node, FieldLayerAll)
if err != nil && err != cerrors.ErrNotFound {
return err
}
if existingLayer != nil && existingLayer.EngineVersion >= layer.EngineVersion {
// The layer exists and has an equal or higher engine verison, do nothing
return nil
}
if existingLayer == nil {
// Create case: add permanent nodes
t.AddQuad(cayley.Triple(layer.Node, fieldIs, fieldLayerIsValue))
t.AddQuad(cayley.Triple(layer.Node, FieldLayerID, layer.ID))
t.AddQuad(cayley.Triple(layer.Node, FieldLayerParent, layer.ParentNode))
} else {
// Update case: remove everything before we add updated data
t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerOS, existingLayer.OS))
for _, pkg := range existingLayer.InstalledPackagesNodes {
t.RemoveQuad(cayley.Triple(layer.Node, fieldLayerInstalledPackages, pkg))
}
for _, pkg := range existingLayer.RemovedPackagesNodes {
t.RemoveQuad(cayley.Triple(layer.Node, fieldLayerRemovedPackages, pkg))
}
t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerEngineVersion, strconv.Itoa(existingLayer.EngineVersion)))
}
// Add OS/Packages
t.AddQuad(cayley.Triple(layer.Node, FieldLayerOS, layer.OS))
for _, pkg := range layer.InstalledPackagesNodes {
t.AddQuad(cayley.Triple(layer.Node, fieldLayerInstalledPackages, pkg))
}
for _, pkg := range layer.RemovedPackagesNodes {
t.AddQuad(cayley.Triple(layer.Node, fieldLayerRemovedPackages, pkg))
}
t.AddQuad(cayley.Triple(layer.Node, FieldLayerEngineVersion, strconv.Itoa(layer.EngineVersion)))
// Apply transaction
if err = store.ApplyTransaction(t); err != nil {
log.Errorf("failed transaction (InsertLayer): %s", err)
return ErrTransaction
}
return nil
}
// DeleteLayer deletes the specified layer and any child layers that are
// dependent on the specified layer.
func DeleteLayer(ID string) error {
layer, err := FindOneLayerByID(ID, []string{})
if err != nil {
return err
}
return deleteLayerTreeFrom(layer.Node, nil)
}
func deleteLayerTreeFrom(node string, t *graph.Transaction) error {
// Determine if that function call is the root call of the recursivity
// And create transaction if its the case.
root := (t == nil)
if root {
t = cayley.NewTransaction()
}
// Find layer.
layer, err := FindOneLayerByNode(node, FieldLayerAll)
if err != nil {
// Ignore missing layer.
return nil
}
// Remove all successor layers.
for _, succNode := range layer.SuccessorsNodes {
deleteLayerTreeFrom(succNode, t)
}
// Remove layer.
t.RemoveQuad(cayley.Triple(layer.Node, fieldIs, fieldLayerIsValue))
t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerID, layer.ID))
t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerParent, layer.ParentNode))
t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerOS, layer.OS))
t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerEngineVersion, strconv.Itoa(layer.EngineVersion)))
for _, pkg := range layer.InstalledPackagesNodes {
t.RemoveQuad(cayley.Triple(layer.Node, fieldLayerInstalledPackages, pkg))
}
for _, pkg := range layer.RemovedPackagesNodes {
t.RemoveQuad(cayley.Triple(layer.Node, fieldLayerRemovedPackages, pkg))
}
// Apply transaction if root call.
if root {
if err = store.ApplyTransaction(t); err != nil {
log.Errorf("failed transaction (deleteLayerTreeFrom): %s", err)
return ErrTransaction
}
}
return nil
}
// FindOneLayerByID finds and returns a single layer having the given ID,
// selecting the specified fields and setting the ID field on the result
func FindOneLayerByID(ID string, selectedFields []string) (*Layer, error) {
t := &Layer{ID: ID}
l, err := FindOneLayerByNode(t.GetNode(), selectedFields)
if err != nil {
return nil, err
}
l.ID = ID
return l, nil
}
// FindOneLayerByNode finds and returns a single layer by its node, selecting the specified fields
func FindOneLayerByNode(node string, selectedFields []string) (*Layer, error) {
l, err := toLayers(cayley.StartPath(store, node).Has(fieldIs, fieldLayerIsValue), selectedFields)
if err != nil {
return nil, err
}
if len(l) == 1 {
return l[0], nil
}
if len(l) > 1 {
log.Errorf("found multiple layers with identical node [Node: %s]", node)
return nil, ErrInconsistent
}
return nil, cerrors.ErrNotFound
}
// FindAllLayersByAddedPackageNodes finds and returns all layers that add the
// given packages (by their nodes), selecting the specified fields
func FindAllLayersByAddedPackageNodes(nodes []string, selectedFields []string) ([]*Layer, error) {
layers, err := toLayers(cayley.StartPath(store, nodes...).In(fieldLayerInstalledPackages), selectedFields)
if err != nil {
return []*Layer{}, err
}
return layers, nil
}
// FindAllLayersByPackageNode finds and returns all layers that have the given package (by its node), selecting the specified fields
// func FindAllLayersByPackageNode(node string, only map[string]struct{}) ([]*Layer, error) {
// var layers []*Layer
//
// // We need the successors field
// if only != nil {
// only[FieldLayerSuccessors] = struct{}{}
// }
//
// // Get all the layers which remove the package
// layersNodesRemoving, err := toValues(cayley.StartPath(store, node).In(fieldLayerRemovedPackages).Has(fieldIs, fieldLayerIsValue))
// if err != nil {
// return []*Layer{}, err
// }
// layersNodesRemovingMap := make(map[string]struct{})
// for _, l := range layersNodesRemoving {
// layersNodesRemovingMap[l] = struct{}{}
// }
//
// layersToBrowse, err := toLayers(cayley.StartPath(store, node).In(fieldLayerInstalledPackages).Has(fieldIs, fieldLayerIsValue), only)
// if err != nil {
// return []*Layer{}, err
// }
// for len(layersToBrowse) > 0 {
// var newLayersToBrowse []*Layer
// for _, layerToBrowse := range layersToBrowse {
// if _, layerRemovesPackage := layersNodesRemovingMap[layerToBrowse.Node]; !layerRemovesPackage {
// layers = append(layers, layerToBrowse)
// successors, err := layerToBrowse.Successors(only)
// if err != nil {
// return []*Layer{}, err
// }
// newLayersToBrowse = append(newLayersToBrowse, successors...)
// }
// layersToBrowse = newLayersToBrowse
// }
// }
//
// return layers, nil
// }
// toLayers converts a path leading to one or multiple layers to Layer structs,
// selecting the specified fields
func toLayers(path *path.Path, selectedFields []string) ([]*Layer, error) {
var layers []*Layer
saveFields(path, selectedFields, []string{FieldLayerSuccessors, FieldLayerPackages, fieldLayerInstalledPackages, fieldLayerRemovedPackages})
it, _ := path.BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
tags := make(map[string]graph.Value)
it.TagResults(tags)
layer := Layer{Node: store.NameOf(it.Result())}
for _, selectedField := range selectedFields {
switch selectedField {
case FieldLayerID:
layer.ID = store.NameOf(tags[FieldLayerID])
case FieldLayerParent:
layer.ParentNode = store.NameOf(tags[FieldLayerParent])
case FieldLayerSuccessors:
var err error
layer.SuccessorsNodes, err = toValues(cayley.StartPath(store, layer.Node).In(FieldLayerParent))
if err != nil {
log.Errorf("could not get successors of layer %s: %s.", layer.Node, err.Error())
return nil, err
}
case FieldLayerOS:
layer.OS = store.NameOf(tags[FieldLayerOS])
case FieldLayerPackages:
var err error
it, _ := cayley.StartPath(store, layer.Node).OutWithTags([]string{"predicate"}, fieldLayerInstalledPackages, fieldLayerRemovedPackages).BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
tags := make(map[string]graph.Value)
it.TagResults(tags)
predicate := store.NameOf(tags["predicate"])
if predicate == fieldLayerInstalledPackages {
layer.InstalledPackagesNodes = append(layer.InstalledPackagesNodes, store.NameOf(it.Result()))
} else if predicate == fieldLayerRemovedPackages {
layer.RemovedPackagesNodes = append(layer.RemovedPackagesNodes, store.NameOf(it.Result()))
}
}
if err = it.Err(); err != nil {
log.Errorf("could not get installed/removed packages of layer %s: %s.", layer.Node, err)
return nil, err
}
case FieldLayerEngineVersion:
layer.EngineVersion, _ = strconv.Atoi(store.NameOf(tags[FieldLayerEngineVersion]))
default:
panic("unknown selectedField")
}
}
layers = append(layers, &layer)
}
if it.Err() != nil {
log.Errorf("failed query in toLayers: %s", it.Err())
return []*Layer{}, ErrBackendException
}
return layers, nil
}
// Successors finds and returns all layers that define l as their parent,
// selecting the specified fields
// It requires that the FieldLayerSuccessors field has been selected on l
// func (l *Layer) Successors(selectedFields []string) ([]*Layer, error) {
// if len(l.SuccessorsNodes) == 0 {
// return []*Layer{}, nil
// }
//
// return toLayers(cayley.StartPath(store, l.SuccessorsNodes...), only)
// }
// Parent finds and returns the parent layer of l, selecting the specified fields
// It requires that the FieldLayerParent field has been selected on l
func (l *Layer) Parent(selectedFields []string) (*Layer, error) {
if l.ParentNode == "" {
return nil, nil
}
parent, err := toLayers(cayley.StartPath(store, l.ParentNode), selectedFields)
if err != nil {
return nil, err
}
if len(parent) == 1 {
return parent[0], nil
}
if len(parent) > 1 {
log.Errorf("found multiple layers when getting parent layer of layer %s", l.ParentNode)
return nil, ErrInconsistent
}
return nil, nil
}
// Sublayers finds and returns all layers that compose l, selecting the specified
// fields
// It requires that the FieldLayerParent field has been selected on l
// The base image comes first, and l is last
// func (l *Layer) Sublayers(selectedFields []string) ([]*Layer, error) {
// var sublayers []*Layer
//
// // We need the parent field
// if only != nil {
// only[FieldLayerParent] = struct{}{}
// }
//
// parent, err := l.Parent(only)
// if err != nil {
// return []*Layer{}, err
// }
// if parent != nil {
// parentSublayers, err := parent.Sublayers(only)
// if err != nil {
// return []*Layer{}, err
// }
// sublayers = append(sublayers, parentSublayers...)
// }
//
// sublayers = append(sublayers, l)
//
// return sublayers, nil
// }
// AllPackages computes the full list of packages that l has and returns them as
// nodes.
// It requires that the FieldLayerParent and FieldLayerPackages fields have been
// selected on l
func (l *Layer) AllPackages() ([]string, error) {
var allPackages []string
parent, err := l.Parent([]string{FieldLayerParent, FieldLayerPackages})
if err != nil {
return []string{}, err
}
if parent != nil {
allPackages, err = parent.AllPackages()
if err != nil {
return []string{}, err
}
}
return append(utils.CompareStringLists(allPackages, l.RemovedPackagesNodes), l.InstalledPackagesNodes...), nil
}
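// A sketch (hypothetical, not in the original source, assumes an opened
// database): with a parent installing p1 and p2, and a child removing p1 and
// installing p3, AllPackages on the child yields {p2, p3}, i.e. the inherited
// set minus the removed nodes plus the child's own installed nodes.
func exampleAllPackages() ([]string, error) {
	parent := &Layer{ID: "parent", InstalledPackagesNodes: []string{"p1", "p2"}}
	if err := InsertLayer(parent); err != nil {
		return nil, err
	}
	child := &Layer{ID: "child", ParentNode: parent.GetNode(), InstalledPackagesNodes: []string{"p3"}, RemovedPackagesNodes: []string{"p1"}}
	if err := InsertLayer(child); err != nil {
		return nil, err
	}
	found, err := FindOneLayerByID(child.ID, FieldLayerAll)
	if err != nil {
		return nil, err
	}
	// Expected to contain p2 and p3.
	return found.AllPackages()
}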
// OperatingSystem tries to find the Operating System of a layer using its
// parents.
// It requires that the FieldLayerParent and FieldLayerOS fields have been
// selected on l
func (l *Layer) OperatingSystem() (string, error) {
if l.OS != "" {
return l.OS, nil
}
// Try from the parent
parent, err := l.Parent([]string{FieldLayerParent, FieldLayerOS})
if err != nil {
return "", err
}
if parent != nil {
return parent.OperatingSystem()
}
return "", nil
}

@ -1,178 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"testing"
"github.com/coreos/clair/config"
"github.com/coreos/clair/utils"
cerrors "github.com/coreos/clair/utils/errors"
"github.com/stretchr/testify/assert"
)
// TestInvalidLayers tries to insert invalid layers
func TestInvalidLayers(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
assert.Error(t, InsertLayer(&Layer{ID: ""})) // No ID
}
// TestLayerSimple inserts a single layer and ensures it can be retrieved and
// that its methods work
func TestLayerSimple(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
// Insert a layer and find it back
l1 := &Layer{ID: "l1", OS: "os1", InstalledPackagesNodes: []string{"p1", "p2"}, EngineVersion: 1}
if assert.Nil(t, InsertLayer(l1)) {
fl1, err := FindOneLayerByID(l1.ID, FieldLayerAll)
if assert.Nil(t, err) && assert.NotNil(t, fl1) {
// Saved = found
assert.True(t, layerEqual(l1, fl1), "layers are not equal, expected %v, have %v", l1, fl1)
// No parent
p, err := fl1.Parent(FieldLayerAll)
assert.Nil(t, err)
assert.Nil(t, p)
// AllPackages()
pk, err := fl1.AllPackages()
assert.Nil(t, err)
if assert.Len(t, pk, 2) {
assert.Contains(t, pk, l1.InstalledPackagesNodes[0])
assert.Contains(t, pk, l1.InstalledPackagesNodes[1])
}
// OS()
o, err := fl1.OperatingSystem()
assert.Nil(t, err)
assert.Equal(t, l1.OS, o)
}
// FindAllLayersByAddedPackageNodes
al1, err := FindAllLayersByAddedPackageNodes([]string{"p1", "p3"}, FieldLayerAll)
if assert.Nil(t, err) && assert.Len(t, al1, 1) {
assert.Equal(t, al1[0].Node, l1.Node)
}
// Delete
if assert.Nil(t, DeleteLayer(l1.ID)) {
_, err := FindOneLayerByID(l1.ID, FieldLayerAll)
assert.Equal(t, cerrors.ErrNotFound, err)
}
}
}
// TestLayerTree inserts a tree of layers and ensures that the tree logic works
func TestLayerTree(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
var layers []*Layer
layers = append(layers, &Layer{ID: "l1"})
layers = append(layers, &Layer{ID: "l2", ParentNode: layers[0].GetNode(), OS: "os2", InstalledPackagesNodes: []string{"p1", "p2"}})
layers = append(layers, &Layer{ID: "l3", ParentNode: layers[1].GetNode()}) // Repeat an empty layer archive (l1)
layers = append(layers, &Layer{ID: "l4a", ParentNode: layers[2].GetNode(), InstalledPackagesNodes: []string{"p3"}, RemovedPackagesNodes: []string{"p1", "p4"}}) // p4 does not exists and thu can't actually be removed
layers = append(layers, &Layer{ID: "l4b", ParentNode: layers[2].GetNode(), InstalledPackagesNodes: []string{}, RemovedPackagesNodes: []string{"p2", "p1"}})
var flayers []*Layer
ok := true
for _, l := range layers {
ok = ok && assert.Nil(t, InsertLayer(l))
fl, err := FindOneLayerByID(l.ID, FieldLayerAll)
ok = ok && assert.Nil(t, err)
ok = ok && assert.NotNil(t, fl)
flayers = append(flayers, fl)
}
if assert.True(t, ok) {
// Start testing
// l4a
// Parent()
fl4ap, err := flayers[3].Parent(FieldLayerAll)
assert.Nil(t, err, "l4a should has l3 as parent")
if assert.NotNil(t, fl4ap, "l4a should has l3 as parent") {
assert.Equal(t, "l3", fl4ap.ID, "l4a should has l3 as parent")
}
// OS()
fl4ao, err := flayers[3].OperatingSystem()
assert.Nil(t, err, "l4a should inherits its OS from l2")
assert.Equal(t, "os2", fl4ao, "l4a should inherits its OS from l2")
// AllPackages()
fl4apkg, err := flayers[3].AllPackages()
assert.Nil(t, err)
if assert.Len(t, fl4apkg, 2) {
assert.Contains(t, fl4apkg, "p2")
assert.Contains(t, fl4apkg, "p3")
}
// l4b
// AllPackages()
fl4bpkg, err := flayers[4].AllPackages()
assert.Nil(t, err)
assert.Len(t, fl4bpkg, 0)
// Delete a layer in the middle of the tree.
if assert.Nil(t, DeleteLayer(flayers[1].ID)) {
for _, l := range layers[1:] {
_, err := FindOneLayerByID(l.ID, FieldLayerAll)
assert.Equal(t, cerrors.ErrNotFound, err)
}
}
}
}
func TestLayerUpdate(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
l1 := &Layer{ID: "l1", OS: "os1", InstalledPackagesNodes: []string{"p1", "p2"}, RemovedPackagesNodes: []string{"p3", "p4"}, EngineVersion: 1}
if assert.Nil(t, InsertLayer(l1)) {
// Do not update layer content if the engine versions are equal
l1b := &Layer{ID: "l1", OS: "os2", InstalledPackagesNodes: []string{"p1"}, RemovedPackagesNodes: []string{""}, EngineVersion: 1}
if assert.Nil(t, InsertLayer(l1b)) {
fl1b, err := FindOneLayerByID(l1.ID, FieldLayerAll)
if assert.Nil(t, err) && assert.NotNil(t, fl1b) {
assert.True(t, layerEqual(l1, fl1b), "layer contents are not equal, expected %v, have %v", l1, fl1b)
}
}
// Update the layer content with new data and a higher engine version
l1c := &Layer{ID: "l1", OS: "os2", InstalledPackagesNodes: []string{"p1", "p5"}, RemovedPackagesNodes: []string{"p6", "p7"}, EngineVersion: 2}
if assert.Nil(t, InsertLayer(l1c)) {
fl1c, err := FindOneLayerByID(l1c.ID, FieldLayerAll)
if assert.Nil(t, err) && assert.NotNil(t, fl1c) {
assert.True(t, layerEqual(l1c, fl1c), "layer contents are not equal, expected %v, have %v", l1c, fl1c)
}
}
}
}
func layerEqual(expected, actual *Layer) bool {
eq := true
eq = eq && expected.Node == actual.Node
eq = eq && expected.ID == actual.ID
eq = eq && expected.ParentNode == actual.ParentNode
eq = eq && expected.OS == actual.OS
eq = eq && expected.EngineVersion == actual.EngineVersion
eq = eq && len(utils.CompareStringLists(actual.SuccessorsNodes, expected.SuccessorsNodes)) == 0 && len(utils.CompareStringLists(expected.SuccessorsNodes, actual.SuccessorsNodes)) == 0
eq = eq && len(utils.CompareStringLists(actual.RemovedPackagesNodes, expected.RemovedPackagesNodes)) == 0 && len(utils.CompareStringLists(expected.RemovedPackagesNodes, actual.RemovedPackagesNodes)) == 0
eq = eq && len(utils.CompareStringLists(actual.InstalledPackagesNodes, expected.InstalledPackagesNodes)) == 0 && len(utils.CompareStringLists(expected.InstalledPackagesNodes, actual.InstalledPackagesNodes)) == 0
return eq
}

@ -1,163 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"strconv"
"time"
"github.com/barakmich/glog"
cerrors "github.com/coreos/clair/utils/errors"
"github.com/google/cayley"
"github.com/google/cayley/graph"
"github.com/google/cayley/graph/path"
)
const (
fieldLockLocked = "locked"
fieldLockLockedValue = "locked"
fieldLockLockedBy = "locked_by"
fieldLockLockedUntil = "locked_until"
)
// Lock tries to set a temporary lock in the database.
// If a lock already exists with the given name/owner, then the lock is renewed.
//
// Lock does not block: it returns true and the lock's expiration time
// if the lock has been successfully acquired, or false otherwise.
func Lock(name string, duration time.Duration, owner string) (bool, time.Time) {
pruneLocks()
until := time.Now().Add(duration)
untilString := strconv.FormatInt(until.Unix(), 10)
// Try to get the expiration time of a lock with the same name/owner
currentExpiration, err := toValue(cayley.StartPath(store, name).Has(fieldLockLockedBy, owner).Out(fieldLockLockedUntil))
if err == nil && currentExpiration != "" {
// Renew our lock
if currentExpiration == untilString {
return true, until
}
t := cayley.NewTransaction()
t.RemoveQuad(cayley.Triple(name, fieldLockLockedUntil, currentExpiration))
t.AddQuad(cayley.Triple(name, fieldLockLockedUntil, untilString))
// It is not necessary to verify that the lock is still ours inside the transaction:
// if someone else took it, the lock's current expiration most likely changed and the transaction will fail
return store.ApplyTransaction(t) == nil, until
}
t := cayley.NewTransaction()
t.AddQuad(cayley.Triple(name, fieldLockLocked, fieldLockLockedValue)) // Necessary to make the transaction fail if the lock already exists (and has not been pruned)
t.AddQuad(cayley.Triple(name, fieldLockLockedUntil, untilString))
t.AddQuad(cayley.Triple(name, fieldLockLockedBy, owner))
glog.SetStderrThreshold("FATAL")
success := store.ApplyTransaction(t) == nil
glog.SetStderrThreshold("ERROR")
return success, until
}
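// A minimal usage sketch (not part of the original file, the lock name is
// illustrative): since Lock never blocks, callers typically skip the work when
// the lock is held elsewhere, and renew it by calling Lock again with the same
// name/owner if the task outlives the initial duration.
func exampleWithLock(owner string, work func()) {
	acquired, until := Lock("updater", 10*time.Minute, owner)
	if !acquired {
		return // somebody else holds the lock
	}
	defer Unlock("updater", owner)
	_ = until // the deadline can be used to decide when to renew
	work()
}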
// Unlock releases the lock specified by its name if it is held by the given owner
func Unlock(name, owner string) {
unlocked := 0
it, _ := cayley.StartPath(store, name).Has(fieldLockLocked, fieldLockLockedValue).Has(fieldLockLockedBy, owner).Save(fieldLockLockedUntil, fieldLockLockedUntil).BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
tags := make(map[string]graph.Value)
it.TagResults(tags)
t := cayley.NewTransaction()
t.RemoveQuad(cayley.Triple(name, fieldLockLocked, fieldLockLockedValue))
t.RemoveQuad(cayley.Triple(name, fieldLockLockedUntil, store.NameOf(tags[fieldLockLockedUntil])))
t.RemoveQuad(cayley.Triple(name, fieldLockLockedBy, owner))
err := store.ApplyTransaction(t)
if err != nil {
log.Errorf("failed transaction (Unlock): %s", err)
}
unlocked++
}
if it.Err() != nil {
log.Errorf("failed query in Unlock: %s", it.Err())
}
if unlocked > 1 {
// We should never see this, it would mean that our database doesn't ensure quad uniqueness
// and that the entire lock system is jeopardized.
log.Errorf("found inconsistency in Unlock: matched %d times a locked named: %s", unlocked, name)
}
}
// LockInfo returns the owner and the expiration time of the lock specified
// by its name
func LockInfo(name string) (string, time.Time, error) {
it, _ := cayley.StartPath(store, name).Has(fieldLockLocked, fieldLockLockedValue).Save(fieldLockLockedUntil, fieldLockLockedUntil).Save(fieldLockLockedBy, fieldLockLockedBy).BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
tags := make(map[string]graph.Value)
it.TagResults(tags)
tt, _ := strconv.ParseInt(store.NameOf(tags[fieldLockLockedUntil]), 10, 64)
return store.NameOf(tags[fieldLockLockedBy]), time.Unix(tt, 0), nil
}
if it.Err() != nil {
log.Errorf("failed query in LockInfo: %s", it.Err())
return "", time.Time{}, ErrBackendException
}
return "", time.Time{}, cerrors.ErrNotFound
}
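// A small sketch (not in the original file): LockInfo can be used to report
// who currently holds a given lock and when it expires.
func exampleLockInfo(name string) {
	if owner, until, err := LockInfo(name); err == nil {
		log.Debugf("lock %s is held by %s until %s", name, owner, until)
	}
}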
// pruneLocks removes every expired lock from the database
func pruneLocks() {
now := time.Now()
// Delete every expired lock
it, _ := cayley.StartPath(store, "locked").In("locked").Save(fieldLockLockedUntil, fieldLockLockedUntil).Save(fieldLockLockedBy, fieldLockLockedBy).BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
tags := make(map[string]graph.Value)
it.TagResults(tags)
n := store.NameOf(it.Result())
t := store.NameOf(tags[fieldLockLockedUntil])
o := store.NameOf(tags[fieldLockLockedBy])
tt, _ := strconv.ParseInt(t, 10, 64)
if now.Unix() > tt {
log.Debugf("lock %s owned by %s has expired.", n, o)
tr := cayley.NewTransaction()
tr.RemoveQuad(cayley.Triple(n, fieldLockLocked, fieldLockLockedValue))
tr.RemoveQuad(cayley.Triple(n, fieldLockLockedUntil, t))
tr.RemoveQuad(cayley.Triple(n, fieldLockLockedBy, o))
err := store.ApplyTransaction(tr)
if err != nil {
log.Errorf("failed transaction (pruneLocks): %s", err)
continue
}
log.Debugf("lock %s has been successfully pruned.", n)
}
}
if it.Err() != nil {
log.Errorf("failed query in Unlock: %s", it.Err())
}
}
// getLockedNodes returns every node that is currently locked
func getLockedNodes() *path.Path {
return cayley.StartPath(store, "locked").In("locked")
}

@ -1,57 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"testing"
"time"
"github.com/coreos/clair/config"
"github.com/stretchr/testify/assert"
)
func TestLock(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
var l bool
var et time.Time
// Create a first lock
l, _ = Lock("test1", time.Minute, "owner1")
assert.True(t, l)
// Try to lock the same lock with another owner
l, _ = Lock("test1", time.Minute, "owner2")
assert.False(t, l)
// Renew the lock
l, _ = Lock("test1", 2*time.Minute, "owner1")
assert.True(t, l)
// Unlock and then relock by someone else
Unlock("test1", "owner1")
l, et = Lock("test1", time.Minute, "owner2")
assert.True(t, l)
// LockInfo
o, et2, err := LockInfo("test1")
assert.Nil(t, err)
assert.Equal(t, "owner2", o)
assert.Equal(t, et.Second(), et2.Second())
// Create a second lock which is actually already expired ...
l, _ = Lock("test2", -time.Minute, "owner1")
assert.True(t, l)
// Take over the lock
l, _ = Lock("test2", time.Minute, "owner2")
assert.True(t, l)
}

@ -0,0 +1,55 @@
package database
import "github.com/coreos/clair/utils/types"
type Model struct {
ID int
}
type Layer struct {
Model
Name string
EngineVersion int
Parent *Layer
Namespace *Namespace
Features []FeatureVersion
}
type Namespace struct {
Model
Name string
}
type Feature struct {
Model
Name string
Namespace Namespace
// FixedBy map[types.Version]Vulnerability // <<-- WRONG.
}
type FeatureVersion struct {
Model
Feature Feature
Version types.Version
AffectedBy []Vulnerability
}
type Vulnerability struct {
Model
Name string
Namespace Namespace
Description string
Link string
Severity types.Priority
// FixedIn map[types.Version]Feature // <<-- WRONG.
Affects []FeatureVersion
// For output purposes. Only make sense when the vulnerability
// is already about a specific Feature/FeatureVersion.
FixedBy types.Version
}
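// A hedged sketch (not part of this file, values are purely illustrative)
// showing how the new models compose: a Layer references its parent and
// namespace and carries the FeatureVersions it introduces, while each
// FeatureVersion ties a Feature to a specific version.
func exampleModels() Layer {
	debian8 := Namespace{Name: "debian:8"}
	openssl := FeatureVersion{
		Feature: Feature{Name: "openssl", Namespace: debian8},
		Version: types.NewVersionUnsafe("1.0.1k-3"),
	}
	return Layer{
		Name:          "example-layer",
		EngineVersion: 1,
		Namespace:     &debian8,
		Features:      []FeatureVersion{openssl},
	}
}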

@ -15,7 +15,6 @@
package database
// DebianReleasesMapping translates Debian code names and class names to version numbers
// TODO That should probably be stored in the database or in a file
var DebianReleasesMapping = map[string]string{
// Code names
"squeeze": "6",
@ -32,7 +31,6 @@ var DebianReleasesMapping = map[string]string{
}
// UbuntuReleasesMapping translates Ubuntu code names to version numbers
// TODO That should probably be stored in the database or in a file
var UbuntuReleasesMapping = map[string]string{
"precise": "12.04",
"quantal": "12.10",

@ -1,409 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"encoding/json"
"strconv"
"github.com/coreos/clair/utils"
cerrors "github.com/coreos/clair/utils/errors"
"github.com/coreos/clair/utils/types"
"github.com/google/cayley"
"github.com/google/cayley/graph"
"github.com/pborman/uuid"
)
const (
// maxNotifications is the number of notifications that InsertNotifications
// will accept at the same time. Above this number, notifications are ignored.
maxNotifications = 100
fieldNotificationIsValue = "notification"
fieldNotificationType = "type"
fieldNotificationData = "data"
fieldNotificationIsSent = "isSent"
)
// A Notification defines an interface to a message that can be sent by a
// notifier.Notifier.
// A NotificationWrapper has to be used to convert it into a NotificationWrap,
// which can be stored in the database.
type Notification interface {
// GetName returns the explicit (human-readable) name of a notification.
GetName() string
// GetType returns the type of a notification, which is used by a
// NotificationWrapper to determine the concrete type of a Notification.
GetType() string
// GetContent returns the content of the notification.
GetContent() (interface{}, error)
}
// NotificationWrapper is an interface defining how to convert a Notification to
// a NotificationWrap object and vice-versa.
type NotificationWrapper interface {
// Wrap packs a Notification instance into a new NotificationWrap.
Wrap(n Notification) (*NotificationWrap, error)
// Unwrap unpacks an instance of NotificationWrap into a new Notification.
Unwrap(nw *NotificationWrap) (Notification, error)
}
// A NotificationWrap wraps a Notification into something that can be stored in
// the database. A NotificationWrapper has to be used to convert it into a
// Notification.
type NotificationWrap struct {
Type string
Data string
}
// DefaultWrapper is an implementation of NotificationWrapper that supports
// NewVulnerabilityNotification notifications.
type DefaultWrapper struct{}
func (w *DefaultWrapper) Wrap(n Notification) (*NotificationWrap, error) {
data, err := json.Marshal(n)
if err != nil {
log.Warningf("could not marshal notification [ID: %s, Type: %s]: %s", n.GetName(), n.GetType(), err)
return nil, cerrors.NewBadRequestError("could not marshal notification with DefaultWrapper")
}
return &NotificationWrap{Type: n.GetType(), Data: string(data)}, nil
}
func (w *DefaultWrapper) Unwrap(nw *NotificationWrap) (Notification, error) {
var v Notification
// Create struct depending on the type
switch nw.Type {
case "NewVulnerabilityNotification":
v = &NewVulnerabilityNotification{}
case "VulnerabilityPriorityIncreasedNotification":
v = &VulnerabilityPriorityIncreasedNotification{}
case "VulnerabilityPackageChangedNotification":
v = &VulnerabilityPackageChangedNotification{}
default:
log.Warningf("could not unwrap notification [Type: %s]: unknown type for DefaultWrapper", nw.Type)
return nil, cerrors.NewBadRequestError("could not unwrap notification")
}
// Unmarshal notification
err := json.Unmarshal([]byte(nw.Data), v)
if err != nil {
log.Warningf("could not unmarshal notification with DefaultWrapper [Type: %s]: %s", nw.Type, err)
return nil, cerrors.NewBadRequestError("could not unmarshal notification")
}
return v, nil
}
// GetDefaultNotificationWrapper returns the default wrapper
func GetDefaultNotificationWrapper() NotificationWrapper {
return &DefaultWrapper{}
}
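// A minimal round-trip sketch (not in the original source): a Notification is
// wrapped into a JSON-backed NotificationWrap before being stored, and
// unwrapped back into its concrete type when it is read; the CVE name below is
// purely illustrative.
func exampleWrapRoundTrip() (Notification, error) {
	wrapper := GetDefaultNotificationWrapper()
	wrapped, err := wrapper.Wrap(&NewVulnerabilityNotification{VulnerabilityID: "CVE-2015-0001"})
	if err != nil {
		return nil, err
	}
	// Yields a *NewVulnerabilityNotification again.
	return wrapper.Unwrap(wrapped)
}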
// A NewVulnerabilityNotification is a notification that informs about a new
// vulnerability and contains all the layers that introduce that vulnerability
type NewVulnerabilityNotification struct {
VulnerabilityID string
}
func (n *NewVulnerabilityNotification) GetName() string {
return n.VulnerabilityID
}
func (n *NewVulnerabilityNotification) GetType() string {
return "NewVulnerabilityNotification"
}
func (n *NewVulnerabilityNotification) GetContent() (interface{}, error) {
// This notification is about a new vulnerability
// Returns the list of layers that introduce this vulnerability
// Find vulnerability.
vulnerability, err := FindOneVulnerability(n.VulnerabilityID, []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn})
if err != nil {
return []byte{}, err
}
abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
if err != nil {
return []byte{}, err
}
layers, err := FindAllLayersIntroducingVulnerability(n.VulnerabilityID, []string{FieldLayerID})
if err != nil {
return []byte{}, err
}
layersIDs := []string{} // empty slice, not null
for _, l := range layers {
layersIDs = append(layersIDs, l.ID)
}
return struct {
Vulnerability *AbstractVulnerability
IntroducingLayersIDs []string
}{
Vulnerability: abstractVulnerability,
IntroducingLayersIDs: layersIDs,
}, nil
}
// A VulnerabilityPriorityIncreasedNotification is a notification that informs
// that the priority of a vulnerability has increased and contains all the
// layers that introduce that vulnerability.
type VulnerabilityPriorityIncreasedNotification struct {
VulnerabilityID string
OldPriority, NewPriority types.Priority
}
func (n *VulnerabilityPriorityIncreasedNotification) GetName() string {
return n.VulnerabilityID
}
func (n *VulnerabilityPriorityIncreasedNotification) GetType() string {
return "VulnerabilityPriorityIncreasedNotification"
}
func (n *VulnerabilityPriorityIncreasedNotification) GetContent() (interface{}, error) {
// Returns the list of layers that introduce this vulnerability
// And both the old and new priorities
// Find vulnerability.
vulnerability, err := FindOneVulnerability(n.VulnerabilityID, []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn})
if err != nil {
return []byte{}, err
}
abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
if err != nil {
return []byte{}, err
}
layers, err := FindAllLayersIntroducingVulnerability(n.VulnerabilityID, []string{FieldLayerID})
if err != nil {
return []byte{}, err
}
layersIDs := []string{} // empty slice, not null
for _, l := range layers {
layersIDs = append(layersIDs, l.ID)
}
return struct {
Vulnerability *AbstractVulnerability
OldPriority, NewPriority types.Priority
IntroducingLayersIDs []string
}{
Vulnerability: abstractVulnerability,
OldPriority: n.OldPriority,
NewPriority: n.NewPriority,
IntroducingLayersIDs: layersIDs,
}, nil
}
// A VulnerabilityPackageChangedNotification is a notification that informs that
// an existing vulnerability's fixed package list has been updated and may not
// affect some layers anymore or may affect new layers.
type VulnerabilityPackageChangedNotification struct {
VulnerabilityID string
AddedFixedInNodes, RemovedFixedInNodes []string
}
func (n *VulnerabilityPackageChangedNotification) GetName() string {
return n.VulnerabilityID
}
func (n *VulnerabilityPackageChangedNotification) GetType() string {
return "VulnerabilityPackageChangedNotification"
}
func (n *VulnerabilityPackageChangedNotification) GetContent() (interface{}, error) {
// Returns the removed and added packages, the layers that introduced the
// vulnerability in the past but no longer do because of the removed packages,
// and the layers that now introduce the vulnerability because of the added
// packages
// Find vulnerability.
vulnerability, err := FindOneVulnerability(n.VulnerabilityID, []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn})
if err != nil {
return []byte{}, err
}
abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
if err != nil {
return []byte{}, err
}
// First part of the answer : added/removed packages
addedPackages, err := FindAllPackagesByNodes(n.AddedFixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackagePreviousVersion})
if err != nil {
return []byte{}, err
}
removedPackages, err := FindAllPackagesByNodes(n.RemovedFixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackagePreviousVersion})
if err != nil {
return []byte{}, err
}
// Second part of the answer
var addedPackagesPreviousVersions []string
for _, pkg := range addedPackages {
previousVersions, err := pkg.PreviousVersions([]string{})
if err != nil {
return []*Layer{}, err
}
for _, version := range previousVersions {
addedPackagesPreviousVersions = append(addedPackagesPreviousVersions, version.Node)
}
}
var removedPackagesPreviousVersions []string
for _, pkg := range removedPackages {
previousVersions, err := pkg.PreviousVersions([]string{})
if err != nil {
return []*Layer{}, err
}
for _, version := range previousVersions {
removedPackagesPreviousVersions = append(removedPackagesPreviousVersions, version.Node)
}
}
newIntroducingLayers, err := FindAllLayersByAddedPackageNodes(addedPackagesPreviousVersions, []string{FieldLayerID})
if err != nil {
return []byte{}, err
}
formerIntroducingLayers, err := FindAllLayersByAddedPackageNodes(removedPackagesPreviousVersions, []string{FieldLayerID})
if err != nil {
return []byte{}, err
}
newIntroducingLayersIDs := []string{} // empty slice, not null
for _, l := range newIntroducingLayers {
newIntroducingLayersIDs = append(newIntroducingLayersIDs, l.ID)
}
formerIntroducingLayersIDs := []string{} // empty slice, not null
for _, l := range formerIntroducingLayers {
formerIntroducingLayersIDs = append(formerIntroducingLayersIDs, l.ID)
}
// Remove layers which appear in both the new and former lists (e.g. packages that were updated but are still vulnerable)
filteredNewIntroducingLayersIDs := utils.CompareStringLists(newIntroducingLayersIDs, formerIntroducingLayersIDs)
filteredFormerIntroducingLayersIDs := utils.CompareStringLists(formerIntroducingLayersIDs, newIntroducingLayersIDs)
return struct {
Vulnerability *AbstractVulnerability
AddedAffectedPackages, RemovedAffectedPackages []*AbstractPackage
NewIntroducingLayersIDs, FormerIntroducingLayerIDs []string
}{
Vulnerability: abstractVulnerability,
AddedAffectedPackages: PackagesToAbstractPackages(addedPackages),
RemovedAffectedPackages: PackagesToAbstractPackages(removedPackages),
NewIntroducingLayersIDs: filteredNewIntroducingLayersIDs,
FormerIntroducingLayerIDs: filteredFormerIntroducingLayersIDs,
}, nil
}
// InsertNotifications stores multiple Notifications in the database
// It uses the given NotificationWrapper to convert these notifications to
// something that can be stored in the database.
func InsertNotifications(notifications []Notification, wrapper NotificationWrapper) error {
if len(notifications) == 0 {
return nil
}
// Do not send notifications if there are too many of them (first update for example)
if len(notifications) > maxNotifications {
log.Noticef("Ignoring %d notifications", len(notifications))
return nil
}
// Initialize transaction
t := cayley.NewTransaction()
// Iterate over all the notifications we need to insert
for _, notification := range notifications {
// Wrap notification
wrappedNotification, err := wrapper.Wrap(notification)
if err != nil {
return err
}
node := fieldNotificationIsValue + ":" + uuid.New()
t.AddQuad(cayley.Triple(node, fieldIs, fieldNotificationIsValue))
t.AddQuad(cayley.Triple(node, fieldNotificationType, wrappedNotification.Type))
t.AddQuad(cayley.Triple(node, fieldNotificationData, wrappedNotification.Data))
t.AddQuad(cayley.Triple(node, fieldNotificationIsSent, strconv.FormatBool(false)))
}
// Apply transaction
if err := store.ApplyTransaction(t); err != nil {
log.Errorf("failed transaction (InsertNotifications): %s", err)
return ErrTransaction
}
return nil
}
// FindOneNotificationToSend finds and returns a notification that is not sent
// yet and not locked. Returns nil if there is none.
func FindOneNotificationToSend(wrapper NotificationWrapper) (string, Notification, error) {
it, _ := cayley.StartPath(store, fieldNotificationIsValue).In(fieldIs).Has(fieldNotificationIsSent, strconv.FormatBool(false)).Except(getLockedNodes()).Save(fieldNotificationType, fieldNotificationType).Save(fieldNotificationData, fieldNotificationData).BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
tags := make(map[string]graph.Value)
it.TagResults(tags)
notification, err := wrapper.Unwrap(&NotificationWrap{Type: store.NameOf(tags[fieldNotificationType]), Data: store.NameOf(tags[fieldNotificationData])})
if err != nil {
return "", nil, err
}
return store.NameOf(it.Result()), notification, nil
}
if it.Err() != nil {
log.Errorf("failed query in FindOneNotificationToSend: %s", it.Err())
return "", nil, ErrBackendException
}
return "", nil, nil
}
// CountNotificationsToSend returns the number of pending notifications
// Note that it also counts the locked notifications.
func CountNotificationsToSend() (int, error) {
c := 0
it, _ := cayley.StartPath(store, fieldNotificationIsValue).In(fieldIs).Has(fieldNotificationIsSent, strconv.FormatBool(false)).BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
c = c + 1
}
if it.Err() != nil {
log.Errorf("failed query in CountNotificationsToSend: %s", it.Err())
return 0, ErrBackendException
}
return c, nil
}
// MarkNotificationAsSent marks a notification as sent.
func MarkNotificationAsSent(node string) {
// Initialize transaction
t := cayley.NewTransaction()
t.RemoveQuad(cayley.Triple(node, fieldNotificationIsSent, strconv.FormatBool(false)))
t.AddQuad(cayley.Triple(node, fieldNotificationIsSent, strconv.FormatBool(true)))
// Apply transaction
store.ApplyTransaction(t)
}
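// A sketch of a notification consumer loop (hypothetical, not part of the
// original file; it relies on the lock helpers from lock.go and the time
// package): pending notifications are fetched one at a time, locked so that
// concurrent senders skip them, and marked as sent on success.
func exampleSendPending(send func(Notification) error, owner string) error {
	wrapper := GetDefaultNotificationWrapper()
	for {
		node, notification, err := FindOneNotificationToSend(wrapper)
		if err != nil {
			return err
		}
		if notification == nil {
			return nil // nothing left to send
		}
		if locked, _ := Lock(node, 5*time.Minute, owner); !locked {
			continue // another sender picked it up in the meantime
		}
		if err := send(notification); err != nil {
			Unlock(node, owner)
			return err
		}
		MarkNotificationAsSent(node)
		Unlock(node, owner)
	}
}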

@ -1,145 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"encoding/json"
"fmt"
"reflect"
"testing"
"time"
"github.com/coreos/clair/config"
"github.com/stretchr/testify/assert"
)
type TestWrapper struct{}
func (w *TestWrapper) Wrap(n Notification) (*NotificationWrap, error) {
data, err := json.Marshal(n)
if err != nil {
return nil, err
}
return &NotificationWrap{Type: n.GetType(), Data: string(data)}, nil
}
func (w *TestWrapper) Unwrap(nw *NotificationWrap) (Notification, error) {
var v Notification
switch nw.Type {
case "ntest1":
v = &NotificationTest1{}
case "ntest2":
v = &NotificationTest2{}
default:
return nil, fmt.Errorf("Could not Unwrap NotificationWrapper [Type: %s, Data: %s]: Unknown notification type.", nw.Type, nw.Data)
}
err := json.Unmarshal([]byte(nw.Data), v)
return v, err
}
type NotificationTest1 struct {
Test1 string
}
func (n NotificationTest1) GetName() string {
return n.Test1
}
func (n NotificationTest1) GetType() string {
return "ntest1"
}
func (n NotificationTest1) GetContent() (interface{}, error) {
return struct{ Test1 string }{Test1: n.Test1}, nil
}
type NotificationTest2 struct {
Test2 string
}
func (n NotificationTest2) GetName() string {
return n.Test2
}
func (n NotificationTest2) GetType() string {
return "ntest2"
}
func (n NotificationTest2) GetContent() (interface{}, error) {
return struct{ Test2 string }{Test2: n.Test2}, nil
}
func TestNotification(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
wrapper := &TestWrapper{}
// Insert two notifications of different types
n1 := &NotificationTest1{Test1: "test1"}
n2 := &NotificationTest2{Test2: "test2"}
err := InsertNotifications([]Notification{n1, n2}, &TestWrapper{})
assert.Nil(t, err)
// Count notifications to send
c, err := CountNotificationsToSend()
assert.Nil(t, err)
assert.Equal(t, 2, c)
foundN1 := false
foundN2 := false
// Select the first one
node, n, err := FindOneNotificationToSend(wrapper)
assert.Nil(t, err)
if assert.NotNil(t, n) {
if reflect.DeepEqual(n1, n) {
foundN1 = true
} else if reflect.DeepEqual(n2, n) {
foundN2 = true
} else {
assert.Fail(t, "did not find any expected notification")
return
}
}
// Mark the first one as sent
MarkNotificationAsSent(node)
// Count notifications to send
c, err = CountNotificationsToSend()
assert.Nil(t, err)
assert.Equal(t, 1, c)
// Select again
node, n, err = FindOneNotificationToSend(wrapper)
assert.Nil(t, err)
if foundN1 {
assert.Equal(t, n2, n)
} else if foundN2 {
assert.Equal(t, n1, n)
}
// Lock the second one
Lock(node, time.Minute, "TestNotification")
// Select again
_, n, err = FindOneNotificationToSend(wrapper)
assert.Nil(t, err)
assert.Equal(t, nil, n)
}

@ -1,448 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"sort"
"github.com/coreos/clair/utils"
cerrors "github.com/coreos/clair/utils/errors"
"github.com/coreos/clair/utils/types"
"github.com/google/cayley"
"github.com/google/cayley/graph"
"github.com/google/cayley/graph/path"
)
const (
FieldPackageOS = "os"
FieldPackageName = "name"
FieldPackageVersion = "version"
FieldPackageNextVersion = "nextVersion"
FieldPackagePreviousVersion = "previousVersion"
// This field is not selectable and is for internal use only.
fieldPackageIsValue = "package"
)
var FieldPackageAll = []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackageNextVersion, FieldPackagePreviousVersion}
// Package represents a package
type Package struct {
Node string `json:"-"`
OS string
Name string
Version types.Version
NextVersionNode string `json:"-"`
PreviousVersionNode string `json:"-"`
}
// GetNode returns a unique identifier for the graph node
// Requires the key fields: OS, Name, Version
func (p *Package) GetNode() string {
return fieldPackageIsValue + ":" + utils.Hash(p.Key())
}
// Key returns a unique string defining p
// Requires the key fields: OS, Name, Version
func (p *Package) Key() string {
return p.OS + ":" + p.Name + ":" + p.Version.String()
}
// Branch returns a unique string defining the Branch of p (OS, Name)
// Requires the key fields: OS, Name
func (p *Package) Branch() string {
return p.OS + ":" + p.Name
}
// AbstractPackage is a package that abstracts types.MaxVersion by using an
// AllVersions boolean field and by renaming Version to BeforeVersion,
// which makes more sense when used with a Vulnerability
type AbstractPackage struct {
OS string
Name string
AllVersions bool
BeforeVersion types.Version
}
// PackagesToAbstractPackages converts several Packages to AbstractPackages
func PackagesToAbstractPackages(packages []*Package) (abstractPackages []*AbstractPackage) {
for _, p := range packages {
ap := &AbstractPackage{OS: p.OS, Name: p.Name}
if p.Version != types.MaxVersion {
ap.BeforeVersion = p.Version
} else {
ap.AllVersions = true
}
abstractPackages = append(abstractPackages, ap)
}
return
}
// AbstractPackagesToPackages converts several AbstractPackages to Packages
func AbstractPackagesToPackages(abstractPackages []*AbstractPackage) (packages []*Package) {
for _, ap := range abstractPackages {
p := &Package{OS: ap.OS, Name: ap.Name}
if ap.AllVersions {
p.Version = types.MaxVersion
} else {
p.Version = ap.BeforeVersion
}
packages = append(packages, p)
}
return
}
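// A small illustration (not in the original source, package names are
// illustrative): a Package at types.MaxVersion maps to an AbstractPackage with
// AllVersions set, any other version maps to a BeforeVersion bound.
func exampleAbstractPackages() []*AbstractPackage {
	packages := []*Package{
		{OS: "debian:8", Name: "openssl", Version: types.NewVersionUnsafe("1.0.1k-3")}, // -> BeforeVersion 1.0.1k-3
		{OS: "debian:8", Name: "openssl", Version: types.MaxVersion},                   // -> AllVersions = true
	}
	return PackagesToAbstractPackages(packages)
}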
// InsertPackages inserts several packages in the database, using one transaction per package
// Packages are stored in linked lists, one per Branch. Each linked list has a start package and an end package defined with types.MinVersion/types.MaxVersion versions
//
// OS, Name and Version fields have to be specified.
// If the insertion is successful, the Node field is filled and represents the graph node identifier.
func InsertPackages(packageParameters []*Package) error {
if len(packageParameters) == 0 {
return nil
}
// Verify parameters
for _, pkg := range packageParameters {
if pkg.OS == "" || pkg.Name == "" || pkg.Version.String() == "" {
log.Warningf("could not insert an incomplete package [OS: %s, Name: %s, Version: %s]", pkg.OS, pkg.Name, pkg.Version)
return cerrors.NewBadRequestError("could not insert an incomplete package")
}
}
// Iterate over all the packages we need to insert
for _, packageParameter := range packageParameters {
t := cayley.NewTransaction()
// Does the package already exist?
pkg, err := FindOnePackage(packageParameter.OS, packageParameter.Name, packageParameter.Version, []string{})
if err != nil && err != cerrors.ErrNotFound {
return err
}
if pkg != nil {
packageParameter.Node = pkg.Node
continue
}
// Get all packages of the same branch (both from local cache and database)
branchPackages, err := FindAllPackagesByBranch(packageParameter.OS, packageParameter.Name, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackageNextVersion})
if err != nil {
return err
}
if len(branchPackages) == 0 {
// The branch does not exist yet
insertingStartPackage := packageParameter.Version == types.MinVersion
insertingEndPackage := packageParameter.Version == types.MaxVersion
// Create and insert an end package
endPackage := &Package{
OS: packageParameter.OS,
Name: packageParameter.Name,
Version: types.MaxVersion,
}
endPackage.Node = endPackage.GetNode()
t.AddQuad(cayley.Triple(endPackage.Node, fieldIs, fieldPackageIsValue))
t.AddQuad(cayley.Triple(endPackage.Node, FieldPackageOS, endPackage.OS))
t.AddQuad(cayley.Triple(endPackage.Node, FieldPackageName, endPackage.Name))
t.AddQuad(cayley.Triple(endPackage.Node, FieldPackageVersion, endPackage.Version.String()))
t.AddQuad(cayley.Triple(endPackage.Node, FieldPackageNextVersion, ""))
// Create the inserted package if it is different from a start/end package
var newPackage *Package
if !insertingStartPackage && !insertingEndPackage {
newPackage = &Package{
OS: packageParameter.OS,
Name: packageParameter.Name,
Version: packageParameter.Version,
}
newPackage.Node = newPackage.GetNode()
t.AddQuad(cayley.Triple(newPackage.Node, fieldIs, fieldPackageIsValue))
t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageOS, newPackage.OS))
t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageName, newPackage.Name))
t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageVersion, newPackage.Version.String()))
t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageNextVersion, endPackage.Node))
packageParameter.Node = newPackage.Node
}
// Create and insert a start package
startPackage := &Package{
OS: packageParameter.OS,
Name: packageParameter.Name,
Version: types.MinVersion,
}
startPackage.Node = startPackage.GetNode()
t.AddQuad(cayley.Triple(startPackage.Node, fieldIs, fieldPackageIsValue))
t.AddQuad(cayley.Triple(startPackage.Node, FieldPackageOS, startPackage.OS))
t.AddQuad(cayley.Triple(startPackage.Node, FieldPackageName, startPackage.Name))
t.AddQuad(cayley.Triple(startPackage.Node, FieldPackageVersion, startPackage.Version.String()))
if !insertingStartPackage && !insertingEndPackage {
t.AddQuad(cayley.Triple(startPackage.Node, FieldPackageNextVersion, newPackage.Node))
} else {
t.AddQuad(cayley.Triple(startPackage.Node, FieldPackageNextVersion, endPackage.Node))
}
// Set package node
if insertingEndPackage {
packageParameter.Node = endPackage.Node
} else if insertingStartPackage {
packageParameter.Node = startPackage.Node
}
} else {
// The branch already exists
// Create the package
newPackage := &Package{OS: packageParameter.OS, Name: packageParameter.Name, Version: packageParameter.Version}
newPackage.Node = "package:" + utils.Hash(newPackage.Key())
packageParameter.Node = newPackage.Node
t.AddQuad(cayley.Triple(newPackage.Node, fieldIs, fieldPackageIsValue))
t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageOS, newPackage.OS))
t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageName, newPackage.Name))
t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageVersion, newPackage.Version.String()))
// Sort branchPackages by version (including the new package)
branchPackages = append(branchPackages, newPackage)
sort.Sort(ByVersion(branchPackages))
// Find the predecessor/successor nodes of the new package in the sorted slice
newPackageKey := newPackage.Key()
var pred, succ *Package
var found bool
for _, p := range branchPackages {
equal := p.Key() == newPackageKey
if !equal && !found {
pred = p
} else if found {
succ = p
break
} else if equal {
found = true
continue
}
}
if pred == nil || succ == nil {
log.Warningf("could not find any package predecessor/successor of: [OS: %s, Name: %s, Version: %s].", packageParameter.OS, packageParameter.Name, packageParameter.Version)
return cerrors.NewBadRequestError("could not find package predecessor/successor")
}
// Link the new package into the branch
t.RemoveQuad(cayley.Triple(pred.Node, FieldPackageNextVersion, succ.Node))
pred.NextVersionNode = newPackage.Node
t.AddQuad(cayley.Triple(pred.Node, FieldPackageNextVersion, newPackage.Node))
newPackage.NextVersionNode = succ.Node
t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageNextVersion, succ.Node))
}
// Apply transaction
if err := store.ApplyTransaction(t); err != nil {
log.Errorf("failed transaction (InsertPackages): %s", err)
return ErrTransaction
}
}
// Return
return nil
}
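// A usage sketch (not part of the original file, assumes an opened database,
// values are illustrative): inserting a single version of a new branch also
// creates the special start (types.MinVersion) and end (types.MaxVersion)
// packages, so the branch always forms a complete linked list.
func exampleInsertBranch() ([]*Package, error) {
	pkg := &Package{OS: "debian:8", Name: "openssl", Version: types.NewVersionUnsafe("1.0.1k-3")}
	if err := InsertPackages([]*Package{pkg}); err != nil {
		return nil, err
	}
	// Expected to return three packages: MinVersion, 1.0.1k-3 and MaxVersion.
	return FindAllPackagesByBranch("debian:8", "openssl", FieldPackageAll)
}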
// FindOnePackage finds and returns a single package having the given OS, name and version, selecting the specified fields
func FindOnePackage(OS, name string, version types.Version, selectedFields []string) (*Package, error) {
packageParameter := Package{OS: OS, Name: name, Version: version}
p, err := toPackages(cayley.StartPath(store, packageParameter.GetNode()).Has(fieldIs, fieldPackageIsValue), selectedFields)
if err != nil {
return nil, err
}
if len(p) == 1 {
return p[0], nil
}
if len(p) > 1 {
log.Errorf("found multiple packages with identical data [OS: %s, Name: %s, Version: %s]", OS, name, version)
return nil, ErrInconsistent
}
return nil, cerrors.ErrNotFound
}
// FindAllPackagesByNodes finds and returns all packages given by their nodes, selecting the specified fields
func FindAllPackagesByNodes(nodes []string, selectedFields []string) ([]*Package, error) {
if len(nodes) == 0 {
return []*Package{}, nil
}
return toPackages(cayley.StartPath(store, nodes...).Has(fieldIs, fieldPackageIsValue), selectedFields)
}
// FindAllPackagesByBranch finds and returns all packages that belong to the given Branch, selecting the specified fields
func FindAllPackagesByBranch(OS, name string, selectedFields []string) ([]*Package, error) {
return toPackages(cayley.StartPath(store, name).In(FieldPackageName).Has(FieldPackageOS, OS), selectedFields)
}
// toPackages converts a path leading to one or multiple packages to Package structs, selecting the specified fields
func toPackages(path *path.Path, selectedFields []string) ([]*Package, error) {
var packages []*Package
var err error
saveFields(path, selectedFields, []string{FieldPackagePreviousVersion})
it, _ := path.BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
tags := make(map[string]graph.Value)
it.TagResults(tags)
pkg := Package{Node: store.NameOf(it.Result())}
for _, selectedField := range selectedFields {
switch selectedField {
case FieldPackageOS:
pkg.OS = store.NameOf(tags[FieldPackageOS])
case FieldPackageName:
pkg.Name = store.NameOf(tags[FieldPackageName])
case FieldPackageVersion:
pkg.Version, err = types.NewVersion(store.NameOf(tags[FieldPackageVersion]))
if err != nil {
log.Warningf("could not parse version of package %s: %s", pkg.Node, err.Error())
}
case FieldPackageNextVersion:
pkg.NextVersionNode = store.NameOf(tags[FieldPackageNextVersion])
case FieldPackagePreviousVersion:
pkg.PreviousVersionNode, err = toValue(cayley.StartPath(store, pkg.Node).In(FieldPackageNextVersion))
if err != nil {
log.Warningf("could not get previousVersion on package %s: %s.", pkg.Node, err.Error())
return []*Package{}, ErrInconsistent
}
default:
panic("unknown selectedField")
}
}
packages = append(packages, &pkg)
}
if it.Err() != nil {
log.Errorf("failed query in toPackages: %s", it.Err())
return []*Package{}, ErrBackendException
}
return packages, nil
}
// NextVersion finds and returns the package of the same branch that has the immediately higher version number, selecting the specified fields
// It requires that the FieldPackageNextVersion field has been selected on p
func (p *Package) NextVersion(selectedFields []string) (*Package, error) {
if p.NextVersionNode == "" {
return nil, nil
}
v, err := FindAllPackagesByNodes([]string{p.NextVersionNode}, selectedFields)
if err != nil {
return nil, err
}
if len(v) != 1 {
log.Errorf("found multiple packages when getting next version of package %s", p.Node)
return nil, ErrInconsistent
}
return v[0], nil
}
// NextVersions finds and returns all the packages of the same branch that have
// a higher version number, selecting the specified fields
// It requires that the FieldPackageNextVersion field has been selected on p
// The immediately higher version is listed first, and the special end-of-Branch package is last; p is not listed
func (p *Package) NextVersions(selectedFields []string) ([]*Package, error) {
var nextVersions []*Package
if !utils.Contains(FieldPackageNextVersion, selectedFields) {
selectedFields = append(selectedFields, FieldPackageNextVersion)
}
nextVersion, err := p.NextVersion(selectedFields)
if err != nil {
return []*Package{}, err
}
if nextVersion != nil {
nextVersions = append(nextVersions, nextVersion)
nextNextVersions, err := nextVersion.NextVersions(selectedFields)
if err != nil {
return []*Package{}, err
}
nextVersions = append(nextVersions, nextNextVersions...)
}
return nextVersions, nil
}
// PreviousVersion finds and returns the package of the same branch that has the
// immediately lower version number, selecting the specified fields
// It requires that the FieldPackagePreviousVersion field has been selected on p
func (p *Package) PreviousVersion(selectedFields []string) (*Package, error) {
if p.PreviousVersionNode == "" {
return nil, nil
}
v, err := FindAllPackagesByNodes([]string{p.PreviousVersionNode}, selectedFields)
if err != nil {
return nil, err
}
if len(v) == 0 {
return nil, nil
}
if len(v) != 1 {
log.Errorf("found multiple packages when getting previous version of package %s", p.Node)
return nil, ErrInconsistent
}
return v[0], nil
}
// PreviousVersions finds and returns all the packages of the same branch that
// have a lower version number, selecting the specified fields
// It requires that the FieldPackagePreviousVersion field has been selected on p
// The immediately lower version is listed first, and the special start-of-Branch
// package is last; p is not listed
func (p *Package) PreviousVersions(selectedFields []string) ([]*Package, error) {
var previousVersions []*Package
if !utils.Contains(FieldPackagePreviousVersion, selectedFields) {
selectedFields = append(selectedFields, FieldPackagePreviousVersion)
}
previousVersion, err := p.PreviousVersion(selectedFields)
if err != nil {
return []*Package{}, err
}
if previousVersion != nil {
previousVersions = append(previousVersions, previousVersion)
previousPreviousVersions, err := previousVersion.PreviousVersions(selectedFields)
if err != nil {
return []*Package{}, err
}
previousVersions = append(previousVersions, previousPreviousVersions...)
}
return previousVersions, nil
}
// ByVersion implements sort.Interface for []*Package based on the Version field
// It uses github.com/quentin-m/dpkgcomp internally and makes use of types.MinVersion/types.MaxVersion
type ByVersion []*Package
func (p ByVersion) Len() int { return len(p) }
func (p ByVersion) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p ByVersion) Less(i, j int) bool { return p[i].Version.Compare(p[j].Version) < 0 }

@ -1,194 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"math/rand"
"sort"
"testing"
"time"
"github.com/coreos/clair/config"
"github.com/coreos/clair/utils/types"
"github.com/stretchr/testify/assert"
)
func TestPackage(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
// Try to insert invalid packages
for _, invalidPkg := range []*Package{
&Package{OS: "", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")},
&Package{OS: "testOS", Name: "", Version: types.NewVersionUnsafe("1.0")},
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("")},
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("bad version")},
&Package{OS: "", Name: "", Version: types.NewVersionUnsafe("")},
} {
err := InsertPackages([]*Package{invalidPkg})
assert.Error(t, err)
}
// Insert a package
pkg1 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
err := InsertPackages([]*Package{pkg1})
if assert.Nil(t, err) {
// Find the inserted package and verify its content
pkg1b, err := FindOnePackage(pkg1.OS, pkg1.Name, pkg1.Version, FieldPackageAll)
if assert.Nil(t, err) && assert.NotNil(t, pkg1b) {
assert.Equal(t, pkg1.Node, pkg1b.Node)
assert.Equal(t, pkg1.OS, pkg1b.OS)
assert.Equal(t, pkg1.Name, pkg1b.Name)
assert.Equal(t, pkg1.Version, pkg1b.Version)
}
// Find packages from the inserted branch and verify their content
// (the first one should be a start package, the second one the inserted one and the third one the end package)
pkgs1c, err := FindAllPackagesByBranch(pkg1.OS, pkg1.Name, FieldPackageAll)
if assert.Nil(t, err) && assert.Equal(t, 3, len(pkgs1c)) {
sort.Sort(ByVersion(pkgs1c))
assert.Equal(t, pkg1.OS, pkgs1c[0].OS)
assert.Equal(t, pkg1.Name, pkgs1c[0].Name)
assert.Equal(t, types.MinVersion, pkgs1c[0].Version)
assert.Equal(t, pkg1.OS, pkgs1c[1].OS)
assert.Equal(t, pkg1.Name, pkgs1c[1].Name)
assert.Equal(t, pkg1.Version, pkgs1c[1].Version)
assert.Equal(t, pkg1.OS, pkgs1c[2].OS)
assert.Equal(t, pkg1.Name, pkgs1c[2].Name)
assert.Equal(t, types.MaxVersion, pkgs1c[2].Version)
}
}
// Insert multiple packages in the same branch, one in another branch, insert local duplicates and database duplicates as well
pkg2 := []*Package{
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("0.8")},
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("0.9")},
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}, // Already present in the database
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.1")},
&Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}, // Another branch
&Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}, // Local duplicates
}
nbInSameBranch := 4 + 2 // (start/end packages)
err = InsertPackages(shuffle(pkg2))
if assert.Nil(t, err) {
// Find packages from the inserted branch, verify their order and NextVersion / PreviousVersion
pkgs2b, err := FindAllPackagesByBranch("testOS", "testpkg1", FieldPackageAll)
if assert.Nil(t, err) && assert.Equal(t, nbInSameBranch, len(pkgs2b)) {
sort.Sort(ByVersion(pkgs2b))
for i := 0; i < nbInSameBranch; i = i + 1 {
if i == 0 {
assert.Equal(t, types.MinVersion, pkgs2b[0].Version)
} else if i < nbInSameBranch-2 {
assert.Equal(t, pkg2[i].Version, pkgs2b[i+1].Version)
nv, err := pkgs2b[i+1].NextVersion(FieldPackageAll)
assert.Nil(t, err)
assert.Equal(t, pkgs2b[i+2], nv)
if i > 0 {
pv, err := pkgs2b[i].PreviousVersion(FieldPackageAll)
assert.Nil(t, err)
assert.Equal(t, pkgs2b[i-1], pv)
} else {
pv, err := pkgs2b[i].PreviousVersion(FieldPackageAll)
assert.Nil(t, err)
assert.Nil(t, pv)
}
} else {
assert.Equal(t, types.MaxVersion, pkgs2b[nbInSameBranch-1].Version)
nv, err := pkgs2b[nbInSameBranch-1].NextVersion(FieldPackageAll)
assert.Nil(t, err)
assert.Nil(t, nv)
pv, err := pkgs2b[i].PreviousVersion(FieldPackageAll)
assert.Nil(t, err)
assert.Equal(t, pkgs2b[i-1], pv)
}
}
// NextVersions
nv, err := pkgs2b[0].NextVersions(FieldPackageAll)
if assert.Nil(t, err) && assert.Len(t, nv, nbInSameBranch-1) {
for i := 0; i < nbInSameBranch-1; i = i + 1 {
if i < nbInSameBranch-2 {
assert.Equal(t, pkg2[i].Version, nv[i].Version)
} else {
assert.Equal(t, types.MaxVersion, nv[i].Version)
}
}
}
// PreviousVersions
pv, err := pkgs2b[nbInSameBranch-1].PreviousVersions(FieldPackageAll)
if assert.Nil(t, err) && assert.Len(t, pv, nbInSameBranch-1) {
for i := 0; i < len(pv); i = i + 1 {
assert.Equal(t, pkgs2b[len(pkgs2b)-i-2], pv[i])
}
}
}
// Verify that the package that was already in the database kept its original node value (i.e. it was fetched rather than re-inserted)
assert.Contains(t, pkg2, pkg1)
}
// Insert duplicated latest packages directly, ensure only one is actually inserted. Then insert another package in the branch and ensure that its next version is the latest one
pkg3a := &Package{OS: "testOS", Name: "testpkg3", Version: types.MaxVersion}
pkg3b := &Package{OS: "testOS", Name: "testpkg3", Version: types.MaxVersion}
pkg3c := &Package{OS: "testOS", Name: "testpkg3", Version: types.MaxVersion}
err1 := InsertPackages([]*Package{pkg3a, pkg3b})
err2 := InsertPackages([]*Package{pkg3c})
if assert.Nil(t, err1) && assert.Nil(t, err2) {
assert.Equal(t, pkg3a, pkg3b)
assert.Equal(t, pkg3b, pkg3c)
}
pkg4 := Package{OS: "testOS", Name: "testpkg3", Version: types.NewVersionUnsafe("1.0")}
InsertPackages([]*Package{&pkg4})
pkgs34, _ := FindAllPackagesByBranch("testOS", "testpkg3", FieldPackageAll)
if assert.Len(t, pkgs34, 3) {
sort.Sort(ByVersion(pkgs34))
assert.Equal(t, pkg4.Node, pkgs34[1].Node)
assert.Equal(t, pkg3a.Node, pkgs34[2].Node)
assert.Equal(t, pkg3a.Node, pkgs34[1].NextVersionNode)
}
// Insert two identical packages but with "different" versions
// The second version should be simplified to the first one
// Therefore, we should just have three packages (the inserted one and the start/end packages of the branch)
InsertPackages([]*Package{&Package{OS: "testOS", Name: "testdirtypkg", Version: types.NewVersionUnsafe("0.1")}})
InsertPackages([]*Package{&Package{OS: "testOS", Name: "testdirtypkg", Version: types.NewVersionUnsafe("0:0.1")}})
dirtypkgs, err := FindAllPackagesByBranch("testOS", "testdirtypkg", FieldPackageAll)
assert.Nil(t, err)
assert.Len(t, dirtypkgs, 3)
}
func shuffle(packageParameters []*Package) []*Package {
rand.Seed(int64(time.Now().Nanosecond()))
sPackage := make([]*Package, len(packageParameters))
copy(sPackage, packageParameters)
for i := len(sPackage) - 1; i > 0; i-- {
j := rand.Intn(i)
sPackage[i], sPackage[j] = sPackage[j], sPackage[i]
}
return sPackage
}

@ -0,0 +1,127 @@
package pgsql
import (
"github.com/coreos/clair/database"
"github.com/coreos/clair/utils/types"
cerrors "github.com/coreos/clair/utils/errors"
)
func (pgSQL *pgSQL) insertFeature(feature database.Feature) (id int, err error) {
if feature.Name == "" {
return 0, cerrors.NewBadRequestError("could not find/insert invalid Feature")
}
if pgSQL.cache != nil {
if id, found := pgSQL.cache.Get("feature:" + feature.Name); found {
return id.(int), nil
}
}
// Find or create Namespace.
namespaceID, err := pgSQL.insertNamespace(feature.Namespace)
if err != nil {
return -1, err
}
// Find or create Feature.
err = pgSQL.QueryRow(getQuery("soi_feature"), feature.Name, namespaceID).Scan(&id)
if pgSQL.cache != nil {
pgSQL.cache.Add("feature:"+feature.Name, id)
}
return
}
func (pgSQL *pgSQL) insertFeatureVersion(featureVersion database.FeatureVersion) (id int, err error) {
if featureVersion.Version.String() == "" {
return 0, cerrors.NewBadRequestError("could not find/insert invalid FeatureVersion")
}
if pgSQL.cache != nil {
if id, found := pgSQL.cache.Get("featureversion:" + featureVersion.Feature.Name + ":" +
featureVersion.Version.String()); found {
return id.(int), nil
}
}
// Find or create Feature first.
featureID, err := pgSQL.insertFeature(featureVersion.Feature)
if err != nil {
return -1, err
}
// Begin transaction.
tx, err := pgSQL.Begin()
if err != nil {
return -1, err
}
// Find or create FeatureVersion.
var newOrExisting string
err = tx.QueryRow(getQuery("soi_featureversion"), featureID, featureVersion.Version).
Scan(&newOrExisting, &featureVersion.ID)
if err != nil {
tx.Rollback()
return -1, err
}
if newOrExisting == "exi" {
// That featureVersion already exists, return its id.
return featureVersion.ID, nil
}
// Link the new FeatureVersion with every vulnerability that affects it, by inserting into
// Vulnerability_Affects_FeatureVersion.
// Lock Vulnerability_FixedIn_Feature because we can't let it be modified while we modify
// Vulnerability_Affects_FeatureVersion.
_, err = tx.Exec(getQuery("l_share_vulnerability_fixedin_feature"))
if err != nil {
tx.Rollback()
return -1, err
}
// Select every vulnerability and the fixed version that affect this Feature.
rows, err := tx.Query(getQuery("s_vulnerability_fixedin_feature"), featureID)
if err != nil {
tx.Rollback()
return -1, err
}
defer rows.Close()
var fixedInID, vulnerabilityID int
var fixedInVersion types.Version
for rows.Next() {
err := rows.Scan(&fixedInID, &vulnerabilityID, &fixedInVersion)
if err != nil {
tx.Rollback()
return -1, err
}
if featureVersion.Version.Compare(fixedInVersion) < 0 {
// The version of the FeatureVersion we are inserting is lower than the fixed version on this
// Vulnerability, thus, this FeatureVersion is affected by it.
_, err := tx.Exec(getQuery("i_vulnerability_affects_featureversion"), vulnerabilityID,
featureVersion.ID, fixedInID)
if err != nil {
tx.Rollback()
return -1, err
}
}
}
// Commit transaction.
err = tx.Commit()
if err != nil {
tx.Rollback()
return -1, err
}
if pgSQL.cache != nil {
pgSQL.cache.Add("featureversion:"+featureVersion.Feature.Name+":"+
featureVersion.Version.String(), featureVersion.ID)
}
return featureVersion.ID, nil
}
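To make the linking rule above concrete, here is a minimal, self-contained sketch of the predicate used by insertFeatureVersion. It relies only on types.NewVersionUnsafe and Version.Compare, which already appear in this change; the helper name isAffectedBy is illustrative and not part of the code.

package main

import (
	"fmt"

	"github.com/coreos/clair/utils/types"
)

// isAffectedBy mirrors the check in insertFeatureVersion: a FeatureVersion is
// affected by a vulnerability when its version is strictly lower than the
// version in which that vulnerability is fixed.
func isAffectedBy(featureVersion, fixedIn types.Version) bool {
	return featureVersion.Compare(fixedIn) < 0
}

func main() {
	fmt.Println(isAffectedBy(types.NewVersionUnsafe("1.0"), types.NewVersionUnsafe("2.0"))) // true: affected
	fmt.Println(isAffectedBy(types.NewVersionUnsafe("2.0"), types.NewVersionUnsafe("2.0"))) // false: already fixed
}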

@ -0,0 +1,88 @@
package pgsql
import (
"testing"
"github.com/coreos/clair/database"
"github.com/coreos/clair/utils/types"
"github.com/stretchr/testify/assert"
)
func TestInsertFeature(t *testing.T) {
datastore, err := OpenForTest("InsertFeature", false)
if err != nil {
t.Error(err)
return
}
defer datastore.Close()
// Invalid Feature.
id0, err := datastore.insertFeature(database.Feature{})
assert.NotNil(t, err)
assert.Zero(t, id0)
id0, err = datastore.insertFeature(database.Feature{
Namespace: database.Namespace{},
Name: "TestInsertFeature0",
})
assert.NotNil(t, err)
assert.Zero(t, id0)
// Insert Feature and ensure we can find it.
feature := database.Feature{
Namespace: database.Namespace{Name: "TestInsertFeatureNamespace1"},
Name: "TestInsertFeature1",
}
id1, err := datastore.insertFeature(feature)
assert.Nil(t, err)
id2, err := datastore.insertFeature(feature)
assert.Nil(t, err)
assert.Equal(t, id1, id2)
// Insert invalid FeatureVersion.
for _, invalidFeatureVersion := range []database.FeatureVersion{
database.FeatureVersion{
Feature: database.Feature{},
Version: types.NewVersionUnsafe("1.0"),
},
database.FeatureVersion{
Feature: database.Feature{
Namespace: database.Namespace{},
Name: "TestInsertFeature2",
},
Version: types.NewVersionUnsafe("1.0"),
},
database.FeatureVersion{
Feature: database.Feature{
Namespace: database.Namespace{Name: "TestInsertFeatureNamespace2"},
Name: "TestInsertFeature2",
},
Version: types.NewVersionUnsafe(""),
},
database.FeatureVersion{
Feature: database.Feature{
Namespace: database.Namespace{Name: "TestInsertFeatureNamespace2"},
Name: "TestInsertFeature2",
},
Version: types.NewVersionUnsafe("bad version"),
},
} {
id3, err := datastore.insertFeatureVersion(invalidFeatureVersion)
assert.Error(t, err)
assert.Zero(t, id3)
}
// Insert FeatureVersion and ensure we can find it.
featureVersion := database.FeatureVersion{
Feature: database.Feature{
Namespace: database.Namespace{Name: "TestInsertFeatureNamespace1"},
Name: "TestInsertFeature1",
},
Version: types.NewVersionUnsafe("2:3.0-imba"),
}
id4, err := datastore.insertFeatureVersion(featureVersion)
assert.Nil(t, err)
id5, err := datastore.insertFeatureVersion(featureVersion)
assert.Nil(t, err)
assert.Equal(t, id4, id5)
}

@ -0,0 +1,58 @@
package pgsql
import (
"database/sql"
cerrors "github.com/coreos/clair/utils/errors"
)
// InsertKeyValue stores (or updates) a single key / value tuple.
func (pgSQL *pgSQL) InsertKeyValue(key, value string) (err error) {
if key == "" || value == "" {
log.Warning("could not insert a flag which has an empty name or value")
return cerrors.NewBadRequestError("could not insert a flag which has an empty name or value")
}
// Upsert.
//
// Note: UPSERT works only on >= PostgreSQL 9.5 which is not yet supported by AWS RDS.
// The best solution is currently the use of http://dba.stackexchange.com/a/13477,
// but the key/value storage doesn't need to be super-efficient or super-safe at the
// moment, so we use a simple client-side update-then-insert retry loop instead, based on
// http://postgresql.org/docs/current/static/plpgsql-control-structures.html.
// TODO(Quentin-M): Enable Upsert as soon as 9.5 is stable.
for {
// First, try to update.
r, err := pgSQL.Exec(getQuery("u_keyvalue"), value, key)
if err != nil {
return err
}
if n, _ := r.RowsAffected(); n > 0 {
// Updated successfully.
return nil
}
// Try to insert the key.
// If someone else inserts the same key concurrently, we could get a unique-key violation error.
_, err = pgSQL.Exec(getQuery("i_keyvalue"), key, value)
if err != nil {
if isErrUniqueViolation(err) {
// Got unique constraint violation, retry.
continue
}
return err
}
return nil
}
}
// GetKeyValue reads a single key/value tuple and returns an empty string if the key doesn't exist.
func (pgSQL *pgSQL) GetKeyValue(key string) (value string, err error) {
err = pgSQL.QueryRow(getQuery("s_keyvalue"), key).Scan(&value)
if err == sql.ErrNoRows {
return "", nil
}
return
}
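For reference, once PostgreSQL 9.5 can be required, the update-then-insert retry loop above could collapse into a single statement. This is a hedged sketch and not part of this change: the ON CONFLICT clause is standard 9.5 syntax, upsertKeyValue is a hypothetical helper, and it relies on the "database/sql" import this file already has.

// upsertKeyValue is a hypothetical 9.5-only replacement for InsertKeyValue's
// retry loop, using PostgreSQL's native upsert (ON CONFLICT ... DO UPDATE).
func upsertKeyValue(db *sql.DB, key, value string) error {
	_, err := db.Exec(
		`INSERT INTO KeyValue(key, value) VALUES ($1, $2)
		 ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value`,
		key, value)
	return err
}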

@ -0,0 +1,38 @@
package pgsql
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestKeyValue(t *testing.T) {
datastore, err := OpenForTest("KeyValue", false)
if err != nil {
t.Error(err)
return
}
defer datastore.Close()
// Get non-existing key/value
f, err := datastore.GetKeyValue("test")
assert.Nil(t, err)
assert.Empty(t, "", f)
// Try to insert invalid key/value.
assert.Error(t, datastore.InsertKeyValue("test", ""))
assert.Error(t, datastore.InsertKeyValue("", "test"))
assert.Error(t, datastore.InsertKeyValue("", ""))
// Insert and verify.
assert.Nil(t, datastore.InsertKeyValue("test", "test1"))
f, err = datastore.GetKeyValue("test")
assert.Nil(t, err)
assert.Equal(t, "test1", f)
// Update and verify.
assert.Nil(t, datastore.InsertKeyValue("test", "test2"))
f, err = datastore.GetKeyValue("test")
assert.Nil(t, err)
assert.Equal(t, "test2", f)
}

@ -0,0 +1,283 @@
package pgsql
import (
"database/sql"
"github.com/coreos/clair/database"
cerrors "github.com/coreos/clair/utils/errors"
"github.com/guregu/null/zero"
)
func (pgSQL *pgSQL) FindLayer(name string, withFeatures, withVulnerabilities bool) (database.Layer, error) {
// Find the layer
var layer database.Layer
var parentName sql.NullString
var namespaceName sql.NullString
err := pgSQL.QueryRow(getQuery("s_layer"), name).
Scan(&layer.ID, &layer.Name, &layer.EngineVersion, &parentName, &namespaceName)
if err == sql.ErrNoRows {
return layer, cerrors.ErrNotFound
}
if err != nil {
return layer, err
}
if parentName.Valid {
layer.Parent = &database.Layer{Name: parentName.String}
}
if namespaceName.Valid {
layer.Namespace = &database.Namespace{Name: namespaceName.String}
}
// Find its features
if withFeatures || withVulnerabilities {
featureVersions, err := pgSQL.getLayerFeatureVersions(layer.ID, !withFeatures)
if err != nil {
return layer, err
}
layer.Features = featureVersions
if withVulnerabilities {
// Load the vulnerabilities that affect the FeatureVersions.
err := pgSQL.loadAffectedBy(layer.Features)
if err != nil {
return layer, err
}
}
}
return layer, nil
}
// getLayerFeatureVersions returns the list of database.FeatureVersion that a database.Layer has.
// If idOnly is true, the returned structs only have their ID filled. Otherwise,
// their versions, feature names and feature namespace names are filled as well.
func (pgSQL *pgSQL) getLayerFeatureVersions(layerID int, idOnly bool) ([]database.FeatureVersion, error) {
var featureVersions []database.FeatureVersion
// Build query
var query string
if idOnly {
query = getQuery("s_layer_featureversion_id_only")
} else {
query = getQuery("s_layer_featureversion")
}
// Query
rows, err := pgSQL.Query(query, layerID)
if err != nil && err != sql.ErrNoRows {
return featureVersions, err
}
defer rows.Close()
// Scan query
var modification string
mapFeatureVersions := make(map[int]database.FeatureVersion)
for rows.Next() {
var featureVersion database.FeatureVersion
if idOnly {
err = rows.Scan(&featureVersion.ID, &modification)
if err != nil {
return featureVersions, err
}
} else {
err = rows.Scan(&featureVersion.ID, &modification, &featureVersion.Feature.Namespace.ID,
&featureVersion.Feature.Namespace.Name, &featureVersion.Feature.ID,
&featureVersion.Feature.Name, &featureVersion.ID, &featureVersion.Version)
if err != nil {
return featureVersions, err
}
}
// Do transitive closure
switch modification {
case "add":
mapFeatureVersions[featureVersion.ID] = featureVersion
case "del":
delete(mapFeatureVersions, featureVersion.ID)
default:
log.Warningf("unknown Layer_diff_FeatureVersion's modification: %s", modification)
return featureVersions, database.ErrInconsistent
}
}
if err = rows.Err(); err != nil {
return featureVersions, err
}
// Build result by converting our map to a slice
for _, featureVersion := range mapFeatureVersions {
featureVersions = append(featureVersions, featureVersion)
}
return featureVersions, nil
}
// loadAffectedBy fills the AffectedBy field of each given FeatureVersion with the
// list of database.Vulnerability that affect it.
func (pgSQL *pgSQL) loadAffectedBy(featureVersions []database.FeatureVersion) error {
if len(featureVersions) == 0 {
return nil
}
// Construct list of FeatureVersion IDs, we will do a single query
featureVersionIDs := make([]int, 0, len(featureVersions))
for i := 0; i < len(featureVersions); i++ {
featureVersionIDs = append(featureVersionIDs, featureVersions[i].ID)
}
rows, err := pgSQL.Query(getQuery("s_featureversions_vulnerabilities"),
buildInputArray(featureVersionIDs))
if err != nil && err != sql.ErrNoRows {
return err
}
defer rows.Close()
vulnerabilities := make(map[int][]database.Vulnerability, len(featureVersions))
var featureversionID int
for rows.Next() {
var vulnerability database.Vulnerability
err := rows.Scan(&featureversionID, &vulnerability.ID, &vulnerability.Name,
&vulnerability.Description, &vulnerability.Link, &vulnerability.Severity,
&vulnerability.Namespace.Name, &vulnerability.FixedBy)
if err != nil {
return err
}
vulnerabilities[featureversionID] = append(vulnerabilities[featureversionID], vulnerability)
}
if err = rows.Err(); err != nil {
return err
}
// Assign vulnerabilities to every FeatureVersions
for i := 0; i < len(featureVersions); i++ {
featureVersions[i].AffectedBy = vulnerabilities[featureVersions[i].ID]
}
return nil
}
// InsertLayer inserts a single layer in the database.
//
// The Name and EngineVersion fields are required.
// The Parent, Namespace, Features are optional.
// However, please note that the Parent field, if provided, is expected to have been retrieved
// using FindLayer with its Features.
//
// The Name must be unique for two different layers.
//
// If the Layer already exists and the EngineVersion value of the inserted layer is higher than the
// stored value, the EngineVersion, the Namespace and the Feature list will be updated.
//
// Internally, only Feature additions/removals are stored for each layer. If a layer has a parent,
// the Feature list will be compared to the parent's Feature list and the difference will be stored.
// Note that when the Namespace of a layer differs from its parent's, it is expected that several
// Features that were already included in the parent will have their Namespace updated as well
// (this happens when Feature detectors rely on the detected layer Namespace). However, if a listed
// Feature has the same Name/Version as in its parent, InsertLayer considers that the Feature hasn't
// been modified.
// TODO(Quentin-M): This behavior should be implemented at the Feature detectors level.
func (pgSQL *pgSQL) InsertLayer(layer database.Layer) error {
// Verify parameters
if layer.Name == "" {
log.Warning("could not insert a layer which has an empty Name")
return cerrors.NewBadRequestError("could not insert a layer which has an empty Name")
}
// Get a potentially existing layer.
existingLayer, err := pgSQL.FindLayer(layer.Name, true, false)
if err != nil && err != cerrors.ErrNotFound {
return err
} else if err == nil {
layer.ID = existingLayer.ID
}
// Begin transaction.
tx, err := pgSQL.Begin()
if err != nil {
return err
}
// Find or insert namespace if provided.
var namespaceID zero.Int
if layer.Namespace != nil {
n, err := pgSQL.insertNamespace(*layer.Namespace)
if err != nil {
tx.Rollback()
return err
}
namespaceID = zero.IntFrom(int64(n))
}
if layer.ID == 0 {
// Insert a new layer.
var parentID zero.Int
if layer.Parent != nil {
if layer.Parent.ID == 0 {
log.Warning("Parent is expected to be retrieved from database when inserting a layer.")
tx.Rollback()
return cerrors.NewBadRequestError("Parent is expected to be retrieved from database when inserting a layer.")
}
parentID = zero.IntFrom(int64(layer.Parent.ID))
// Import the Namespace from the parent if this layer doesn't specify one.
if zero.IsNull(namespaceID) {
namespaceID = zero.IntFrom(int64(layer.Parent.Namespace.ID))
}
}
err = tx.QueryRow(getQuery("i_layer"), layer.Name, layer.EngineVersion, parentID, namespaceID).
Scan(&layer.ID)
if err != nil {
tx.Rollback()
return err
}
} else {
if existingLayer.EngineVersion >= layer.EngineVersion {
// The layer exists and has an equal or higher engine version: do nothing,
// but end the transaction that was just opened.
tx.Rollback()
return nil
}
// Update an existing layer.
_, err = tx.Exec(getQuery("u_layer"), layer.ID, layer.EngineVersion, namespaceID)
if err != nil {
tx.Rollback()
return err
}
}
// Update Layer_diff_FeatureVersion now.
updateDiffFeatureVersions(tx, &layer, &existingLayer)
// Commit transaction.
err = tx.Commit()
if err != nil {
tx.Rollback()
return err
}
return nil
}
func updateDiffFeatureVersions(tx *sql.Tx, layer, existingLayer *database.Layer) {
// TODO
if existingLayer != nil {
// We are updating a layer: we need to diff the Features against the existing Layer's.
} else if layer.Parent == nil {
// There is no parent: every Feature is an addition.
} else if layer.Parent != nil {
// There is a parent: we need to diff the Features against the parent's.
}
}
func (pgSQL *pgSQL) DeleteLayer(name string) error {
// TODO
return nil
}
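The add/del bookkeeping that getLayerFeatureVersions performs can be illustrated without a database. A minimal sketch follows (plain Go with hypothetical types, not part of this change); the replay loop mirrors the switch on `modification` above, applied from the root layer down to the queried one.

package main

import "fmt"

// featureDiff is a simplified stand-in for a Layer_diff_FeatureVersion row.
type featureDiff struct {
	featureVersionID int
	modification     string // "add" or "del"
}

// replay applies each ancestor layer's stored diff in order, which is what the
// recursive s_layer_featureversion query feeds into the scanning loop.
func replay(diffs []featureDiff) map[int]bool {
	features := make(map[int]bool)
	for _, d := range diffs {
		switch d.modification {
		case "add":
			features[d.featureVersionID] = true
		case "del":
			delete(features, d.featureVersionID)
		}
	}
	return features
}

func main() {
	// layer-1 adds FeatureVersions 1 and 2; layer-2 upgrades 2 to 3 (del 2, add 3).
	// Prints a map containing FeatureVersions 1 and 3 only.
	fmt.Println(replay([]featureDiff{{1, "add"}, {2, "add"}, {2, "del"}, {3, "add"}}))
}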

@ -0,0 +1,246 @@
package pgsql
import (
"fmt"
"testing"
"github.com/coreos/clair/database"
"github.com/coreos/clair/utils/types"
"github.com/stretchr/testify/assert"
)
func TestFindLayer(t *testing.T) {
datastore, err := OpenForTest("FindLayer", true)
if err != nil {
t.Error(err)
return
}
defer datastore.Close()
// Layer-0: no parent, no namespace, no feature, no vulnerability
layer, err := datastore.FindLayer("layer-0", false, false)
if assert.Nil(t, err) && assert.NotNil(t, layer) {
assert.Equal(t, "layer-0", layer.Name)
assert.Nil(t, layer.Namespace)
assert.Nil(t, layer.Parent)
assert.Equal(t, 1, layer.EngineVersion)
assert.Len(t, layer.Features, 0)
}
layer, err = datastore.FindLayer("layer-0", true, false)
if assert.Nil(t, err) && assert.NotNil(t, layer) {
assert.Len(t, layer.Features, 0)
}
// Layer-1: one parent, adds two features, one vulnerability
layer, err = datastore.FindLayer("layer-1", false, false)
if assert.Nil(t, err) && assert.NotNil(t, layer) {
assert.Equal(t, layer.Name, "layer-1")
assert.Equal(t, "debian:7", layer.Namespace.Name)
if assert.NotNil(t, layer.Parent) {
assert.Equal(t, "layer-0", layer.Parent.Name)
}
assert.Equal(t, 1, layer.EngineVersion)
assert.Len(t, layer.Features, 0)
}
layer, err = datastore.FindLayer("layer-1", true, false)
if assert.Nil(t, err) && assert.NotNil(t, layer) && assert.Len(t, layer.Features, 2) {
for _, featureVersion := range layer.Features {
assert.Equal(t, "debian:7", featureVersion.Feature.Namespace.Name)
switch featureVersion.Feature.Name {
case "wechat":
assert.Equal(t, types.NewVersionUnsafe("0.5"), featureVersion.Version)
case "openssl":
assert.Equal(t, types.NewVersionUnsafe("1.0"), featureVersion.Version)
default:
t.Errorf("unexpected package %s for layer-1", featureVersion.Feature.Name)
}
}
}
layer, err = datastore.FindLayer("layer-1", true, true)
if assert.Nil(t, err) && assert.NotNil(t, layer) && assert.Len(t, layer.Features, 2) {
for _, featureVersion := range layer.Features {
assert.Equal(t, "debian:7", featureVersion.Feature.Namespace.Name)
switch featureVersion.Feature.Name {
case "wechat":
assert.Equal(t, types.NewVersionUnsafe("0.5"), featureVersion.Version)
case "openssl":
assert.Equal(t, types.NewVersionUnsafe("1.0"), featureVersion.Version)
if assert.Len(t, featureVersion.AffectedBy, 1) {
assert.Equal(t, "debian:7", featureVersion.AffectedBy[0].Namespace.Name)
assert.Equal(t, "CVE-OPENSSL-1-DEB7", featureVersion.AffectedBy[0].Name)
assert.Equal(t, types.High, featureVersion.AffectedBy[0].Severity)
assert.Equal(t, "A vulnerability affecting OpenSSL < 2.0 on Debian 7.0", featureVersion.AffectedBy[0].Description)
assert.Equal(t, "http://google.com/#q=CVE-OPENSSL-1-DEB7", featureVersion.AffectedBy[0].Link)
assert.Equal(t, types.NewVersionUnsafe("2.0"), featureVersion.AffectedBy[0].FixedBy)
}
default:
t.Errorf("unexpected package %s for layer-1", featureVersion.Feature.Name)
}
}
}
}
func TestInsertLayer(t *testing.T) {
datastore, err := OpenForTest("InsertLayer", true)
if err != nil {
t.Error(err)
return
}
defer datastore.Close()
// Insert invalid layer.
testInsertLayerInvalid(t, datastore)
// Insert a layer tree.
testInsertLayerTree(t, datastore)
// Update layer.
// TODO(Quentin-M)
// Delete layer.
// TODO(Quentin-M)
}
func testInsertLayerInvalid(t *testing.T, datastore database.Datastore) {
invalidLayers := []database.Layer{
database.Layer{},
database.Layer{Name: "layer0", Parent: &database.Layer{}},
database.Layer{Name: "layer0", Parent: &database.Layer{Name: "UnknownLayer"}},
}
for _, invalidLayer := range invalidLayers {
err := datastore.InsertLayer(invalidLayer)
assert.Error(t, err)
}
}
func testInsertLayerTree(t *testing.T, datastore database.Datastore) {
f1 := database.FeatureVersion{
Feature: database.Feature{
Namespace: database.Namespace{Name: "TestInsertLayerNamespace2"},
Name: "TestInsertLayerFeature1",
},
Version: types.NewVersionUnsafe("1.0"),
}
f2 := database.FeatureVersion{
Feature: database.Feature{
Namespace: database.Namespace{Name: "TestInsertLayerNamespace2"},
Name: "TestInsertLayerFeature2",
},
Version: types.NewVersionUnsafe("0.34"),
}
f3 := database.FeatureVersion{
Feature: database.Feature{
Namespace: database.Namespace{Name: "TestInsertLayerNamespace2"},
Name: "TestInsertLayerFeature3",
},
Version: types.NewVersionUnsafe("0.56"),
}
f4 := database.FeatureVersion{
Feature: database.Feature{
Namespace: database.Namespace{Name: "TestInsertLayerNamespace3"},
Name: "TestInsertLayerFeature2",
},
Version: types.NewVersionUnsafe("0.34"),
}
f5 := database.FeatureVersion{
Feature: database.Feature{
Namespace: database.Namespace{Name: "TestInsertLayerNamespace3"},
Name: "TestInsertLayerFeature2",
},
Version: types.NewVersionUnsafe("0.57"),
}
f6 := database.FeatureVersion{
Feature: database.Feature{
Namespace: database.Namespace{Name: "TestInsertLayerNamespace3"},
Name: "TestInsertLayerFeature4",
},
Version: types.NewVersionUnsafe("0.666"),
}
layers := []database.Layer{
database.Layer{
Name: "TestInsertLayer1",
},
database.Layer{
Name: "TestInsertLayer2",
Parent: &database.Layer{Name: "TestInsertLayer1"},
Namespace: &database.Namespace{Name: "TestInsertLayerNamespace1"},
},
// This layer changes the namespace and adds Features.
database.Layer{
Name: "TestInsertLayer3",
Parent: &database.Layer{Name: "TestInsertLayer2"},
Namespace: &database.Namespace{Name: "TestInsertLayerNamespace2"},
Features: []database.FeatureVersion{f1, f2, f3},
},
// This layer covers the case where the last layer doesn't provide any Feature.
database.Layer{
Name: "TestInsertLayer4a",
Parent: &database.Layer{Name: "TestInsertLayer3"},
},
// This layer covers the case where the last layer provides Features.
// It also modifies the Namespace ("upgrade") but keeps some Features not upgraded, their
// Namespaces should then remain unchanged.
database.Layer{
Name: "TestInsertLayer4b",
Parent: &database.Layer{Name: "TestInsertLayer3"},
Namespace: &database.Namespace{Name: "TestInsertLayerNamespace3"},
Features: []database.FeatureVersion{
// Deletes TestInsertLayerFeature1.
// Keep TestInsertLayerFeature2 (old Namespace should be kept):
f4,
// Upgrades TestInsertLayerFeature3 (with new Namespace):
f5,
// Adds TestInsertLayerFeature4:
f6,
},
},
}
var err error
retrievedLayers := make(map[string]database.Layer)
for _, layer := range layers {
if layer.Parent != nil {
// Retrieve from database its parent and assign.
parent := retrievedLayers[layer.Parent.Name]
layer.Parent = &parent
}
err = datastore.InsertLayer(layer)
assert.Nil(t, err)
retrievedLayers[layer.Name], err = datastore.FindLayer(layer.Name, true, false)
assert.Nil(t, err)
}
l4a := retrievedLayers["TestInsertLayer4a"]
assert.Equal(t, "TestInsertLayerNamespace2", l4a.Namespace.Name)
assert.Len(t, l4a.Features, 3)
for _, featureVersion := range l4a.Features {
if !cmpFV(featureVersion, f1) && !cmpFV(featureVersion, f2) && !cmpFV(featureVersion, f3) {
assert.Fail(t, fmt.Sprintf("TestInsertLayer4a contains an unexpected package: %#v. Should contain %#v, %#v and %#v.", featureVersion, f1, f2, f3))
}
}
l4b := retrievedLayers["TestInsertLayer4b"]
assert.Equal(t, "TestInsertLayerNamespace3", l4a.Namespace.Name)
assert.Len(t, l4a.Features, 3)
for _, featureVersion := range l4a.Features {
if cmpFV(featureVersion, f2) && cmpFV(featureVersion, f5) && cmpFV(featureVersion, f6) {
assert.Error(t, fmt.Errorf("TestInsertLayer4a contains an unexpected package: %#v. Should contain %#v and %#v and %#v.", featureVersion, f2, f4, f6))
}
}
}
func cmpFV(a, b database.FeatureVersion) bool {
return a.Feature.Name == b.Feature.Name &&
a.Feature.Namespace.Name == b.Feature.Namespace.Name &&
a.Version.String() == b.Version.String()
}

@ -0,0 +1,127 @@
-- +goose Up
-- -----------------------------------------------------
-- Table Namespace
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS Namespace (
id SERIAL PRIMARY KEY,
name VARCHAR(128) NULL);
-- -----------------------------------------------------
-- Table Layer
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS Layer (
id SERIAL PRIMARY KEY,
name VARCHAR(128) NOT NULL UNIQUE,
engineversion SMALLINT NOT NULL,
parent_id INT NULL REFERENCES Layer,
namespace_id INT NULL REFERENCES Namespace);
CREATE INDEX ON Layer (parent_id);
CREATE INDEX ON Layer (namespace_id);
-- -----------------------------------------------------
-- Table Feature
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS Feature (
id SERIAL PRIMARY KEY,
namespace_id INT NOT NULL REFERENCES Namespace,
name VARCHAR(128) NOT NULL,
UNIQUE (namespace_id, name));
-- -----------------------------------------------------
-- Table FeatureVersion
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS FeatureVersion (
id SERIAL PRIMARY KEY,
feature_id INT NOT NULL REFERENCES Feature,
version VARCHAR(128) NOT NULL);
CREATE INDEX ON FeatureVersion (feature_id);
-- -----------------------------------------------------
-- Table Layer_diff_FeatureVersion
-- -----------------------------------------------------
CREATE TYPE modification AS ENUM ('add', 'del');
CREATE TABLE IF NOT EXISTS Layer_diff_FeatureVersion (
id SERIAL PRIMARY KEY,
layer_id INT NOT NULL REFERENCES Layer ON DELETE CASCADE,
featureversion_id INT NOT NULL REFERENCES FeatureVersion,
modification modification NOT NULL,
UNIQUE (layer_id, featureversion_id));
CREATE INDEX ON Layer_diff_FeatureVersion (layer_id);
CREATE INDEX ON Layer_diff_FeatureVersion (featureversion_id);
CREATE INDEX ON Layer_diff_FeatureVersion (featureversion_id, layer_id);
-- -----------------------------------------------------
-- Table Vulnerability
-- -----------------------------------------------------
CREATE TYPE severity AS ENUM ('Unknown', 'Negligible', 'Low', 'Medium', 'High', 'Critical', 'Defcon1');
CREATE TABLE IF NOT EXISTS Vulnerability (
id SERIAL PRIMARY KEY,
namespace_id INT NOT NULL REFERENCES Namespace,
name VARCHAR(128) NOT NULL,
description TEXT NULL,
link VARCHAR(128) NULL,
severity severity NULL,
UNIQUE (namespace_id, name));
-- -----------------------------------------------------
-- Table Vulnerability_FixedIn_Feature
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS Vulnerability_FixedIn_Feature (
id SERIAL PRIMARY KEY,
vulnerability_id INT NOT NULL REFERENCES Vulnerability ON DELETE CASCADE,
feature_id INT NOT NULL REFERENCES Feature,
version VARCHAR(128) NOT NULL,
UNIQUE (vulnerability_id, feature_id));
CREATE INDEX ON Vulnerability_FixedIn_Feature (feature_id, vulnerability_id);
-- -----------------------------------------------------
-- Table Vulnerability_Affects_FeatureVersion
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS Vulnerability_Affects_FeatureVersion (
id SERIAL PRIMARY KEY,
vulnerability_id INT NOT NULL REFERENCES Vulnerability ON DELETE CASCADE,
featureversion_id INT NOT NULL REFERENCES FeatureVersion,
fixedin_id INT NOT NULL REFERENCES Vulnerability_FixedIn_Feature,
UNIQUE (vulnerability_id, featureversion_id));
CREATE INDEX ON Vulnerability_Affects_FeatureVersion (fixedin_id);
CREATE INDEX ON Vulnerability_Affects_FeatureVersion (featureversion_id, vulnerability_id);
-- -----------------------------------------------------
-- Table KeyValue
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS KeyValue (
id SERIAL PRIMARY KEY,
key VARCHAR(128) NOT NULL UNIQUE,
value TEXT);
-- +goose Down
DROP TABLE IF EXISTS Namespace,
Layer,
Feature,
FeatureVersion,
Layer_diff_FeatureVersion,
Vulnerability,
Vulnerability_FixedIn_Feature,
Vulnerability_Affects_FeatureVersion,
KeyValue
CASCADE;

@ -0,0 +1,26 @@
package pgsql
import (
"github.com/coreos/clair/database"
cerrors "github.com/coreos/clair/utils/errors"
)
func (pgSQL *pgSQL) insertNamespace(namespace database.Namespace) (id int, err error) {
if namespace.Name == "" {
return 0, cerrors.NewBadRequestError("could not find/insert invalid Namespace")
}
if pgSQL.cache != nil {
if id, found := pgSQL.cache.Get("namespace:" + namespace.Name); found {
return id.(int), nil
}
}
err = pgSQL.QueryRow(getQuery("soi_namespace"), namespace.Name).Scan(&id)
if pgSQL.cache != nil {
pgSQL.cache.Add("namespace:"+namespace.Name, id)
}
return
}
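insertNamespace, like insertFeature, wraps its select-or-insert query in a small cache-aside pattern backed by the vendored hashicorp/golang-lru ARC cache. A minimal sketch of that idiom in isolation is shown below; findOrInsertCached and its lookup callback are hypothetical and not part of this change.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

// findOrInsertCached checks the cache first, falls back to the given database
// lookup, then stores the result so the next call for the same key is a hit.
func findOrInsertCached(cache *lru.ARCCache, key string, lookup func() (int, error)) (int, error) {
	if cache != nil {
		if id, found := cache.Get(key); found {
			return id.(int), nil
		}
	}
	id, err := lookup()
	if err != nil {
		return 0, err
	}
	if cache != nil {
		cache.Add(key, id)
	}
	return id, nil
}

func main() {
	cache, _ := lru.NewARC(16)
	id, _ := findOrInsertCached(cache, "namespace:debian:7", func() (int, error) { return 1, nil })
	fmt.Println(id) // 1, and now cached
}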

@ -0,0 +1,29 @@
package pgsql
import (
"testing"
"github.com/coreos/clair/database"
"github.com/stretchr/testify/assert"
)
func TestInsertNamespace(t *testing.T) {
datastore, err := OpenForTest("InsertNamespace", false)
if err != nil {
t.Error(err)
return
}
defer datastore.Close()
// Invalid Namespace.
id0, err := datastore.insertNamespace(database.Namespace{})
assert.NotNil(t, err)
assert.Zero(t, id0)
// Insert Namespace and ensure we can find it.
id1, err := datastore.insertNamespace(database.Namespace{Name: "TestInsertNamespace1"})
assert.Nil(t, err)
id2, err := datastore.insertNamespace(database.Namespace{Name: "TestInsertNamespace1"})
assert.Nil(t, err)
assert.Equal(t, id1, id2)
}

@ -0,0 +1,190 @@
package pgsql
import (
"database/sql"
"fmt"
"io/ioutil"
"path"
"runtime"
"strconv"
"strings"
"bitbucket.org/liamstask/goose/lib/goose"
"github.com/coreos/clair/config"
"github.com/coreos/clair/database"
"github.com/coreos/pkg/capnslog"
"github.com/hashicorp/golang-lru"
"github.com/lib/pq"
"github.com/pborman/uuid"
)
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "pgsql")
type pgSQL struct {
*sql.DB
cache *lru.ARCCache
}
func (pgSQL *pgSQL) Close() {
pgSQL.DB.Close()
}
// Open creates a Datastore backed by a PostgreSQL database.
//
// It immediately runs every necessary migration on the database.
func Open(config *config.DatabaseConfig) (database.Datastore, error) {
// Run migrations.
if err := Migrate(config.Source); err != nil {
return nil, fmt.Errorf("could not run database migration: %v", err)
}
// Open database.
db, err := sql.Open("postgres", config.Source)
if err != nil {
return nil, fmt.Errorf("could not open database (Open): %v", err)
}
// Initialize cache.
// TODO(Quentin-M): Benchmark with a simple LRU Cache.
var cache *lru.ARCCache
if config.CacheSize > 0 {
cache, _ = lru.NewARC(config.CacheSize)
}
return &pgSQL{DB: db, cache: cache}, nil
}
// Migrate runs all available migrations on a pgSQL database.
func Migrate(dataSource string) error {
log.Info("running database migrations")
_, filename, _, _ := runtime.Caller(1)
migrationDir := path.Join(path.Dir(filename), "/migrations/")
conf := &goose.DBConf{
MigrationsDir: migrationDir,
Driver: goose.DBDriver{
Name: "postgres",
OpenStr: dataSource,
Import: "github.com/lib/pq",
Dialect: &goose.PostgresDialect{},
},
}
// Determine the most recent revision available from the migrations folder.
target, err := goose.GetMostRecentDBVersion(conf.MigrationsDir)
if err != nil {
return err
}
// Run migrations
err = goose.RunMigrations(conf, conf.MigrationsDir, target)
if err != nil {
return err
}
log.Info("database migration ran successfully")
return nil
}
// CreateDatabase creates a new database.
// The dataSource parameter should not contain a dbname.
func CreateDatabase(dataSource, databaseName string) error {
// Open database.
db, err := sql.Open("postgres", dataSource)
if err != nil {
return fmt.Errorf("could not open database (CreateDatabase): %v", err)
}
defer db.Close()
// Create database.
_, err = db.Exec("CREATE DATABASE " + databaseName + ";")
if err != nil {
return fmt.Errorf("could not create database: %v", err)
}
return nil
}
// DropDatabase drops an existing database.
// The dataSource parameter should not contain a dbname.
func DropDatabase(dataSource, databaseName string) error {
// Open database.
db, err := sql.Open("postgres", dataSource)
if err != nil {
return fmt.Errorf("could not open database (DropDatabase): %v", err)
}
defer db.Close()
// Drop database.
_, err = db.Exec("DROP DATABASE " + databaseName + ";")
if err != nil {
return fmt.Errorf("could not create database: %v", err)
}
return nil
}
// pgSQLTest wraps pgSQL for testing purposes.
// Its Close() method drops the database.
type pgSQLTest struct {
*pgSQL
dataSource string
dbName string
}
func (pgSQL *pgSQLTest) Close() {
pgSQL.DB.Close()
DropDatabase(pgSQL.dataSource+"dbname=postgres", pgSQL.dbName)
}
// OpenForTest creates a test Datastore backed by a new PostgreSQL database.
// It creates a new unique and prefixed ("test_") database.
// Using Close() will drop the database.
func OpenForTest(name string, withTestData bool) (*pgSQLTest, error) {
dataSource := "host=127.0.0.1 sslmode=disable "
dbName := "test_" + strings.ToLower(name) + "_" + strings.Replace(uuid.New(), "-", "_", -1)
// Create database.
err := CreateDatabase(dataSource+"dbname=postgres", dbName)
if err != nil {
return nil, err
}
// Open database.
db, err := Open(&config.DatabaseConfig{Source: dataSource + "dbname=" + dbName, CacheSize: 0})
if err != nil {
DropDatabase(dataSource, dbName)
return nil, err
}
// Load test data if specified.
if withTestData {
_, filename, _, _ := runtime.Caller(0)
d, _ := ioutil.ReadFile(path.Join(path.Dir(filename)) + "/testdata/data.sql")
_, err = db.(*pgSQL).Exec(string(d))
if err != nil {
DropDatabase(dataSource, dbName)
return nil, err
}
}
return &pgSQLTest{pgSQL: db.(*pgSQL), dataSource: dataSource, dbName: dbName}, nil
}
// buildInputArray constructs a PostgreSQL input array from the specified integers.
// It is useful with the `= ANY($1::integer[])` syntax, which lets us express an IN clause
// with a single placeholder.
func buildInputArray(ints []int) string {
str := "{"
for i := 0; i < len(ints)-1; i++ {
str = str + strconv.Itoa(ints[i]) + ","
}
str = str + strconv.Itoa(ints[len(ints)-1]) + "}"
return str
}
// isErrUniqueViolation determines whether the given error is a unique constraint violation.
func isErrUniqueViolation(err error) bool {
pqErr, ok := err.(*pq.Error)
return ok && pqErr.Code == "23505"
}
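buildInputArray pairs with the `= ANY($1::integer[])` predicate used by s_featureversions_vulnerabilities: the IDs are serialized to an array literal such as {1,2,3} so that a single placeholder covers an IN list of any length. A minimal usage sketch follows; the query text and helper name are illustrative, not registered queries, and it assumes the pgsql package's existing "database/sql" import.

// selectFeatureVersionsByIDs shows how one placeholder can stand in for an IN
// clause: buildInputArray turns the IDs into a PostgreSQL array literal.
func selectFeatureVersionsByIDs(db *sql.DB, ids []int) (*sql.Rows, error) {
	return db.Query(
		`SELECT id, version FROM FeatureVersion WHERE id = ANY($1::integer[])`,
		buildInputArray(ids))
}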

@ -0,0 +1,123 @@
package pgsql
import "fmt"
var queries map[string]string
func init() {
queries = make(map[string]string)
// keyvalue.go
queries["u_keyvalue"] = `UPDATE KeyValue SET value = $1 WHERE key = $2`
queries["i_keyvalue"] = `INSERT INTO KeyValue(key, value) VALUES($1, $2)`
queries["s_keyvalue"] = `SELECT value FROM KeyValue WHERE key = $1`
// namespace.go
queries["soi_namespace"] = `
WITH new_namespace AS (
INSERT INTO Namespace(name)
SELECT CAST($1 AS VARCHAR)
WHERE NOT EXISTS (SELECT name FROM Namespace WHERE name = $1)
RETURNING id
)
SELECT id FROM Namespace WHERE name = $1
UNION
SELECT id FROM new_namespace`
// feature.go
queries["soi_feature"] = `
WITH new_feature AS (
INSERT INTO Feature(name, namespace_id)
SELECT CAST($1 AS VARCHAR), CAST($2 AS INTEGER)
WHERE NOT EXISTS (SELECT id FROM Feature WHERE name = $1 AND namespace_id = $2)
RETURNING id
)
SELECT id FROM Feature WHERE name = $1 AND namespace_id = $2
UNION
SELECT id FROM new_feature`
queries["l_share_vulnerability_fixedin_feature"] = `LOCK Vulnerability_FixedIn_Feature IN SHARE MODE`
queries["soi_featureversion"] = `
WITH new_featureversion AS (
INSERT INTO FeatureVersion(feature_id, version)
SELECT CAST($1 AS INTEGER), CAST($2 AS VARCHAR)
WHERE NOT EXISTS (SELECT id FROM FeatureVersion WHERE feature_id = $1 AND version = $2)
RETURNING id
)
SELECT 'exi', id FROM FeatureVersion WHERE feature_id = $1 AND version = $2
UNION
SELECT 'new', id FROM new_featureversion
`
queries["s_vulnerability_fixedin_feature"] = `
SELECT id, vulnerability_id, version FROM Vulnerability_FixedIn_Feature
WHERE feature_id = $1`
queries["i_vulnerability_affects_featureversion"] = `
INSERT INTO Vulnerability_Affects_FeatureVersion(vulnerability_id,
featureversion_id, fixedin_id) VALUES($1, $2, $3)`
// layer.go
queries["s_layer"] = `
SELECT l.id, l.name, l.engineversion, p.name, n.name
FROM Layer l
LEFT JOIN Layer p ON l.parent_id = p.id
LEFT JOIN Namespace n ON l.namespace_id = n.id
WHERE l.name = $1;`
queries["s_layer_featureversion_id_only"] = `
WITH RECURSIVE layer_tree(id, parent_id, depth, path, cycle) AS(
SELECT l.id, l.parent_id, 1, ARRAY[l.id], false
FROM Layer l
WHERE l.id = $1
UNION ALL
SELECT l.id, l.parent_id, lt.depth + 1, path || l.id, l.id = ANY(path)
FROM Layer l, layer_tree lt
WHERE l.id = lt.parent_id
)
SELECT ldf.featureversion_id, ldf.modification
FROM Layer_diff_FeatureVersion ldf
JOIN (
SELECT row_number() over (ORDER BY depth DESC), id FROM layer_tree
) AS ltree (ordering, id) ON ldf.layer_id = ltree.id
ORDER BY ltree.ordering`
queries["s_layer_featureversion"] = `
WITH RECURSIVE layer_tree(id, parent_id, depth, path, cycle) AS(
SELECT l.id, l.parent_id, 1, ARRAY[l.id], false
FROM Layer l
WHERE l.id = $1
UNION ALL
SELECT l.id, l.parent_id, lt.depth + 1, path || l.id, l.id = ANY(path)
FROM Layer l, layer_tree lt
WHERE l.id = lt.parent_id
)
SELECT ldf.featureversion_id, ldf.modification, fn.id, fn.name, f.id, f.name, fv.id, fv.version
FROM Layer_diff_FeatureVersion ldf
JOIN (
SELECT row_number() over (ORDER BY depth DESC), id FROM layer_tree
) AS ltree (ordering, id) ON ldf.layer_id = ltree.id, FeatureVersion fv, Feature f, Namespace fn
WHERE ldf.featureversion_id = fv.id AND fv.feature_id = f.id AND f.namespace_id = fn.id
ORDER BY ltree.ordering`
queries["s_featureversions_vulnerabilities"] = `
SELECT vafv.featureversion_id, v.id, v.name, v.description, v.link, v.severity, vn.name, vfif.version
FROM Vulnerability_Affects_FeatureVersion vafv, Vulnerability v,
Namespace vn, Vulnerability_FixedIn_Feature vfif
WHERE vafv.featureversion_id = ANY($1::integer[])
AND vafv.vulnerability_id = v.id
AND vafv.fixedin_id = vfif.id
AND v.namespace_id = vn.id`
queries["i_layer"] = `INSERT INTO Layer(name, engine_version, parent_id, namespace_id) VALUES($1, $2, $3, $4) RETURNING id`
queries["u_layer"] = `UPDATE LAYER SET engine_version = $2, namespace_id = $3) WHERE id = $1`
}
func getQuery(name string) string {
if query, ok := queries[name]; ok {
return query
}
panic(fmt.Sprintf("pgsql: unknown query %v", name))
}

@ -0,0 +1,37 @@
INSERT INTO namespace (id, name) VALUES (1, 'debian:7');
INSERT INTO namespace (id, name) VALUES (2, 'debian:8');
INSERT INTO feature (id, namespace_id, name) VALUES (1, 1, 'wechat');
INSERT INTO feature (id, namespace_id, name) VALUES (2, 1, 'openssl');
INSERT INTO feature (id, namespace_id, name) VALUES (3, 2, 'openssl');
INSERT INTO featureversion (id, feature_id, version) VALUES (1, 1, '0.5');
INSERT INTO featureversion (id, feature_id, version) VALUES (2, 2, '1.0');
INSERT INTO featureversion (id, feature_id, version) VALUES (3, 2, '2.0');
INSERT INTO featureversion (id, feature_id, version) VALUES (4, 3, '1.0');
INSERT INTO layer (id, name, engineversion, parent_id, namespace_id) VALUES (1, 'layer-0', 1, NULL, NULL);
INSERT INTO layer (id, name, engineversion, parent_id, namespace_id) VALUES (2, 'layer-1', 1, 1, 1);
INSERT INTO layer (id, name, engineversion, parent_id, namespace_id) VALUES (3, 'layer-2', 1, 2, 1);
INSERT INTO layer (id, name, engineversion, parent_id, namespace_id) VALUES (4, 'layer-3a', 1, 3, 1);
INSERT INTO layer (id, name, engineversion, parent_id, namespace_id) VALUES (5, 'layer-3b', 1, 3, 2);
INSERT INTO layer_diff_featureversion (id, layer_id, featureversion_id, modification) VALUES (1, 2, 1, 'add');
INSERT INTO layer_diff_featureversion (id, layer_id, featureversion_id, modification) VALUES (2, 2, 2, 'add');
INSERT INTO layer_diff_featureversion (id, layer_id, featureversion_id, modification) VALUES (3, 3, 2, 'del'); -- layer-2: Update Debian:7 OpenSSL 1.0 -> 2.0
INSERT INTO layer_diff_featureversion (id, layer_id, featureversion_id, modification) VALUES (4, 3, 3, 'add'); -- ^
INSERT INTO layer_diff_featureversion (id, layer_id, featureversion_id, modification) VALUES (5, 5, 3, 'del'); -- layer-3b: Delete Debian:7 OpenSSL 2.0
INSERT INTO layer_diff_featureversion (id, layer_id, featureversion_id, modification) VALUES (6, 5, 4, 'add'); -- layer-3b: Add Debian:8 OpenSSL 1.0
INSERT INTO vulnerability (id, namespace_id, name, description, link, severity) VALUES (1, 1, 'CVE-OPENSSL-1-DEB7', 'A vulnerability affecting OpenSSL < 2.0 on Debian 7.0', 'http://google.com/#q=CVE-OPENSSL-1-DEB7', 'High');
INSERT INTO vulnerability_fixedin_feature (id, vulnerability_id, feature_id, version) VALUES (1, 1, 2, '2.0');
INSERT INTO vulnerability_affects_featureversion (id, vulnerability_id, featureversion_id, fixedin_id) VALUES (1, 1, 2, 1); -- CVE-OPENSSL-1-DEB7 affects Debian:7 OpenSSL 1.0
INSERT INTO vulnerability (id, namespace_id, name, description, link, severity) VALUES (2, 1, 'CVE-NOPE', 'A vulnerability affecting nothing', 'http://google.com/#q=NOPE', 'Negligible');
SELECT pg_catalog.setval(pg_get_serial_sequence('namespace', 'id'), (SELECT MAX(id) FROM namespace)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('feature', 'id'), (SELECT MAX(id) FROM feature)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('featureversion', 'id'), (SELECT MAX(id) FROM featureversion)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('layer', 'id'), (SELECT MAX(id) FROM layer)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('layer_diff_featureversion', 'id'), (SELECT MAX(id) FROM layer_diff_featureversion)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability', 'id'), (SELECT MAX(id) FROM vulnerability)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability_fixedin_feature', 'id'), (SELECT MAX(id) FROM vulnerability_fixedin_feature)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability_affects_featureversion', 'id'), (SELECT MAX(id) FROM vulnerability_affects_featureversion)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability', 'id'), (SELECT MAX(id) FROM vulnerability)+1);

@ -1,51 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import cerrors "github.com/coreos/clair/utils/errors"
// FindAllLayersIntroducingVulnerability finds and returns the list of layers
// that introduce the given vulnerability (by its ID), selecting the specified fields
func FindAllLayersIntroducingVulnerability(vulnerabilityID string, selectedFields []string) ([]*Layer, error) {
// Find vulnerability
vulnerability, err := FindOneVulnerability(vulnerabilityID, []string{FieldVulnerabilityFixedIn})
if err != nil {
return []*Layer{}, err
}
if vulnerability == nil {
return []*Layer{}, cerrors.ErrNotFound
}
// Find FixedIn packages
fixedInPackages, err := FindAllPackagesByNodes(vulnerability.FixedInNodes, []string{FieldPackagePreviousVersion})
if err != nil {
return []*Layer{}, err
}
// Find all FixedIn packages's ancestors packages (which are therefore vulnerable to the vulnerability)
var vulnerablePackagesNodes []string
for _, pkg := range fixedInPackages {
previousVersions, err := pkg.PreviousVersions([]string{})
if err != nil {
return []*Layer{}, err
}
for _, version := range previousVersions {
vulnerablePackagesNodes = append(vulnerablePackagesNodes, version.Node)
}
}
// Return all the layers that add these packages
return FindAllLayersByAddedPackageNodes(vulnerablePackagesNodes, selectedFields)
}

@ -1,377 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"github.com/coreos/clair/utils"
cerrors "github.com/coreos/clair/utils/errors"
"github.com/coreos/clair/utils/types"
"github.com/google/cayley"
"github.com/google/cayley/graph"
"github.com/google/cayley/graph/path"
)
const (
FieldVulnerabilityID = "id"
FieldVulnerabilityLink = "link"
FieldVulnerabilityPriority = "priority"
FieldVulnerabilityDescription = "description"
FieldVulnerabilityFixedIn = "fixedIn"
// FieldVulnerabilityCausedByPackage only makes sense with FindAllVulnerabilitiesByFixedIn.
FieldVulnerabilityCausedByPackage = "causedByPackage"
// This field is not selectable and is for internal use only.
fieldVulnerabilityIsValue = "vulnerability"
)
var FieldVulnerabilityAll = []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn}
// Vulnerability represents a vulnerability that is fixed in some Packages
type Vulnerability struct {
Node string `json:"-"`
ID string
Link string
Priority types.Priority
Description string `json:",omitempty"`
FixedInNodes []string `json:"-"`
CausedByPackage string `json:",omitempty"`
}
// GetNode returns an unique identifier for the graph node
// Requires the key field: ID
func (v *Vulnerability) GetNode() string {
return fieldVulnerabilityIsValue + ":" + utils.Hash(v.ID)
}
// ToAbstractVulnerability converts a Vulnerability into an
// AbstractVulnerability.
func (v *Vulnerability) ToAbstractVulnerability() (*AbstractVulnerability, error) {
// Find FixedIn packages.
fixedInPackages, err := FindAllPackagesByNodes(v.FixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion})
if err != nil {
return nil, err
}
return &AbstractVulnerability{
ID: v.ID,
Link: v.Link,
Priority: v.Priority,
Description: v.Description,
AffectedPackages: PackagesToAbstractPackages(fixedInPackages),
}, nil
}
// AbstractVulnerability represents a Vulnerability as it is defined in the database
// package but exposes directly a list of AbstractPackage instead of
// nodes to packages.
type AbstractVulnerability struct {
ID string
Link string
Priority types.Priority
Description string
AffectedPackages []*AbstractPackage
}
// ToVulnerability converts an abstractVulnerability into
// a Vulnerability
func (av *AbstractVulnerability) ToVulnerability(fixedInNodes []string) *Vulnerability {
return &Vulnerability{
ID: av.ID,
Link: av.Link,
Priority: av.Priority,
Description: av.Description,
FixedInNodes: fixedInNodes,
}
}
// InsertVulnerabilities inserts or updates several vulnerabilities in the database in one transaction
// During an update, if the vulnerability was previously fixed by a version in a branch and a new package of that branch is specified, the previous one is deleted
// Otherwise, it simply adds the defined packages, there is currently no way to delete affected packages.
//
// ID, Link, Priority and FixedInNodes fields have to be specified. Description is optional.
func InsertVulnerabilities(vulnerabilities []*Vulnerability) ([]Notification, error) {
if len(vulnerabilities) == 0 {
return []Notification{}, nil
}
// Create required data structure
var err error
t := cayley.NewTransaction()
cachedVulnerabilities := make(map[string]*Vulnerability)
var notifications []Notification
newVulnerabilityNotifications := make(map[string]*NewVulnerabilityNotification)
vulnerabilityPriorityIncreasedNotifications := make(map[string]*VulnerabilityPriorityIncreasedNotification)
vulnerabilityPackageChangedNotifications := make(map[string]*VulnerabilityPackageChangedNotification)
// Iterate over all the vulnerabilities we need to insert/update
for _, vulnerability := range vulnerabilities {
// Check if the vulnerability already exists
existingVulnerability, _ := cachedVulnerabilities[vulnerability.ID]
if existingVulnerability == nil {
existingVulnerability, err = FindOneVulnerability(vulnerability.ID, FieldVulnerabilityAll)
if err != nil && err != cerrors.ErrNotFound {
return []Notification{}, err
}
if existingVulnerability != nil {
cachedVulnerabilities[vulnerability.ID] = existingVulnerability
}
}
// Insert/Update vulnerability
if existingVulnerability == nil {
// The vulnerability does not exist, create it
// Verify parameters
if vulnerability.ID == "" || vulnerability.Link == "" || vulnerability.Priority == "" {
log.Warningf("could not insert an incomplete vulnerability [ID: %s, Link: %s, Priority: %s]", vulnerability.ID, vulnerability.Link, vulnerability.Priority)
return []Notification{}, cerrors.NewBadRequestError("Could not insert an incomplete vulnerability")
}
if !vulnerability.Priority.IsValid() {
log.Warningf("could not insert a vulnerability which has an invalid priority [ID: %s, Link: %s, Priority: %s]. Valid priorities are: %v.", vulnerability.ID, vulnerability.Link, vulnerability.Priority, types.Priorities)
return []Notification{}, cerrors.NewBadRequestError("Could not insert a vulnerability which has an invalid priority")
}
if len(vulnerability.FixedInNodes) == 0 {
log.Warningf("could not insert a vulnerability which doesn't affect any package [ID: %s].", vulnerability.ID)
return []Notification{}, cerrors.NewBadRequestError("could not insert a vulnerability which doesn't affect any package")
}
// Insert it
vulnerability.Node = vulnerability.GetNode()
t.AddQuad(cayley.Triple(vulnerability.Node, fieldIs, fieldVulnerabilityIsValue))
t.AddQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityID, vulnerability.ID))
t.AddQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityLink, vulnerability.Link))
t.AddQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityPriority, string(vulnerability.Priority)))
t.AddQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityDescription, vulnerability.Description))
for _, p := range vulnerability.FixedInNodes {
t.AddQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityFixedIn, p))
}
// Add a notification
notification := &NewVulnerabilityNotification{VulnerabilityID: vulnerability.ID}
notifications = append(notifications, notification)
newVulnerabilityNotifications[vulnerability.ID] = notification
cachedVulnerabilities[vulnerability.ID] = vulnerability
} else {
// The vulnerability already exists, update it
if vulnerability.Link != "" && existingVulnerability.Link != vulnerability.Link {
t.RemoveQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityLink, existingVulnerability.Link))
t.AddQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityLink, vulnerability.Link))
existingVulnerability.Link = vulnerability.Link
}
if vulnerability.Priority != "" && vulnerability.Priority != types.Unknown && existingVulnerability.Priority != vulnerability.Priority {
if !vulnerability.Priority.IsValid() {
log.Warningf("could not update a vulnerability which has an invalid priority [ID: %s, Link: %s, Priority: %s]. Valid priorities are: %v.", vulnerability.ID, vulnerability.Link, vulnerability.Priority, types.Priorities)
return []Notification{}, cerrors.NewBadRequestError("Could not update a vulnerability which has an invalid priority")
}
// Add a notification about the priority change if the new priority is higher and the vulnerability is not new
if vulnerability.Priority.Compare(existingVulnerability.Priority) > 0 {
if _, newVulnerabilityNotificationExists := newVulnerabilityNotifications[vulnerability.ID]; !newVulnerabilityNotificationExists {
// Is there already a priority-increase notification for this vulnerability?
if existingPriorityNotification, _ := vulnerabilityPriorityIncreasedNotifications[vulnerability.ID]; existingPriorityNotification != nil {
// There is a priority change notification, replace it but keep the old priority field
existingPriorityNotification.NewPriority = vulnerability.Priority
} else {
// No previous notification, just add a new one
notification := &VulnerabilityPriorityIncreasedNotification{OldPriority: existingVulnerability.Priority, NewPriority: vulnerability.Priority, VulnerabilityID: existingVulnerability.ID}
notifications = append(notifications, notification)
vulnerabilityPriorityIncreasedNotifications[vulnerability.ID] = notification
}
}
}
t.RemoveQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityPriority, string(existingVulnerability.Priority)))
t.AddQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityPriority, string(vulnerability.Priority)))
existingVulnerability.Priority = vulnerability.Priority
}
if vulnerability.Description != "" && existingVulnerability.Description != vulnerability.Description {
t.RemoveQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityDescription, existingVulnerability.Description))
t.AddQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityDescription, vulnerability.Description))
existingVulnerability.Description = vulnerability.Description
}
newFixedInNodes := utils.CompareStringLists(vulnerability.FixedInNodes, existingVulnerability.FixedInNodes)
if len(newFixedInNodes) > 0 {
var removedNodes []string
var addedNodes []string
existingVulnerabilityFixedInPackages, err := FindAllPackagesByNodes(existingVulnerability.FixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion})
if err != nil {
return []Notification{}, err
}
newFixedInPackages, err := FindAllPackagesByNodes(newFixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion})
if err != nil {
return []Notification{}, err
}
for _, p := range newFixedInPackages {
for _, ep := range existingVulnerabilityFixedInPackages {
if p.Branch() == ep.Branch() {
// A link to this package branch already exists but points to a different version; remove it
t.RemoveQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityFixedIn, ep.Node))
var index int
for i, n := range existingVulnerability.FixedInNodes {
if n == ep.Node {
index = i
break
}
}
existingVulnerability.FixedInNodes = append(existingVulnerability.FixedInNodes[:index], existingVulnerability.FixedInNodes[index+1:]...)
removedNodes = append(removedNodes, ep.Node)
}
}
t.AddQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityFixedIn, p.Node))
existingVulnerability.FixedInNodes = append(existingVulnerability.FixedInNodes, p.Node)
addedNodes = append(addedNodes, p.Node)
}
// Add notification about the FixedIn modification if the vulnerability is not new
if _, newVulnerabilityNotificationExists := newVulnerabilityNotifications[vulnerability.ID]; !newVulnerabilityNotificationExists {
// Is there already a package-change notification for this vulnerability?
if existingPackageNotification, _ := vulnerabilityPackageChangedNotifications[vulnerability.ID]; existingPackageNotification != nil {
// There is already a package-change notification, add these package modifications to it
existingPackageNotification.AddedFixedInNodes = append(existingPackageNotification.AddedFixedInNodes, addedNodes...)
existingPackageNotification.RemovedFixedInNodes = append(existingPackageNotification.RemovedFixedInNodes, removedNodes...)
} else {
// No previous notification, just add a new one
notification := &VulnerabilityPackageChangedNotification{VulnerabilityID: vulnerability.ID, AddedFixedInNodes: addedNodes, RemovedFixedInNodes: removedNodes}
notifications = append(notifications, notification)
vulnerabilityPackageChangedNotifications[vulnerability.ID] = notification
}
}
}
}
}
// Apply transaction
if err = store.ApplyTransaction(t); err != nil {
log.Errorf("failed transaction (InsertVulnerabilities): %s", err)
return []Notification{}, ErrTransaction
}
return notifications, nil
}
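For reference, a hedged, caller-side sketch of how the API above might be used (assuming imports of fmt, github.com/coreos/clair/database and github.com/coreos/clair/utils/types; the CVE data and the pkgNode argument are illustrative only, not part of this commit):
// Hypothetical caller-side sketch; pkgNode is assumed to be a package node
// previously obtained from InsertPackages.
func reportVulnerabilityNotifications(pkgNode string) error {
	notifications, err := database.InsertVulnerabilities([]*database.Vulnerability{
		{ID: "CVE-2015-0001", Link: "https://example.com/CVE-2015-0001", Priority: types.High, FixedInNodes: []string{pkgNode}},
	})
	if err != nil {
		return err
	}
	for _, n := range notifications {
		switch nn := n.(type) {
		case *database.NewVulnerabilityNotification:
			fmt.Println("new vulnerability:", nn.VulnerabilityID)
		case *database.VulnerabilityPriorityIncreasedNotification:
			fmt.Println("priority increased:", nn.OldPriority, "->", nn.NewPriority)
		case *database.VulnerabilityPackageChangedNotification:
			fmt.Println("fixed-in packages changed for:", nn.VulnerabilityID)
		}
	}
	return nil
}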
// DeleteVulnerability deletes the vulnerability having the given ID
func DeleteVulnerability(id string) error {
vulnerability, err := FindOneVulnerability(id, FieldVulnerabilityAll)
if err != nil {
return err
}
t := cayley.NewTransaction()
t.RemoveQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityID, vulnerability.ID))
t.RemoveQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityLink, vulnerability.Link))
t.RemoveQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityPriority, string(vulnerability.Priority)))
t.RemoveQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityDescription, vulnerability.Description))
for _, p := range vulnerability.FixedInNodes {
t.RemoveQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityFixedIn, p))
}
if err := store.ApplyTransaction(t); err != nil {
log.Errorf("failed transaction (DeleteVulnerability): %s", err)
return ErrTransaction
}
return nil
}
// FindOneVulnerability finds and returns a single vulnerability having the given ID selecting the specified fields
func FindOneVulnerability(id string, selectedFields []string) (*Vulnerability, error) {
t := &Vulnerability{ID: id}
v, err := toVulnerabilities(cayley.StartPath(store, t.GetNode()).Has(fieldIs, fieldVulnerabilityIsValue), selectedFields)
if err != nil {
return nil, err
}
if len(v) == 1 {
return v[0], nil
}
if len(v) > 1 {
log.Errorf("found multiple vulnerabilities with identical ID [ID: %s]", id)
return nil, ErrInconsistent
}
return nil, cerrors.ErrNotFound
}
// FindAllVulnerabilitiesByFixedIn finds and returns all vulnerabilities that are fixed in the given packages (specified by their nodes), selecting the specified fields
func FindAllVulnerabilitiesByFixedIn(nodes []string, selectedFields []string) ([]*Vulnerability, error) {
if len(nodes) == 0 {
log.Warning("Could not FindAllVulnerabilitiesByFixedIn with an empty nodes array.")
return []*Vulnerability{}, nil
}
// Construct path, potentially saving FieldVulnerabilityCausedByPackage
path := cayley.StartPath(store, nodes...)
if utils.Contains(FieldVulnerabilityCausedByPackage, selectedFields) {
path = path.Save(FieldPackageName, FieldVulnerabilityCausedByPackage)
}
path = path.In(FieldVulnerabilityFixedIn)
return toVulnerabilities(path, selectedFields)
}
// toVulnerabilities converts a path leading to one or multiple vulnerabilities to Vulnerability structs, selecting the specified fields
func toVulnerabilities(path *path.Path, selectedFields []string) ([]*Vulnerability, error) {
var vulnerabilities []*Vulnerability
saveFields(path, selectedFields, []string{FieldVulnerabilityFixedIn, FieldVulnerabilityCausedByPackage})
it, _ := path.BuildIterator().Optimize()
defer it.Close()
for cayley.RawNext(it) {
tags := make(map[string]graph.Value)
it.TagResults(tags)
vulnerability := Vulnerability{Node: store.NameOf(it.Result())}
for _, selectedField := range selectedFields {
switch selectedField {
case FieldVulnerabilityID:
vulnerability.ID = store.NameOf(tags[FieldVulnerabilityID])
case FieldVulnerabilityLink:
vulnerability.Link = store.NameOf(tags[FieldVulnerabilityLink])
case FieldVulnerabilityPriority:
vulnerability.Priority = types.Priority(store.NameOf(tags[FieldVulnerabilityPriority]))
case FieldVulnerabilityDescription:
vulnerability.Description = store.NameOf(tags[FieldVulnerabilityDescription])
case FieldVulnerabilityFixedIn:
var err error
vulnerability.FixedInNodes, err = toValues(cayley.StartPath(store, vulnerability.Node).Out(FieldVulnerabilityFixedIn))
if err != nil {
log.Errorf("could not get fixedIn on vulnerability %s: %s.", vulnerability.Node, err.Error())
return []*Vulnerability{}, err
}
case FieldVulnerabilityCausedByPackage:
vulnerability.CausedByPackage = store.NameOf(tags[FieldVulnerabilityCausedByPackage])
default:
panic("unknown selectedField")
}
}
vulnerabilities = append(vulnerabilities, &vulnerability)
}
if it.Err() != nil {
log.Errorf("failed query in toVulnerabilities: %s", it.Err())
return []*Vulnerability{}, ErrBackendException
}
return vulnerabilities, nil
}
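A hedged sketch of the field-selection mechanism, written as if it lived in this package (fmt import assumed; the package nodes passed in are illustrative):
// Sketch: list vulnerabilities fixed in the given package nodes, selecting
// only the vulnerability ID and the name of the package that introduces it.
func listVulnerabilitiesFixedIn(pkgNodes []string) error {
	selected := []string{FieldVulnerabilityID, FieldVulnerabilityCausedByPackage}
	vulnerabilities, err := FindAllVulnerabilitiesByFixedIn(pkgNodes, selected)
	if err != nil {
		return err
	}
	for _, v := range vulnerabilities {
		fmt.Printf("%s (caused by package %s)\n", v.ID, v.CausedByPackage)
	}
	return nil
}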

@ -1,238 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"testing"
"github.com/coreos/clair/config"
cerrors "github.com/coreos/clair/utils/errors"
"github.com/coreos/clair/utils/types"
"github.com/stretchr/testify/assert"
)
func TestVulnerability(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
// Insert invalid vulnerabilities
for _, vulnerability := range []Vulnerability{
Vulnerability{ID: "", Link: "link1", Priority: types.Medium, FixedInNodes: []string{"pkg1"}},
Vulnerability{ID: "test1", Link: "", Priority: types.Medium, FixedInNodes: []string{"pkg1"}},
Vulnerability{ID: "test1", Link: "link1", Priority: "InvalidPriority", FixedInNodes: []string{"pkg1"}},
Vulnerability{ID: "test1", Link: "link1", Priority: types.Medium, FixedInNodes: []string{}},
} {
_, err := InsertVulnerabilities([]*Vulnerability{&vulnerability})
assert.Error(t, err)
}
// Some data
vuln1 := &Vulnerability{ID: "test1", Link: "link1", Priority: types.Medium, Description: "testDescription1", FixedInNodes: []string{"pkg1"}}
vuln2 := &Vulnerability{ID: "test2", Link: "link2", Priority: types.High, Description: "testDescription2", FixedInNodes: []string{"pkg1", "pkg2"}}
vuln3 := &Vulnerability{ID: "test3", Link: "link3", Priority: types.High, FixedInNodes: []string{"pkg3"}} // Empty description
// Insert some vulnerabilities
_, err := InsertVulnerabilities([]*Vulnerability{vuln1, vuln2, vuln3})
if assert.Nil(t, err) {
// Find one of the vulnerabilities we just inserted and verify its content
v1, err := FindOneVulnerability(vuln1.ID, FieldVulnerabilityAll)
if assert.Nil(t, err) && assert.NotNil(t, v1) {
assert.Equal(t, vuln1.ID, v1.ID)
assert.Equal(t, vuln1.Link, v1.Link)
assert.Equal(t, vuln1.Priority, v1.Priority)
assert.Equal(t, vuln1.Description, v1.Description)
if assert.Len(t, v1.FixedInNodes, 1) {
assert.Equal(t, vuln1.FixedInNodes[0], v1.FixedInNodes[0])
}
}
// Ensure that vulnerabilities with empty descriptions work as well
v3, err := FindOneVulnerability(vuln3.ID, FieldVulnerabilityAll)
if assert.Nil(t, err) && assert.NotNil(t, v3) {
assert.Equal(t, vuln3.Description, v3.Description)
}
// Find vulnerabilities by fixed packages
vulnsFixedInPkg1AndPkg3, err := FindAllVulnerabilitiesByFixedIn([]string{"pkg2", "pkg3"}, FieldVulnerabilityAll)
assert.Nil(t, err)
assert.Len(t, vulnsFixedInPkg1AndPkg3, 2)
// Delete vulnerability
if assert.Nil(t, DeleteVulnerability(vuln1.ID)) {
v1, err := FindOneVulnerability(vuln1.ID, FieldVulnerabilityAll)
assert.Equal(t, cerrors.ErrNotFound, err)
assert.Nil(t, v1)
}
}
// Update a vulnerability and verify its new content
pkg1 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
InsertPackages([]*Package{pkg1})
vuln5 := &Vulnerability{ID: "test5", Link: "link5", Priority: types.Medium, Description: "testDescription5", FixedInNodes: []string{pkg1.Node}}
_, err = InsertVulnerabilities([]*Vulnerability{vuln5})
if assert.Nil(t, err) {
// Partial updates
// # Just a field update
vuln5b := &Vulnerability{ID: "test5", Priority: types.High}
_, err := InsertVulnerabilities([]*Vulnerability{vuln5b})
if assert.Nil(t, err) {
v5b, err := FindOneVulnerability(vuln5b.ID, FieldVulnerabilityAll)
if assert.Nil(t, err) && assert.NotNil(t, v5b) {
assert.Equal(t, vuln5b.ID, v5b.ID)
assert.Equal(t, vuln5b.Priority, v5b.Priority)
if assert.Len(t, v5b.FixedInNodes, 1) {
assert.Contains(t, v5b.FixedInNodes, pkg1.Node)
}
}
}
// # Just a field update, twice in the same transaction
vuln5b1 := &Vulnerability{ID: "test5", Link: "http://foo.bar"}
vuln5b2 := &Vulnerability{ID: "test5", Link: "http://bar.foo"}
_, err = InsertVulnerabilities([]*Vulnerability{vuln5b1, vuln5b2})
if assert.Nil(t, err) {
v5b2, err := FindOneVulnerability(vuln5b2.ID, FieldVulnerabilityAll)
if assert.Nil(t, err) && assert.NotNil(t, v5b2) {
assert.Equal(t, vuln5b2.Link, v5b2.Link)
}
}
// # All fields except fixedIn update
vuln5c := &Vulnerability{ID: "test5", Link: "link5c", Priority: types.Critical, Description: "testDescription5c"}
_, err = InsertVulnerabilities([]*Vulnerability{vuln5c})
if assert.Nil(t, err) {
v5c, err := FindOneVulnerability(vuln5c.ID, FieldVulnerabilityAll)
if assert.Nil(t, err) && assert.NotNil(t, v5c) {
assert.Equal(t, vuln5c.ID, v5c.ID)
assert.Equal(t, vuln5c.Link, v5c.Link)
assert.Equal(t, vuln5c.Priority, v5c.Priority)
assert.Equal(t, vuln5c.Description, v5c.Description)
if assert.Len(t, v5c.FixedInNodes, 1) {
assert.Contains(t, v5c.FixedInNodes, pkg1.Node)
}
}
}
// Complete update
pkg2 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.1")}
pkg3 := &Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}
InsertPackages([]*Package{pkg2, pkg3})
vuln5d := &Vulnerability{ID: "test5", Link: "link5d", Priority: types.Low, Description: "testDescription5d", FixedInNodes: []string{pkg2.Node, pkg3.Node}}
_, err = InsertVulnerabilities([]*Vulnerability{vuln5d})
if assert.Nil(t, err) {
v5d, err := FindOneVulnerability(vuln5d.ID, FieldVulnerabilityAll)
if assert.Nil(t, err) && assert.NotNil(t, v5d) {
assert.Equal(t, vuln5d.ID, v5d.ID)
assert.Equal(t, vuln5d.Link, v5d.Link)
assert.Equal(t, vuln5d.Priority, v5d.Priority)
assert.Equal(t, vuln5d.Description, v5d.Description)
// Here, we ensure that a vulnerability can only be fixed by one package of a given branch at a given time
// And that we can add new fixed packages as well
if assert.Len(t, v5d.FixedInNodes, 2) {
assert.NotContains(t, v5d.FixedInNodes, pkg1.Node)
}
}
}
}
// Create and update a vulnerability's packages (and from the same branch) in the same batch
pkg1 = &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
pkg1b := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.1")}
InsertPackages([]*Package{pkg1, pkg1b})
// # Two updates of the same vulnerability in the same batch with packages of the same branch
pkg0 := &Package{OS: "testOS", Name: "testpkg0", Version: types.NewVersionUnsafe("1.0")}
InsertPackages([]*Package{pkg0})
_, err = InsertVulnerabilities([]*Vulnerability{&Vulnerability{ID: "test7", Link: "link7", Priority: types.Medium, Description: "testDescription7", FixedInNodes: []string{pkg0.Node}}})
if assert.Nil(t, err) {
vuln7b := &Vulnerability{ID: "test7", FixedInNodes: []string{pkg1.Node}}
vuln7c := &Vulnerability{ID: "test7", FixedInNodes: []string{pkg1b.Node}}
_, err = InsertVulnerabilities([]*Vulnerability{vuln7b, vuln7c})
if assert.Nil(t, err) {
v7, err := FindOneVulnerability("test7", FieldVulnerabilityAll)
if assert.Nil(t, err) && assert.Len(t, v7.FixedInNodes, 2) {
assert.Contains(t, v7.FixedInNodes, pkg0.Node)
assert.NotContains(t, v7.FixedInNodes, pkg1.Node)
assert.Contains(t, v7.FixedInNodes, pkg1b.Node)
}
}
}
}
func TestInsertVulnerabilityNotifications(t *testing.T) {
Open(&config.DatabaseConfig{Type: "memstore"})
defer Close()
pkg1 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
pkg1b := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.2")}
pkg2 := &Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}
InsertPackages([]*Package{pkg1, pkg1b, pkg2})
// NewVulnerabilityNotification
vuln1 := &Vulnerability{ID: "test1", Link: "link1", Priority: types.Medium, Description: "testDescription1", FixedInNodes: []string{pkg1.Node}}
vuln2 := &Vulnerability{ID: "test2", Link: "link2", Priority: types.High, Description: "testDescription2", FixedInNodes: []string{pkg1.Node, pkg2.Node}}
vuln1b := &Vulnerability{ID: "test1", Priority: types.High, FixedInNodes: []string{"pkg3"}}
notifications, err := InsertVulnerabilities([]*Vulnerability{vuln1, vuln2, vuln1b})
if assert.Nil(t, err) {
// We should only have two NewVulnerabilityNotification notifications: one for test1 and one for test2
// We should not have a VulnerabilityPriorityIncreasedNotification or a VulnerabilityPackageChangedNotification
// for test1 because it is in the same batch
if assert.Len(t, notifications, 2) {
for _, n := range notifications {
_, ok := n.(*NewVulnerabilityNotification)
assert.True(t, ok)
}
}
}
// VulnerabilityPriorityIncreasedNotification
vuln1c := &Vulnerability{ID: "test1", Priority: types.Critical}
notifications, err = InsertVulnerabilities([]*Vulnerability{vuln1c})
if assert.Nil(t, err) {
if assert.Len(t, notifications, 1) {
if nn, ok := notifications[0].(*VulnerabilityPriorityIncreasedNotification); assert.True(t, ok) {
assert.Equal(t, vuln1b.Priority, nn.OldPriority)
assert.Equal(t, vuln1c.Priority, nn.NewPriority)
}
}
}
notifications, err = InsertVulnerabilities([]*Vulnerability{&Vulnerability{ID: "test1", Priority: types.Low}})
assert.Nil(t, err)
assert.Len(t, notifications, 0)
// VulnerabilityPackageChangedNotification
vuln1e := &Vulnerability{ID: "test1", FixedInNodes: []string{pkg1b.Node}}
vuln1f := &Vulnerability{ID: "test1", FixedInNodes: []string{pkg2.Node}}
notifications, err = InsertVulnerabilities([]*Vulnerability{vuln1e, vuln1f})
if assert.Nil(t, err) {
if assert.Len(t, notifications, 1) {
if nn, ok := notifications[0].(*VulnerabilityPackageChangedNotification); assert.True(t, ok) {
// Here, we say that pkg1b fixes the vulnerability, but as pkg1b is in
// the same branch as pkg1, pkg1 should be removed and pkg1b added
// We also add pkg2 as fixed
assert.Contains(t, nn.AddedFixedInNodes, pkg1b.Node)
assert.Contains(t, nn.RemovedFixedInNodes, pkg1.Node)
assert.Contains(t, nn.AddedFixedInNodes, pkg2.Node)
}
}
}
}

@ -48,7 +48,7 @@ type WebhookNotifierConfiguration struct {
}
func init() {
notifier.RegisterNotifier("webhook", &WebhookNotifier{})
//notifier.RegisterNotifier("webhook", &WebhookNotifier{})
}
func (h *WebhookNotifier) Configure(config *config.NotifierConfig) (bool, error) {

BIN
updater/.DS_Store vendored

Binary file not shown.

@ -53,7 +53,7 @@ type jsonRel struct {
type DebianFetcher struct{}
func init() {
updater.RegisterFetcher("debian", &DebianFetcher{})
//updater.RegisterFetcher("debian", &DebianFetcher{})
}
// FetchUpdate fetches vulnerability updates from the Debian Security Tracker.

@ -0,0 +1,19 @@
package fetchers
import "github.com/coreos/clair/updater"
// NVDFetcher implements updater.Fetcher and gets vulnerability updates from
// the National Vulnerability Database (NVD), from the
// National Institute of Standards and Technology (NIST).
type NVDFetcher struct{}
func init() {
//updater.RegisterFetcher("NVD", &RHELFetcher{})
}
// FetchUpdate gets vulnerability updates from the NVD database.
func (f *NVDFetcher) FetchUpdate() (resp updater.FetcherResponse, err error) {
log.Info("fetching NVD vulneratibilities")
return
}

@ -81,7 +81,7 @@ type criterion struct {
type RHELFetcher struct{}
func init() {
updater.RegisterFetcher("Red Hat", &RHELFetcher{})
//updater.RegisterFetcher("Red Hat", &RHELFetcher{})
}
// FetchUpdate gets vulnerability updates from the Red Hat OVAL definitions.

@ -76,7 +76,7 @@ var (
type UbuntuFetcher struct{}
func init() {
updater.RegisterFetcher("Ubuntu", &UbuntuFetcher{})
//updater.RegisterFetcher("Ubuntu", &UbuntuFetcher{})
}
// FetchUpdate gets vulnerability updates from the Ubuntu CVE Tracker.

@ -0,0 +1,211 @@
package types
//
// import "fmt"
//
// // CVSSv2 represents the Common Vulnerability Scoring System (CVSS), that assesses the severity of
// // vulnerabilities.
// // It describes the CVSS score, but also a vector describing the components from which the score
// // was calculated. This provides users of the score confidence in its correctness and provides
// // insight into the nature of the vulnerability.
// //
// // Reference: https://nvd.nist.gov/CVSS/Vector-v2.aspx
// type CVSSv2 struct {
// // Base Vectors
// AccessVector CVSSValue
// AccessComplexity CVSSValue
// Authentication CVSSValue
// ConfImpact CVSSValue
// IntegImpact CVSSValue
// AvailImpact CVSSValue
// // Temporal Vectors
// Exploitability CVSSValue
// RemediationLevel CVSSValue
// ReportConfidence CVSSValue
// // Environmental Vectors
// CollateralDamagePotential CVSSValue
// TargetDistribution CVSSValue
// SystemConfidentialityRequirement CVSSValue
// SystemIntegrityRequirement CVSSValue
// SystemAvailabilityRequirement CVSSValue
// }
//
// func NewCVSSv2(value string) (*CVSSv2, error) {
//
// }
//
// // CVSSValue is the comprehensible value for a CVSS metric.
// type CVSSValue string
//
// // Metric acronym + Value abbreviation -> Comprehensible metric value.
// var toValue map[string]func(string) (CVSSValue, error)
//
// func init() {
// toValue = make(map[string]func(string) (CVSSValue, error), 14)
// toValue["AV"] = av
// toValue["AC"] = ac
// toValue["Au"] = au
// toValue["C"] = cAndIAndA
// toValue["I"] = cAndIAndA
// toValue["A"] = cAndIAndA
// toValue["E"] = e
// toValue["RL"] = rl
// toValue["RC"] = rc
// toValue["CDP"] = cdp
// toValue["TD"] = td
// toValue["CR"] = crAndIrAndAr
// toValue["IR"] = crAndIrAndAr
// toValue["AR"] = crAndIrAndAr
// }
//
// func av(v string) (CVSSValue, error) {
// switch v {
// case "L":
// return CVSSValue("Local access"), nil
// case "A":
// return CVSSValue("Adjacent Network"), nil
// case "N":
// return CVSSValue("Network"), nil
// default:
// return "", fmt.Errorf("%v is not a valid value for AV", v)
// }
// }
//
// func ac(v string) (CVSSValue, error) {
// switch v {
// case "H":
// return CVSSValue("High"), nil
// case "M":
// return CVSSValue("Medium"), nil
// case "L":
// return CVSSValue("Low"), nil
// default:
// return "", fmt.Errorf("%v is not a valid value for AC", v)
// }
// }
//
// func au(v string) (CVSSValue, error) {
// switch v {
// case "N":
// return CVSSValue("None required"), nil
// case "S":
// return CVSSValue("Requires single instance"), nil
// case "M":
// return CVSSValue("Requires multiple instances"), nil
// default:
// return "", fmt.Errorf("%v is not a valid value for Au", v)
// }
// }
//
// func cAndIAndA(v string) (CVSSValue, error) {
// switch v {
// case "N":
// return CVSSValue("None"), nil
// case "P":
// return CVSSValue("Partial"), nil
// case "C":
// return CVSSValue("Complete"), nil
// default:
// return "", fmt.Errorf("%v is not a valid value for C/I/A", v)
// }
// }
//
// func e(v string) (CVSSValue, error) {
// switch v {
// case "U":
// return CVSSValue("Unproven"), nil
// case "POC":
// return CVSSValue("Proof-of-concept"), nil
// case "F":
// return CVSSValue("Functional"), nil
// case "H":
// return CVSSValue("High"), nil
// case "ND":
// return CVSSValue("Not Defined"), nil
// default:
// return "", fmt.Errorf("%v is not a valid value for E", v)
// }
// }
//
// func rl(v string) (CVSSValue, error) {
// switch v {
// case "OF":
// return CVSSValue("Official-fix"), nil
// case "T":
// return CVSSValue("Temporary-fix"), nil
// case "W":
// return CVSSValue("Workaround"), nil
// case "U":
// return CVSSValue("Unavailable"), nil
// case "ND":
// return CVSSValue("Not Defined"), nil
// default:
// return "", fmt.Errorf("%v is not a valid value for RL", v)
// }
// }
//
// func rc(v string) (CVSSValue, error) {
// switch v {
// case "UC":
// return CVSSValue("Unconfirmed"), nil
// case "UR":
// return CVSSValue("Uncorroborated"), nil
// case "C":
// return CVSSValue("Confirmed"), nil
// case "ND":
// return CVSSValue("Not Defined"), nil
// default:
// return "", fmt.Errorf("%v is not a valid value for RC", v)
// }
// }
//
// func cdp(v string) (CVSSValue, error) {
// switch v {
// case "N":
// return CVSSValue("None"), nil
// case "L":
// return CVSSValue("Low"), nil
// case "LM":
// return CVSSValue("Low-Medium"), nil
// case "MH":
// return CVSSValue("Medium-High"), nil
// case "H":
// return CVSSValue("High"), nil
// case "ND":
// return CVSSValue("Not Defined"), nil
// default:
// return "", fmt.Errorf("%v is not a valid value for CDP", v)
// }
// }
//
// func td(v string) (CVSSValue, error) {
// switch v {
// case "N":
// return CVSSValue("None (0%)"), nil
// case "L":
// return CVSSValue("Low (1-25%)"), nil
// case "M":
// return CVSSValue("Medium (26-75%)"), nil
// case "H":
// return CVSSValue("High (76-100%)"), nil
// case "ND":
// return CVSSValue("Not Defined"), nil
// default:
// return "", fmt.Errorf("%v is not a valid value for TD", v)
// }
// }
//
// func crAndIrAndAr(v string) (CVSSValue, error) {
// switch v {
// case "L":
// return CVSSValue("Low"), nil
// case "M":
// return CVSSValue("Medium"), nil
// case "H":
// return CVSSValue("High"), nil
// case "ND":
// return CVSSValue("Not Defined"), nil
// default:
// return "", fmt.Errorf("%v is not a valid value for CR/IR/AR", v)
// }
// }

@ -15,6 +15,12 @@
// Package types defines useful types that are used in database models.
package types
import (
"database/sql/driver"
"errors"
"fmt"
)
// Priority defines a vulnerability priority
type Priority string
@ -86,3 +92,19 @@ func (p Priority) Compare(p2 Priority) int {
return i1 - i2
}
// Scan implements the database/sql Scanner interface.
func (p *Priority) Scan(value interface{}) error {
val, ok := value.([]byte)
if !ok {
return errors.New("could not scan a Priority from a non-string input")
}
*p = Priority(string(val))
if !p.IsValid() {
return fmt.Errorf("could not scan an invalid Priority (%v)", p)
}
return nil
}
// Value implements the database/sql/driver Valuer interface.
func (p *Priority) Value() (driver.Value, error) {
return string(*p), nil
}
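A hedged sketch of how these Scanner/Valuer implementations (and the analogous ones on Version just below) might be exercised through database/sql; a database/sql import is assumed, and the table and column names are illustrative only:
// Hypothetical round trip of a Priority through database/sql; the
// "vulnerabilities" table and its columns are invented for this example.
func priorityRoundTrip(db *sql.DB) (Priority, error) {
	prio := High
	// *Priority implements driver.Valuer, so the pointer can be passed directly.
	if _, err := db.Exec("INSERT INTO vulnerabilities (name, priority) VALUES ($1, $2)", "CVE-2015-0001", &prio); err != nil {
		return Unknown, err
	}
	var stored Priority
	// *Priority implements sql.Scanner, so Scan fills it from the column value.
	err := db.QueryRow("SELECT priority FROM vulnerabilities WHERE name = $1", "CVE-2015-0001").Scan(&stored)
	return stored, err
}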

@ -15,6 +15,7 @@
package types
import (
"database/sql/driver"
"encoding/json"
"errors"
"strconv"
@ -183,6 +184,19 @@ func (v *Version) UnmarshalJSON(b []byte) (err error) {
return
}
// Scan implements the database/sql Scanner interface.
func (v *Version) Scan(value interface{}) (err error) {
val, ok := value.([]byte)
if !ok {
return errors.New("could not scan a Version from a non-string input")
}
*v, err = NewVersion(string(val))
return
}
// Value implements the database/sql/driver Valuer interface.
func (v *Version) Value() (driver.Value, error) {
return v.String(), nil
}
func verrevcmp(t1, t2 string) int {
t1, rt1 := nextRune(t1)
t2, rt2 := nextRune(t2)

@ -12,18 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package packages defines PackagesDetector for several sources.
package packages
package dpkg
import (
"bufio"
"regexp"
"strings"
"github.com/coreos/pkg/capnslog"
"github.com/coreos/clair/database"
"github.com/coreos/clair/utils/types"
"github.com/coreos/clair/worker/detectors"
"github.com/coreos/pkg/capnslog"
)
var (
@ -33,24 +32,24 @@ var (
dpkgSrcCaptureRegexpNames = dpkgSrcCaptureRegexp.SubexpNames()
)
// DpkgPackagesDetector implements PackagesDetector and detects dpkg packages
type DpkgPackagesDetector struct{}
// DpkgFeaturesDetector implements FeaturesDetector and detects dpkg packages
type DpkgFeaturesDetector struct{}
func init() {
detectors.RegisterPackagesDetector("dpkg", &DpkgPackagesDetector{})
detectors.RegisterFeaturesDetector("dpkg", &DpkgFeaturesDetector{})
}
// Detect detects packages using var/lib/dpkg/status from the input data
func (detector *DpkgPackagesDetector) Detect(data map[string][]byte) ([]*database.Package, error) {
func (detector *DpkgFeaturesDetector) Detect(data map[string][]byte) ([]database.FeatureVersion, error) {
f, hasFile := data["var/lib/dpkg/status"]
if !hasFile {
return []*database.Package{}, nil
return []database.FeatureVersion{}, nil
}
// Create a map to store packages and ensure their uniqueness
packagesMap := make(map[string]*database.Package)
packagesMap := make(map[string]database.FeatureVersion)
var pkg *database.Package
var pkg database.FeatureVersion
var err error
scanner := bufio.NewScanner(strings.NewReader(string(f)))
for scanner.Scan() {
@ -60,10 +59,8 @@ func (detector *DpkgPackagesDetector) Detect(data map[string][]byte) ([]*databas
// Package line
// Defines the name of the package
pkg = &database.Package{
Name: strings.TrimSpace(strings.TrimPrefix(line, "Package: ")),
}
} else if pkg != nil && strings.HasPrefix(line, "Source: ") {
pkg.Feature.Name = strings.TrimSpace(strings.TrimPrefix(line, "Package: "))
} else if strings.HasPrefix(line, "Source: ") {
// Source line (optional)
// Gives the name of the source package
// May also specify a version
@ -74,14 +71,14 @@ func (detector *DpkgPackagesDetector) Detect(data map[string][]byte) ([]*databas
md[dpkgSrcCaptureRegexpNames[i]] = strings.TrimSpace(n)
}
pkg.Name = md["name"]
pkg.Feature.Name = md["name"]
if md["version"] != "" {
pkg.Version, err = types.NewVersion(md["version"])
if err != nil {
log.Warningf("could not parse package version '%s': %s. skipping", line[1], err.Error())
}
}
} else if pkg != nil && strings.HasPrefix(line, "Version: ") && pkg.Version.String() == "" {
} else if strings.HasPrefix(line, "Version: ") && pkg.Version.String() == "" {
// Version line
// Defines the version of the package
// This version is less important than a version retrieved from a Source line
@ -94,14 +91,15 @@ func (detector *DpkgPackagesDetector) Detect(data map[string][]byte) ([]*databas
}
// Add the package to the result array if we have all the information
if pkg != nil && pkg.Name != "" && pkg.Version.String() != "" {
packagesMap[pkg.Key()] = pkg
pkg = nil
if pkg.Feature.Name != "" && pkg.Version.String() != "" {
packagesMap[pkg.Feature.Name+"#"+pkg.Version.String()] = pkg
pkg.Feature.Name = ""
pkg.Version = types.Version{}
}
}
// Convert the map to a slice
packages := make([]*database.Package, 0, len(packagesMap))
packages := make([]database.FeatureVersion, 0, len(packagesMap))
for _, pkg := range packagesMap {
packages = append(packages, pkg)
}
@ -111,6 +109,6 @@ func (detector *DpkgPackagesDetector) Detect(data map[string][]byte) ([]*databas
// GetRequiredFiles returns the list of files required for Detect, without
// leading /
func (detector *DpkgPackagesDetector) GetRequiredFiles() []string {
func (detector *DpkgFeaturesDetector) GetRequiredFiles() []string {
return []string{"var/lib/dpkg/status"}
}
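A hedged sketch of driving this detector with an in-memory status file (written as if in this package, fmt import assumed; the status content below is fabricated, not a real dpkg database):
// Sketch: run the detector on a minimal, fabricated status file.
func exampleDpkgDetect() {
	status := []byte("Package: pam\nSource: pam\nVersion: 1.1.8-3.1ubuntu3\n")
	detector := &DpkgFeaturesDetector{}
	features, err := detector.Detect(map[string][]byte{"var/lib/dpkg/status": status})
	if err != nil {
		return
	}
	for _, fv := range features {
		// Expected to print: pam 1.1.8-3.1ubuntu3
		fmt.Println(fv.Feature.Name, fv.Version.String())
	}
}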

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package packages
package dpkg
import (
"testing"
@ -24,7 +24,7 @@ import (
var dpkgPackagesTests = []packagesTest{
// Test an Ubuntu dpkg status file
packagesTest{
packages: []*database.Package{
packages: []database.FeatureVersion{
&database.Package{
Name: "pam", // Two packages from this source are installed, it should only appear one time
Version: types.NewVersionUnsafe("1.1.8-3.1ubuntu3"),
@ -44,6 +44,6 @@ var dpkgPackagesTests = []packagesTest{
},
}
func TestDpkgPackagesDetector(t *testing.T) {
testPackagesDetector(t, &DpkgPackagesDetector{}, dpkgPackagesTests)
func TestDpkgFeaturesDetector(t *testing.T) {
feature.TestFeaturesDetector(t, &DpkgFeaturesDetector{}, dpkgPackagesTests)
}

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package packages
package rpm
import (
"bufio"
@ -21,42 +21,45 @@ import (
"strings"
"github.com/coreos/clair/database"
cerrors "github.com/coreos/clair/utils/errors"
"github.com/coreos/clair/utils"
cerrors "github.com/coreos/clair/utils/errors"
"github.com/coreos/clair/utils/types"
"github.com/coreos/clair/worker/detectors"
"github.com/coreos/pkg/capnslog"
)
// RpmPackagesDetector implements PackagesDetector and detects rpm packages
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "rpm")
// RpmFeaturesDetector implements FeaturesDetector and detects rpm packages
// It requires the "rpm" binary to be in the PATH
type RpmPackagesDetector struct{}
type RpmFeaturesDetector struct{}
func init() {
detectors.RegisterPackagesDetector("rpm", &RpmPackagesDetector{})
detectors.RegisterFeaturesDetector("rpm", &RpmFeaturesDetector{})
}
// Detect detects packages using var/lib/rpm/Packages from the input data
func (detector *RpmPackagesDetector) Detect(data map[string][]byte) ([]*database.Package, error) {
func (detector *RpmFeaturesDetector) Detect(data map[string][]byte) ([]database.FeatureVersion, error) {
f, hasFile := data["var/lib/rpm/Packages"]
if !hasFile {
return []*database.Package{}, nil
return []database.FeatureVersion{}, nil
}
// Create a map to store packages and ensure their uniqueness
packagesMap := make(map[string]*database.Package)
packagesMap := make(map[string]database.FeatureVersion)
// Write the required "Packages" file to disk
tmpDir, err := ioutil.TempDir(os.TempDir(), "rpm")
defer os.RemoveAll(tmpDir)
if err != nil {
log.Errorf("could not create temporary folder for RPM detection: %s", err)
return []*database.Package{}, cerrors.ErrFilesystem
return []database.FeatureVersion{}, cerrors.ErrFilesystem
}
err = ioutil.WriteFile(tmpDir+"/Packages", f, 0700)
if err != nil {
log.Errorf("could not create temporary file for RPM detection: %s", err)
return []*database.Package{}, cerrors.ErrFilesystem
return []database.FeatureVersion{}, cerrors.ErrFilesystem
}
// Query RPM
@ -67,7 +70,7 @@ func (detector *RpmPackagesDetector) Detect(data map[string][]byte) ([]*database
log.Errorf("could not query RPM: %s. output: %s", err, string(out))
// Do not bubble up because we probably won't be able to fix it,
// the database must be corrupted
return []*database.Package{}, nil
return []database.FeatureVersion{}, nil
}
scanner := bufio.NewScanner(strings.NewReader(string(out)))
@ -92,12 +95,17 @@ func (detector *RpmPackagesDetector) Detect(data map[string][]byte) ([]*database
}
// Add package
pkg := &database.Package{Name: line[0], Version: version}
packagesMap[pkg.Key()] = pkg
pkg := database.FeatureVersion{
Feature: database.Feature{
Name: line[0],
},
Version: version,
}
packagesMap[pkg.Feature.Name+"#"+pkg.Version.String()] = pkg
}
// Convert the map to a slice
packages := make([]*database.Package, 0, len(packagesMap))
packages := make([]database.FeatureVersion, 0, len(packagesMap))
for _, pkg := range packagesMap {
packages = append(packages, pkg)
}
@ -107,6 +115,6 @@ func (detector *RpmPackagesDetector) Detect(data map[string][]byte) ([]*database
// GetRequiredFiles returns the list of files required for Detect, without
// leading /
func (detector *RpmPackagesDetector) GetRequiredFiles() []string {
func (detector *RpmFeaturesDetector) GetRequiredFiles() []string {
return []string{"var/lib/rpm/Packages"}
}

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package packages
package rpm
import (
"testing"
@ -25,7 +25,7 @@ var rpmPackagesTests = []packagesTest{
// Test a CentOS 7 RPM database
// Memo: Use the following command on an RPM-based system to shrink a database: rpm -qa --qf "%{NAME}\n" |tail -n +3| xargs rpm -e --justdb
packagesTest{
packages: []*database.Package{
packages: []database.FeatureVersion{
&database.Package{
Name: "centos-release", // Two packages from this source are installed, it should only appear one time
Version: types.NewVersionUnsafe("7-1.1503.el7.centos.2.8"),
@ -41,6 +41,6 @@ var rpmPackagesTests = []packagesTest{
},
}
func TestRpmPackagesDetector(t *testing.T) {
testPackagesDetector(t, &RpmPackagesDetector{}, rpmPackagesTests)
func TestRpmFeaturesDetector(t *testing.T) {
feature.TestFeaturesDetector(t, &RpmFeaturesDetector{}, rpmPackagesTests)
}

@ -26,7 +26,7 @@ import (
)
type packagesTest struct {
packages []*database.Package
packages []database.FeatureVersion
data map[string][]byte
}
@ -36,7 +36,7 @@ func loadFileForTest(name string) []byte {
return d
}
func testPackagesDetector(t *testing.T, detector detectors.PackagesDetector, tests []packagesTest) {
func testFeaturesDetector(t *testing.T, detector detectors.FeaturesDetector, tests []packagesTest) {
for _, test := range tests {
packages, err := detector.Detect(test.data)
if assert.Nil(t, err) && assert.Len(t, packages, len(test.packages)) {

@ -0,0 +1,79 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package detectors
import (
"fmt"
"sync"
"github.com/coreos/clair/database"
)
// The FeaturesDetector interface defines a way to detect packages from input data.
type FeaturesDetector interface {
// Detect detects a list of FeatureVersion from the input data.
Detect(map[string][]byte) ([]database.FeatureVersion, error)
// GetRequiredFiles returns the list of files required for Detect, without
// leading /.
GetRequiredFiles() []string
}
var (
featuresDetectorsLock sync.Mutex
featuresDetectors = make(map[string]FeaturesDetector)
)
// RegisterFeaturesDetector makes a FeaturesDetector available for DetectFeatures.
func RegisterFeaturesDetector(name string, f FeaturesDetector) {
if name == "" {
panic("Could not register a FeaturesDetector with an empty name")
}
if f == nil {
panic("Could not register a nil FeaturesDetector")
}
featuresDetectorsLock.Lock()
defer featuresDetectorsLock.Unlock()
if _, alreadyExists := featuresDetectors[name]; alreadyExists {
panic(fmt.Sprintf("Detector '%s' is already registered", name))
}
featuresDetectors[name] = f
}
// DetectFeatures detects a list of FeatureVersion using every registered FeaturesDetector.
func DetectFeatures(data map[string][]byte) ([]database.FeatureVersion, error) {
var packages []database.FeatureVersion
for _, detector := range featuresDetectors {
pkgs, err := detector.Detect(data)
if err != nil {
return []database.FeatureVersion{}, err
}
packages = append(packages, pkgs...)
}
return packages, nil
}
// GetRequiredFilesFeatures returns the list of files required for Detect for every
// registered FeaturesDetector, without leading /.
func GetRequiredFilesFeatures() (files []string) {
for _, detector := range featuresDetectors {
files = append(files, detector.GetRequiredFiles()...)
}
return
}
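To illustrate the registration pattern, a hypothetical detector for another package manager could be wired in from its own package as sketched below; the "apk" name and file path are invented for the example and are not part of this commit:
// Hypothetical detector living in its own package and importing
// "github.com/coreos/clair/database" and this detectors package.
type apkFeaturesDetector struct{}

func (d *apkFeaturesDetector) Detect(data map[string][]byte) ([]database.FeatureVersion, error) {
	// Parsing of lib/apk/db/installed would go here; omitted in this sketch.
	return []database.FeatureVersion{}, nil
}

func (d *apkFeaturesDetector) GetRequiredFiles() []string {
	return []string{"lib/apk/db/installed"}
}

func init() {
	detectors.RegisterFeaturesDetector("apk", &apkFeaturesDetector{})
}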

@ -0,0 +1,82 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package detectors exposes functions to register and use container
// information extractors.
package detectors
import (
"fmt"
"sync"
"github.com/coreos/clair/database"
)
// The NamespaceDetector interface defines a way to detect a Namespace from input data.
// A namespace is usually made of an Operating System name and its version.
type NamespaceDetector interface {
// Detect detects a Namespace and its version from input data.
Detect(map[string][]byte) *database.Namespace
// GetRequiredFiles returns the list of files required for Detect, without
// leading /.
GetRequiredFiles() []string
}
var (
namespaceDetectorsLock sync.Mutex
namespaceDetectors = make(map[string]NamespaceDetector)
)
// RegisterNamespaceDetector provides a way to dynamically register an implementation of a
// NamespaceDetector.
//
// If RegisterNamespaceDetector is called twice with the same name, if NamespaceDetector is nil,
// or if the name is blank, it panics.
func RegisterNamespaceDetector(name string, f NamespaceDetector) {
if name == "" {
panic("Could not register a NamespaceDetector with an empty name")
}
if f == nil {
panic("Could not register a nil NamespaceDetector")
}
namespaceDetectorsLock.Lock()
defer namespaceDetectorsLock.Unlock()
if _, alreadyExists := namespaceDetectors[name]; alreadyExists {
panic(fmt.Sprintf("Detector '%s' is already registered", name))
}
namespaceDetectors[name] = f
}
// DetectNamespace finds the OS of the layer by using every registered NamespaceDetector.
func DetectNamespace(data map[string][]byte) *database.Namespace {
for _, detector := range namespaceDetectors {
if namespace := detector.Detect(data); namespace != nil {
return namespace
}
}
return nil
}
// GetRequiredFilesNamespace returns the list of files required for DetectNamespace for every
// registered NamespaceDetector, without leading /.
func GetRequiredFilesNamespace() (files []string) {
for _, detector := range namespaceDetectors {
files = append(files, detector.GetRequiredFiles()...)
}
return
}
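For illustration, a minimal hypothetical NamespaceDetector might look like the sketch below; the alpine-release file and namespace format are assumptions for the example, not part of this commit:
// Hypothetical detector in its own package, importing "strings",
// "github.com/coreos/clair/database" and this detectors package.
type alpineReleaseNamespaceDetector struct{}

func (d *alpineReleaseNamespaceDetector) Detect(data map[string][]byte) *database.Namespace {
	f, hasFile := data["etc/alpine-release"]
	if !hasFile {
		return nil
	}
	version := strings.TrimSpace(string(f))
	if version == "" {
		return nil
	}
	return &database.Namespace{Name: "alpine:" + version}
}

func (d *alpineReleaseNamespaceDetector) GetRequiredFiles() []string {
	return []string{"etc/alpine-release"}
}

func init() {
	detectors.RegisterNamespaceDetector("alpine-release", &alpineReleaseNamespaceDetector{})
}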

@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package os defines OSDetector for several sources.
package os
package aptsources
import (
"bufio"
@ -23,22 +22,25 @@ import (
"github.com/coreos/clair/worker/detectors"
)
// AptSourcesOSDetector implements OSDetector and detects the OS from the
// AptSourcesNamespaceDetector implements NamespaceDetector and detects the Namespace from the
// /etc/apt/sources.list file.
type AptSourcesOSDetector struct{}
//
// This detector is necessary to determine the precise Debian version when it is,
// for instance, an unstable release.
type AptSourcesNamespaceDetector struct{}
func init() {
detectors.RegisterOSDetector("apt-sources", &AptSourcesOSDetector{})
detectors.RegisterNamespaceDetector("apt-sources", &AptSourcesNamespaceDetector{})
}
// Detect tries to detect OS/Version using /etc/apt/sources.list
// Necessary to determine precise Debian version when it is an unstable version for instance
func (detector *AptSourcesOSDetector) Detect(data map[string][]byte) (OS, version string) {
func (detector *AptSourcesNamespaceDetector) Detect(data map[string][]byte) *database.Namespace {
f, hasFile := data["etc/apt/sources.list"]
if !hasFile {
return
return nil
}
var OS, version string
scanner := bufio.NewScanner(strings.NewReader(string(f)))
for scanner.Scan() {
// Format: man sources.list | https://wiki.debian.org/SourcesList)
@ -72,10 +74,12 @@ func (detector *AptSourcesOSDetector) Detect(data map[string][]byte) (OS, versio
}
}
return
if OS != "" && version != "" {
return &database.Namespace{Name: OS + ":" + version}
}
return nil
}
// GetRequiredFiles returns the list of files that are required for Detect()
func (detector *AptSourcesOSDetector) GetRequiredFiles() []string {
func (detector *AptSourcesNamespaceDetector) GetRequiredFiles() []string {
return []string{"etc/apt/sources.list"}
}

@ -12,15 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package os
package aptsources
import "testing"
import (
"testing"
var aptSourcesOSTests = []osTest{
osTest{
expectedOS: "debian",
expectedVersion: "unstable",
data: map[string][]byte{
"github.com/coreos/clair/database"
"github.com/coreos/clair/worker/detectors/namespace"
)
var aptSourcesOSTests = []namespace.NamespaceTest{
namespace.NamespaceTest{
ExpectedNamespace: database.Namespace{Name: "debian:unstable"},
Data: map[string][]byte{
"etc/os-release": []byte(
`PRETTY_NAME="Debian GNU/Linux stretch/sid"
NAME="Debian GNU/Linux"
@ -33,6 +37,6 @@ BUG_REPORT_URL="https://bugs.debian.org/"`),
},
}
func TestAptSourcesOSDetector(t *testing.T) {
testOSDetector(t, &AptSourcesOSDetector{}, aptSourcesOSTests)
func TestAptSourcesNamespaceDetector(t *testing.T) {
namespace.TestNamespaceDetector(t, &AptSourcesNamespaceDetector{}, aptSourcesOSTests)
}

@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package os
package lsbrelease
import (
"bufio"
"regexp"
"strings"
"github.com/coreos/clair/database"
"github.com/coreos/clair/worker/detectors"
)
@ -27,22 +28,24 @@ var (
lsbReleaseVersionRegexp = regexp.MustCompile(`^DISTRIB_RELEASE=(.*)`)
)
// AptSourcesOSDetector implements OSDetector and detects the OS from the
// LsbReleaseNamespaceDetector implements NamespaceDetector and detects the Namespace from the
// /etc/lsb-release file.
type LsbReleaseOSDetector struct{}
//
// This detector is necessary for Ubuntu Precise.
type LsbReleaseNamespaceDetector struct{}
func init() {
detectors.RegisterOSDetector("lsb-release", &LsbReleaseOSDetector{})
detectors.RegisterNamespaceDetector("lsb-release", &LsbReleaseNamespaceDetector{})
}
// Detect tries to detect OS/Version using "/etc/lsb-release"
// Necessary for Ubuntu Precise for instance
func (detector *LsbReleaseOSDetector) Detect(data map[string][]byte) (OS, version string) {
func (detector *LsbReleaseNamespaceDetector) Detect(data map[string][]byte) *database.Namespace {
f, hasFile := data["etc/lsb-release"]
if !hasFile {
return
return nil
}
var OS, version string
scanner := bufio.NewScanner(strings.NewReader(string(f)))
for scanner.Scan() {
line := scanner.Text()
@ -66,10 +69,13 @@ func (detector *LsbReleaseOSDetector) Detect(data map[string][]byte) (OS, versio
}
}
return
if OS != "" && version != "" {
return &database.Namespace{Name: OS + ":" + version}
}
return nil
}
// GetRequiredFiles returns the list of files that are required for Detect()
func (detector *LsbReleaseOSDetector) GetRequiredFiles() []string {
func (detector *LsbReleaseNamespaceDetector) GetRequiredFiles() []string {
return []string{"etc/lsb-release"}
}

@ -12,15 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package os
package lsbrelease
import "testing"
import (
"testing"
var lsbReleaseOSTests = []osTest{
osTest{
expectedOS: "ubuntu",
expectedVersion: "12.04",
data: map[string][]byte{
"github.com/coreos/clair/database"
"github.com/coreos/clair/worker/detectors/namespace"
)
var lsbReleaseOSTests = []namespace.NamespaceTest{
namespace.NamespaceTest{
ExpectedNamespace: database.Namespace{Name: "ubuntu:12.04"},
Data: map[string][]byte{
"etc/lsb-release": []byte(
`DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=12.04
@ -28,10 +32,9 @@ DISTRIB_CODENAME=precise
DISTRIB_DESCRIPTION="Ubuntu 12.04 LTS"`),
},
},
osTest{ // We don't care about the minor version of Debian
expectedOS: "debian",
expectedVersion: "7",
data: map[string][]byte{
namespace.NamespaceTest{ // We don't care about the minor version of Debian
ExpectedNamespace: database.Namespace{Name: "debian:7"},
Data: map[string][]byte{
"etc/lsb-release": []byte(
`DISTRIB_ID=Debian
DISTRIB_RELEASE=7.1
@ -41,6 +44,6 @@ DISTRIB_DESCRIPTION="Debian 7.1"`),
},
}
func TestLsbReleaseOSDetector(t *testing.T) {
testOSDetector(t, &LsbReleaseOSDetector{}, lsbReleaseOSTests)
func TestLsbReleaseNamespaceDetector(t *testing.T) {
namespace.TestNamespaceDetector(t, &LsbReleaseNamespaceDetector{}, lsbReleaseOSTests)
}

@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package os
package osrelease
import (
"bufio"
"regexp"
"strings"
"github.com/coreos/clair/database"
"github.com/coreos/clair/worker/detectors"
)
@ -27,18 +28,20 @@ var (
osReleaseVersionRegexp = regexp.MustCompile(`^VERSION_ID=(.*)`)
)
// OsReleaseOSDetector implements OSDetector and detects the OS from the
// OsReleaseNamespaceDetector implements NamespaceDetector and detects the OS from the
// /etc/os-release and usr/lib/os-release files.
type OsReleaseOSDetector struct{}
type OsReleaseNamespaceDetector struct{}
func init() {
detectors.RegisterOSDetector("os-release", &OsReleaseOSDetector{})
detectors.RegisterNamespaceDetector("os-release", &OsReleaseNamespaceDetector{})
}
// Detect tries to detect OS/Version using "/etc/os-release" and "/usr/lib/os-release"
// Typically for Debian / Ubuntu
// /etc/debian_version can't be used: it does not distinguish between testing and unstable, it returns stretch/sid for both
func (detector *OsReleaseOSDetector) Detect(data map[string][]byte) (OS, version string) {
func (detector *OsReleaseNamespaceDetector) Detect(data map[string][]byte) *database.Namespace {
var OS, version string
for _, filePath := range detector.GetRequiredFiles() {
f, hasFile := data[filePath]
if !hasFile {
@ -61,10 +64,13 @@ func (detector *OsReleaseOSDetector) Detect(data map[string][]byte) (OS, version
}
}
return
if OS != "" && version != "" {
return &database.Namespace{Name: OS + ":" + version}
}
return nil
}
// GetRequiredFiles returns the list of files that are required for Detect()
func (detector *OsReleaseOSDetector) GetRequiredFiles() []string {
func (detector *OsReleaseNamespaceDetector) GetRequiredFiles() []string {
return []string{"etc/os-release", "usr/lib/os-release"}
}

@ -12,15 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package os
package osrelease
import "testing"
import (
"testing"
var osReleaseOSTests = []osTest{
osTest{
expectedOS: "debian",
expectedVersion: "8",
data: map[string][]byte{
"github.com/coreos/clair/database"
"github.com/coreos/clair/worker/detectors/namespace"
)
var osReleaseOSTests = []namespace.NamespaceTest{
namespace.NamespaceTest{
ExpectedNamespace: database.Namespace{Name: "debian:8"},
Data: map[string][]byte{
"etc/os-release": []byte(
`PRETTY_NAME="Debian GNU/Linux 8 (jessie)"
NAME="Debian GNU/Linux"
@ -32,10 +36,9 @@ SUPPORT_URL="http://www.debian.org/support/"
BUG_REPORT_URL="https://bugs.debian.org/"`),
},
},
osTest{
expectedOS: "ubuntu",
expectedVersion: "15.10",
data: map[string][]byte{
namespace.NamespaceTest{
ExpectedNamespace: database.Namespace{Name: "ubuntu:15.10"},
Data: map[string][]byte{
"etc/os-release": []byte(
`NAME="Ubuntu"
VERSION="15.10 (Wily Werewolf)"
@ -48,10 +51,9 @@ SUPPORT_URL="http://help.ubuntu.com/"
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`),
},
},
osTest{ // Doesn't have quotes around VERSION_ID
expectedOS: "fedora",
expectedVersion: "20",
data: map[string][]byte{
namespace.NamespaceTest{ // Doesn't have quotes around VERSION_ID
ExpectedNamespace: database.Namespace{Name: "fedora:20"},
Data: map[string][]byte{
"etc/os-release": []byte(
`NAME=Fedora
VERSION="20 (Heisenbug)"
@ -70,6 +72,6 @@ REDHAT_SUPPORT_PRODUCT_VERSION=20`),
},
}
func TestOsReleaseOSDetector(t *testing.T) {
testOSDetector(t, &OsReleaseOSDetector{}, osReleaseOSTests)
func TestOsReleaseNamespaceDetector(t *testing.T) {
namespace.TestNamespaceDetector(t, &OsReleaseNamespaceDetector{}, osReleaseOSTests)
}

@ -12,31 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package os
package redhatrelease
import (
"regexp"
"strings"
"github.com/coreos/clair/database"
"github.com/coreos/clair/worker/detectors"
)
var redhatReleaseRegexp = regexp.MustCompile(`(?P<os>[^\s]*) (Linux release|release) (?P<version>[\d]+)`)
// RedhatReleaseOSDetector implements OSDetector and detects the OS from the
// RedhatReleaseNamespaceDetector implements NamespaceDetector and detects the OS from the
// /etc/centos-release, /etc/redhat-release and /etc/system-release files.
type RedhatReleaseOSDetector struct{}
func init() {
detectors.RegisterOSDetector("redhat-release", &RedhatReleaseOSDetector{})
}
// Detect tries to detect OS/Version using "/etc/centos-release", "/etc/redhat-release" and "/etc/system-release"
//
// Typically for CentOS and Red-Hat like systems
// eg. CentOS release 5.11 (Final)
// eg. CentOS release 6.6 (Final)
// eg. CentOS Linux release 7.1.1503 (Core)
func (detector *RedhatReleaseOSDetector) Detect(data map[string][]byte) (OS, version string) {
type RedhatReleaseNamespaceDetector struct{}
func init() {
detectors.RegisterNamespaceDetector("redhat-release", &RedhatReleaseNamespaceDetector{})
}
func (detector *RedhatReleaseNamespaceDetector) Detect(data map[string][]byte) *database.Namespace {
for _, filePath := range detector.GetRequiredFiles() {
f, hasFile := data[filePath]
if !hasFile {
@ -45,15 +46,14 @@ func (detector *RedhatReleaseOSDetector) Detect(data map[string][]byte) (OS, ver
r := redhatReleaseRegexp.FindStringSubmatch(string(f))
if len(r) == 4 {
OS = strings.ToLower(r[1])
version = r[3]
return &database.Namespace{Name: strings.ToLower(r[1]) + ":" + r[3]}
}
}
return
return nil
}
// GetRequiredFiles returns the list of files that are required for Detect()
func (detector *RedhatReleaseOSDetector) GetRequiredFiles() []string {
func (detector *RedhatReleaseNamespaceDetector) GetRequiredFiles() []string {
return []string{"etc/centos-release", "etc/redhat-release", "etc/system-release"}
}

@ -12,27 +12,30 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package os
package redhatrelease
import "testing"
import (
"testing"
var redhatReleaseTests = []osTest{
osTest{
expectedOS: "centos",
expectedVersion: "6",
data: map[string][]byte{
"github.com/coreos/clair/database"
"github.com/coreos/clair/worker/detectors/namespace"
)
var redhatReleaseTests = []namespace.NamespaceTest{
namespace.NamespaceTest{
ExpectedNamespace: database.Namespace{Name: "centos:6"},
Data: map[string][]byte{
"etc/centos-release": []byte(`CentOS release 6.6 (Final)`),
},
},
osTest{
expectedOS: "centos",
expectedVersion: "7",
data: map[string][]byte{
namespace.NamespaceTest{
ExpectedNamespace: database.Namespace{Name: "centos:7"},
Data: map[string][]byte{
"etc/system-release": []byte(`CentOS Linux release 7.1.1503 (Core)`),
},
},
}
func TestRedhatReleaseOSDetector(t *testing.T) {
testOSDetector(t, &RedhatReleaseOSDetector{}, redhatReleaseTests)
func TestRedhatReleaseNamespaceDetector(t *testing.T) {
namespace.TestNamespaceDetector(t, &RedhatReleaseNamespaceDetector{}, redhatReleaseTests)
}

@ -12,25 +12,23 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package os
package namespace
import (
"testing"
"github.com/coreos/clair/database"
"github.com/coreos/clair/worker/detectors"
"github.com/stretchr/testify/assert"
)
type osTest struct {
expectedOS string
expectedVersion string
data map[string][]byte
type NamespaceTest struct {
Data map[string][]byte
ExpectedNamespace database.Namespace
}
func testOSDetector(t *testing.T, detector detectors.OSDetector, tests []osTest) {
func TestNamespaceDetector(t *testing.T, detector detectors.NamespaceDetector, tests []NamespaceTest) {
for _, test := range tests {
os, version := detector.Detect(test.data)
assert.Equal(t, test.expectedOS, os)
assert.Equal(t, test.expectedVersion, version)
assert.Equal(t, test.ExpectedNamespace, *detector.Detect(test.Data))
}
}

@ -1,81 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package detectors exposes functions to register and use container
// information extractors.
package detectors
import (
"fmt"
"sync"
)
// The OSDetector interface defines a way to detect an Operating System and
// its version from input data
type OSDetector interface {
// Detect detects an Operating System and its version from input data
Detect(map[string][]byte) (string, string)
// GetRequiredFiles returns the list of files required for Detect, without
// leading /
GetRequiredFiles() []string
}
var (
osDetectorsLock sync.Mutex
osDetectors = make(map[string]OSDetector)
)
// RegisterOSDetector provides a way to dynamically register an implementation of a
// OSDetector.
//
// If RegisterOSDetector is called twice with the same name if OSDetector is nil,
// or if the name is blank, it panics.
func RegisterOSDetector(name string, f OSDetector) {
if name == "" {
panic("Could not register a OSDetector with an empty name")
}
if f == nil {
panic("Could not register a nil OSDetector")
}
osDetectorsLock.Lock()
defer osDetectorsLock.Unlock()
if _, alreadyExists := osDetectors[name]; alreadyExists {
panic(fmt.Sprintf("Detector '%s' is already registered", name))
}
osDetectors[name] = f
}
// DetectOS finds the OS of the layer by using every registered OSDetector
func DetectOS(data map[string][]byte) string {
for _, detector := range osDetectors {
OS, version := detector.Detect(data)
if OS != "" && version != "" {
return OS + ":" + version
}
}
return ""
}
// GetRequiredFilesOS returns the list of files required for Detect for every
// registered OSDetector, without leading /
func GetRequiredFilesOS() (files []string) {
for _, detector := range osDetectors {
files = append(files, detector.GetRequiredFiles()...)
}
return
}
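The deleted OS registry above has a namespace-flavoured counterpart that the new worker code relies on: detectors.DetectNamespace, detectors.GetRequiredFilesNamespace, and a NamespaceDetector interface whose Detect returns *database.Namespace (per the test helper earlier in this diff). That file is not part of this excerpt, so the following is only a plausible sketch; the registration function name and the first-match semantics of DetectNamespace are assumptions:

package detectors

import (
	"fmt"
	"sync"

	"github.com/coreos/clair/database"
)

// NamespaceDetector detects a layer's namespace (e.g. "centos:6") from its files.
type NamespaceDetector interface {
	// Detect returns the detected namespace, or nil if none was found.
	Detect(map[string][]byte) *database.Namespace
	// GetRequiredFiles returns the list of files required for Detect, without leading /.
	GetRequiredFiles() []string
}

var (
	namespaceDetectorsLock sync.Mutex
	namespaceDetectors     = make(map[string]NamespaceDetector)
)

// RegisterNamespaceDetector makes a NamespaceDetector available to DetectNamespace.
func RegisterNamespaceDetector(name string, d NamespaceDetector) {
	if name == "" {
		panic("Could not register a NamespaceDetector with an empty name")
	}
	if d == nil {
		panic("Could not register a nil NamespaceDetector")
	}
	namespaceDetectorsLock.Lock()
	defer namespaceDetectorsLock.Unlock()
	if _, alreadyExists := namespaceDetectors[name]; alreadyExists {
		panic(fmt.Sprintf("Detector '%s' is already registered", name))
	}
	namespaceDetectors[name] = d
}

// DetectNamespace returns the first namespace found by a registered detector, or nil.
func DetectNamespace(data map[string][]byte) *database.Namespace {
	for _, detector := range namespaceDetectors {
		if namespace := detector.Detect(data); namespace != nil {
			return namespace
		}
	}
	return nil
}

// GetRequiredFilesNamespace aggregates the required files of every registered detector.
func GetRequiredFilesNamespace() (files []string) {
	for _, detector := range namespaceDetectors {
		files = append(files, detector.GetRequiredFiles()...)
	}
	return
}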

@@ -1,79 +0,0 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package detectors
import (
"fmt"
"sync"
"github.com/coreos/clair/database"
)
// The PackagesDetector interface defines a way to detect packages from input data
type PackagesDetector interface {
// Detect detects packages from the input data
Detect(map[string][]byte) ([]*database.Package, error)
// GetRequiredFiles returns the list of files required for Detect, without
// leading /
GetRequiredFiles() []string
}
var (
packagesDetectorsLock sync.Mutex
packagesDetectors = make(map[string]PackagesDetector)
)
// RegisterPackagesDetector makes a PackagesDetector available for DetectPackages
func RegisterPackagesDetector(name string, f PackagesDetector) {
if name == "" {
panic("Could not register a PackagesDetector with an empty name")
}
if f == nil {
panic("Could not register a nil PackagesDetector")
}
packagesDetectorsLock.Lock()
defer packagesDetectorsLock.Unlock()
if _, alreadyExists := packagesDetectors[name]; alreadyExists {
panic(fmt.Sprintf("Detector '%s' is already registered", name))
}
packagesDetectors[name] = f
}
// DetectPackages detects packages using every registered PackagesDetector
func DetectPackages(data map[string][]byte) ([]*database.Package, error) {
var packages []*database.Package
for _, detector := range packagesDetectors {
pkgs, err := detector.Detect(data)
if err != nil {
return []*database.Package{}, err
}
packages = append(packages, pkgs...)
}
return packages, nil
}
// GetRequiredFilesPackages returns the list of files required for Detect for every
// registered PackagesDetector, without leading /
func GetRequiredFilesPackages() (files []string) {
for _, detector := range packagesDetectors {
files = append(files, detector.GetRequiredFiles()...)
}
return
}
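Similarly, the removed package registry is superseded by a feature registry that the worker consumes as detectors.DetectFeatures and detectors.GetRequiredFilesFeatures, now producing database.FeatureVersion values. Assuming it mirrors the structure sketched above (registration and GetRequiredFilesFeatures omitted here for brevity), its core would look roughly like this; the interface and map names are guesses:

package detectors

import "github.com/coreos/clair/database"

// FeaturesDetector extracts FeatureVersions (formerly Packages) from a layer's files.
type FeaturesDetector interface {
	// Detect returns every FeatureVersion found in the input data.
	Detect(map[string][]byte) ([]database.FeatureVersion, error)
	// GetRequiredFiles returns the list of files required for Detect, without leading /.
	GetRequiredFiles() []string
}

// featuresDetectors would be populated by a RegisterFeaturesDetector function
// analogous to the namespace registry sketched above.
var featuresDetectors = make(map[string]FeaturesDetector)

// DetectFeatures runs every registered detector and concatenates the results.
func DetectFeatures(data map[string][]byte) ([]database.FeatureVersion, error) {
	var features []database.FeatureVersion
	for _, detector := range featuresDetectors {
		fv, err := detector.Detect(data)
		if err != nil {
			return []database.FeatureVersion{}, err
		}
		features = append(features, fv...)
	}
	return features, nil
}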

@@ -47,259 +47,241 @@ var (
// has yet to be processed for the current layer.
ErrParentUnknown = errors.New("worker: parent layer is unknown, it must be processed first")
// SupportedOS is the list of operating system names that the worker supports.
SupportedOS = []string{"debian", "ubuntu", "centos"}
// SupportedNamespacePrefixes is the list of namespace prefixes that the worker supports.
SupportedNamespacePrefixes = []string{"debian:", "ubuntu:", "centos:"}
// SupportedImageFormat is the list of image formats that the worker supports.
SupportedImageFormat = []string{"Docker", "ACI"}
)
// Process detects the OS of a layer, the packages it installs/removes, and
// Process detects the Namespace of a layer, the features it adds/removes, and
// then stores everything in the database.
func Process(ID, parentID, path string, imageFormat string) error {
if ID == "" {
return cerrors.NewBadRequestError("could not process a layer which does not have ID")
func Process(datastore database.Datastore, name, parentName, path, imageFormat string) error {
// Verify parameters.
if name == "" {
return cerrors.NewBadRequestError("could not process a layer which does not have a name")
}
if path == "" {
return cerrors.NewBadRequestError("could not process a layer which does not have a path")
}
if imageFormat == "" {
return cerrors.NewBadRequestError("could not process a layer which does not have a specified format")
} else {
isSupported := false
for _, format := range SupportedImageFormat {
if strings.EqualFold(imageFormat, format) {
isSupported = true
break
}
}
if !isSupported {
return cerrors.NewBadRequestError("could not process a layer which does not have a supported format")
return cerrors.NewBadRequestError("could not process a layer which does not have a format")
}
isSupported := false
for _, format := range SupportedImageFormat {
if strings.EqualFold(imageFormat, format) {
isSupported = true
break
}
}
if !isSupported {
return cerrors.NewBadRequestError("could not process a layer which does not have a supported format")
}
log.Debugf("layer %s: processing (Location: %s, Engine version: %d, Parent: %s, Format: %s)", ID, utils.CleanURL(path), Version, parentID, imageFormat)
log.Debugf("layer %s: processing (Location: %s, Engine version: %d, Parent: %s, Format: %s)",
name, utils.CleanURL(path), Version, parentName, imageFormat)
// Check to see if the layer is already in the database.
layer, err := database.FindOneLayerByID(ID, []string{database.FieldLayerEngineVersion})
layer, err := datastore.FindLayer(name, false, false)
if err != nil && err != cerrors.ErrNotFound {
return err
}
var parent *database.Layer
if layer != nil {
// The layer is already in the database, check if we need to update it.
if layer.EngineVersion >= Version {
log.Debugf("layer %s: layer content has already been processed in the past with engine %d. Current engine is %d. skipping analysis", ID, layer.EngineVersion, Version)
return nil
}
log.Debugf("layer %s: layer content has been analyzed in the past with engine %d. Current engine is %d. analyzing again", ID, layer.EngineVersion, Version)
} else {
// The layer is a new one, create a base struct that we will fill.
layer = &database.Layer{ID: ID, EngineVersion: Version}
if err == cerrors.ErrNotFound {
// New layer case.
layer = database.Layer{Name: name, EngineVersion: Version}
// Check to make sure that the parent's layer has already been processed.
if parentID != "" {
parent, err = database.FindOneLayerByID(parentID, []string{database.FieldLayerOS, database.FieldLayerPackages, database.FieldLayerPackages})
// Retrieve the parent if it has one.
// We need to get it with its Features in order to diff them.
if parentName != "" {
parent, err := datastore.FindLayer(parentName, true, false)
if err != nil && err != cerrors.ErrNotFound {
return err
}
if parent == nil {
log.Warningf("layer %s: the parent layer (%s) is unknown. it must be processed first", ID, parentID)
if err == cerrors.ErrNotFound {
log.Warningf("layer %s: the parent layer (%s) is unknown. it must be processed first", name,
parentName)
return ErrParentUnknown
}
layer.ParentNode = parent.GetNode()
layer.Parent = &parent
}
} else {
// The layer is already in the database, check if we need to update it.
if layer.EngineVersion >= Version {
log.Debugf(`layer %s: layer content has already been processed in the past with engine %d.
Current engine is %d. skipping analysis`, name, layer.EngineVersion, Version)
return nil
}
log.Debugf(`layer %s: layer content has been analyzed in the past with engine %d. Current
engine is %d. analyzing again`, name, layer.EngineVersion, Version)
}
// Analyze the content.
layer.OS, layer.InstalledPackagesNodes, layer.RemovedPackagesNodes, err = detectContent(ID, path, parent, imageFormat)
layer.Namespace, layer.Features, err = detectContent(name, path, imageFormat, layer.Parent)
if err != nil {
return err
}
return database.InsertLayer(layer)
return datastore.InsertLayer(layer)
}
// detectContent downloads a layer's archive, extracts info from it and returns
// an updated Layer struct.
//
// If parent is not nil, its database.FieldLayerOS and database.FieldLayerPackages fields
// must have been selected.
func detectContent(ID, path string, parent *database.Layer, imageFormat string) (OS string, installedPackagesNodes, removedPackagesNodes []string, err error) {
data, err := getLayerData(path, imageFormat)
// detectContent downloads a layer's archive and extracts its Namespace and Features.
func detectContent(name, path, imageFormat string, parent *database.Layer) (namespace *database.Namespace, features []database.FeatureVersion, err error) {
data, err := detectors.DetectData(path, imageFormat, append(detectors.GetRequiredFilesFeatures(),
detectors.GetRequiredFilesNamespace()...), maxFileSize)
if err != nil {
log.Errorf("layer %s: failed to extract data from %s: %s", ID, utils.CleanURL(path), err)
log.Errorf("layer %s: failed to extract data from %s: %s", name, utils.CleanURL(path), err)
return
}
OS, err = detectOS(data, parent)
// Detect namespace.
namespace, err = detectNamespace(data, parent)
if err != nil {
return
}
if OS != "" {
log.Debugf("layer %s: OS is %s.", ID, OS)
if namespace.Name != "" {
log.Debugf("layer %s: Namespace is %s.", name, namespace.Name)
} else {
log.Debugf("layer %s: OS is unknown.", ID)
log.Debugf("layer %s: OS is unknown.", name)
}
packageList, err := detectors.DetectPackages(data)
// Detect features.
features, err = detectFeatures(name, data, namespace)
if err != nil {
log.Errorf("layer %s: package list could not be determined: %s", ID, err)
log.Errorf("layer %s: package list could not be determined: %s", name, err)
return
}
// If there are any packages, that layer modified the package list.
if len(packageList) > 0 {
// It is possible that the OS could not be detected, in the case of a
// first layer setting MAINTAINER only for instance. However, if the OS
// is unknown and packages are detected, we have to return an error.
if OS == "" {
log.Errorf("layer %s: OS is unknown but %d packages have been detected", ID, len(packageList))
err = ErrUnsupported
return
}
// If the layer has no parent, it can only add packages, not remove them.
if parent == nil {
// Build a list of the layer packages' node values.
var installedPackages []*database.Package
for _, p := range packageList {
p.OS = OS
installedPackages = append(installedPackages, p)
}
// Insert that list into the database.
err = database.InsertPackages(installedPackages)
if err != nil {
return
}
// Set the InstalledPackageNodes field on content.
for _, p := range installedPackages {
if p.Node != "" {
installedPackagesNodes = append(installedPackagesNodes, p.Node)
}
}
} else {
installedPackagesNodes, removedPackagesNodes, err = detectAndInsertInstalledAndRemovedPackages(OS, packageList, parent)
if err != nil {
return
}
}
}
log.Debugf("layer %s: detected %d packages: installs %d and removes %d packages", ID, len(packageList), len(installedPackagesNodes), len(removedPackagesNodes))
log.Debugf("layer %s: detected %d features", name, len(features))
return
}
// getLayerData downloads/opens a layer archive and extracts it into memory.
func getLayerData(path string, imageFormat string) (data map[string][]byte, err error) {
data, err = detectors.DetectData(path, imageFormat, append(detectors.GetRequiredFilesPackages(), detectors.GetRequiredFilesOS()...), maxFileSize)
if err != nil {
return nil, err
}
return
}
func detectOS(data map[string][]byte, parent *database.Layer) (detectedOS string, err error) {
detectedOS = detectors.DetectOS(data)
func detectNamespace(data map[string][]byte, parent *database.Layer) (namespace *database.Namespace, err error) {
namespace = detectors.DetectNamespace(data)
// Attempt to detect the OS from the parent layer.
if detectedOS == "" && parent != nil {
detectedOS, err = parent.OperatingSystem()
if namespace == nil && parent != nil {
namespace = parent.Namespace
if err != nil {
return "", err
return
}
}
// If the detectedOS is not in the supported OS list, the OS is unsupported.
if detectedOS != "" {
// Ensure that the detected namespace's prefix is supported.
if namespace != nil {
isSupported := false
for _, osPrefix := range SupportedOS {
if strings.HasPrefix(detectedOS, osPrefix) {
for _, namespacePrefix := range SupportedNamespacePrefixes {
if strings.HasPrefix(namespace.Name, namespacePrefix) {
isSupported = true
break
}
}
if !isSupported {
return "", ErrUnsupported
return namespace, ErrUnsupported
}
}
return
}
// detectAndInsertInstalledAndRemovedPackages finds the installed and removed
// package nodes and inserts the installed packages into the database.
func detectAndInsertInstalledAndRemovedPackages(detectedOS string, packageList []*database.Package, parent *database.Layer) (installedNodes, removedNodes []string, err error) {
// Get the parent layer's packages.
parentPackageNodes, err := parent.AllPackages()
func detectFeatures(name string, data map[string][]byte, namespace *database.Namespace) (features []database.FeatureVersion, err error) {
// TODO(Quentin-M): We need to pass the parent image to DetectFeatures because it is possible
// that some detectors need it in order to produce the entire feature list (if they can only
// detect a diff). Also, we should probably pass the detected namespace so that detectors can
// make their own decisions.
features, err = detectors.DetectFeatures(data)
if err != nil {
return nil, nil, err
}
parentPackages, err := database.FindAllPackagesByNodes(parentPackageNodes, []string{database.FieldPackageName, database.FieldPackageVersion})
if err != nil {
return nil, nil, err
}
// Map detected packages (name:version) string to packages.
packagesNVMapToPackage := make(map[string]*database.Package)
for _, p := range packageList {
packagesNVMapToPackage[p.Name+":"+p.Version.String()] = p
}
// Map parent's packages (name:version) string to nodes.
parentPackagesNVMapToNodes := make(map[string]string)
for _, p := range parentPackages {
parentPackagesNVMapToNodes[p.Name+":"+p.Version.String()] = p.Node
}
// Build a list of the parent layer's packages' node values.
var parentPackagesNV []string
for _, p := range parentPackages {
parentPackagesNV = append(parentPackagesNV, p.Name+":"+p.Version.String())
}
// Build a list of the layer packages' node values.
var layerPackagesNV []string
for _, p := range packageList {
layerPackagesNV = append(layerPackagesNV, p.Name+":"+p.Version.String())
}
// Calculate the installed and removed packages.
removedPackagesNV := utils.CompareStringLists(parentPackagesNV, layerPackagesNV)
installedPackagesNV := utils.CompareStringLists(layerPackagesNV, parentPackagesNV)
// Build a list of all the installed packages.
var installedPackages []*database.Package
for _, nv := range installedPackagesNV {
p, _ := packagesNVMapToPackage[nv]
p.OS = detectedOS
installedPackages = append(installedPackages, p)
}
// Insert that list into the database.
err = database.InsertPackages(installedPackages)
if err != nil {
return nil, nil, err
return
}
// Build the list of installed package nodes.
for _, p := range installedPackages {
if p.Node != "" {
installedNodes = append(installedNodes, p.Node)
// Ensure that every feature has an associated Namespace; otherwise, associate the detected
// namespace. If there is no detected namespace, return an error.
for i := 0; i < len(features); i++ {
if features[i].Feature.Namespace.Name == "" {
if namespace != nil {
features[i].Feature.Namespace = *namespace
} else {
log.Errorf("layer %s: Layer's namespace is unknown but non-namespaced features have been detected", name)
err = ErrUnsupported
return
}
}
}
// Build the list of removed package nodes.
for _, nv := range removedPackagesNV {
node, _ := parentPackagesNVMapToNodes[nv]
removedNodes = append(removedNodes, node)
}
return
}
// // detectAndInsertInstalledAndRemovedPackages finds the installed and removed
// // package nodes and inserts the installed packages into the database.
// func detectAndInsertInstalledAndRemovedPackages(detectedOS string, packageList []database.FeatureVersion, parent *database.Layer) (installedNodes, removedNodes []string, err error) {
// // Get the parent layer's packages.
// parentPackageNodes, err := parent.AllPackages()
// if err != nil {
// return nil, nil, err
// }
// parentPackages, err := database.FindAllPackagesByNodes(parentPackageNodes, []string{database.FieldPackageName, database.FieldPackageVersion})
// if err != nil {
// return nil, nil, err
// }
//
// // Map detected packages (name:version) string to packages.
// packagesNVMapToPackage := make(map[string]*database.Package)
// for _, p := range packageList {
// packagesNVMapToPackage[p.Name+":"+p.Version.String()] = p
// }
//
// // Map parent's packages (name:version) string to nodes.
// parentPackagesNVMapToNodes := make(map[string]string)
// for _, p := range parentPackages {
// parentPackagesNVMapToNodes[p.Name+":"+p.Version.String()] = p.Node
// }
//
// // Build a list of the parent layer's packages' node values.
// var parentPackagesNV []string
// for _, p := range parentPackages {
// parentPackagesNV = append(parentPackagesNV, p.Name+":"+p.Version.String())
// }
//
// // Build a list of the layer packages' node values.
// var layerPackagesNV []string
// for _, p := range packageList {
// layerPackagesNV = append(layerPackagesNV, p.Name+":"+p.Version.String())
// }
//
// // Calculate the installed and removed packages.
// removedPackagesNV := utils.CompareStringLists(parentPackagesNV, layerPackagesNV)
// installedPackagesNV := utils.CompareStringLists(layerPackagesNV, parentPackagesNV)
//
// // Build a list of all the installed packages.
// var installedPackages []database.FeatureVersion
// for _, nv := range installedPackagesNV {
// p, _ := packagesNVMapToPackage[nv]
// p.OS = detectedOS
// installedPackages = append(installedPackages, p)
// }
//
// // Insert that list into the database.
// err = database.InsertPackages(installedPackages)
// if err != nil {
// return nil, nil, err
// }
//
// // Build the list of installed package nodes.
// for _, p := range installedPackages {
// if p.Node != "" {
// installedNodes = append(installedNodes, p.Node)
// }
// }
//
// // Build the list of removed package nodes.
// for _, nv := range removedPackagesNV {
// node, _ := parentPackagesNVMapToNodes[nv]
// removedNodes = append(removedNodes, node)
// }
//
// return
// }
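To illustrate the new entry point only: a caller now injects the datastore explicitly instead of relying on package-level database state. The layer names, the URL, and the openDatastore helper below are placeholders for this sketch, not code from the commit:

package main

import (
	"log"

	"github.com/coreos/clair/database"
	"github.com/coreos/clair/worker"
)

// openDatastore stands in for whatever constructor the database package will
// expose (e.g. the upcoming PostgreSQL implementation); it is not defined in this diff.
func openDatastore() database.Datastore {
	panic("placeholder: open a real database.Datastore here")
}

func main() {
	datastore := openDatastore()

	// "layer0" must already have been processed, otherwise Process returns
	// worker.ErrParentUnknown and the caller is expected to retry later.
	err := worker.Process(datastore, "layer1", "layer0",
		"https://registry.example.com/layers/layer1/layer.tar", "Docker")
	if err != nil {
		log.Fatalf("could not process layer: %s", err)
	}
}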
