2018-09-11 20:09:08 +00:00
|
|
|
// Copyright 2018 clair authors
|
2015-11-13 19:11:28 +00:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2017-01-26 23:24:04 +00:00
|
|
|
package clair
|
2015-11-13 19:11:28 +00:00
|
|
|
|
|
|
|
import (
|
2017-07-26 23:22:29 +00:00
|
|
|
"errors"
|
|
|
|
"sync"
|
2017-01-18 02:40:59 +00:00
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
"github.com/deckarep/golang-set"
|
2017-05-04 17:21:25 +00:00
|
|
|
log "github.com/sirupsen/logrus"
|
|
|
|
|
2015-11-13 19:11:28 +00:00
|
|
|
"github.com/coreos/clair/database"
|
2017-01-13 23:49:02 +00:00
|
|
|
"github.com/coreos/clair/ext/featurefmt"
|
2017-01-13 21:48:12 +00:00
|
|
|
"github.com/coreos/clair/ext/featurens"
|
2017-01-13 08:07:35 +00:00
|
|
|
"github.com/coreos/clair/ext/imagefmt"
|
2017-01-13 07:08:52 +00:00
|
|
|
"github.com/coreos/clair/pkg/commonerr"
|
2017-07-26 23:22:29 +00:00
|
|
|
"github.com/coreos/clair/pkg/strutil"
|
2018-09-20 19:57:06 +00:00
|
|
|
"github.com/coreos/clair/pkg/tarutil"
|
2015-11-13 19:11:28 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// ErrUnsupported is the error that should be raised when an OS or package
	// manager is not supported.
	ErrUnsupported = commonerr.NewBadRequestError("worker: OS and/or package manager are not supported")

	// EnabledDetectors are detectors to be used to scan the layers.
	// They are consulted when building process requests (to compute which
	// detectors still need to run on a layer) and when deciding whether an
	// ancestry has already been fully processed.
	EnabledDetectors []database.Detector
)
|
|
|
|
|
2017-07-26 23:22:29 +00:00
|
|
|
// LayerRequest represents all information necessary to download and process a
// layer.
type LayerRequest struct {
	// Hash identifies the layer; it is used as the lookup key in the
	// database and to deduplicate layer processing.
	Hash string
	// Path is the location the layer blob is extracted from.
	Path string
	// Headers are passed along to the image extractor when fetching Path
	// (presumably HTTP headers such as authorization — confirm with caller).
	Headers map[string]string
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
// processResult carries the outcome of detecting a single layer's content.
type processResult struct {
	// existingLayer is the layer state previously stored in the database,
	// or a stub containing only the hash when the layer was never seen.
	existingLayer *database.Layer
	// newLayerContent holds the namespaces and features detected in this run.
	newLayerContent *database.Layer
	// err records any failure encountered while processing the layer.
	err error
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
// processRequest stores parameters used for processing a layer.
type processRequest struct {
	LayerRequest

	// existingLayer is the layer as currently known to the database; for a
	// brand-new layer it only carries the hash.
	existingLayer *database.Layer

	// detectors are the enabled detectors that have not yet been applied to
	// this layer (the diff of EnabledDetectors against the stored layer's By).
	detectors []database.Detector
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
// introducedFeature pairs a detected ancestry feature with the index of the
// layer that introduced it.
type introducedFeature struct {
	feature database.AncestryFeature
	// layerIndex is the position in the ancestry's layer list where this
	// feature first appeared.
	layerIndex int
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
// processRequests in parallel processes a set of requests for unique set of layers
|
2017-07-26 23:22:29 +00:00
|
|
|
// and returns sets of unique namespaces, features and layers to be inserted
|
|
|
|
// into the database.
|
2018-09-20 19:57:06 +00:00
|
|
|
func processRequests(imageFormat string, toDetect map[string]*processRequest) (map[string]*processResult, error) {
|
2017-07-26 23:22:29 +00:00
|
|
|
wg := &sync.WaitGroup{}
|
|
|
|
wg.Add(len(toDetect))
|
2018-09-20 19:57:06 +00:00
|
|
|
|
|
|
|
results := map[string]*processResult{}
|
|
|
|
for i := range toDetect {
|
|
|
|
results[i] = nil
|
|
|
|
}
|
|
|
|
|
2017-07-26 23:22:29 +00:00
|
|
|
for i := range toDetect {
|
2018-09-20 19:57:06 +00:00
|
|
|
result := processResult{}
|
|
|
|
results[i] = &result
|
|
|
|
go func(req *processRequest, res *processResult) {
|
|
|
|
*res = *detectContent(imageFormat, req)
|
2017-07-26 23:22:29 +00:00
|
|
|
wg.Done()
|
2018-09-20 19:57:06 +00:00
|
|
|
}(toDetect[i], &result)
|
2015-11-13 19:11:28 +00:00
|
|
|
}
|
2015-12-28 20:03:29 +00:00
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
wg.Wait()
|
2017-07-26 23:22:29 +00:00
|
|
|
errs := []error{}
|
|
|
|
for _, r := range results {
|
|
|
|
errs = append(errs, r.err)
|
2015-11-13 19:11:28 +00:00
|
|
|
}
|
2015-12-28 20:03:29 +00:00
|
|
|
|
2017-07-26 23:22:29 +00:00
|
|
|
if err := commonerr.CombineErrors(errs...); err != nil {
|
2018-09-20 19:57:06 +00:00
|
|
|
return nil, err
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
return results, nil
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
func getProcessRequest(datastore database.Datastore, req LayerRequest) (preq *processRequest, err error) {
|
2018-10-08 17:12:18 +00:00
|
|
|
layer, ok, err := database.FindLayerAndRollback(datastore, req.Hash)
|
2018-09-20 19:57:06 +00:00
|
|
|
if err != nil {
|
2017-07-26 23:22:29 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if !ok {
|
2018-09-20 19:57:06 +00:00
|
|
|
log.WithField("layer", req.Hash).Debug("found no existing layer in database")
|
2017-07-26 23:22:29 +00:00
|
|
|
preq = &processRequest{
|
2018-09-20 19:57:06 +00:00
|
|
|
LayerRequest: req,
|
|
|
|
existingLayer: &database.Layer{Hash: req.Hash},
|
|
|
|
detectors: EnabledDetectors,
|
2015-12-28 20:03:29 +00:00
|
|
|
}
|
|
|
|
} else {
|
2018-09-20 19:57:06 +00:00
|
|
|
log.WithFields(log.Fields{
|
|
|
|
"layer": layer.Hash,
|
|
|
|
"detectors": layer.By,
|
|
|
|
"feature count": len(layer.Features),
|
|
|
|
"namespace count": len(layer.Namespaces),
|
|
|
|
}).Debug("found existing layer in database")
|
|
|
|
|
|
|
|
preq = &processRequest{
|
|
|
|
LayerRequest: req,
|
|
|
|
existingLayer: &layer,
|
2018-10-02 14:50:53 +00:00
|
|
|
detectors: database.DiffDetectors(EnabledDetectors, layer.By),
|
2015-11-13 19:11:28 +00:00
|
|
|
}
|
|
|
|
}
|
2018-09-11 20:09:08 +00:00
|
|
|
|
2017-07-26 23:22:29 +00:00
|
|
|
return
|
|
|
|
}
|
2015-11-13 19:11:28 +00:00
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
func persistProcessResult(datastore database.Datastore, results map[string]*processResult) error {
|
|
|
|
features := []database.Feature{}
|
|
|
|
namespaces := []database.Namespace{}
|
|
|
|
for _, r := range results {
|
|
|
|
features = append(features, r.newLayerContent.GetFeatures()...)
|
|
|
|
namespaces = append(namespaces, r.newLayerContent.GetNamespaces()...)
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-10-02 14:50:53 +00:00
|
|
|
features = database.DeduplicateFeatures(features...)
|
|
|
|
namespaces = database.DeduplicateNamespaces(namespaces...)
|
2018-10-08 17:12:18 +00:00
|
|
|
if err := database.PersistNamespacesAndCommit(datastore, namespaces); err != nil {
|
2018-09-20 19:57:06 +00:00
|
|
|
return err
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-10-08 17:12:18 +00:00
|
|
|
if err := database.PersistFeaturesAndCommit(datastore, features); err != nil {
|
2018-09-20 19:57:06 +00:00
|
|
|
return err
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
for _, layer := range results {
|
2018-10-08 17:12:18 +00:00
|
|
|
if err := database.PersistPartialLayerAndCommit(datastore, layer.newLayerContent); err != nil {
|
2018-09-20 19:57:06 +00:00
|
|
|
return err
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
return nil
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
// processLayers processes a set of post layer requests, stores layers and
|
|
|
|
// returns an ordered list of processed layers with detected features and
|
|
|
|
// namespaces.
|
|
|
|
func processLayers(datastore database.Datastore, imageFormat string, requests []LayerRequest) ([]database.Layer, error) {
|
|
|
|
var (
|
|
|
|
reqMap = make(map[string]*processRequest)
|
|
|
|
err error
|
|
|
|
)
|
2015-11-13 19:11:28 +00:00
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
for _, r := range requests {
|
|
|
|
reqMap[r.Hash], err = getProcessRequest(datastore, r)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
2018-09-11 20:09:08 +00:00
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
results, err := processRequests(imageFormat, reqMap)
|
2015-11-13 19:11:28 +00:00
|
|
|
if err != nil {
|
2018-09-20 19:57:06 +00:00
|
|
|
return nil, err
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
if err := persistProcessResult(datastore, results); err != nil {
|
|
|
|
return nil, err
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
completeLayers := getProcessResultLayers(results)
|
|
|
|
layers := make([]database.Layer, 0, len(requests))
|
|
|
|
for _, r := range requests {
|
|
|
|
layers = append(layers, completeLayers[r.Hash])
|
2017-01-13 21:48:12 +00:00
|
|
|
}
|
2015-11-13 19:11:28 +00:00
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
return layers, nil
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
func getProcessResultLayers(results map[string]*processResult) map[string]database.Layer {
|
|
|
|
layers := map[string]database.Layer{}
|
|
|
|
for name, r := range results {
|
2018-10-02 14:50:53 +00:00
|
|
|
layers[name] = *database.MergeLayers(r.existingLayer, r.newLayerContent)
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
return layers
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func isAncestryProcessed(datastore database.Datastore, name string) (bool, error) {
|
2018-10-08 17:12:18 +00:00
|
|
|
ancestry, ok, err := database.FindAncestryAndRollback(datastore, name)
|
2018-09-20 19:57:06 +00:00
|
|
|
if err != nil || !ok {
|
|
|
|
return ok, err
|
2015-11-13 19:11:28 +00:00
|
|
|
}
|
2017-06-22 18:01:41 +00:00
|
|
|
|
2018-10-02 14:50:53 +00:00
|
|
|
return len(database.DiffDetectors(EnabledDetectors, ancestry.By)) == 0, nil
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// ProcessAncestry downloads and scans an ancestry if it's not scanned by all
|
|
|
|
// enabled processors in this instance of Clair.
|
|
|
|
func ProcessAncestry(datastore database.Datastore, imageFormat, name string, layerRequest []LayerRequest) error {
|
2018-09-05 15:34:49 +00:00
|
|
|
var (
|
2018-09-20 19:57:06 +00:00
|
|
|
err error
|
|
|
|
ok bool
|
|
|
|
layers []database.Layer
|
2018-09-05 15:34:49 +00:00
|
|
|
)
|
|
|
|
|
2017-07-26 23:22:29 +00:00
|
|
|
if name == "" {
|
|
|
|
return commonerr.NewBadRequestError("could not process a layer which does not have a name")
|
2016-01-08 15:27:30 +00:00
|
|
|
}
|
|
|
|
|
2017-07-26 23:22:29 +00:00
|
|
|
if imageFormat == "" {
|
|
|
|
return commonerr.NewBadRequestError("could not process a layer which does not have a format")
|
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
log.WithField("ancestry", name).Debug("start processing ancestry...")
|
2018-09-05 15:34:49 +00:00
|
|
|
if ok, err = isAncestryProcessed(datastore, name); err != nil {
|
2018-09-20 19:57:06 +00:00
|
|
|
log.WithError(err).Error("could not determine if ancestry is processed")
|
2018-09-05 15:34:49 +00:00
|
|
|
return err
|
|
|
|
} else if ok {
|
2018-09-20 19:57:06 +00:00
|
|
|
log.WithField("ancestry", name).Debug("ancestry is already processed")
|
2017-07-26 23:22:29 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-09-05 15:34:49 +00:00
|
|
|
if layers, err = processLayers(datastore, imageFormat, layerRequest); err != nil {
|
2017-07-26 23:22:29 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
return processAncestry(datastore, name, layers)
|
2018-09-05 15:34:49 +00:00
|
|
|
}
|
2017-05-30 17:45:14 +00:00
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
func processAncestry(datastore database.Datastore, name string, layers []database.Layer) error {
|
2018-09-05 15:34:49 +00:00
|
|
|
var (
|
2018-09-20 19:57:06 +00:00
|
|
|
ancestry = database.Ancestry{Name: name}
|
2018-09-05 15:34:49 +00:00
|
|
|
err error
|
|
|
|
)
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
ancestry.Layers, ancestry.By, err = computeAncestryLayers(layers)
|
2018-09-05 15:34:49 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2015-11-13 19:11:28 +00:00
|
|
|
}
|
|
|
|
|
2018-10-02 14:50:53 +00:00
|
|
|
ancestryFeatures := database.GetAncestryFeatures(ancestry)
|
2017-07-26 23:22:29 +00:00
|
|
|
log.WithFields(log.Fields{
|
2018-09-20 19:57:06 +00:00
|
|
|
"ancestry": name,
|
|
|
|
"processed by": EnabledDetectors,
|
|
|
|
"features count": len(ancestryFeatures),
|
|
|
|
"layer count": len(ancestry.Layers),
|
2017-07-26 23:22:29 +00:00
|
|
|
}).Debug("compute ancestry features")
|
|
|
|
|
2018-10-08 17:12:18 +00:00
|
|
|
if err := database.PersistNamespacedFeaturesAndCommit(datastore, ancestryFeatures); err != nil {
|
2018-09-20 19:57:06 +00:00
|
|
|
log.WithField("ancestry", name).WithError(err).Error("could not persist namespaced features for ancestry")
|
2017-07-26 23:22:29 +00:00
|
|
|
return err
|
2017-06-22 18:01:41 +00:00
|
|
|
}
|
|
|
|
|
2018-10-08 17:12:18 +00:00
|
|
|
if err := database.CacheRelatedVulnerabilityAndCommit(datastore, ancestryFeatures); err != nil {
|
2018-09-20 19:57:06 +00:00
|
|
|
log.WithField("ancestry", name).WithError(err).Error("failed to cache feature related vulnerability")
|
2017-07-26 23:22:29 +00:00
|
|
|
return err
|
2017-05-30 17:45:14 +00:00
|
|
|
}
|
2017-07-26 23:22:29 +00:00
|
|
|
|
2018-10-08 17:12:18 +00:00
|
|
|
if err := database.UpsertAncestryAndCommit(datastore, ancestry); err != nil {
|
2018-09-20 19:57:06 +00:00
|
|
|
log.WithField("ancestry", name).WithError(err).Error("could not upsert ancestry")
|
2017-07-26 23:22:29 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
func getCommonDetectors(layers []database.Layer) mapset.Set {
|
|
|
|
// find the common detector for all layers and filter the namespaces and
|
|
|
|
// features based on that.
|
|
|
|
commonDetectors := mapset.NewSet()
|
|
|
|
for _, d := range layers[0].By {
|
|
|
|
commonDetectors.Add(d)
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
for _, l := range layers {
|
|
|
|
detectors := mapset.NewSet()
|
|
|
|
for _, d := range l.By {
|
|
|
|
detectors.Add(d)
|
2016-05-11 22:13:00 +00:00
|
|
|
}
|
2018-09-20 19:57:06 +00:00
|
|
|
|
|
|
|
commonDetectors = commonDetectors.Intersect(detectors)
|
2016-05-11 22:13:00 +00:00
|
|
|
}
|
2018-09-05 15:34:49 +00:00
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
return commonDetectors
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
2016-05-11 22:13:00 +00:00
|
|
|
|
2018-09-05 15:34:49 +00:00
|
|
|
// computeAncestryLayers computes ancestry's layers along with what features are
// introduced.
//
// A feature is "introduced" by the first (lowest-index) layer in which it is
// detected; layers re-detecting the same feature inherit the original
// introduction record. Only namespaces and features found by detectors common
// to all layers (see getCommonDetectors) are considered. Returns the ancestry
// layers (parallel to the input), the list of common detectors, and an error
// if a feature has no namespace matching its version format.
func computeAncestryLayers(layers []database.Layer) ([]database.AncestryLayer, []database.Detector, error) {
	if len(layers) == 0 {
		return nil, nil, nil
	}

	commonDetectors := getCommonDetectors(layers)
	// version format -> namespace
	namespaces := map[string]database.LayerNamespace{}
	// version format -> feature ID -> feature
	features := map[string]map[string]introducedFeature{}
	ancestryLayers := []database.AncestryLayer{}
	for index, layer := range layers {
		initializedLayer := database.AncestryLayer{Hash: layer.Hash}
		ancestryLayers = append(ancestryLayers, initializedLayer)

		// Precondition: namespaces and features contain the result from union
		// of all parents.
		for _, ns := range layer.Namespaces {
			// Skip namespaces found by detectors not common to all layers.
			if !commonDetectors.Contains(ns.By) {
				continue
			}

			// A later layer's namespace overrides an earlier one for the
			// same version format.
			namespaces[ns.VersionFormat] = ns
		}

		// version format -> feature ID -> feature
		currentFeatures := map[string]map[string]introducedFeature{}
		for _, f := range layer.Features {
			// Skip features found by detectors not common to all layers.
			if !commonDetectors.Contains(f.By) {
				continue
			}

			if ns, ok := namespaces[f.VersionFormat]; ok {
				var currentMap map[string]introducedFeature
				if currentMap, ok = currentFeatures[f.VersionFormat]; !ok {
					currentFeatures[f.VersionFormat] = make(map[string]introducedFeature)
					currentMap = currentFeatures[f.VersionFormat]
				}

				// If an ancestor layer already introduced this exact
				// feature (keyed by name:version), carry that record
				// forward instead of re-introducing it here.
				inherited := false
				if mapF, ok := features[f.VersionFormat]; ok {
					if parentFeature, ok := mapF[f.Name+":"+f.Version]; ok {
						currentMap[f.Name+":"+f.Version] = parentFeature
						inherited = true
					}
				}

				if !inherited {
					// First sighting: record the feature, its namespace at
					// this point in the ancestry, and the introducing layer.
					currentMap[f.Name+":"+f.Version] = introducedFeature{
						feature: database.AncestryFeature{
							NamespacedFeature: database.NamespacedFeature{
								Feature:   f.Feature,
								Namespace: ns.Namespace,
							},
							NamespaceBy: ns.By,
							FeatureBy:   f.By,
						},
						layerIndex: index,
					}
				}

			} else {
				return nil, nil, errors.New("No corresponding version format")
			}
		}

		// NOTE(Sida): we update the feature map in some version format
		// only if there's at least one feature with that version format. This
		// approach won't differentiate feature file removed vs all detectable
		// features removed from that file vs feature file not changed.
		//
		// One way to differentiate (feature file removed or not changed) vs
		// all detectable features removed is to pass in the file status.
		for vf, mapF := range currentFeatures {
			features[vf] = mapF
		}
	}

	// Attach each surviving feature to the ancestry layer that introduced it.
	for _, featureMap := range features {
		for _, feature := range featureMap {
			ancestryLayers[feature.layerIndex].Features = append(
				ancestryLayers[feature.layerIndex].Features,
				feature.feature,
			)
		}
	}

	// Materialize the common detector set into a slice for ancestry.By.
	detectors := make([]database.Detector, 0, commonDetectors.Cardinality())
	for d := range commonDetectors.Iter() {
		detectors = append(detectors, d.(database.Detector))
	}

	return ancestryLayers, detectors, nil
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
func extractRequiredFiles(imageFormat string, req *processRequest) (tarutil.FilesMap, error) {
|
|
|
|
requiredFiles := append(featurefmt.RequiredFilenames(req.detectors), featurens.RequiredFilenames(req.detectors)...)
|
|
|
|
if len(requiredFiles) == 0 {
|
|
|
|
log.WithFields(log.Fields{
|
|
|
|
"layer": req.Hash,
|
|
|
|
"detectors": req.detectors,
|
|
|
|
}).Info("layer requires no file to extract")
|
|
|
|
return make(tarutil.FilesMap), nil
|
2017-07-26 23:22:29 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
files, err := imagefmt.Extract(imageFormat, req.Path, req.Headers, requiredFiles)
|
2017-07-26 23:22:29 +00:00
|
|
|
if err != nil {
|
|
|
|
log.WithError(err).WithFields(log.Fields{
|
2018-09-20 19:57:06 +00:00
|
|
|
"layer": req.Hash,
|
|
|
|
"path": strutil.CleanURL(req.Path),
|
2017-07-26 23:22:29 +00:00
|
|
|
}).Error("failed to extract data from path")
|
2018-09-20 19:57:06 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return files, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// detectContent downloads a layer and detects all features and namespaces.
|
|
|
|
func detectContent(imageFormat string, req *processRequest) (res *processResult) {
|
|
|
|
var (
|
|
|
|
files tarutil.FilesMap
|
|
|
|
layer = database.Layer{Hash: req.Hash, By: req.detectors}
|
|
|
|
)
|
|
|
|
|
|
|
|
res = &processResult{req.existingLayer, &layer, nil}
|
|
|
|
log.WithFields(log.Fields{
|
|
|
|
"layer": req.Hash,
|
|
|
|
"detectors": req.detectors,
|
|
|
|
}).Info("detecting layer content...")
|
|
|
|
|
|
|
|
files, res.err = extractRequiredFiles(imageFormat, req)
|
|
|
|
if res.err != nil {
|
2017-07-26 23:22:29 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
if layer.Namespaces, res.err = featurens.Detect(files, req.detectors); res.err != nil {
|
2017-07-26 23:22:29 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
if layer.Features, res.err = featurefmt.ListFeatures(files, req.detectors); res.err != nil {
|
|
|
|
return
|
2017-06-22 18:01:41 +00:00
|
|
|
}
|
2016-05-11 22:13:00 +00:00
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
log.WithFields(log.Fields{
|
|
|
|
"layer": req.Hash,
|
|
|
|
"detectors": req.detectors,
|
|
|
|
"namespace count": len(layer.Namespaces),
|
|
|
|
"feature count": len(layer.Features),
|
|
|
|
}).Info("processed layer")
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// InitWorker initializes the worker.
|
|
|
|
func InitWorker(datastore database.Datastore) {
|
|
|
|
if len(EnabledDetectors) == 0 {
|
|
|
|
log.Warn("no enabled detector, and therefore, no ancestry will be processed.")
|
2017-07-26 23:22:29 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
tx, err := datastore.Begin()
|
|
|
|
if err != nil {
|
|
|
|
log.WithError(err).Fatal("cannot connect to database to initialize worker")
|
2015-11-13 19:11:28 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 19:57:06 +00:00
|
|
|
defer tx.Rollback()
|
|
|
|
if err := tx.PersistDetectors(EnabledDetectors); err != nil {
|
|
|
|
log.WithError(err).Fatal("cannot insert detectors to initialize worker")
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := tx.Commit(); err != nil {
|
|
|
|
log.WithError(err).Fatal("cannot commit detector changes to initialize worker")
|
|
|
|
}
|
2015-11-13 19:11:28 +00:00
|
|
|
}
|