Merge pull request #619 from KeyboardNerd/sidac/rm_layer

database: Remove LayerWithContent from interface

Commit f98ff58afd
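At a glance, this PR renames the old `Layer` type to `LayerMetadata`, renames `LayerWithContent` to `Layer`, folds `PersistLayer`/`PersistLayerContent` and `FindLayer`/`FindLayerWithContent` into single calls, and renames `MarkNotificationNotified` to `MarkNotificationAsRead`. The following is a reader's sketch of the resulting shapes, distilled from the hunks below rather than taken from the PR itself; `Namespace`, `Feature` and `Processors` are reduced to placeholders so the sketch stands alone.

// sketch.go - reconstructed summary of the reshaped database API; not PR code.
package database

type Namespace struct{}  // placeholder for the real type
type Feature struct{}    // placeholder for the real type
type Processors struct { // trimmed to the fields this PR touches
    Detectors []string
    Listers   []string
}

// LayerMetadata is the new name of the old Layer type: a hash plus the
// processors that produced it.
type LayerMetadata struct {
    Hash        string
    ProcessedBy Processors
}

// Layer is the new name of the old LayerWithContent type; it embeds the
// metadata and carries the detected content.
type Layer struct {
    LayerMetadata

    Namespaces []Namespace
    Features   []Feature
}

// Session shows only the methods this PR changes.
type Session interface {
    // Replaces both PersistLayer(hash) and PersistLayerContent(hash, ...).
    PersistLayer(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error

    // Replaces FindLayer (metadata only) and FindLayerWithContent.
    FindLayer(hash string) (layer Layer, found bool, err error)

    // Renamed from MarkNotificationNotified.
    MarkNotificationAsRead(name string) error
}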
@@ -123,7 +123,7 @@ func VulnerabilityWithFixedInFromDatabaseModel(dbVuln database.VulnerabilityWith
 }
 
 // LayerFromDatabaseModel converts database layer to api layer.
-func LayerFromDatabaseModel(dbLayer database.Layer) *Layer {
+func LayerFromDatabaseModel(dbLayer database.LayerMetadata) *Layer {
 	layer := Layer{Hash: dbLayer.Hash}
 	return &layer
 }
@@ -120,22 +120,16 @@ type Session interface {
 	// PersistNamespaces inserts a set of namespaces if not in the database.
 	PersistNamespaces([]Namespace) error
 
-	// PersistLayer creates a layer using the blob Sum hash.
-	PersistLayer(hash string) error
-
-	// PersistLayerContent persists a layer's content in the database. The given
+	// PersistLayer persists a layer's content in the database. The given
 	// namespaces and features can be partial content of this layer.
 	//
 	// The layer, namespaces and features are expected to be already existing
 	// in the database.
-	PersistLayerContent(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
+	PersistLayer(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
 
-	// FindLayer retrieves the metadata of a layer.
+	// FindLayer returns a layer with all detected features and
+	// namespaces.
 	FindLayer(hash string) (layer Layer, found bool, err error)
 
-	// FindLayerWithContent returns a layer with all detected features and
-	// namespaces.
-	FindLayerWithContent(hash string) (layer LayerWithContent, found bool, err error)
-
 	// InsertVulnerabilities inserts a set of UNIQUE vulnerabilities with
 	// affected features into database, assuming that all vulnerabilities
@@ -167,9 +161,9 @@ type Session interface {
 	// always considered first page.
 	FindVulnerabilityNotification(name string, limit int, oldVulnerabilityPage pagination.Token, newVulnerabilityPage pagination.Token) (noti VulnerabilityNotificationWithVulnerable, found bool, err error)
 
-	// MarkNotificationNotified marks a Notification as notified now, assuming
+	// MarkNotificationAsRead marks a Notification as notified now, assuming
 	// the requested notification is in the database.
-	MarkNotificationNotified(name string) error
+	MarkNotificationAsRead(name string) error
 
 	// DeleteNotification removes a Notification in the database.
 	DeleteNotification(name string) error
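For orientation, here is a minimal sketch of how a caller drives the consolidated call; it mirrors the Begin/Rollback/Commit pattern that persistPartialLayer uses further down in worker.go. The function name and error handling are illustrative, not from the PR, and the import path is an assumption based on the repository layout.

// persist_sketch.go - illustrative caller of the new Session.PersistLayer; not PR code.
package clair

import "github.com/coreos/clair/database" // assumed import path

// persistLayerSketch stores a layer's (possibly partial) content in one call.
// The namespaces and features must already exist in the database, which is why
// they are persisted first inside the same transaction.
func persistLayerSketch(datastore database.Datastore, hash string,
    namespaces []database.Namespace, features []database.Feature,
    processedBy database.Processors) error {

    tx, err := datastore.Begin()
    if err != nil {
        return err
    }
    // Rollback is a no-op once Commit has succeeded.
    defer tx.Rollback()

    if err := tx.PersistNamespaces(namespaces); err != nil {
        return err
    }
    if err := tx.PersistFeatures(features); err != nil {
        return err
    }
    if err := tx.PersistLayer(hash, namespaces, features, processedBy); err != nil {
        return err
    }
    return tx.Commit()
}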
@@ -32,10 +32,8 @@ type MockSession struct {
 	FctPersistFeatures func([]Feature) error
 	FctPersistNamespacedFeatures func([]NamespacedFeature) error
 	FctCacheAffectedNamespacedFeatures func([]NamespacedFeature) error
-	FctPersistLayer func(hash string) error
-	FctPersistLayerContent func(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
+	FctPersistLayer func(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
 	FctFindLayer func(name string) (Layer, bool, error)
-	FctFindLayerWithContent func(name string) (LayerWithContent, bool, error)
 	FctInsertVulnerabilities func([]VulnerabilityWithAffected) error
 	FctFindVulnerabilities func([]VulnerabilityID) ([]NullableVulnerability, error)
 	FctDeleteVulnerabilities func([]VulnerabilityID) error
@@ -43,7 +41,7 @@ type MockSession struct {
 	FctFindNewNotification func(lastNotified time.Time) (NotificationHook, bool, error)
 	FctFindVulnerabilityNotification func(name string, limit int, oldPage pagination.Token, newPage pagination.Token) (
 		vuln VulnerabilityNotificationWithVulnerable, ok bool, err error)
-	FctMarkNotificationNotified func(name string) error
+	FctMarkNotificationAsRead func(name string) error
 	FctDeleteNotification func(name string) error
 	FctUpdateKeyValue func(key, value string) error
 	FctFindKeyValue func(key string) (string, bool, error)
@@ -115,16 +113,9 @@ func (ms *MockSession) CacheAffectedNamespacedFeatures(namespacedFeatures []Name
 	panic("required mock function not implemented")
 }
 
-func (ms *MockSession) PersistLayer(layer string) error {
+func (ms *MockSession) PersistLayer(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error {
 	if ms.FctPersistLayer != nil {
-		return ms.FctPersistLayer(layer)
-	}
-	panic("required mock function not implemented")
-}
-
-func (ms *MockSession) PersistLayerContent(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error {
-	if ms.FctPersistLayerContent != nil {
-		return ms.FctPersistLayerContent(hash, namespaces, features, processedBy)
+		return ms.FctPersistLayer(hash, namespaces, features, processedBy)
 	}
 	panic("required mock function not implemented")
 }
@@ -136,13 +127,6 @@ func (ms *MockSession) FindLayer(name string) (Layer, bool, error) {
 	panic("required mock function not implemented")
 }
 
-func (ms *MockSession) FindLayerWithContent(name string) (LayerWithContent, bool, error) {
-	if ms.FctFindLayerWithContent != nil {
-		return ms.FctFindLayerWithContent(name)
-	}
-	panic("required mock function not implemented")
-}
-
 func (ms *MockSession) InsertVulnerabilities(vulnerabilities []VulnerabilityWithAffected) error {
 	if ms.FctInsertVulnerabilities != nil {
 		return ms.FctInsertVulnerabilities(vulnerabilities)
@@ -186,9 +170,9 @@ func (ms *MockSession) FindVulnerabilityNotification(name string, limit int, old
 	panic("required mock function not implemented")
 }
 
-func (ms *MockSession) MarkNotificationNotified(name string) error {
-	if ms.FctMarkNotificationNotified != nil {
-		return ms.FctMarkNotificationNotified(name)
+func (ms *MockSession) MarkNotificationAsRead(name string) error {
+	if ms.FctMarkNotificationAsRead != nil {
+		return ms.FctMarkNotificationAsRead(name)
 	}
 	panic("required mock function not implemented")
 }
@@ -41,25 +41,25 @@ type Ancestry struct {
 
 // AncestryLayer is a layer with all detected namespaced features.
 type AncestryLayer struct {
-	Layer
+	LayerMetadata
 
 	// DetectedFeatures are the features introduced by this layer when it was
 	// processed.
 	DetectedFeatures []NamespacedFeature
 }
 
-// Layer contains the metadata of a layer.
-type Layer struct {
+// LayerMetadata contains the metadata of a layer.
+type LayerMetadata struct {
 	// Hash is content hash of the layer.
 	Hash string
 	// ProcessedBy contains the processors that processed this layer.
 	ProcessedBy Processors
 }
 
-// LayerWithContent is a layer with its detected namespaces and features by
+// Layer is a layer with its detected namespaces and features by
 // ProcessedBy.
-type LayerWithContent struct {
-	Layer
+type Layer struct {
+	LayerMetadata
 
 	Namespaces []Namespace
 	Features   []Feature
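Since the renamed types rely on struct embedding, a short sketch of how they compose may help; the literal style matches the tests later in this diff, while the concrete values and the import path are illustrative assumptions.

// models_sketch.go - illustrative construction of the renamed types; not PR code.
package main

import (
    "fmt"

    "github.com/coreos/clair/database" // assumed import path
)

func main() {
    layer := database.Layer{
        // LayerMetadata is embedded, so literals set it as a named field...
        LayerMetadata: database.LayerMetadata{
            Hash: "layer-0",
            ProcessedBy: database.Processors{
                Detectors: []string{"os-release"},
                Listers:   []string{"dpkg"},
            },
        },
        Namespaces: []database.Namespace{{Name: "debian:8", VersionFormat: "dpkg"}},
        Features:   []database.Feature{{Name: "openssl", Version: "1.0", VersionFormat: "dpkg"}},
    }

    // ...but its fields are promoted, so callers keep the old field access.
    fmt.Println(layer.Hash, layer.ProcessedBy.Detectors)

    // AncestryLayer embeds the same metadata plus the namespaced features it introduced.
    ancestryLayer := database.AncestryLayer{
        LayerMetadata:    layer.LayerMetadata,
        DetectedFeatures: []database.NamespacedFeature{},
    }
    fmt.Println(ancestryLayer.Hash)
}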
@@ -170,7 +170,7 @@ func (tx *pgSession) findAncestryLayers(id int64) ([]database.AncestryLayer, err
 	}
 
 	if !index.Valid || !id.Valid {
-		return nil, commonerr.ErrNotFound
+		panic("null ancestry ID or ancestry index violates database constraints")
 	}
 
 	if _, ok := layers[index.Int64]; ok {
@@ -30,7 +30,7 @@ func TestUpsertAncestry(t *testing.T) {
 			Name: "a1",
 			Layers: []database.AncestryLayer{
 				{
-					Layer: database.Layer{
+					LayerMetadata: database.LayerMetadata{
 						Hash: "layer-N",
 					},
 				},
@@ -43,7 +43,7 @@ func TestUpsertAncestry(t *testing.T) {
 			Name: "a",
 			Layers: []database.AncestryLayer{
 				{
-					Layer: database.Layer{
+					LayerMetadata: database.LayerMetadata{
 						Hash: "layer-0",
 					},
 				},
@@ -54,7 +54,7 @@ func TestUpsertAncestry(t *testing.T) {
 			Name: "a",
 			Layers: []database.AncestryLayer{
 				{
-					Layer: database.Layer{
+					LayerMetadata: database.LayerMetadata{
 						Hash: "layer-1",
 					},
 				},
@@ -137,7 +137,7 @@ func assertAncestryEqual(t *testing.T, expected database.Ancestry, actual databa
 }
 
 func assertAncestryLayerEqual(t *testing.T, expected database.AncestryLayer, actual database.AncestryLayer) bool {
-	return assertLayerEqual(t, expected.Layer, actual.Layer) &&
+	return assertLayerEqual(t, expected.LayerMetadata, actual.LayerMetadata) &&
 		assertNamespacedFeatureEqual(t, expected.DetectedFeatures, actual.DetectedFeatures)
 }
 
@@ -159,7 +159,7 @@ func TestFindAncestry(t *testing.T) {
 		},
 		Layers: []database.AncestryLayer{
 			{
-				Layer: database.Layer{
+				LayerMetadata: database.LayerMetadata{
 					Hash: "layer-0",
 				},
 				DetectedFeatures: []database.NamespacedFeature{
@@ -188,17 +188,17 @@ func TestFindAncestry(t *testing.T) {
 				},
 			},
 			{
-				Layer: database.Layer{
+				LayerMetadata: database.LayerMetadata{
 					Hash: "layer-1",
 				},
 			},
 			{
-				Layer: database.Layer{
+				LayerMetadata: database.LayerMetadata{
 					Hash: "layer-2",
 				},
 			},
 			{
-				Layer: database.Layer{
+				LayerMetadata: database.LayerMetadata{
 					Hash: "layer-3b",
 				},
 			},
@@ -23,19 +23,14 @@ import (
 )
 
 func (tx *pgSession) FindLayer(hash string) (database.Layer, bool, error) {
-	layer, _, ok, err := tx.findLayer(hash)
-	return layer, ok, err
-}
-
-func (tx *pgSession) FindLayerWithContent(hash string) (database.LayerWithContent, bool, error) {
 	var (
-		layer   database.LayerWithContent
+		layer   database.Layer
 		layerID int64
 		ok      bool
 		err     error
 	)
 
-	layer.Layer, layerID, ok, err = tx.findLayer(hash)
+	layer.LayerMetadata, layerID, ok, err = tx.findLayer(hash)
 	if err != nil {
 		return layer, false, err
 	}
@@ -49,46 +44,53 @@ func (tx *pgSession) FindLayerWithContent(hash string) (database.LayerWithConten
 	return layer, true, nil
 }
 
-func (tx *pgSession) PersistLayer(hash string) error {
+func (tx *pgSession) persistLayer(hash string) (int64, error) {
 	if hash == "" {
-		return commonerr.NewBadRequestError("Empty Layer Hash is not allowed")
+		return -1, commonerr.NewBadRequestError("Empty Layer Hash is not allowed")
 	}
 
-	_, err := tx.Exec(queryPersistLayer(1), hash)
-	if err != nil {
-		return handleError("queryPersistLayer", err)
+	id := sql.NullInt64{}
+	if err := tx.QueryRow(soiLayer, hash).Scan(&id); err != nil {
+		return -1, handleError("queryPersistLayer", err)
 	}
 
-	return nil
+	if !id.Valid {
+		panic("null layer.id violates database constraint")
+	}
+
+	return id.Int64, nil
 }
 
-// PersistLayerContent relates layer identified by hash with namespaces,
+// PersistLayer relates layer identified by hash with namespaces,
 // features and processors provided. If the layer, namespaces, features are not
 // in database, the function returns an error.
-func (tx *pgSession) PersistLayerContent(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
+func (tx *pgSession) PersistLayer(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
 	if hash == "" {
 		return commonerr.NewBadRequestError("Empty layer hash is not allowed")
 	}
 
-	var layerID int64
-	err := tx.QueryRow(searchLayer, hash).Scan(&layerID)
-	if err != nil {
+	var (
+		err error
+		id  int64
+	)
+
+	if id, err = tx.persistLayer(hash); err != nil {
 		return err
 	}
 
-	if err = tx.persistLayerNamespace(layerID, namespaces); err != nil {
+	if err = tx.persistLayerNamespace(id, namespaces); err != nil {
 		return err
 	}
 
-	if err = tx.persistLayerFeatures(layerID, features); err != nil {
+	if err = tx.persistLayerFeatures(id, features); err != nil {
 		return err
 	}
 
-	if err = tx.persistLayerDetectors(layerID, processedBy.Detectors); err != nil {
+	if err = tx.persistLayerDetectors(id, processedBy.Detectors); err != nil {
 		return err
 	}
 
-	if err = tx.persistLayerListers(layerID, processedBy.Listers); err != nil {
+	if err = tx.persistLayerListers(id, processedBy.Listers); err != nil {
 		return err
 	}
 
@@ -275,10 +277,10 @@ func (tx *pgSession) findLayerFeatures(layerID int64) ([]database.Feature, error
 	return features, nil
 }
 
-func (tx *pgSession) findLayer(hash string) (database.Layer, int64, bool, error) {
+func (tx *pgSession) findLayer(hash string) (database.LayerMetadata, int64, bool, error) {
 	var (
 		layerID int64
-		layer   = database.Layer{Hash: hash, ProcessedBy: database.Processors{}}
+		layer   = database.LayerMetadata{Hash: hash, ProcessedBy: database.Processors{}}
 	)
 
 	if hash == "" {
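One property worth calling out: because soiLayer either inserts the hash or selects the id of the existing row, the new unexported persistLayer helper should hand back the same id for repeated hashes. A test-style sketch of that expectation follows; it is not part of the PR, it reuses the openSessionForTest/closeTest helpers from the test file below, and the idempotency claim is an inference from the query rather than an assertion made by the PR.

// Sketch, not PR code: in-package (pgsql) check that persistLayer is idempotent.
func TestPersistLayerIdempotentSketch(t *testing.T) {
    datastore, tx := openSessionForTest(t, "PersistLayerIdempotent", false)
    defer closeTest(t, datastore, tx)

    first, err := tx.persistLayer("some-layer-hash")
    assert.Nil(t, err)

    second, err := tx.persistLayer("some-layer-hash")
    assert.Nil(t, err)

    // soiLayer either inserts the row or selects the existing one,
    // so the same hash should always map to the same id.
    assert.Equal(t, first, second)
}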
@@ -26,65 +26,72 @@ func TestPersistLayer(t *testing.T) {
 	datastore, tx := openSessionForTest(t, "PersistLayer", false)
 	defer closeTest(t, datastore, tx)
 
-	l1 := ""
-	l2 := "HESOYAM"
-
-	// invalid
-	assert.NotNil(t, tx.PersistLayer(l1))
-	// valid
-	assert.Nil(t, tx.PersistLayer(l2))
-	// duplicated
-	assert.Nil(t, tx.PersistLayer(l2))
-}
-
-func TestPersistLayerProcessors(t *testing.T) {
-	datastore, tx := openSessionForTest(t, "PersistLayerProcessors", true)
-	defer closeTest(t, datastore, tx)
-
-	// invalid
-	assert.NotNil(t, tx.PersistLayerContent("hash", []database.Namespace{}, []database.Feature{}, database.Processors{}))
-	// valid
-	assert.Nil(t, tx.PersistLayerContent("layer-4", []database.Namespace{}, []database.Feature{}, database.Processors{Detectors: []string{"new detector!"}}))
-}
-
-func TestFindLayer(t *testing.T) {
-	datastore, tx := openSessionForTest(t, "FindLayer", true)
-	defer closeTest(t, datastore, tx)
-
-	expected := database.Layer{
-		Hash: "layer-4",
-		ProcessedBy: database.Processors{
-			Detectors: []string{"os-release", "apt-sources"},
-			Listers:   []string{"dpkg", "rpm"},
-		},
-	}
-
-	// invalid
-	_, _, err := tx.FindLayer("")
-	assert.NotNil(t, err)
-	_, ok, err := tx.FindLayer("layer-non")
-	assert.Nil(t, err)
-	assert.False(t, ok)
-
-	// valid
-	layer, ok2, err := tx.FindLayer("layer-4")
-	if assert.Nil(t, err) && assert.True(t, ok2) {
-		assertLayerEqual(t, expected, layer)
-	}
-}
-
-func TestFindLayerWithContent(t *testing.T) {
-	datastore, tx := openSessionForTest(t, "FindLayerWithContent", true)
-	defer closeTest(t, datastore, tx)
-
-	_, _, err := tx.FindLayerWithContent("")
-	assert.NotNil(t, err)
-	_, ok, err := tx.FindLayerWithContent("layer-non")
-	assert.Nil(t, err)
-	assert.False(t, ok)
-
-	expectedL := database.LayerWithContent{
-		Layer: database.Layer{
+	// invalid
+	assert.NotNil(t, tx.PersistLayer("", nil, nil, database.Processors{}))
+	// insert namespaces + features to
+	namespaces := []database.Namespace{
+		{
+			Name:          "sushi shop",
+			VersionFormat: "apk",
+		},
+	}
+
+	features := []database.Feature{
+		{
+			Name:          "blue fin sashimi",
+			Version:       "v1.0",
+			VersionFormat: "apk",
+		},
+	}
+
+	processors := database.Processors{
+		Listers:   []string{"release"},
+		Detectors: []string{"apk"},
+	}
+
+	assert.Nil(t, tx.PersistNamespaces(namespaces))
+	assert.Nil(t, tx.PersistFeatures(features))
+
+	// Valid
+	assert.Nil(t, tx.PersistLayer("RANDOM_FOREST", namespaces, features, processors))
+
+	nonExistingFeature := []database.Feature{{Name: "lobster sushi", Version: "v0.1", VersionFormat: "apk"}}
+	// Invalid:
+	assert.NotNil(t, tx.PersistLayer("RANDOM_FOREST", namespaces, nonExistingFeature, processors))
+
+	assert.Nil(t, tx.PersistFeatures(nonExistingFeature))
+	// Update the layer
+	assert.Nil(t, tx.PersistLayer("RANDOM_FOREST", namespaces, nonExistingFeature, processors))
+
+	// confirm update
+	layer, ok, err := tx.FindLayer("RANDOM_FOREST")
+	assert.Nil(t, err)
+	assert.True(t, ok)
+
+	expectedLayer := database.Layer{
+		LayerMetadata: database.LayerMetadata{
+			Hash:        "RANDOM_FOREST",
+			ProcessedBy: processors,
+		},
+		Features:   append(features, nonExistingFeature...),
+		Namespaces: namespaces,
+	}
+
+	assertLayerWithContentEqual(t, expectedLayer, layer)
+}
+
+func TestFindLayer(t *testing.T) {
+	datastore, tx := openSessionForTest(t, "FindLayer", true)
+	defer closeTest(t, datastore, tx)
+
+	_, _, err := tx.FindLayer("")
+	assert.NotNil(t, err)
+	_, ok, err := tx.FindLayer("layer-non")
+	assert.Nil(t, err)
+	assert.False(t, ok)
+
+	expectedL := database.Layer{
+		LayerMetadata: database.LayerMetadata{
 			Hash: "layer-4",
 			ProcessedBy: database.Processors{
 				Detectors: []string{"os-release", "apt-sources"},
@@ -101,19 +108,19 @@ func TestFindLayerWithContent(t *testing.T) {
 		},
 	}
 
-	layer, ok2, err := tx.FindLayerWithContent("layer-4")
+	layer, ok2, err := tx.FindLayer("layer-4")
 	if assert.Nil(t, err) && assert.True(t, ok2) {
 		assertLayerWithContentEqual(t, expectedL, layer)
 	}
 }
 
-func assertLayerWithContentEqual(t *testing.T, expected database.LayerWithContent, actual database.LayerWithContent) bool {
-	return assertLayerEqual(t, expected.Layer, actual.Layer) &&
+func assertLayerWithContentEqual(t *testing.T, expected database.Layer, actual database.Layer) bool {
+	return assertLayerEqual(t, expected.LayerMetadata, actual.LayerMetadata) &&
 		assertFeaturesEqual(t, expected.Features, actual.Features) &&
 		assertNamespacesEqual(t, expected.Namespaces, actual.Namespaces)
 }
 
-func assertLayerEqual(t *testing.T, expected database.Layer, actual database.Layer) bool {
+func assertLayerEqual(t *testing.T, expected database.LayerMetadata, actual database.LayerMetadata) bool {
 	return assertProcessorsEqual(t, expected.ProcessedBy, actual.ProcessedBy) &&
 		assert.Equal(t, expected.Hash, actual.Hash)
 }
@@ -289,23 +289,23 @@ func (tx *pgSession) FindVulnerabilityNotification(name string, limit int, oldPa
 	return noti, true, nil
 }
 
-func (tx *pgSession) MarkNotificationNotified(name string) error {
+func (tx *pgSession) MarkNotificationAsRead(name string) error {
 	if name == "" {
 		return commonerr.NewBadRequestError("Empty notification name is not allowed")
 	}
 
-	r, err := tx.Exec(updatedNotificationNotified, name)
+	r, err := tx.Exec(updatedNotificationAsRead, name)
 	if err != nil {
-		return handleError("updatedNotificationNotified", err)
+		return handleError("updatedNotificationAsRead", err)
 	}
 
 	affected, err := r.RowsAffected()
 	if err != nil {
-		return handleError("updatedNotificationNotified", err)
+		return handleError("updatedNotificationAsRead", err)
 	}
 
 	if affected <= 0 {
-		return handleError("updatedNotificationNotified", errNotificationNotFound)
+		return handleError("updatedNotificationAsRead", errNotificationNotFound)
 	}
 	return nil
 }
@@ -200,7 +200,7 @@ func TestFindNewNotification(t *testing.T) {
 	}
 
 	// can't find the notified
-	assert.Nil(t, tx.MarkNotificationNotified("test"))
+	assert.Nil(t, tx.MarkNotificationAsRead("test"))
 	// if the notified time is before
 	noti, ok, err = tx.FindNewNotification(time.Now().Add(-time.Duration(10 * time.Second)))
 	assert.Nil(t, err)
@@ -225,16 +225,16 @@ func TestFindNewNotification(t *testing.T) {
 	assert.False(t, ok)
 }
 
-func TestMarkNotificationNotified(t *testing.T) {
-	datastore, tx := openSessionForTest(t, "MarkNotificationNotified", true)
+func TestMarkNotificationAsRead(t *testing.T) {
+	datastore, tx := openSessionForTest(t, "MarkNotificationAsRead", true)
 	defer closeTest(t, datastore, tx)
 
 	// invalid case: notification doesn't exist
-	assert.NotNil(t, tx.MarkNotificationNotified("non-existing"))
+	assert.NotNil(t, tx.MarkNotificationAsRead("non-existing"))
 	// valid case
-	assert.Nil(t, tx.MarkNotificationNotified("test"))
+	assert.Nil(t, tx.MarkNotificationAsRead("test"))
 	// valid case
-	assert.Nil(t, tx.MarkNotificationNotified("test"))
+	assert.Nil(t, tx.MarkNotificationAsRead("test"))
 }
 
 func TestDeleteNotification(t *testing.T) {
@@ -73,7 +73,16 @@ const (
 		AND v.deleted_at IS NULL`
 
 	// layer.go
-	searchLayerIDs = `SELECT id, hash FROM layer WHERE hash = ANY($1);`
+	soiLayer = `
+		WITH new_layer AS (
+			INSERT INTO layer (hash)
+			SELECT CAST ($1 AS VARCHAR)
+			WHERE NOT EXISTS (SELECT id FROM layer WHERE hash = $1)
+			RETURNING id
+		)
+		SELECT id FROM new_Layer
+		UNION
+		SELECT id FROM layer WHERE hash = $1`
 
 	searchLayerFeatures = `
 		SELECT feature.Name, feature.Version, feature.version_format
@@ -168,7 +177,7 @@ const (
 	INSERT INTO Vulnerability_Notification(name, created_at, old_vulnerability_id, new_vulnerability_id)
 	VALUES ($1, $2, $3, $4)`
 
-	updatedNotificationNotified = `
+	updatedNotificationAsRead = `
 	UPDATE Vulnerability_Notification
 	SET notified_at = CURRENT_TIMESTAMP
 	WHERE name = $1`
@@ -16,7 +16,6 @@ package pgsql
 
 import (
 	"database/sql"
-	"encoding/json"
 	"errors"
 	"time"
 
@@ -220,17 +219,6 @@ func (tx *pgSession) insertVulnerabilities(vulnerabilities []database.Vulnerabil
 	return vulnIDs, nil
 }
 
-// castMetadata marshals the given database.MetadataMap and unmarshals it again to make sure that
-// everything has the interface{} type.
-// It is required when comparing crafted MetadataMap against MetadataMap that we get from the
-// database.
-func castMetadata(m database.MetadataMap) database.MetadataMap {
-	c := make(database.MetadataMap)
-	j, _ := json.Marshal(m)
-	json.Unmarshal(j, &c)
-	return c
-}
-
 func (tx *pgSession) lockFeatureVulnerabilityCache() error {
 	_, err := tx.Exec(lockVulnerabilityAffects)
 	if err != nil {
@@ -93,7 +93,7 @@ func RunNotifier(config *notification.Config, datastore database.Datastore, stop
 		go func() {
 			success, interrupted := handleTask(*notification, stopper, config.Attempts)
 			if success {
-				err := markNotificationNotified(datastore, notification.Name)
+				err := markNotificationAsRead(datastore, notification.Name)
 				if err != nil {
 					log.WithError(err).Error("Failed to mark notification notified")
 				}
@@ -196,14 +196,14 @@ func findNewNotification(datastore database.Datastore, renotifyInterval time.Dur
 	return tx.FindNewNotification(time.Now().Add(-renotifyInterval))
 }
 
-func markNotificationNotified(datastore database.Datastore, name string) error {
+func markNotificationAsRead(datastore database.Datastore, name string) error {
 	tx, err := datastore.Begin()
 	if err != nil {
 		log.WithError(err).Error("an error happens when beginning database transaction")
 	}
 	defer tx.Rollback()
 
-	if err := tx.MarkNotificationNotified(name); err != nil {
+	if err := tx.MarkNotificationAsRead(name); err != nil {
 		return err
 	}
 	return tx.Commit()
worker.go
@@ -1,4 +1,4 @@
-// Copyright 2017 clair authors
+// Copyright 2018 clair authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -146,32 +146,29 @@ func processRequests(imageFormat string, toDetect []processRequest) ([]database.
 	return namespaces, features, updates, nil
 }
 
-func getLayer(datastore database.Datastore, req LayerRequest) (layer database.LayerWithContent, preq *processRequest, err error) {
-	var ok bool
-	tx, err := datastore.Begin()
-	if err != nil {
+func getLayer(datastore database.Datastore, req LayerRequest) (layer database.Layer, preq *processRequest, err error) {
+	var (
+		tx database.Session
+		ok bool
+	)
+
+	if tx, err = datastore.Begin(); err != nil {
 		return
 	}
 
 	defer tx.Rollback()
 
-	layer, ok, err = tx.FindLayerWithContent(req.Hash)
-	if err != nil {
+	if layer, ok, err = tx.FindLayer(req.Hash); err != nil {
 		return
 	}
 
 	if !ok {
-		err = tx.PersistLayer(req.Hash)
-		if err != nil {
-			return
-		}
-
-		if err = tx.Commit(); err != nil {
-			return
-		}
-
-		layer = database.LayerWithContent{}
-		layer.Hash = req.Hash
+		layer = database.Layer{
+			LayerMetadata: database.LayerMetadata{
+				Hash: req.Hash,
+			},
+		}
+
 		preq = &processRequest{
 			request: req,
 			notProcessedBy: Processors,
@@ -185,15 +182,16 @@ func getLayer(datastore database.Datastore, req LayerRequest) (layer database.La
 			}
 		}
 	}
 
 	return
 }
 
 // processLayers processes a set of post layer requests, stores layers and
 // returns an ordered list of processed layers with detected features and
 // namespaces.
-func processLayers(datastore database.Datastore, imageFormat string, requests []LayerRequest) ([]database.LayerWithContent, error) {
+func processLayers(datastore database.Datastore, imageFormat string, requests []LayerRequest) ([]database.Layer, error) {
 	toDetect := []processRequest{}
-	layers := map[string]database.LayerWithContent{}
+	layers := map[string]database.Layer{}
 	for _, req := range requests {
 		if _, ok := layers[req.Hash]; ok {
 			continue
@@ -208,7 +206,7 @@ func processLayers(datastore database.Datastore, imageFormat string, requests []
 		}
 	}
 
-	namespaces, features, partialRes, err := processRequests(imageFormat, toDetect)
+	namespaces, features, partialLayers, err := processRequests(imageFormat, toDetect)
 	if err != nil {
 		return nil, err
 	}
@@ -222,10 +220,18 @@ func processLayers(datastore database.Datastore, imageFormat string, requests []
 		return nil, err
 	}
 
-	for _, res := range partialRes {
-		if err := persistPartialLayer(datastore, res); err != nil {
+	for _, layer := range partialLayers {
+		if err := persistPartialLayer(datastore, layer); err != nil {
 			return nil, err
 		}
+
+		log.WithFields(log.Fields{
+			"Hash":                layer.hash,
+			"namespace count":     len(layer.namespaces),
+			"feature count":       len(layer.features),
+			"namespace detectors": layer.processedBy.Detectors,
+			"feature listers":     layer.processedBy.Listers,
+		}).Debug("saved layer")
 	}
 
 	// NOTE(Sida): The full layers are computed using partially
@@ -233,9 +239,9 @@ func processLayers(datastore database.Datastore, imageFormat string, requests []
 	// Clair are changing some layers in this set of layers, it might generate
 	// different results especially when the other Clair is with different
 	// processors.
-	completeLayers := []database.LayerWithContent{}
+	completeLayers := []database.Layer{}
 	for _, req := range requests {
-		if partialLayer, ok := partialRes[req.Hash]; ok {
+		if partialLayer, ok := partialLayers[req.Hash]; ok {
 			completeLayers = append(completeLayers, combineLayers(layers[req.Hash], partialLayer))
 		} else {
 			completeLayers = append(completeLayers, layers[req.Hash])
@@ -252,9 +258,10 @@ func persistPartialLayer(datastore database.Datastore, layer partialLayer) error
 	}
 	defer tx.Rollback()
 
-	if err := tx.PersistLayerContent(layer.hash, layer.namespaces, layer.features, layer.processedBy); err != nil {
+	if err := tx.PersistLayer(layer.hash, layer.namespaces, layer.features, layer.processedBy); err != nil {
 		return err
 	}
 
 	return tx.Commit()
 }
@@ -286,7 +293,7 @@ func persistNamespaces(datastore database.Datastore, namespaces []database.Names
 }
 
 // combineLayers merges `layer` and `partial` without duplicated content.
-func combineLayers(layer database.LayerWithContent, partial partialLayer) database.LayerWithContent {
+func combineLayers(layer database.Layer, partial partialLayer) database.Layer {
 	mapF := map[database.Feature]struct{}{}
 	mapNS := map[database.Namespace]struct{}{}
 	for _, f := range layer.Features {
@@ -312,8 +319,8 @@ func combineLayers(layer database.LayerWithContent, partial partialLayer) databa
 
 	layer.ProcessedBy.Detectors = append(layer.ProcessedBy.Detectors, strutil.CompareStringLists(partial.processedBy.Detectors, layer.ProcessedBy.Detectors)...)
 	layer.ProcessedBy.Listers = append(layer.ProcessedBy.Listers, strutil.CompareStringLists(partial.processedBy.Listers, layer.ProcessedBy.Listers)...)
-	return database.LayerWithContent{
-		Layer: database.Layer{
+	return database.Layer{
+		LayerMetadata: database.LayerMetadata{
 			Hash: layer.Hash,
 			ProcessedBy: layer.ProcessedBy,
 		},
@@ -346,7 +353,7 @@ func ProcessAncestry(datastore database.Datastore, imageFormat, name string, lay
 	var (
 		err error
 		ok bool
-		layers []database.LayerWithContent
+		layers []database.Layer
 		commonProcessors database.Processors
 	)
 
@@ -361,7 +368,7 @@ func ProcessAncestry(datastore database.Datastore, imageFormat, name string, lay
 	if ok, err = isAncestryProcessed(datastore, name); err != nil {
 		return err
 	} else if ok {
-		log.WithField("ancestry", name).Debug("Ancestry is processed")
+		log.WithField("name", name).Debug("ancestry is already processed")
 		return nil
 	}
 
@@ -386,7 +393,7 @@ func getNamespacedFeatures(layers []database.AncestryLayer) []database.Namespace
 	return features
 }
 
-func processAncestry(datastore database.Datastore, name string, layers []database.LayerWithContent, commonProcessors database.Processors) error {
+func processAncestry(datastore database.Datastore, name string, layers []database.Layer, commonProcessors database.Processors) error {
 	var (
 		ancestry database.Ancestry
 		err error
@@ -458,7 +465,7 @@ func persistNamespacedFeatures(datastore database.Datastore, features []database
 }
 
 // getProcessors retrieves common subset of the processors of each layer.
-func getProcessors(layers []database.LayerWithContent) (database.Processors, error) {
+func getProcessors(layers []database.Layer) (database.Processors, error) {
 	if len(layers) == 0 {
 		return database.Processors{}, nil
 	}
@@ -495,7 +502,7 @@ type introducedFeature struct {
 
 // computeAncestryLayers computes ancestry's layers along with what features are
 // introduced.
-func computeAncestryLayers(layers []database.LayerWithContent, commonProcessors database.Processors) ([]database.AncestryLayer, error) {
+func computeAncestryLayers(layers []database.Layer, commonProcessors database.Processors) ([]database.AncestryLayer, error) {
 	// TODO(sidchen): Once the features are linked to specific processor, we
 	// will use commonProcessors to filter out the features for this ancestry.
 
@@ -506,7 +513,7 @@ func computeAncestryLayers(layers []database.LayerWithContent, commonProcessors
 	ancestryLayers := []database.AncestryLayer{}
 	for index, layer := range layers {
 		// Initialize the ancestry Layer
-		initializedLayer := database.AncestryLayer{Layer: layer.Layer, DetectedFeatures: []database.NamespacedFeature{}}
+		initializedLayer := database.AncestryLayer{LayerMetadata: layer.LayerMetadata, DetectedFeatures: []database.NamespacedFeature{}}
 		ancestryLayers = append(ancestryLayers, initializedLayer)
 
 		// Precondition: namespaces and features contain the result from union
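To make the renamed combineLayers concrete, here is a sketch of the merge it performs; partialLayer is unexported, so its field names (hash, namespaces, features, processedBy) are inferred from how persistPartialLayer and the logging above use it and should be treated as assumptions.

// Sketch, not PR code: expected combineLayers behaviour inside package clair.
func combineLayersSketch() database.Layer {
    f1 := database.Feature{Name: "openssl", Version: "1.0", VersionFormat: "dpkg"}
    f2 := database.Feature{Name: "wechat", Version: "0.5", VersionFormat: "dpkg"}

    base := database.Layer{
        LayerMetadata: database.LayerMetadata{
            Hash:        "layer-0",
            ProcessedBy: database.Processors{Detectors: []string{"os-release"}},
        },
        Features: []database.Feature{f1},
    }

    partial := partialLayer{
        hash:        "layer-0",
        features:    []database.Feature{f1, f2}, // f1 overlaps with base
        processedBy: database.Processors{Detectors: []string{"apt-sources"}},
    }

    // The result should keep f1 once, add f2, and union the detectors
    // ("os-release", "apt-sources") via strutil.CompareStringLists.
    return combineLayers(base, partial)
}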
@@ -40,7 +40,7 @@ import (
 type mockDatastore struct {
 	database.MockDatastore
 
-	layers map[string]database.LayerWithContent
+	layers map[string]database.Layer
 	ancestry map[string]database.Ancestry
 	namespaces map[string]database.Namespace
 	features map[string]database.Feature
@@ -56,14 +56,14 @@ type mockSession struct {
 }
 
 func copyDatastore(md *mockDatastore) mockDatastore {
-	layers := map[string]database.LayerWithContent{}
+	layers := map[string]database.Layer{}
 	for k, l := range md.layers {
 		features := append([]database.Feature(nil), l.Features...)
 		namespaces := append([]database.Namespace(nil), l.Namespaces...)
 		listers := append([]string(nil), l.ProcessedBy.Listers...)
 		detectors := append([]string(nil), l.ProcessedBy.Detectors...)
-		layers[k] = database.LayerWithContent{
-			Layer: database.Layer{
+		layers[k] = database.Layer{
+			LayerMetadata: database.LayerMetadata{
 				Hash: l.Hash,
 				ProcessedBy: database.Processors{
 					Listers: listers,
@@ -78,23 +78,23 @@ func copyDatastore(md *mockDatastore) mockDatastore {
 	ancestry := map[string]database.Ancestry{}
 	for k, a := range md.ancestry {
 		ancestryLayers := []database.AncestryLayer{}
-		layers := []database.Layer{}
+		layers := []database.LayerMetadata{}
 
 		for _, layer := range a.Layers {
-			layers = append(layers, database.Layer{
+			layers = append(layers, database.LayerMetadata{
 				Hash: layer.Hash,
 				ProcessedBy: database.Processors{
-					Detectors: append([]string(nil), layer.Layer.ProcessedBy.Detectors...),
-					Listers: append([]string(nil), layer.Layer.ProcessedBy.Listers...),
+					Detectors: append([]string(nil), layer.LayerMetadata.ProcessedBy.Detectors...),
+					Listers: append([]string(nil), layer.LayerMetadata.ProcessedBy.Listers...),
 				},
 			})
 
 			ancestryLayers = append(ancestryLayers, database.AncestryLayer{
-				Layer: database.Layer{
+				LayerMetadata: database.LayerMetadata{
 					Hash: layer.Hash,
 					ProcessedBy: database.Processors{
-						Detectors: append([]string(nil), layer.Layer.ProcessedBy.Detectors...),
-						Listers: append([]string(nil), layer.Layer.ProcessedBy.Listers...),
+						Detectors: append([]string(nil), layer.LayerMetadata.ProcessedBy.Detectors...),
+						Listers: append([]string(nil), layer.LayerMetadata.ProcessedBy.Listers...),
 					},
 				},
 				DetectedFeatures: append([]database.NamespacedFeature(nil), layer.DetectedFeatures...),
@@ -137,7 +137,7 @@ func copyDatastore(md *mockDatastore) mockDatastore {
 func newMockDatastore() *mockDatastore {
 	errSessionDone := errors.New("Session Done")
 	md := &mockDatastore{
-		layers: make(map[string]database.LayerWithContent),
+		layers: make(map[string]database.Layer),
 		ancestry: make(map[string]database.Ancestry),
 		namespaces: make(map[string]database.Namespace),
 		features: make(map[string]database.Feature),
@@ -186,27 +186,9 @@ func newMockDatastore() *mockDatastore {
 			return database.Layer{}, false, errSessionDone
 		}
 		layer, ok := session.copy.layers[name]
-		return layer.Layer, ok, nil
-	}
-
-	session.FctFindLayerWithContent = func(name string) (database.LayerWithContent, bool, error) {
-		if session.terminated {
-			return database.LayerWithContent{}, false, errSessionDone
-		}
-		layer, ok := session.copy.layers[name]
 		return layer, ok, nil
 	}
 
-	session.FctPersistLayer = func(hash string) error {
-		if session.terminated {
-			return errSessionDone
-		}
-		if _, ok := session.copy.layers[hash]; !ok {
-			session.copy.layers[hash] = database.LayerWithContent{Layer: database.Layer{Hash: hash}}
-		}
-		return nil
-	}
-
 	session.FctPersistNamespaces = func(ns []database.Namespace) error {
 		if session.terminated {
 			return errSessionDone
@@ -234,15 +216,20 @@ func newMockDatastore() *mockDatastore {
 		return nil
 	}
 
-	session.FctPersistLayerContent = func(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
+	session.FctPersistLayer = func(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
 		if session.terminated {
 			return errSessionDone
 		}
 
 		// update the layer
+		_, ok := session.copy.layers[hash]
+		if !ok {
+			session.copy.layers[hash] = database.Layer{}
+		}
+
 		layer, ok := session.copy.layers[hash]
 		if !ok {
-			return errors.New("layer not found")
+			return errors.New("Failed to insert layer")
 		}
 
 		layerFeatures := map[string]database.Feature{}
@@ -381,7 +368,7 @@ func TestProcessAncestryWithDistUpgrade(t *testing.T) {
 		}
 	}
 
-	assert.Equal(t, []database.Layer{
+	assert.Equal(t, []database.LayerMetadata{
 		{Hash: "blank"},
 		{Hash: "wheezy"},
 		{Hash: "jessie"},
@@ -571,32 +558,32 @@ func TestComputeAncestryFeatures(t *testing.T) {
 	// Suppose Clair is watching two files for namespaces one containing ns1
 	// changes e.g. os-release and the other one containing ns2 changes e.g.
 	// node.
-	blank := database.LayerWithContent{Layer: database.Layer{Hash: "blank"}}
-	initNS1a := database.LayerWithContent{
-		Layer: database.Layer{Hash: "init ns1a"},
+	blank := database.Layer{LayerMetadata: database.LayerMetadata{Hash: "blank"}}
+	initNS1a := database.Layer{
+		LayerMetadata: database.LayerMetadata{Hash: "init ns1a"},
 		Namespaces: []database.Namespace{ns1a},
 		Features: []database.Feature{f1, f2},
 	}
 
-	upgradeNS2b := database.LayerWithContent{
-		Layer: database.Layer{Hash: "upgrade ns2b"},
+	upgradeNS2b := database.Layer{
+		LayerMetadata: database.LayerMetadata{Hash: "upgrade ns2b"},
 		Namespaces: []database.Namespace{ns2b},
 	}
 
-	upgradeNS1b := database.LayerWithContent{
-		Layer: database.Layer{Hash: "upgrade ns1b"},
+	upgradeNS1b := database.Layer{
+		LayerMetadata: database.LayerMetadata{Hash: "upgrade ns1b"},
 		Namespaces: []database.Namespace{ns1b},
 		Features: []database.Feature{f1, f2},
 	}
 
-	initNS2a := database.LayerWithContent{
-		Layer: database.Layer{Hash: "init ns2a"},
+	initNS2a := database.Layer{
+		LayerMetadata: database.LayerMetadata{Hash: "init ns2a"},
 		Namespaces: []database.Namespace{ns2a},
 		Features: []database.Feature{f3, f4},
 	}
 
-	removeF2 := database.LayerWithContent{
-		Layer: database.Layer{Hash: "remove f2"},
+	removeF2 := database.Layer{
+		LayerMetadata: database.LayerMetadata{Hash: "remove f2"},
 		Features: []database.Feature{f1},
 	}
 
@@ -609,7 +596,7 @@ func TestComputeAncestryFeatures(t *testing.T) {
 	// -> f1 (remove f2)
 	// -> blank (empty)
 
-	layers := []database.LayerWithContent{
+	layers := []database.Layer{
 		blank,
 		initNS1a,
 		removeF2,