Merge pull request #619 from KeyboardNerd/sidac/rm_layer
database: Remove LayerWithContent from interface
This commit is contained in: commit f98ff58afd
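For context, this change collapses the split layer API on database.Session: PersistLayer(hash) and PersistLayerContent(...) become a single PersistLayer(hash, namespaces, features, processedBy), FindLayerWithContent is folded into FindLayer, and the Layer/LayerWithContent models are renamed to LayerMetadata/Layer. The sketch below is a hedged illustration of how a caller migrates; the helper function, package name, and import path are assumptions for the example and are not part of this diff.

package example

import "github.com/coreos/clair/database"

// persistAndFetch is an illustrative helper, not part of the PR. It shows the
// consolidated Session API after this change: one PersistLayer call carries the
// layer's namespaces, features, and processors, and FindLayer now returns the
// full database.Layer, which embeds LayerMetadata.
func persistAndFetch(tx database.Session, hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) (database.Layer, bool, error) {
    // Before this commit this took two calls: PersistLayer(hash) followed by
    // PersistLayerContent(hash, namespaces, features, processedBy).
    if err := tx.PersistLayer(hash, namespaces, features, processedBy); err != nil {
        return database.Layer{}, false, err
    }

    // Before this commit, FindLayer returned only metadata and
    // FindLayerWithContent returned the detected namespaces and features;
    // FindLayer now returns both.
    return tx.FindLayer(hash)
}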
@@ -123,7 +123,7 @@ func VulnerabilityWithFixedInFromDatabaseModel(dbVuln database.VulnerabilityWith
 }
 
 // LayerFromDatabaseModel converts database layer to api layer.
-func LayerFromDatabaseModel(dbLayer database.Layer) *Layer {
+func LayerFromDatabaseModel(dbLayer database.LayerMetadata) *Layer {
     layer := Layer{Hash: dbLayer.Hash}
     return &layer
 }
@@ -120,22 +120,16 @@ type Session interface {
     // PersistNamespaces inserts a set of namespaces if not in the database.
     PersistNamespaces([]Namespace) error
 
-    // PersistLayer creates a layer using the blob Sum hash.
-    PersistLayer(hash string) error
-
-    // PersistLayerContent persists a layer's content in the database. The given
+    // PersistLayer persists a layer's content in the database. The given
     // namespaces and features can be partial content of this layer.
     //
     // The layer, namespaces and features are expected to be already existing
     // in the database.
-    PersistLayerContent(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
+    PersistLayer(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
 
-    // FindLayer retrieves the metadata of a layer.
-    FindLayer(hash string) (layer Layer, found bool, err error)
-
-    // FindLayerWithContent returns a layer with all detected features and
+    // FindLayer returns a layer with all detected features and
     // namespaces.
-    FindLayerWithContent(hash string) (layer LayerWithContent, found bool, err error)
+    FindLayer(hash string) (layer Layer, found bool, err error)
 
     // InsertVulnerabilities inserts a set of UNIQUE vulnerabilities with
     // affected features into database, assuming that all vulnerabilities
@@ -167,9 +161,9 @@ type Session interface {
     // always considered first page.
     FindVulnerabilityNotification(name string, limit int, oldVulnerabilityPage pagination.Token, newVulnerabilityPage pagination.Token) (noti VulnerabilityNotificationWithVulnerable, found bool, err error)
 
-    // MarkNotificationNotified marks a Notification as notified now, assuming
+    // MarkNotificationAsRead marks a Notification as notified now, assuming
     // the requested notification is in the database.
-    MarkNotificationNotified(name string) error
+    MarkNotificationAsRead(name string) error
 
     // DeleteNotification removes a Notification in the database.
     DeleteNotification(name string) error
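The MockSession changes below follow mechanically from the interface above. As a hedged sketch of how a test double is wired once FctPersistLayerContent and FctFindLayerWithContent are folded into FctPersistLayer and FctFindLayer (the test name, hash value, package name, and in-memory map are invented for the example):

package example

import (
    "testing"

    "github.com/coreos/clair/database"
)

// TestConsolidatedLayerMock is illustrative only; it exercises the two
// remaining layer hooks on database.MockSession after this change.
func TestConsolidatedLayerMock(t *testing.T) {
    stored := map[string]database.Layer{}

    ms := &database.MockSession{
        FctPersistLayer: func(hash string, ns []database.Namespace, fs []database.Feature, p database.Processors) error {
            stored[hash] = database.Layer{
                LayerMetadata: database.LayerMetadata{Hash: hash, ProcessedBy: p},
                Namespaces:    ns,
                Features:      fs,
            }
            return nil
        },
        FctFindLayer: func(name string) (database.Layer, bool, error) {
            layer, ok := stored[name]
            return layer, ok, nil
        },
    }

    if err := ms.PersistLayer("example-hash", nil, nil, database.Processors{}); err != nil {
        t.Fatal(err)
    }
    if _, ok, _ := ms.FindLayer("example-hash"); !ok {
        t.Fatal("expected the persisted layer to be found")
    }
}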
@@ -32,10 +32,8 @@ type MockSession struct {
     FctPersistFeatures func([]Feature) error
     FctPersistNamespacedFeatures func([]NamespacedFeature) error
     FctCacheAffectedNamespacedFeatures func([]NamespacedFeature) error
-    FctPersistLayer func(hash string) error
-    FctPersistLayerContent func(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
+    FctPersistLayer func(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error
     FctFindLayer func(name string) (Layer, bool, error)
-    FctFindLayerWithContent func(name string) (LayerWithContent, bool, error)
     FctInsertVulnerabilities func([]VulnerabilityWithAffected) error
     FctFindVulnerabilities func([]VulnerabilityID) ([]NullableVulnerability, error)
     FctDeleteVulnerabilities func([]VulnerabilityID) error
@@ -43,7 +41,7 @@ type MockSession struct {
     FctFindNewNotification func(lastNotified time.Time) (NotificationHook, bool, error)
     FctFindVulnerabilityNotification func(name string, limit int, oldPage pagination.Token, newPage pagination.Token) (
         vuln VulnerabilityNotificationWithVulnerable, ok bool, err error)
-    FctMarkNotificationNotified func(name string) error
+    FctMarkNotificationAsRead func(name string) error
     FctDeleteNotification func(name string) error
     FctUpdateKeyValue func(key, value string) error
     FctFindKeyValue func(key string) (string, bool, error)
@@ -115,16 +113,9 @@ func (ms *MockSession) CacheAffectedNamespacedFeatures(namespacedFeatures []Name
     panic("required mock function not implemented")
 }
 
-func (ms *MockSession) PersistLayer(layer string) error {
+func (ms *MockSession) PersistLayer(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error {
     if ms.FctPersistLayer != nil {
-        return ms.FctPersistLayer(layer)
-    }
-    panic("required mock function not implemented")
-}
-
-func (ms *MockSession) PersistLayerContent(hash string, namespaces []Namespace, features []Feature, processedBy Processors) error {
-    if ms.FctPersistLayerContent != nil {
-        return ms.FctPersistLayerContent(hash, namespaces, features, processedBy)
+        return ms.FctPersistLayer(hash, namespaces, features, processedBy)
     }
     panic("required mock function not implemented")
 }
@@ -136,13 +127,6 @@ func (ms *MockSession) FindLayer(name string) (Layer, bool, error) {
     panic("required mock function not implemented")
 }
 
-func (ms *MockSession) FindLayerWithContent(name string) (LayerWithContent, bool, error) {
-    if ms.FctFindLayerWithContent != nil {
-        return ms.FctFindLayerWithContent(name)
-    }
-    panic("required mock function not implemented")
-}
-
 func (ms *MockSession) InsertVulnerabilities(vulnerabilities []VulnerabilityWithAffected) error {
     if ms.FctInsertVulnerabilities != nil {
         return ms.FctInsertVulnerabilities(vulnerabilities)
@@ -186,9 +170,9 @@ func (ms *MockSession) FindVulnerabilityNotification(name string, limit int, old
     panic("required mock function not implemented")
 }
 
-func (ms *MockSession) MarkNotificationNotified(name string) error {
-    if ms.FctMarkNotificationNotified != nil {
-        return ms.FctMarkNotificationNotified(name)
+func (ms *MockSession) MarkNotificationAsRead(name string) error {
+    if ms.FctMarkNotificationAsRead != nil {
+        return ms.FctMarkNotificationAsRead(name)
     }
     panic("required mock function not implemented")
 }
@@ -41,25 +41,25 @@ type Ancestry struct {
 
 // AncestryLayer is a layer with all detected namespaced features.
 type AncestryLayer struct {
-    Layer
+    LayerMetadata
 
     // DetectedFeatures are the features introduced by this layer when it was
     // processed.
     DetectedFeatures []NamespacedFeature
 }
 
-// Layer contains the metadata of a layer.
-type Layer struct {
+// LayerMetadata contains the metadata of a layer.
+type LayerMetadata struct {
     // Hash is content hash of the layer.
     Hash string
     // ProcessedBy contains the processors that processed this layer.
     ProcessedBy Processors
 }
 
-// LayerWithContent is a layer with its detected namespaces and features by
+// Layer is a layer with its detected namespaces and features by
 // ProcessedBy.
-type LayerWithContent struct {
-    Layer
+type Layer struct {
+    LayerMetadata
 
     Namespaces []Namespace
     Features []Feature
@@ -170,7 +170,7 @@ func (tx *pgSession) findAncestryLayers(id int64) ([]database.AncestryLayer, err
         }
 
         if !index.Valid || !id.Valid {
-            return nil, commonerr.ErrNotFound
+            panic("null ancestry ID or ancestry index violates database constraints")
         }
 
         if _, ok := layers[index.Int64]; ok {
@@ -30,7 +30,7 @@ func TestUpsertAncestry(t *testing.T) {
             Name: "a1",
             Layers: []database.AncestryLayer{
                 {
-                    Layer: database.Layer{
+                    LayerMetadata: database.LayerMetadata{
                         Hash: "layer-N",
                     },
                 },
@@ -43,7 +43,7 @@ func TestUpsertAncestry(t *testing.T) {
             Name: "a",
             Layers: []database.AncestryLayer{
                 {
-                    Layer: database.Layer{
+                    LayerMetadata: database.LayerMetadata{
                         Hash: "layer-0",
                     },
                 },
@@ -54,7 +54,7 @@ func TestUpsertAncestry(t *testing.T) {
             Name: "a",
             Layers: []database.AncestryLayer{
                 {
-                    Layer: database.Layer{
+                    LayerMetadata: database.LayerMetadata{
                         Hash: "layer-1",
                     },
                 },
@@ -137,7 +137,7 @@ func assertAncestryEqual(t *testing.T, expected database.Ancestry, actual databa
 }
 
 func assertAncestryLayerEqual(t *testing.T, expected database.AncestryLayer, actual database.AncestryLayer) bool {
-    return assertLayerEqual(t, expected.Layer, actual.Layer) &&
+    return assertLayerEqual(t, expected.LayerMetadata, actual.LayerMetadata) &&
         assertNamespacedFeatureEqual(t, expected.DetectedFeatures, actual.DetectedFeatures)
 }
 
@@ -159,7 +159,7 @@ func TestFindAncestry(t *testing.T) {
             },
             Layers: []database.AncestryLayer{
                 {
-                    Layer: database.Layer{
+                    LayerMetadata: database.LayerMetadata{
                         Hash: "layer-0",
                     },
                     DetectedFeatures: []database.NamespacedFeature{
@@ -188,17 +188,17 @@ func TestFindAncestry(t *testing.T) {
                 },
             },
             {
-                Layer: database.Layer{
+                LayerMetadata: database.LayerMetadata{
                     Hash: "layer-1",
                 },
             },
             {
-                Layer: database.Layer{
+                LayerMetadata: database.LayerMetadata{
                     Hash: "layer-2",
                 },
             },
             {
-                Layer: database.Layer{
+                LayerMetadata: database.LayerMetadata{
                     Hash: "layer-3b",
                 },
             },
@@ -23,19 +23,14 @@ import (
 )
 
 func (tx *pgSession) FindLayer(hash string) (database.Layer, bool, error) {
-    layer, _, ok, err := tx.findLayer(hash)
-    return layer, ok, err
-}
-
-func (tx *pgSession) FindLayerWithContent(hash string) (database.LayerWithContent, bool, error) {
     var (
-        layer database.LayerWithContent
+        layer database.Layer
         layerID int64
         ok bool
         err error
     )
 
-    layer.Layer, layerID, ok, err = tx.findLayer(hash)
+    layer.LayerMetadata, layerID, ok, err = tx.findLayer(hash)
     if err != nil {
         return layer, false, err
     }
@@ -49,46 +44,53 @@ func (tx *pgSession) FindLayerWithContent(hash string) (database.LayerWithConten
     return layer, true, nil
 }
 
-func (tx *pgSession) PersistLayer(hash string) error {
+func (tx *pgSession) persistLayer(hash string) (int64, error) {
     if hash == "" {
-        return commonerr.NewBadRequestError("Empty Layer Hash is not allowed")
+        return -1, commonerr.NewBadRequestError("Empty Layer Hash is not allowed")
     }
 
-    _, err := tx.Exec(queryPersistLayer(1), hash)
-    if err != nil {
-        return handleError("queryPersistLayer", err)
+    id := sql.NullInt64{}
+    if err := tx.QueryRow(soiLayer, hash).Scan(&id); err != nil {
+        return -1, handleError("queryPersistLayer", err)
     }
 
-    return nil
+    if !id.Valid {
+        panic("null layer.id violates database constraint")
+    }
+
+    return id.Int64, nil
 }
 
-// PersistLayerContent relates layer identified by hash with namespaces,
+// PersistLayer relates layer identified by hash with namespaces,
 // features and processors provided. If the layer, namespaces, features are not
 // in database, the function returns an error.
-func (tx *pgSession) PersistLayerContent(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
+func (tx *pgSession) PersistLayer(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
     if hash == "" {
         return commonerr.NewBadRequestError("Empty layer hash is not allowed")
     }
 
-    var layerID int64
-    err := tx.QueryRow(searchLayer, hash).Scan(&layerID)
-    if err != nil {
+    var (
+        err error
+        id int64
+    )
+
+    if id, err = tx.persistLayer(hash); err != nil {
         return err
     }
 
-    if err = tx.persistLayerNamespace(layerID, namespaces); err != nil {
+    if err = tx.persistLayerNamespace(id, namespaces); err != nil {
         return err
     }
 
-    if err = tx.persistLayerFeatures(layerID, features); err != nil {
+    if err = tx.persistLayerFeatures(id, features); err != nil {
         return err
     }
 
-    if err = tx.persistLayerDetectors(layerID, processedBy.Detectors); err != nil {
+    if err = tx.persistLayerDetectors(id, processedBy.Detectors); err != nil {
         return err
     }
 
-    if err = tx.persistLayerListers(layerID, processedBy.Listers); err != nil {
+    if err = tx.persistLayerListers(id, processedBy.Listers); err != nil {
         return err
     }
 
@@ -275,10 +277,10 @@ func (tx *pgSession) findLayerFeatures(layerID int64) ([]database.Feature, error
     return features, nil
 }
 
-func (tx *pgSession) findLayer(hash string) (database.Layer, int64, bool, error) {
+func (tx *pgSession) findLayer(hash string) (database.LayerMetadata, int64, bool, error) {
     var (
         layerID int64
-        layer = database.Layer{Hash: hash, ProcessedBy: database.Processors{}}
+        layer = database.LayerMetadata{Hash: hash, ProcessedBy: database.Processors{}}
     )
 
     if hash == "" {
@@ -26,65 +26,72 @@ func TestPersistLayer(t *testing.T) {
     datastore, tx := openSessionForTest(t, "PersistLayer", false)
     defer closeTest(t, datastore, tx)
 
-    l1 := ""
-    l2 := "HESOYAM"
-
-    // invalid
-    assert.NotNil(t, tx.PersistLayer(l1))
-    // valid
-    assert.Nil(t, tx.PersistLayer(l2))
-    // duplicated
-    assert.Nil(t, tx.PersistLayer(l2))
-}
+    assert.NotNil(t, tx.PersistLayer("", nil, nil, database.Processors{}))
+    // insert namespaces + features to
+    namespaces := []database.Namespace{
+        {
+            Name: "sushi shop",
+            VersionFormat: "apk",
+        },
+    }
 
-func TestPersistLayerProcessors(t *testing.T) {
-    datastore, tx := openSessionForTest(t, "PersistLayerProcessors", true)
-    defer closeTest(t, datastore, tx)
+    features := []database.Feature{
+        {
+            Name: "blue fin sashimi",
+            Version: "v1.0",
+            VersionFormat: "apk",
+        },
+    }
 
-    // invalid
-    assert.NotNil(t, tx.PersistLayerContent("hash", []database.Namespace{}, []database.Feature{}, database.Processors{}))
-    // valid
-    assert.Nil(t, tx.PersistLayerContent("layer-4", []database.Namespace{}, []database.Feature{}, database.Processors{Detectors: []string{"new detector!"}}))
+    processors := database.Processors{
+        Listers: []string{"release"},
+        Detectors: []string{"apk"},
+    }
+
+    assert.Nil(t, tx.PersistNamespaces(namespaces))
+    assert.Nil(t, tx.PersistFeatures(features))
+
+    // Valid
+    assert.Nil(t, tx.PersistLayer("RANDOM_FOREST", namespaces, features, processors))
+
+    nonExistingFeature := []database.Feature{{Name: "lobster sushi", Version: "v0.1", VersionFormat: "apk"}}
+    // Invalid:
+    assert.NotNil(t, tx.PersistLayer("RANDOM_FOREST", namespaces, nonExistingFeature, processors))
+
+    assert.Nil(t, tx.PersistFeatures(nonExistingFeature))
+    // Update the layer
+    assert.Nil(t, tx.PersistLayer("RANDOM_FOREST", namespaces, nonExistingFeature, processors))
+
+    // confirm update
+    layer, ok, err := tx.FindLayer("RANDOM_FOREST")
+    assert.Nil(t, err)
+    assert.True(t, ok)
+
+    expectedLayer := database.Layer{
+        LayerMetadata: database.LayerMetadata{
+            Hash: "RANDOM_FOREST",
+            ProcessedBy: processors,
+        },
+        Features: append(features, nonExistingFeature...),
+        Namespaces: namespaces,
+    }
+
+    assertLayerWithContentEqual(t, expectedLayer, layer)
+}
 
 func TestFindLayer(t *testing.T) {
     datastore, tx := openSessionForTest(t, "FindLayer", true)
     defer closeTest(t, datastore, tx)
 
-    expected := database.Layer{
-        Hash: "layer-4",
-        ProcessedBy: database.Processors{
-            Detectors: []string{"os-release", "apt-sources"},
-            Listers: []string{"dpkg", "rpm"},
-        },
-    }
-
-    // invalid
     _, _, err := tx.FindLayer("")
     assert.NotNil(t, err)
     _, ok, err := tx.FindLayer("layer-non")
     assert.Nil(t, err)
     assert.False(t, ok)
 
-    // valid
-    layer, ok2, err := tx.FindLayer("layer-4")
-    if assert.Nil(t, err) && assert.True(t, ok2) {
-        assertLayerEqual(t, expected, layer)
-    }
-}
-
-func TestFindLayerWithContent(t *testing.T) {
-    datastore, tx := openSessionForTest(t, "FindLayerWithContent", true)
-    defer closeTest(t, datastore, tx)
-
-    _, _, err := tx.FindLayerWithContent("")
-    assert.NotNil(t, err)
-    _, ok, err := tx.FindLayerWithContent("layer-non")
-    assert.Nil(t, err)
-    assert.False(t, ok)
-
-    expectedL := database.LayerWithContent{
-        Layer: database.Layer{
+    expectedL := database.Layer{
+        LayerMetadata: database.LayerMetadata{
             Hash: "layer-4",
             ProcessedBy: database.Processors{
                 Detectors: []string{"os-release", "apt-sources"},
@@ -101,19 +108,19 @@ func TestFindLayerWithContent(t *testing.T) {
         },
     }
 
-    layer, ok2, err := tx.FindLayerWithContent("layer-4")
+    layer, ok2, err := tx.FindLayer("layer-4")
     if assert.Nil(t, err) && assert.True(t, ok2) {
         assertLayerWithContentEqual(t, expectedL, layer)
     }
 }
 
-func assertLayerWithContentEqual(t *testing.T, expected database.LayerWithContent, actual database.LayerWithContent) bool {
-    return assertLayerEqual(t, expected.Layer, actual.Layer) &&
+func assertLayerWithContentEqual(t *testing.T, expected database.Layer, actual database.Layer) bool {
+    return assertLayerEqual(t, expected.LayerMetadata, actual.LayerMetadata) &&
         assertFeaturesEqual(t, expected.Features, actual.Features) &&
         assertNamespacesEqual(t, expected.Namespaces, actual.Namespaces)
 }
 
-func assertLayerEqual(t *testing.T, expected database.Layer, actual database.Layer) bool {
+func assertLayerEqual(t *testing.T, expected database.LayerMetadata, actual database.LayerMetadata) bool {
     return assertProcessorsEqual(t, expected.ProcessedBy, actual.ProcessedBy) &&
         assert.Equal(t, expected.Hash, actual.Hash)
 }
@@ -289,23 +289,23 @@ func (tx *pgSession) FindVulnerabilityNotification(name string, limit int, oldPa
     return noti, true, nil
 }
 
-func (tx *pgSession) MarkNotificationNotified(name string) error {
+func (tx *pgSession) MarkNotificationAsRead(name string) error {
     if name == "" {
         return commonerr.NewBadRequestError("Empty notification name is not allowed")
     }
 
-    r, err := tx.Exec(updatedNotificationNotified, name)
+    r, err := tx.Exec(updatedNotificationAsRead, name)
     if err != nil {
-        return handleError("updatedNotificationNotified", err)
+        return handleError("updatedNotificationAsRead", err)
     }
 
     affected, err := r.RowsAffected()
     if err != nil {
-        return handleError("updatedNotificationNotified", err)
+        return handleError("updatedNotificationAsRead", err)
     }
 
     if affected <= 0 {
-        return handleError("updatedNotificationNotified", errNotificationNotFound)
+        return handleError("updatedNotificationAsRead", errNotificationNotFound)
     }
     return nil
 }
@@ -200,7 +200,7 @@ func TestFindNewNotification(t *testing.T) {
     }
 
     // can't find the notified
-    assert.Nil(t, tx.MarkNotificationNotified("test"))
+    assert.Nil(t, tx.MarkNotificationAsRead("test"))
     // if the notified time is before
     noti, ok, err = tx.FindNewNotification(time.Now().Add(-time.Duration(10 * time.Second)))
     assert.Nil(t, err)
@@ -225,16 +225,16 @@ func TestFindNewNotification(t *testing.T) {
     assert.False(t, ok)
 }
 
-func TestMarkNotificationNotified(t *testing.T) {
-    datastore, tx := openSessionForTest(t, "MarkNotificationNotified", true)
+func TestMarkNotificationAsRead(t *testing.T) {
+    datastore, tx := openSessionForTest(t, "MarkNotificationAsRead", true)
     defer closeTest(t, datastore, tx)
 
     // invalid case: notification doesn't exist
-    assert.NotNil(t, tx.MarkNotificationNotified("non-existing"))
+    assert.NotNil(t, tx.MarkNotificationAsRead("non-existing"))
     // valid case
-    assert.Nil(t, tx.MarkNotificationNotified("test"))
+    assert.Nil(t, tx.MarkNotificationAsRead("test"))
     // valid case
-    assert.Nil(t, tx.MarkNotificationNotified("test"))
+    assert.Nil(t, tx.MarkNotificationAsRead("test"))
 }
 
 func TestDeleteNotification(t *testing.T) {
@@ -73,7 +73,16 @@ const (
         AND v.deleted_at IS NULL`
 
     // layer.go
     searchLayerIDs = `SELECT id, hash FROM layer WHERE hash = ANY($1);`
+    soiLayer = `
+        WITH new_layer AS (
+            INSERT INTO layer (hash)
+            SELECT CAST ($1 AS VARCHAR)
+            WHERE NOT EXISTS (SELECT id FROM layer WHERE hash = $1)
+            RETURNING id
+        )
+        SELECT id FROM new_Layer
+        UNION
+        SELECT id FROM layer WHERE hash = $1`
 
     searchLayerFeatures = `
         SELECT feature.Name, feature.Version, feature.version_format
@@ -168,7 +177,7 @@ const (
     INSERT INTO Vulnerability_Notification(name, created_at, old_vulnerability_id, new_vulnerability_id)
     VALUES ($1, $2, $3, $4)`
 
-    updatedNotificationNotified = `
+    updatedNotificationAsRead = `
     UPDATE Vulnerability_Notification
     SET notified_at = CURRENT_TIMESTAMP
     WHERE name = $1`
@@ -16,7 +16,6 @@ package pgsql
 
 import (
     "database/sql"
-    "encoding/json"
     "errors"
     "time"
 
@@ -220,17 +219,6 @@ func (tx *pgSession) insertVulnerabilities(vulnerabilities []database.Vulnerabil
     return vulnIDs, nil
 }
 
-// castMetadata marshals the given database.MetadataMap and unmarshals it again to make sure that
-// everything has the interface{} type.
-// It is required when comparing crafted MetadataMap against MetadataMap that we get from the
-// database.
-func castMetadata(m database.MetadataMap) database.MetadataMap {
-    c := make(database.MetadataMap)
-    j, _ := json.Marshal(m)
-    json.Unmarshal(j, &c)
-    return c
-}
-
 func (tx *pgSession) lockFeatureVulnerabilityCache() error {
     _, err := tx.Exec(lockVulnerabilityAffects)
     if err != nil {
@@ -93,7 +93,7 @@ func RunNotifier(config *notification.Config, datastore database.Datastore, stop
         go func() {
             success, interrupted := handleTask(*notification, stopper, config.Attempts)
             if success {
-                err := markNotificationNotified(datastore, notification.Name)
+                err := markNotificationAsRead(datastore, notification.Name)
                 if err != nil {
                     log.WithError(err).Error("Failed to mark notification notified")
                 }
@@ -196,14 +196,14 @@ func findNewNotification(datastore database.Datastore, renotifyInterval time.Dur
     return tx.FindNewNotification(time.Now().Add(-renotifyInterval))
 }
 
-func markNotificationNotified(datastore database.Datastore, name string) error {
+func markNotificationAsRead(datastore database.Datastore, name string) error {
     tx, err := datastore.Begin()
     if err != nil {
         log.WithError(err).Error("an error happens when beginning database transaction")
     }
     defer tx.Rollback()
 
-    if err := tx.MarkNotificationNotified(name); err != nil {
+    if err := tx.MarkNotificationAsRead(name); err != nil {
         return err
     }
     return tx.Commit()
worker.go
@@ -1,4 +1,4 @@
-// Copyright 2017 clair authors
+// Copyright 2018 clair authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -146,32 +146,29 @@ func processRequests(imageFormat string, toDetect []processRequest) ([]database.
     return namespaces, features, updates, nil
 }
 
-func getLayer(datastore database.Datastore, req LayerRequest) (layer database.LayerWithContent, preq *processRequest, err error) {
-    var ok bool
-    tx, err := datastore.Begin()
-    if err != nil {
+func getLayer(datastore database.Datastore, req LayerRequest) (layer database.Layer, preq *processRequest, err error) {
+    var (
+        tx database.Session
+        ok bool
+    )
+
+    if tx, err = datastore.Begin(); err != nil {
         return
     }
+
     defer tx.Rollback()
 
-    layer, ok, err = tx.FindLayerWithContent(req.Hash)
-    if err != nil {
+    if layer, ok, err = tx.FindLayer(req.Hash); err != nil {
         return
     }
 
     if !ok {
-        err = tx.PersistLayer(req.Hash)
-        if err != nil {
-            return
+        layer = database.Layer{
+            LayerMetadata: database.LayerMetadata{
+                Hash: req.Hash,
+            },
         }
 
         if err = tx.Commit(); err != nil {
             return
         }
-
-        layer = database.LayerWithContent{}
-        layer.Hash = req.Hash
-
-        preq = &processRequest{
-            request: req,
-            notProcessedBy: Processors,
@@ -185,15 +182,16 @@ func getLayer(datastore database.Datastore, req LayerRequest) (layer database.La
+            }
         }
     }
 
     return
 }
 
 // processLayers processes a set of post layer requests, stores layers and
 // returns an ordered list of processed layers with detected features and
 // namespaces.
-func processLayers(datastore database.Datastore, imageFormat string, requests []LayerRequest) ([]database.LayerWithContent, error) {
+func processLayers(datastore database.Datastore, imageFormat string, requests []LayerRequest) ([]database.Layer, error) {
     toDetect := []processRequest{}
-    layers := map[string]database.LayerWithContent{}
+    layers := map[string]database.Layer{}
     for _, req := range requests {
         if _, ok := layers[req.Hash]; ok {
             continue
@@ -208,7 +206,7 @@ func processLayers(datastore database.Datastore, imageFormat string, requests []
         }
     }
 
-    namespaces, features, partialRes, err := processRequests(imageFormat, toDetect)
+    namespaces, features, partialLayers, err := processRequests(imageFormat, toDetect)
     if err != nil {
         return nil, err
     }
@@ -222,10 +220,18 @@ func processLayers(datastore database.Datastore, imageFormat string, requests []
         return nil, err
     }
 
-    for _, res := range partialRes {
-        if err := persistPartialLayer(datastore, res); err != nil {
+    for _, layer := range partialLayers {
+        if err := persistPartialLayer(datastore, layer); err != nil {
             return nil, err
         }
+
+        log.WithFields(log.Fields{
+            "Hash": layer.hash,
+            "namespace count": len(layer.namespaces),
+            "feature count": len(layer.features),
+            "namespace detectors": layer.processedBy.Detectors,
+            "feature listers": layer.processedBy.Listers,
+        }).Debug("saved layer")
     }
 
     // NOTE(Sida): The full layers are computed using partially
@@ -233,9 +239,9 @@ func processLayers(datastore database.Datastore, imageFormat string, requests []
     // Clair are changing some layers in this set of layers, it might generate
     // different results especially when the other Clair is with different
     // processors.
-    completeLayers := []database.LayerWithContent{}
+    completeLayers := []database.Layer{}
     for _, req := range requests {
-        if partialLayer, ok := partialRes[req.Hash]; ok {
+        if partialLayer, ok := partialLayers[req.Hash]; ok {
             completeLayers = append(completeLayers, combineLayers(layers[req.Hash], partialLayer))
         } else {
             completeLayers = append(completeLayers, layers[req.Hash])
@@ -252,9 +258,10 @@ func persistPartialLayer(datastore database.Datastore, layer partialLayer) error
     }
     defer tx.Rollback()
 
-    if err := tx.PersistLayerContent(layer.hash, layer.namespaces, layer.features, layer.processedBy); err != nil {
+    if err := tx.PersistLayer(layer.hash, layer.namespaces, layer.features, layer.processedBy); err != nil {
        return err
     }
+
     return tx.Commit()
 }
 
@@ -286,7 +293,7 @@ func persistNamespaces(datastore database.Datastore, namespaces []database.Names
 }
 
 // combineLayers merges `layer` and `partial` without duplicated content.
-func combineLayers(layer database.LayerWithContent, partial partialLayer) database.LayerWithContent {
+func combineLayers(layer database.Layer, partial partialLayer) database.Layer {
     mapF := map[database.Feature]struct{}{}
     mapNS := map[database.Namespace]struct{}{}
     for _, f := range layer.Features {
@@ -312,8 +319,8 @@ func combineLayers(layer database.LayerWithContent, partial partialLayer) databa
 
     layer.ProcessedBy.Detectors = append(layer.ProcessedBy.Detectors, strutil.CompareStringLists(partial.processedBy.Detectors, layer.ProcessedBy.Detectors)...)
     layer.ProcessedBy.Listers = append(layer.ProcessedBy.Listers, strutil.CompareStringLists(partial.processedBy.Listers, layer.ProcessedBy.Listers)...)
-    return database.LayerWithContent{
-        Layer: database.Layer{
+    return database.Layer{
+        LayerMetadata: database.LayerMetadata{
             Hash: layer.Hash,
             ProcessedBy: layer.ProcessedBy,
         },
@@ -346,7 +353,7 @@ func ProcessAncestry(datastore database.Datastore, imageFormat, name string, lay
     var (
         err error
         ok bool
-        layers []database.LayerWithContent
+        layers []database.Layer
         commonProcessors database.Processors
     )
 
@@ -361,7 +368,7 @@ func ProcessAncestry(datastore database.Datastore, imageFormat, name string, lay
     if ok, err = isAncestryProcessed(datastore, name); err != nil {
         return err
     } else if ok {
-        log.WithField("ancestry", name).Debug("Ancestry is processed")
+        log.WithField("name", name).Debug("ancestry is already processed")
         return nil
     }
 
@@ -386,7 +393,7 @@ func getNamespacedFeatures(layers []database.AncestryLayer) []database.Namespace
     return features
 }
 
-func processAncestry(datastore database.Datastore, name string, layers []database.LayerWithContent, commonProcessors database.Processors) error {
+func processAncestry(datastore database.Datastore, name string, layers []database.Layer, commonProcessors database.Processors) error {
     var (
         ancestry database.Ancestry
         err error
@@ -458,7 +465,7 @@ func persistNamespacedFeatures(datastore database.Datastore, features []database
 }
 
 // getProcessors retrieves common subset of the processors of each layer.
-func getProcessors(layers []database.LayerWithContent) (database.Processors, error) {
+func getProcessors(layers []database.Layer) (database.Processors, error) {
     if len(layers) == 0 {
         return database.Processors{}, nil
     }
@@ -495,7 +502,7 @@ type introducedFeature struct {
 
 // computeAncestryLayers computes ancestry's layers along with what features are
 // introduced.
-func computeAncestryLayers(layers []database.LayerWithContent, commonProcessors database.Processors) ([]database.AncestryLayer, error) {
+func computeAncestryLayers(layers []database.Layer, commonProcessors database.Processors) ([]database.AncestryLayer, error) {
     // TODO(sidchen): Once the features are linked to specific processor, we
     // will use commonProcessors to filter out the features for this ancestry.
 
@@ -506,7 +513,7 @@ func computeAncestryLayers(layers []database.LayerWithContent, commonProcessors
     ancestryLayers := []database.AncestryLayer{}
     for index, layer := range layers {
         // Initialize the ancestry Layer
-        initializedLayer := database.AncestryLayer{Layer: layer.Layer, DetectedFeatures: []database.NamespacedFeature{}}
+        initializedLayer := database.AncestryLayer{LayerMetadata: layer.LayerMetadata, DetectedFeatures: []database.NamespacedFeature{}}
         ancestryLayers = append(ancestryLayers, initializedLayer)
 
         // Precondition: namespaces and features contain the result from union
@@ -40,7 +40,7 @@ import (
 type mockDatastore struct {
     database.MockDatastore
 
-    layers map[string]database.LayerWithContent
+    layers map[string]database.Layer
     ancestry map[string]database.Ancestry
     namespaces map[string]database.Namespace
     features map[string]database.Feature
@@ -56,14 +56,14 @@ type mockSession struct {
 }
 
 func copyDatastore(md *mockDatastore) mockDatastore {
-    layers := map[string]database.LayerWithContent{}
+    layers := map[string]database.Layer{}
     for k, l := range md.layers {
         features := append([]database.Feature(nil), l.Features...)
         namespaces := append([]database.Namespace(nil), l.Namespaces...)
         listers := append([]string(nil), l.ProcessedBy.Listers...)
         detectors := append([]string(nil), l.ProcessedBy.Detectors...)
-        layers[k] = database.LayerWithContent{
-            Layer: database.Layer{
+        layers[k] = database.Layer{
+            LayerMetadata: database.LayerMetadata{
                 Hash: l.Hash,
                 ProcessedBy: database.Processors{
                     Listers: listers,
@@ -78,23 +78,23 @@ func copyDatastore(md *mockDatastore) mockDatastore {
     ancestry := map[string]database.Ancestry{}
     for k, a := range md.ancestry {
         ancestryLayers := []database.AncestryLayer{}
-        layers := []database.Layer{}
+        layers := []database.LayerMetadata{}
 
         for _, layer := range a.Layers {
-            layers = append(layers, database.Layer{
+            layers = append(layers, database.LayerMetadata{
                 Hash: layer.Hash,
                 ProcessedBy: database.Processors{
-                    Detectors: append([]string(nil), layer.Layer.ProcessedBy.Detectors...),
-                    Listers: append([]string(nil), layer.Layer.ProcessedBy.Listers...),
+                    Detectors: append([]string(nil), layer.LayerMetadata.ProcessedBy.Detectors...),
+                    Listers: append([]string(nil), layer.LayerMetadata.ProcessedBy.Listers...),
                 },
             })
 
             ancestryLayers = append(ancestryLayers, database.AncestryLayer{
-                Layer: database.Layer{
+                LayerMetadata: database.LayerMetadata{
                     Hash: layer.Hash,
                     ProcessedBy: database.Processors{
-                        Detectors: append([]string(nil), layer.Layer.ProcessedBy.Detectors...),
-                        Listers: append([]string(nil), layer.Layer.ProcessedBy.Listers...),
+                        Detectors: append([]string(nil), layer.LayerMetadata.ProcessedBy.Detectors...),
+                        Listers: append([]string(nil), layer.LayerMetadata.ProcessedBy.Listers...),
                     },
                 },
                 DetectedFeatures: append([]database.NamespacedFeature(nil), layer.DetectedFeatures...),
@@ -137,7 +137,7 @@ func copyDatastore(md *mockDatastore) mockDatastore {
 func newMockDatastore() *mockDatastore {
     errSessionDone := errors.New("Session Done")
     md := &mockDatastore{
-        layers: make(map[string]database.LayerWithContent),
+        layers: make(map[string]database.Layer),
         ancestry: make(map[string]database.Ancestry),
         namespaces: make(map[string]database.Namespace),
        features: make(map[string]database.Feature),
@@ -186,27 +186,9 @@ func newMockDatastore() *mockDatastore {
                 return database.Layer{}, false, errSessionDone
             }
             layer, ok := session.copy.layers[name]
-            return layer.Layer, ok, nil
-        }
-
-        session.FctFindLayerWithContent = func(name string) (database.LayerWithContent, bool, error) {
-            if session.terminated {
-                return database.LayerWithContent{}, false, errSessionDone
-            }
-            layer, ok := session.copy.layers[name]
             return layer, ok, nil
         }
 
-        session.FctPersistLayer = func(hash string) error {
-            if session.terminated {
-                return errSessionDone
-            }
-            if _, ok := session.copy.layers[hash]; !ok {
-                session.copy.layers[hash] = database.LayerWithContent{Layer: database.Layer{Hash: hash}}
-            }
-            return nil
-        }
-
         session.FctPersistNamespaces = func(ns []database.Namespace) error {
             if session.terminated {
                 return errSessionDone
@@ -234,15 +216,20 @@ func newMockDatastore() *mockDatastore {
             return nil
         }
 
-        session.FctPersistLayerContent = func(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
+        session.FctPersistLayer = func(hash string, namespaces []database.Namespace, features []database.Feature, processedBy database.Processors) error {
             if session.terminated {
                 return errSessionDone
             }
 
+            // update the layer
+            _, ok := session.copy.layers[hash]
+            if !ok {
+                session.copy.layers[hash] = database.Layer{}
+            }
+
             layer, ok := session.copy.layers[hash]
             if !ok {
-                return errors.New("layer not found")
+                return errors.New("Failed to insert layer")
             }
 
             layerFeatures := map[string]database.Feature{}
@@ -381,7 +368,7 @@ func TestProcessAncestryWithDistUpgrade(t *testing.T) {
         }
     }
 
-    assert.Equal(t, []database.Layer{
+    assert.Equal(t, []database.LayerMetadata{
         {Hash: "blank"},
         {Hash: "wheezy"},
         {Hash: "jessie"},
@@ -571,32 +558,32 @@ func TestComputeAncestryFeatures(t *testing.T) {
     // Suppose Clair is watching two files for namespaces one containing ns1
     // changes e.g. os-release and the other one containing ns2 changes e.g.
     // node.
-    blank := database.LayerWithContent{Layer: database.Layer{Hash: "blank"}}
-    initNS1a := database.LayerWithContent{
-        Layer: database.Layer{Hash: "init ns1a"},
+    blank := database.Layer{LayerMetadata: database.LayerMetadata{Hash: "blank"}}
+    initNS1a := database.Layer{
+        LayerMetadata: database.LayerMetadata{Hash: "init ns1a"},
         Namespaces: []database.Namespace{ns1a},
         Features: []database.Feature{f1, f2},
     }
 
-    upgradeNS2b := database.LayerWithContent{
-        Layer: database.Layer{Hash: "upgrade ns2b"},
+    upgradeNS2b := database.Layer{
+        LayerMetadata: database.LayerMetadata{Hash: "upgrade ns2b"},
         Namespaces: []database.Namespace{ns2b},
     }
 
-    upgradeNS1b := database.LayerWithContent{
-        Layer: database.Layer{Hash: "upgrade ns1b"},
+    upgradeNS1b := database.Layer{
+        LayerMetadata: database.LayerMetadata{Hash: "upgrade ns1b"},
        Namespaces: []database.Namespace{ns1b},
        Features: []database.Feature{f1, f2},
     }
 
-    initNS2a := database.LayerWithContent{
-        Layer: database.Layer{Hash: "init ns2a"},
+    initNS2a := database.Layer{
+        LayerMetadata: database.LayerMetadata{Hash: "init ns2a"},
        Namespaces: []database.Namespace{ns2a},
        Features: []database.Feature{f3, f4},
     }
 
-    removeF2 := database.LayerWithContent{
-        Layer: database.Layer{Hash: "remove f2"},
+    removeF2 := database.Layer{
+        LayerMetadata: database.LayerMetadata{Hash: "remove f2"},
        Features: []database.Feature{f1},
     }
 
@@ -609,7 +596,7 @@ func TestComputeAncestryFeatures(t *testing.T) {
     // -> f1 (remove f2)
     // -> blank (empty)
 
-    layers := []database.LayerWithContent{
+    layers := []database.Layer{
         blank,
         initNS1a,
         removeF2,