pgsql: Add detector to database schema

The 'detector' table is added to store the metadata of detectors.
The 'layer_feature', 'layer_namespace', and 'ancestry_feature' tables are
modified to store the detection relationship between each
feature/namespace and the detector that found it.
Author: Sida Chen
Branch: master
parent db2db8bbe8
commit dca2d4e597
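As orientation before the diff (not part of this commit): once the migration runs, the detectors that analyzed a layer can be resolved through the new layer_detector table. A minimal Go sketch, assuming a database/sql connection and the lib/pq driver; the package and function names are illustrative, but every column referenced is created by this migration:

package example

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // assumed PostgreSQL driver; any compatible driver works
)

// layerDetectors lists the detectors recorded for a layer via the new
// layer_detector join table (layer.hash, layer_detector.layer_id/detector_id,
// detector.name/version/dtype are all defined in this migration).
func layerDetectors(db *sql.DB, layerHash string) error {
	rows, err := db.Query(`
		SELECT d.name, d.version, d.dtype
		FROM layer l
		JOIN layer_detector ld ON ld.layer_id = l.id
		JOIN detector d ON d.id = ld.detector_id
		WHERE l.hash = $1`, layerHash)
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var name, version, dtype string
		if err := rows.Scan(&name, &version, &dtype); err != nil {
			return err
		}
		fmt.Printf("%s %s (%s)\n", name, version, dtype)
	}
	return rows.Err()
}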

@@ -14,12 +14,11 @@
package migrations
import "github.com/remind101/migrate"
func init() {
RegisterMigration(migrate.Migration{
ID: 1,
Up: migrate.Queries([]string{
var (
// entities are the basic building blocks to relate the vulnerabilities with
// the ancestry.
entities = MigrationQuery{
Up: []string{
// namespaces
`CREATE TABLE IF NOT EXISTS namespace (
id SERIAL PRIMARY KEY,
@@ -42,40 +41,72 @@ func init() {
namespace_id INT REFERENCES namespace,
feature_id INT REFERENCES feature,
UNIQUE (namespace_id, feature_id));`,
},
Down: []string{
`DROP TABLE IF EXISTS namespace, feature, namespaced_feature CASCADE;`,
},
}
// detector stores the analysis extensions used by the worker.
detector = MigrationQuery{
Up: []string{
// Detector Type
`CREATE TYPE detector_type AS ENUM ('namespace', 'feature');`,
// Detector
`CREATE TABLE IF NOT EXISTS detector (
id SERIAL PRIMARY KEY,
name TEXT NOT NULL,
version TEXT NOT NULL,
dtype detector_type NOT NULL,
UNIQUE (name, version, dtype));`,
},
Down: []string{
`DROP TABLE IF EXISTS detector CASCADE;`,
`DROP TYPE IF EXISTS detector_type;`,
},
}
// layer contains all metadata and scanned features and namespaces.
layer = MigrationQuery{
Up: []string{
// layers
`CREATE TABLE IF NOT EXISTS layer(
id SERIAL PRIMARY KEY,
hash TEXT NOT NULL UNIQUE);`,
`CREATE TABLE IF NOT EXISTS layer_detector(
id SERIAL PRIMARY KEY,
layer_id INT REFERENCES layer ON DELETE CASCADE,
detector_id INT REFERENCES detector ON DELETE CASCADE,
UNIQUE(layer_id, detector_id));`,
`CREATE INDEX ON layer_detector(layer_id);`,
`CREATE TABLE IF NOT EXISTS layer_feature (
id SERIAL PRIMARY KEY,
layer_id INT REFERENCES layer ON DELETE CASCADE,
feature_id INT REFERENCES feature ON DELETE CASCADE,
detector_id INT REFERENCES detector ON DELETE CASCADE,
UNIQUE (layer_id, feature_id));`,
`CREATE INDEX ON layer_feature(layer_id);`,
`CREATE TABLE IF NOT EXISTS layer_lister (
id SERIAL PRIMARY KEY,
layer_id INT REFERENCES layer ON DELETE CASCADE,
lister TEXT NOT NULL,
UNIQUE (layer_id, lister));`,
`CREATE INDEX ON layer_lister(layer_id);`,
`CREATE TABLE IF NOT EXISTS layer_detector (
id SERIAL PRIMARY KEY,
layer_id INT REFERENCES layer ON DELETE CASCADE,
detector TEXT,
UNIQUE (layer_id, detector));`,
`CREATE INDEX ON layer_detector(layer_id);`,
`CREATE TABLE IF NOT EXISTS layer_namespace (
id SERIAL PRIMARY KEY,
layer_id INT REFERENCES layer ON DELETE CASCADE,
namespace_id INT REFERENCES namespace ON DELETE CASCADE,
detector_id INT REFERENCES detector ON DELETE CASCADE,
UNIQUE (layer_id, namespace_id));`,
`CREATE INDEX ON layer_namespace(layer_id);`,
},
Down: []string{
`DROP TABLE IF EXISTS layer, layer_detector, layer_feature, layer_namespace CASCADE;`,
},
}
// ancestry contains all metadata about a scanned manifest and its
// layers.
ancestry = MigrationQuery{
Up: []string{
// ancestry
`CREATE TABLE IF NOT EXISTS ancestry (
id SERIAL PRIMARY KEY,
@@ -93,28 +124,31 @@ func init() {
id SERIAL PRIMARY KEY,
ancestry_layer_id INT REFERENCES ancestry_layer ON DELETE CASCADE,
namespaced_feature_id INT REFERENCES namespaced_feature ON DELETE CASCADE,
feature_detector_id INT REFERENCES detector ON DELETE CASCADE,
namespace_detector_id INT REFERENCES detector ON DELETE CASCADE,
UNIQUE (ancestry_layer_id, namespaced_feature_id));`,
`CREATE TABLE IF NOT EXISTS ancestry_lister (
id SERIAL PRIMARY KEY,
ancestry_id INT REFERENCES ancestry ON DELETE CASCADE,
lister TEXT,
UNIQUE (ancestry_id, lister));`,
`CREATE INDEX ON ancestry_lister(ancestry_id);`,
`CREATE TABLE IF NOT EXISTS ancestry_detector (
`CREATE TABLE IF NOT EXISTS ancestry_detector(
id SERIAL PRIMARY KEY,
ancestry_id INT REFERENCES ancestry ON DELETE CASCADE,
detector TEXT,
UNIQUE (ancestry_id, detector));`,
ancestry_id INT REFERENCES ancestry ON DELETE CASCADE,
detector_id INT REFERENCES detector ON DELETE CASCADE,
UNIQUE(ancestry_id, detector_id));`,
`CREATE INDEX ON ancestry_detector(ancestry_id);`,
},
Down: []string{
`DROP TABLE IF EXISTS ancestry, ancestry_layer, ancestry_feature, ancestry_detector CASCADE;`,
},
}
// vulnerability contains the vulnerability metadata and the vulnerability-affecting relations.
vulnerability = MigrationQuery{
Up: []string{
`CREATE TYPE severity AS ENUM ('Unknown', 'Negligible', 'Low', 'Medium', 'High', 'Critical', 'Defcon1');`,
// vulnerability
`CREATE TABLE IF NOT EXISTS vulnerability (
id SERIAL PRIMARY KEY,
namespace_id INT NOT NULL REFERENCES Namespace,
namespace_id INT REFERENCES Namespace,
name TEXT NOT NULL,
description TEXT NULL,
link TEXT NULL,
@@ -127,7 +161,7 @@ func init() {
`CREATE TABLE IF NOT EXISTS vulnerability_affected_feature (
id SERIAL PRIMARY KEY,
vulnerability_id INT NOT NULL REFERENCES vulnerability ON DELETE CASCADE,
vulnerability_id INT REFERENCES vulnerability ON DELETE CASCADE,
feature_name TEXT NOT NULL,
affected_version TEXT,
fixedin TEXT);`,
@@ -135,12 +169,22 @@ func init() {
`CREATE TABLE IF NOT EXISTS vulnerability_affected_namespaced_feature(
id SERIAL PRIMARY KEY,
vulnerability_id INT NOT NULL REFERENCES vulnerability ON DELETE CASCADE,
namespaced_feature_id INT NOT NULL REFERENCES namespaced_feature ON DELETE CASCADE,
added_by INT NOT NULL REFERENCES vulnerability_affected_feature ON DELETE CASCADE,
vulnerability_id INT REFERENCES vulnerability ON DELETE CASCADE,
namespaced_feature_id INT REFERENCES namespaced_feature ON DELETE CASCADE,
added_by INT REFERENCES vulnerability_affected_feature ON DELETE CASCADE,
UNIQUE (vulnerability_id, namespaced_feature_id));`,
`CREATE INDEX ON vulnerability_affected_namespaced_feature(namespaced_feature_id);`,
},
Down: []string{
`DROP TYPE IF EXISTS severity;`,
`DROP TABLE IF EXISTS vulnerability, vulnerability_affected_feature, vulnerability_affected_namespaced_feature CASCADE;`,
},
}
// updaterLock is the lock to be used by updater to prevent multiple
// updaters running on the same vulnerability source.
updaterLock = MigrationQuery{
Up: []string{
`CREATE TABLE IF NOT EXISTS KeyValue (
id SERIAL PRIMARY KEY,
key TEXT NOT NULL UNIQUE,
@@ -152,8 +196,16 @@ func init() {
owner VARCHAR(64) NOT NULL,
until TIMESTAMP WITH TIME ZONE);`,
`CREATE INDEX ON Lock (owner);`,
// Notification
},
Down: []string{
`DROP TABLE IF EXISTS KeyValue, Lock CASCADE;`,
},
}
// notification is the vulnerability notification spawned by the
// vulnerability changes.
notification = MigrationQuery{
Up: []string{
`CREATE TABLE IF NOT EXISTS Vulnerability_Notification (
id SERIAL PRIMARY KEY,
name VARCHAR(64) NOT NULL UNIQUE,
@@ -163,30 +215,22 @@ func init() {
old_vulnerability_id INT NULL REFERENCES Vulnerability ON DELETE CASCADE,
new_vulnerability_id INT NULL REFERENCES Vulnerability ON DELETE CASCADE);`,
`CREATE INDEX ON Vulnerability_Notification (notified_at);`,
}),
Down: migrate.Queries([]string{
`DROP TABLE IF EXISTS
ancestry,
ancestry_layer,
ancestry_detector,
ancestry_lister,
ancestry_feature,
feature,
namespaced_feature,
keyvalue,
layer,
layer_detector,
layer_feature,
layer_lister,
layer_namespace,
lock,
namespace,
vulnerability,
vulnerability_affected_feature,
vulnerability_affected_namespaced_feature,
vulnerability_notification
CASCADE;`,
`DROP TYPE IF EXISTS severity;`,
}),
})
},
Down: []string{
`DROP TABLE IF EXISTS Vulnerability_Notification CASCADE;`,
},
}
)
func init() {
RegisterMigration(NewSimpleMigration(1,
[]MigrationQuery{
entities,
detector,
layer,
ancestry,
vulnerability,
updaterLock,
notification,
}))
}

@@ -0,0 +1,30 @@
package migrations
import "github.com/remind101/migrate"
// MigrationQuery contains the Up migration and Down migration as plain strings.
type MigrationQuery struct {
Up []string
Down []string
}
// ConcatMigrationQueries concatenates migration queries in the given order.
func ConcatMigrationQueries(qs []MigrationQuery) MigrationQuery {
r := MigrationQuery{}
for _, q := range qs {
r.Up = append(r.Up, q.Up...)
r.Down = append(r.Down, q.Down...)
}
return r
}
// NewSimpleMigration returns a simple migration plan with all provided
// migration queries concatenated in order.
func NewSimpleMigration(id int, qs []MigrationQuery) migrate.Migration {
q := ConcatMigrationQueries(qs)
return migrate.Migration{
ID: id,
Up: migrate.Queries(q.Up),
Down: migrate.Queries(q.Down),
}
}
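A usage sketch for these helpers (hypothetical table and migration ID; this would live in the same migrations package): ConcatMigrationQueries appends the Up queries, and separately the Down queries, in the order given, and NewSimpleMigration wraps the result in a migrate.Migration, just as the real migration 1 is built from the grouped schema queries above.

// Hypothetical example: queries grouped by concern, composed into a single
// migration in the same way the real schema queries are grouped.
var exampleTables = MigrationQuery{
	Up:   []string{`CREATE TABLE IF NOT EXISTS example (id SERIAL PRIMARY KEY);`},
	Down: []string{`DROP TABLE IF EXISTS example CASCADE;`},
}

var exampleIndexes = MigrationQuery{
	Up: []string{`CREATE INDEX ON example (id);`},
}

// newExampleMigration returns a migration whose Up runs both Up lists in
// order and whose Down runs both Down lists in order.
func newExampleMigration() migrate.Migration {
	return NewSimpleMigration(2, []MigrationQuery{exampleTables, exampleIndexes})
}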