// Copyright 2017 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package pgsql implements database.Datastore with PostgreSQL.
package pgsql

import (
	"database/sql"
	"fmt"
	"io/ioutil"
	"net/url"
	"strings"
	"time"

	"gopkg.in/yaml.v2"

	"github.com/hashicorp/golang-lru"
	"github.com/lib/pq"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/remind101/migrate"
	log "github.com/sirupsen/logrus"

	"github.com/coreos/clair/api/token"
	"github.com/coreos/clair/database"
	"github.com/coreos/clair/database/pgsql/migrations"
	"github.com/coreos/clair/pkg/commonerr"
)

var (
	promErrorsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "clair_pgsql_errors_total",
		Help: "Number of errors that PostgreSQL requests generated.",
	}, []string{"request"})

	promCacheHitsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "clair_pgsql_cache_hits_total",
		Help: "Number of cache hits that the PostgreSQL backend did.",
	}, []string{"object"})

	promCacheQueriesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "clair_pgsql_cache_queries_total",
		Help: "Number of cache queries that the PostgreSQL backend did.",
	}, []string{"object"})

	promQueryDurationMilliseconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "clair_pgsql_query_duration_milliseconds",
		Help: "Time it takes to execute the database query.",
	}, []string{"query", "subquery"})

	promConcurrentLockVAFV = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "clair_pgsql_concurrent_lock_vafv_total",
		Help: "Number of transactions trying to hold the exclusive Vulnerability_Affects_Feature lock.",
	})
)

func init() {
	prometheus.MustRegister(promErrorsTotal)
	prometheus.MustRegister(promCacheHitsTotal)
	prometheus.MustRegister(promCacheQueriesTotal)
	prometheus.MustRegister(promQueryDurationMilliseconds)
	prometheus.MustRegister(promConcurrentLockVAFV)

	database.Register("pgsql", openDatabase)
}

// pgSessionCache is the session's cache, which holds both the pgSQL cache and
// the individual session's cache. Changes to the pgSQL cache are only applied
// when session.Commit is called.
type pgSessionCache struct {
	c *lru.ARCCache
}

type pgSQL struct {
	*sql.DB

	cache  *lru.ARCCache
	config Config
}

type pgSession struct {
	*sql.Tx

	paginationKey string
}

type idPageNumber struct {
	// StartID is an implementation detail for paginating by an ID required to
	// be unique to every ancestry and always increasing.
	//
	// StartID is used to search for ancestries with ID >= StartID.
	StartID int64
}

func encryptPage(page idPageNumber, paginationKey string) (result database.PageNumber, err error) {
	resultBytes, err := token.Marshal(page, paginationKey)
	if err != nil {
		return result, err
	}
	result = database.PageNumber(resultBytes)
	return result, nil
}

func decryptPage(page database.PageNumber, paginationKey string) (result idPageNumber, err error) {
	err = token.Unmarshal(string(page), paginationKey, &result)
	return
}

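// examplePageTokenRoundTrip is an illustrative sketch, not part of the
// original file: it shows the intended round trip of an opaque page token
// through encryptPage and decryptPage. The pagination key and StartID below
// are made-up values.
func examplePageTokenRoundTrip() {
	const key = "example-pagination-key"

	pageToken, err := encryptPage(idPageNumber{StartID: 42}, key)
	if err != nil {
		log.WithError(err).Error("could not encrypt page token")
		return
	}

	page, err := decryptPage(pageToken, key)
	if err != nil {
		log.WithError(err).Error("could not decrypt page token")
		return
	}

	// page.StartID is 42 again; the token itself stays opaque to API clients.
	log.Infof("recovered StartID: %d", page.StartID)
}
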
// Begin initiates a transaction on the database. The expected transaction
// isolation level in this implementation is "Read Committed".
func (pgSQL *pgSQL) Begin() (database.Session, error) {
	tx, err := pgSQL.DB.Begin()
	if err != nil {
		return nil, err
	}
	return &pgSession{
		Tx:            tx,
		paginationKey: pgSQL.config.PaginationKey,
	}, nil
}

func (tx *pgSession) Commit() error {
	return tx.Tx.Commit()
}

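// exampleSessionLifecycle is an illustrative sketch, not part of the original
// file: it shows how a caller is expected to drive a session, assuming the
// database.Session returned by Begin exposes Commit as implemented above.
func exampleSessionLifecycle(store *pgSQL) error {
	session, err := store.Begin()
	if err != nil {
		return err
	}

	// Perform queries within the "Read Committed" transaction here, then
	// persist the changes.
	return session.Commit()
}
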
// Close closes the database and destroys it if ManageDatabaseLifecycle has
// been specified in the configuration.
func (pgSQL *pgSQL) Close() {
	if pgSQL.DB != nil {
		pgSQL.DB.Close()
	}

	if pgSQL.config.ManageDatabaseLifecycle {
		dbName, pgSourceURL, _ := parseConnectionString(pgSQL.config.Source)
		dropDatabase(pgSourceURL, dbName)
	}
}

// Ping verifies that the database is accessible.
func (pgSQL *pgSQL) Ping() bool {
	return pgSQL.DB.Ping() == nil
}

// Config is the configuration that is used by openDatabase.
type Config struct {
	Source    string
	CacheSize int

	ManageDatabaseLifecycle bool
	FixturePath             string
	PaginationKey           string
}

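// exampleOptionsYAML is an illustrative sketch, not part of the original file:
// openDatabase fills Config by round-tripping the registrable component's
// Options through yaml.v2, whose default mapping lowercases field names, so an
// options block of a configuration file would look roughly like this. The
// connection URL and key are made-up values.
var exampleOptionsYAML = `
source: postgres://clair:password@localhost:5432/clair?sslmode=disable
cachesize: 16384
paginationkey: "<secret key used to encrypt page tokens>"
`
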
// openDatabase opens a PostgreSQL-backed Datastore using the given
// configuration.
//
// It immediately runs all necessary migrations. If ManageDatabaseLifecycle is
// specified, the database will be created first. If FixturePath is specified,
// every SQL query present in that file will be executed.
func openDatabase(registrableComponentConfig database.RegistrableComponentConfig) (database.Datastore, error) {
	var pg pgSQL
	var err error

	// Parse configuration.
	pg.config = Config{
		CacheSize: 16384,
	}
	bytes, err := yaml.Marshal(registrableComponentConfig.Options)
	if err != nil {
		return nil, fmt.Errorf("pgsql: could not load configuration: %v", err)
	}
	err = yaml.Unmarshal(bytes, &pg.config)
	if err != nil {
		return nil, fmt.Errorf("pgsql: could not load configuration: %v", err)
	}

	if pg.config.PaginationKey == "" {
		panic("pagination key should be given")
	}

	dbName, pgSourceURL, err := parseConnectionString(pg.config.Source)
	if err != nil {
		return nil, err
	}

	// Create database.
	if pg.config.ManageDatabaseLifecycle {
		log.Info("pgsql: creating database")
		if err = createDatabase(pgSourceURL, dbName); err != nil {
			return nil, err
		}
	}

	// Open database.
	pg.DB, err = sql.Open("postgres", pg.config.Source)
	if err != nil {
		pg.Close()
		return nil, fmt.Errorf("pgsql: could not open database: %v", err)
	}

	// Verify database state.
	if err = pg.DB.Ping(); err != nil {
		pg.Close()
		return nil, fmt.Errorf("pgsql: could not open database: %v", err)
	}

	// Run migrations.
	if err = migrateDatabase(pg.DB); err != nil {
		pg.Close()
		return nil, err
	}

	// Load fixture data.
	if pg.config.FixturePath != "" {
		log.Info("pgsql: loading fixtures")

		d, err := ioutil.ReadFile(pg.config.FixturePath)
		if err != nil {
			pg.Close()
			return nil, fmt.Errorf("pgsql: could not open fixture file: %v", err)
		}

		_, err = pg.DB.Exec(string(d))
		if err != nil {
			pg.Close()
			return nil, fmt.Errorf("pgsql: an error occurred while importing fixtures: %v", err)
		}
	}

	// Initialize cache.
	// TODO(Quentin-M): Benchmark with a simple LRU Cache.
	if pg.config.CacheSize > 0 {
		pg.cache, _ = lru.NewARC(pg.config.CacheSize)
	}

	return &pg, nil
}

func parseConnectionString(source string) (dbName string, pgSourceURL string, err error) {
	if source == "" {
		return "", "", commonerr.NewBadRequestError("pgsql: no database connection string specified")
	}

	sourceURL, err := url.Parse(source)
	if err != nil {
		return "", "", commonerr.NewBadRequestError("pgsql: database connection string is not a valid URL")
	}

	dbName = strings.TrimPrefix(sourceURL.Path, "/")

	pgSource := *sourceURL
	pgSource.Path = "/postgres"
	pgSourceURL = pgSource.String()

	return
}

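// exampleParseConnectionString is an illustrative sketch, not part of the
// original file: it shows how parseConnectionString splits a URL-form
// connection string into the target database name and a source URL pointing at
// the administrative "postgres" database. The URL is a made-up example.
func exampleParseConnectionString() {
	dbName, pgSourceURL, err := parseConnectionString(
		"postgres://clair:password@localhost:5432/clair?sslmode=disable")
	if err != nil {
		log.WithError(err).Error("invalid connection string")
		return
	}

	// dbName is "clair"; pgSourceURL keeps the credentials, host, and query
	// parameters but targets "/postgres", e.g.
	// "postgres://clair:password@localhost:5432/postgres?sslmode=disable".
	log.Infof("dbName=%s pgSourceURL=%s", dbName, pgSourceURL)
}
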
// migrateDatabase runs all available migrations on a pgSQL database.
func migrateDatabase(db *sql.DB) error {
	log.Info("running database migrations")

	err := migrate.NewPostgresMigrator(db).Exec(migrate.Up, migrations.Migrations...)
	if err != nil {
		return fmt.Errorf("pgsql: an error occurred while running migrations: %v", err)
	}

	log.Info("database migration ran successfully")
	return nil
}

// createDatabase creates a new database.
// The source parameter should not contain a dbname.
func createDatabase(source, dbName string) error {
	// Open database.
	db, err := sql.Open("postgres", source)
	if err != nil {
		return fmt.Errorf("pgsql: could not open 'postgres' database for creation: %v", err)
	}
	defer db.Close()

	// Create database.
	_, err = db.Exec("CREATE DATABASE " + dbName)
	if err != nil {
		return fmt.Errorf("pgsql: could not create database: %v", err)
	}

	return nil
}

// dropDatabase drops an existing database.
// The source parameter should not contain a dbname.
func dropDatabase(source, dbName string) error {
	// Open database.
	db, err := sql.Open("postgres", source)
	if err != nil {
		return fmt.Errorf("could not open database (DropDatabase): %v", err)
	}
	defer db.Close()

	// Kill any opened connection.
	if _, err = db.Exec(`
		SELECT pg_terminate_backend(pg_stat_activity.pid)
		FROM pg_stat_activity
		WHERE pg_stat_activity.datname = $1
		AND pid <> pg_backend_pid()`, dbName); err != nil {
		return fmt.Errorf("could not drop database: %v", err)
	}

	// Drop database.
	if _, err = db.Exec("DROP DATABASE " + dbName); err != nil {
		return fmt.Errorf("could not drop database: %v", err)
	}

	return nil
}

// handleError logs an error with an extra description and masks the error if
// it's an SQL one. It ensures that we never return plain SQL errors, which
// could leak implementation details. It should be used for every database
// query error.
func handleError(desc string, err error) error {
	if err == nil {
		return nil
	}

	if err == sql.ErrNoRows {
		return commonerr.ErrNotFound
	}

	log.WithError(err).WithField("Description", desc).Error("Handled Database Error")
	promErrorsTotal.WithLabelValues(desc).Inc()

	if _, o := err.(*pq.Error); o || err == sql.ErrTxDone || strings.HasPrefix(err.Error(), "sql:") {
		return database.ErrBackendException
	}

	return err
}

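// exampleHandleError is an illustrative sketch, not part of the original file:
// it shows the intended call pattern, wrapping every query error with
// handleError so callers only ever see sanitized errors (for instance,
// sql.ErrNoRows becomes commonerr.ErrNotFound). The query and label below are
// made-up placeholders.
func exampleHandleError(tx *pgSession) (int, error) {
	var count int
	err := tx.QueryRow("SELECT COUNT(*) FROM example_table").Scan(&count)
	if err != nil {
		return 0, handleError("exampleCount", err)
	}
	return count, nil
}
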
// isErrUniqueViolation determines whether the given error is a unique
// constraint violation.
func isErrUniqueViolation(err error) bool {
	pqErr, ok := err.(*pq.Error)
	return ok && pqErr.Code == "23505"
}

// observeQueryTime computes the time elapsed since `start` to represent the
// query time.
// 1. `query` is a pgSession function name.
// 2. `subquery` is a specific query or a batched query.
// 3. `start` is the time right before the query is executed.
func observeQueryTime(query, subquery string, start time.Time) {
	promQueryDurationMilliseconds.
		WithLabelValues(query, subquery).
		Observe(float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond))
}

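// exampleObserveQueryTime is an illustrative sketch, not part of the original
// file: the intended pattern is to capture time.Now() when a query starts and
// defer observeQueryTime, so the duration histogram is observed when the
// surrounding function returns. The label values are made-up placeholders.
func exampleObserveQueryTime() {
	defer observeQueryTime("ExampleQuery", "all", time.Now())

	// ... execute the actual statements here ...
}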