// Copyright 2019 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clair

import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"sync"
	"time"

	"github.com/pborman/uuid"
	"github.com/prometheus/client_golang/prometheus"
	log "github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"

	"github.com/coreos/clair/database"
	"github.com/coreos/clair/ext/vulnmdsrc"
	"github.com/coreos/clair/ext/vulnsrc"
	"github.com/coreos/clair/pkg/stopper"
	"github.com/coreos/clair/pkg/timeutil"
)
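
// The updater flag key and global-lock parameters. The lock is taken for
// updaterLockDuration and refreshed roughly every updaterLockRefreshDuration
// (see updateWhileRenewingLock), so the extra two minutes act as slack in case
// a refresh is delayed.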
const (
	updaterLastFlagName              = "updater/last"
	updaterLockName                  = "updater"
	updaterLockDuration              = updaterLockRefreshDuration + time.Minute*2
	updaterLockRefreshDuration       = time.Minute * 8
	updaterSleepBetweenLoopsDuration = time.Minute
)

var (
	promUpdaterErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "clair_updater_errors_total",
		Help: "Number of errors that the updater generated.",
	})

	promUpdaterDurationSeconds = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "clair_updater_duration_seconds",
		Help: "Time it takes to update the vulnerability database.",
	})

	promUpdaterNotesTotal = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "clair_updater_notes_total",
		Help: "Number of notes that the vulnerability fetchers generated.",
	})

	// EnabledUpdaters contains all updaters to be used for update.
	EnabledUpdaters []string
)

func init() {
	prometheus.MustRegister(promUpdaterErrorsTotal)
	prometheus.MustRegister(promUpdaterDurationSeconds)
	prometheus.MustRegister(promUpdaterNotesTotal)
}

// UpdaterConfig is the configuration for the Updater service.
type UpdaterConfig struct {
	EnabledUpdaters []string
	Interval        time.Duration
}

type vulnerabilityChange struct {
	old *database.VulnerabilityWithAffected
	new *database.VulnerabilityWithAffected
}

// RunUpdater begins a process that updates the vulnerability database at
// regular intervals.
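//
// A minimal usage sketch (assuming a datastore opened elsewhere and the
// stopper package's NewStopper/Begin/Stop helpers; the updater names and the
// interval below are only illustrative):
//
//	cfg := &UpdaterConfig{
//		EnabledUpdaters: []string{"debian", "ubuntu"},
//		Interval:        2 * time.Hour,
//	}
//	st := stopper.NewStopper()
//	st.Begin()
//	go RunUpdater(cfg, datastore, st)
//	// ... on shutdown:
//	st.Stop()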
func RunUpdater(config *UpdaterConfig, datastore database.Datastore, st *stopper.Stopper) {
	defer st.End()

	// Do not run the updater if there is no config or if the interval is 0.
	if config == nil || config.Interval == 0 || len(config.EnabledUpdaters) == 0 {
		log.Info("updater service is disabled.")
		return
	}

	// Clean up any resources the updater left behind.
	defer func() {
		vulnmdsrc.CleanAll()
		vulnsrc.CleanAll()
		log.Info("updater service stopped")
	}()

	// Create a new unique identity for tracking who owns global locks.
	whoAmI := uuid.New()
	log.WithField("owner", whoAmI).Info("updater service started")

	sleepDuration := updaterSleepBetweenLoopsDuration
	for {
		// Determine if this is the first update and define the next update time.
		// The next update time is (last update time + interval) or now if this is the first update.
		nextUpdate := time.Now().UTC()
		lastUpdate, isFirstUpdate, err := GetLastUpdateTime(datastore)
		if err != nil {
			log.WithError(err).Error("an error occurred while getting the last update time")
			nextUpdate = nextUpdate.Add(config.Interval)
		}

		log.WithFields(log.Fields{
			"firstUpdate": isFirstUpdate,
			"nextUpdate":  nextUpdate,
		}).Debug("fetched last update time")
		if !isFirstUpdate {
			nextUpdate = lastUpdate.Add(config.Interval)
		}

		// If the next update time is in the past, then try to update.
		if nextUpdate.Before(time.Now().UTC()) {
			// Attempt to get a lock on the update.
			log.Debug("attempting to obtain update lock")
			acquiredLock, lockExpiration := database.AcquireLock(datastore, updaterLockName, whoAmI, updaterLockDuration)
			if lockExpiration.IsZero() {
				// Any failure to acquire the lock should instantly expire.
				var instantExpiration time.Duration
				sleepDuration = instantExpiration
			}

			if acquiredLock {
				sleepDuration, err = updateWhileRenewingLock(datastore, whoAmI, isFirstUpdate, st)
				if err != nil {
					if err == errReceivedStopSignal {
						log.Debug("updater received stop signal")
						return
					}
					log.WithError(err).Debug("update attempt failed")
					sleepDuration = timeutil.ExpBackoff(sleepDuration, config.Interval)
				}
			} else {
				sleepDuration = updaterSleepBetweenLoopsDuration
			}
		} else {
			sleepDuration = time.Until(nextUpdate)
		}

		if stopped := timeutil.ApproxSleep(time.Now().Add(sleepDuration), st); stopped {
			return
		}
	}
}

var errReceivedStopSignal = errors.New("stopped")

func updateWhileRenewingLock(datastore database.Datastore, whoAmI string, isFirstUpdate bool, st *stopper.Stopper) (sleepDuration time.Duration, err error) {
	g, ctx := errgroup.WithContext(context.Background())
	g.Go(func() error {
		return update(ctx, datastore, isFirstUpdate)
	})

	g.Go(func() error {
		var refreshDuration = updaterLockRefreshDuration
		for {
			select {
			case <-time.After(timeutil.FractionalDuration(0.9, refreshDuration)):
				success, lockExpiration := database.ExtendLock(datastore, updaterLockName, whoAmI, updaterLockRefreshDuration)
				if !success {
					return errors.New("failed to extend lock")
				}
				refreshDuration = time.Until(lockExpiration)
			case <-ctx.Done():
				database.ReleaseLock(datastore, updaterLockName, whoAmI)
				return ctx.Err()
			}
		}
	})

	g.Go(func() error {
		select {
		case <-st.Chan():
			return errReceivedStopSignal
		case <-ctx.Done():
			return ctx.Err()
		}
	})

	err = g.Wait()
	return
}

// update fetches all the vulnerabilities from the registered fetchers,
// updates the vulnerability database and the updater flags, and logs notes
// from the updaters.
func update(ctx context.Context, datastore database.Datastore, firstUpdate bool) error {
	defer setUpdaterDuration(time.Now())

	log.Info("updating vulnerabilities")

	// Fetch updates.
	success, vulnerabilities, flags, notes := fetchUpdates(ctx, datastore)

	namespaces, vulnerabilities := deduplicate(vulnerabilities)

	if err := database.PersistNamespacesAndCommit(datastore, namespaces); err != nil {
		log.WithError(err).Error("Unable to insert namespaces")
		return err
	}

	changes, err := updateVulnerabilities(ctx, datastore, vulnerabilities)

	defer func() {
		if err != nil {
			promUpdaterErrorsTotal.Inc()
		}
	}()

	if err != nil {
		log.WithError(err).Error("Unable to update vulnerabilities")
		return err
	}

	if !firstUpdate {
		err = createVulnerabilityNotifications(datastore, changes)
		if err != nil {
			log.WithError(err).Error("Unable to create notifications")
			return err
		}
	}

	err = updateUpdaterFlags(datastore, flags)
	if err != nil {
		log.WithError(err).Error("Unable to update updater flags")
		return err
	}

	for _, note := range notes {
		log.WithField("note", note).Warning("fetcher note")
	}
	promUpdaterNotesTotal.Set(float64(len(notes)))

	if success {
		err = setLastUpdateTime(datastore)
		if err != nil {
			log.WithError(err).Error("Unable to set last update time")
			return err
		}
	}

	log.Info("update finished")
	return nil
}

func deduplicate(vulns []database.VulnerabilityWithAffected) ([]database.Namespace, []database.VulnerabilityWithAffected) {
	// Do vulnerability namespacing again to merge potentially duplicated
	// vulnerabilities from each updater.
	vulnerabilities := doVulnerabilitiesNamespacing(vulns)

	nsMap := map[database.Namespace]struct{}{}
	for _, vuln := range vulnerabilities {
		nsMap[vuln.Namespace] = struct{}{}
	}

	namespaces := make([]database.Namespace, 0, len(nsMap))
	for ns := range nsMap {
		namespaces = append(namespaces, ns)
	}

	return namespaces, vulnerabilities
}

func setUpdaterDuration(start time.Time) {
	promUpdaterDurationSeconds.Set(time.Since(start).Seconds())
}

// fetchUpdates asynchronously runs all of the enabled Updaters, aggregates
// their results, and appends metadata to the vulnerabilities found.
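//
// Each enabled updater runs in its own errgroup goroutine and the results are
// merged under a mutex. If any updater fails, success is reported as false,
// but the vulnerabilities gathered from the other updaters are still
// returned.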
func fetchUpdates(ctx context.Context, datastore database.Datastore) (success bool, vulns []database.VulnerabilityWithAffected, flags map[string]string, notes []string) {
	flags = make(map[string]string)

	log.Info("fetching vulnerability updates")

	var mu sync.RWMutex
	g, ctx := errgroup.WithContext(ctx)
	for updaterName, updater := range vulnsrc.Updaters() {
		// Shadow the loop variables to avoid closing over the wrong thing.
		// See: https://golang.org/doc/faq#closures_and_goroutines
		updaterName := updaterName
		updater := updater

		g.Go(func() error {
			if !updaterEnabled(updaterName) {
				return nil
			}

			// TODO(jzelinskie): add context to Update()
			response, err := updater.Update(datastore)
			if err != nil {
				promUpdaterErrorsTotal.Inc()
				log.WithError(err).WithField("updater", updaterName).Error("an error occurred when fetching an update")
				return err
			}

			namespacedVulns := doVulnerabilitiesNamespacing(response.Vulnerabilities)

			mu.Lock()
			vulns = append(vulns, namespacedVulns...)
			notes = append(notes, response.Notes...)
			if response.FlagName != "" && response.FlagValue != "" {
				flags[response.FlagName] = response.FlagValue
			}
			mu.Unlock()

			return nil
		})
	}

	if err := g.Wait(); err == nil {
		success = true
	}

	vulns = addMetadata(ctx, datastore, vulns)

	return
}

// fetch gets data from the registered fetchers, in parallel.
func fetch(datastore database.Datastore) (bool, []database.VulnerabilityWithAffected, map[string]string, []string) {
	var vulnerabilities []database.VulnerabilityWithAffected
	var notes []string
	status := true
	flags := make(map[string]string)

	// Fetch updates in parallel.
	log.Info("fetching vulnerability updates")
	responseC := make(chan *vulnsrc.UpdateResponse)
	numUpdaters := 0
	for n, u := range vulnsrc.Updaters() {
		if !updaterEnabled(n) {
			continue
		}
		numUpdaters++
		go func(name string, u vulnsrc.Updater) {
			response, err := u.Update(datastore)
			if err != nil {
				promUpdaterErrorsTotal.Inc()
				log.WithError(err).WithField("updater name", name).Error("an error occurred when fetching update")
				status = false
				responseC <- nil
				return
			}

			responseC <- &response
			log.WithField("updater name", name).Info("finished fetching")
		}(n, u)
	}

	// Collect results of updates.
	for i := 0; i < numUpdaters; i++ {
		resp := <-responseC
		if resp != nil {
			vulnerabilities = append(vulnerabilities, doVulnerabilitiesNamespacing(resp.Vulnerabilities)...)
			notes = append(notes, resp.Notes...)
			if resp.FlagName != "" && resp.FlagValue != "" {
				flags[resp.FlagName] = resp.FlagValue
			}
		}
	}

	close(responseC)
	return status, addMetadata(context.TODO(), datastore, vulnerabilities), flags, notes
}

// addMetadata asynchronously updates a list of vulnerabilities with metadata
// from the vulnerability metadata sources.
func addMetadata(ctx context.Context, datastore database.Datastore, vulnerabilities []database.VulnerabilityWithAffected) []database.VulnerabilityWithAffected {
	if len(vulnmdsrc.Appenders()) == 0 || len(vulnerabilities) == 0 {
		return vulnerabilities
	}

	log.Info("adding metadata to vulnerabilities")

	// Add a mutex to each vulnerability to ensure that only one appender at a
	// time can modify the vulnerability's Metadata map.
	lockableVulnerabilities := make([]*lockableVulnerability, 0, len(vulnerabilities))
	for i := 0; i < len(vulnerabilities); i++ {
		lockableVulnerabilities = append(lockableVulnerabilities, &lockableVulnerability{
			VulnerabilityWithAffected: &vulnerabilities[i],
		})
	}

	g, ctx := errgroup.WithContext(ctx)
	for name, metadataAppender := range vulnmdsrc.Appenders() {
		// Shadow the loop variables to avoid closing over the wrong thing.
		// See: https://golang.org/doc/faq#closures_and_goroutines
		name := name
		metadataAppender := metadataAppender

		g.Go(func() error {
			// TODO(jzelinskie): add ctx to BuildCache()
			if err := metadataAppender.BuildCache(datastore); err != nil {
				promUpdaterErrorsTotal.Inc()
				log.WithError(err).WithField("appender", name).Error("an error occurred when fetching vulnerability metadata")
				return err
			}
			defer metadataAppender.PurgeCache()

			for i, vulnerability := range lockableVulnerabilities {
				metadataAppender.Append(vulnerability.Name, vulnerability.appendFunc)

				// Every ten iterations, check whether the context has been
				// cancelled and stop early if so.
				if i%10 == 0 {
					select {
					case <-ctx.Done():
						return nil
					default:
					}
				}
			}

			return nil
		})
	}

	g.Wait()

	return vulnerabilities
}

// GetLastUpdateTime retrieves the time of the last successful update and
// whether or not this is the first update.
func GetLastUpdateTime(datastore database.Datastore) (time.Time, bool, error) {
	tx, err := datastore.Begin()
	if err != nil {
		return time.Time{}, false, err
	}
	defer tx.Rollback()

	lastUpdateTSS, ok, err := tx.FindKeyValue(updaterLastFlagName)
	if err != nil {
		return time.Time{}, false, err
	}

	if !ok {
		// This is the first update.
		return time.Time{}, true, nil
	}

	lastUpdateTS, err := strconv.ParseInt(lastUpdateTSS, 10, 64)
	if err != nil {
		return time.Time{}, false, err
	}

	return time.Unix(lastUpdateTS, 0).UTC(), false, nil
}

type lockableVulnerability struct {
	*database.VulnerabilityWithAffected
	sync.Mutex
}
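
// appendFunc is handed to the metadata appenders in addMetadata; it stores the
// fetched metadata under the given metadata key and, if the vulnerability has
// no severity yet (UnknownSeverity), adopts the one reported by the metadata
// source.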
func (lv *lockableVulnerability) appendFunc(metadataKey string, metadata interface{}, severity database.Severity) {
	lv.Lock()
	defer lv.Unlock()

	// If necessary, initialize the metadata map for the vulnerability.
	if lv.Metadata == nil {
		lv.Metadata = make(map[string]interface{})
	}

	// Append the metadata.
	lv.Metadata[metadataKey] = metadata

	// If necessary, provide a severity for the vulnerability.
	if lv.Severity == database.UnknownSeverity {
		lv.Severity = severity
	}
}

// doVulnerabilitiesNamespacing takes vulnerabilities that don't have a
// namespace and splits them into multiple vulnerabilities that each have a
// namespace and contain only the affected features corresponding to that
// namespace.
//
// It simplifies the fetchers that share the same metadata about a
// vulnerability regardless of the actual namespace (i.e. the same
// vulnerability information for every version of a distro).
//
// It also validates the vulnerabilities fetched from updaters. If any
// vulnerability is malformed, the updater process continues but logs a
// warning.
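//
// A hedged illustration (the values are made up): a single "CVE-2019-0001"
// record whose Affected list spans the "debian:9" and "debian:10" namespaces
// is split into two vulnerabilities, one per namespace, each keeping only the
// affected features that belong to its namespace.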
func doVulnerabilitiesNamespacing(vulnerabilities []database.VulnerabilityWithAffected) []database.VulnerabilityWithAffected {
	vulnerabilitiesMap := make(map[string]*database.VulnerabilityWithAffected)

	for _, v := range vulnerabilities {
		namespacedFeatures := v.Affected
		v.Affected = []database.AffectedFeature{}

		for _, fv := range namespacedFeatures {
			// Validate the affected features and throw out the invalid ones.
			if fv.FeatureType == "" || fv.AffectedVersion == "" || fv.FeatureName == "" || fv.Namespace.Name == "" || fv.Namespace.VersionFormat == "" {
				log.WithFields(log.Fields{
					"Name":             fv.FeatureName,
					"Affected Version": fv.AffectedVersion,
					"Namespace":        fv.Namespace.Name + ":" + fv.Namespace.VersionFormat,
				}).Warn("Malformed affected feature (skipped)")
				continue
			}
			index := fv.Namespace.Name + ":" + v.Name

			if vulnerability, ok := vulnerabilitiesMap[index]; !ok {
				newVulnerability := v
				newVulnerability.Namespace = fv.Namespace
				newVulnerability.Affected = []database.AffectedFeature{fv}

				vulnerabilitiesMap[index] = &newVulnerability
			} else {
				vulnerability.Affected = append(vulnerability.Affected, fv)
			}
		}
	}

	// Convert the map into a slice.
	var response []database.VulnerabilityWithAffected
	for _, v := range vulnerabilitiesMap {
		// Throw out invalid vulnerabilities.
		if v.Name == "" || !v.Severity.Valid() || v.Namespace.Name == "" || v.Namespace.VersionFormat == "" {
			log.WithFields(log.Fields{
				"Name":      v.Name,
				"Severity":  v.Severity,
				"Namespace": v.Namespace.Name + ":" + v.Namespace.VersionFormat,
			}).Warning("Vulnerability is malformed")
			continue
		}
		response = append(response, *v)
	}

	return response
}

// updateUpdaterFlags updates the flags specified by updaters; each transaction
// is independent of the others.
func updateUpdaterFlags(datastore database.Datastore, flags map[string]string) error {
	for key, value := range flags {
		tx, err := datastore.Begin()
		if err != nil {
			return err
		}
		defer tx.Rollback()

		err = tx.UpdateKeyValue(key, value)
		if err != nil {
			return err
		}
		if err = tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}

// setLastUpdateTime records the time of the last successful update in the
// database.
func setLastUpdateTime(datastore database.Datastore) error {
	tx, err := datastore.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	err = tx.UpdateKeyValue(updaterLastFlagName, strconv.FormatInt(time.Now().UTC().Unix(), 10))
	if err != nil {
		return err
	}
	return tx.Commit()
}

// isVulnerabilityChanged compares two vulnerabilities by their severity and
// affected features, and returns true if they differ.
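//
// Affected features are compared by their namespace:feature-name pairs; the
// two lists are treated as equal only when they have the same length and
// contain the same set of pairs.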
func isVulnerabilityChanged(a *database.VulnerabilityWithAffected, b *database.VulnerabilityWithAffected) bool {
	if a == b {
		return false
	} else if a != nil && b != nil && a.Severity == b.Severity && len(a.Affected) == len(b.Affected) {
		checked := map[string]bool{}
		for _, affected := range a.Affected {
			checked[affected.Namespace.Name+":"+affected.FeatureName] = false
		}

		for _, affected := range b.Affected {
			key := affected.Namespace.Name + ":" + affected.FeatureName
			if visited, ok := checked[key]; !ok || visited {
				return true
			}
			checked[key] = true
		}
		return false
	}
	return true
}

// findVulnerabilityChanges finds vulnerability changes from the old
// vulnerabilities to the new vulnerabilities.
// The old and new vulnerabilities should each be unique.
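//
// A change with only the old vulnerability set represents a removal, one with
// only the new vulnerability set represents an addition, and one with both
// set represents a modification; pairs whose severity and affected features
// are identical are dropped entirely.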
func findVulnerabilityChanges(old []database.VulnerabilityWithAffected, new []database.VulnerabilityWithAffected) ([]vulnerabilityChange, error) {
	changes := map[database.VulnerabilityID]vulnerabilityChange{}
	for i, vuln := range old {
		key := database.VulnerabilityID{
			Name:      vuln.Name,
			Namespace: vuln.Namespace.Name,
		}

		if _, ok := changes[key]; ok {
			return nil, fmt.Errorf("duplicated old vulnerability")
		}
		changes[key] = vulnerabilityChange{old: &old[i]}
	}

	for i, vuln := range new {
		key := database.VulnerabilityID{
			Name:      vuln.Name,
			Namespace: vuln.Namespace.Name,
		}

		if change, ok := changes[key]; ok {
			if isVulnerabilityChanged(change.old, &vuln) {
				change.new = &new[i]
				changes[key] = change
			} else {
				delete(changes, key)
			}
		} else {
			changes[key] = vulnerabilityChange{new: &new[i]}
		}
	}

	vulnChange := make([]vulnerabilityChange, 0, len(changes))
	for _, change := range changes {
		vulnChange = append(vulnChange, change)
	}
	return vulnChange, nil
}

// createVulnerabilityNotifications makes notifications out of vulnerability
// changes and inserts them into the database.
func createVulnerabilityNotifications(datastore database.Datastore, changes []vulnerabilityChange) error {
	log.WithField("count", len(changes)).Debug("creating vulnerability notifications")
	if len(changes) == 0 {
		return nil
	}

	tx, err := datastore.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	notifications := make([]database.VulnerabilityNotification, 0, len(changes))
	for _, change := range changes {
		var oldVuln, newVuln *database.Vulnerability
		if change.old != nil {
			oldVuln = &change.old.Vulnerability
		}

		if change.new != nil {
			newVuln = &change.new.Vulnerability
		}

		notifications = append(notifications, database.VulnerabilityNotification{
			NotificationHook: database.NotificationHook{
				Name:    uuid.New(),
				Created: time.Now(),
			},
			Old: oldVuln,
			New: newVuln,
		})
	}

	if err := tx.InsertVulnerabilityNotifications(notifications); err != nil {
		return err
	}

	return tx.Commit()
}

// updateVulnerabilities upserts unique vulnerabilities into the database and
// computes vulnerability changes.
func updateVulnerabilities(ctx context.Context, datastore database.Datastore, vulnerabilities []database.VulnerabilityWithAffected) ([]vulnerabilityChange, error) {
	log.WithField("count", len(vulnerabilities)).Debug("updating vulnerabilities")
	if len(vulnerabilities) == 0 {
		return nil, nil
	}

	ids := make([]database.VulnerabilityID, 0, len(vulnerabilities))
	for _, vuln := range vulnerabilities {
		ids = append(ids, database.VulnerabilityID{
			Name:      vuln.Name,
			Namespace: vuln.Namespace.Name,
		})
	}

	tx, err := datastore.Begin()
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()

	oldVulnNullable, err := tx.FindVulnerabilities(ids)
	if err != nil {
		return nil, err
	}

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	oldVuln := []database.VulnerabilityWithAffected{}
	for _, vuln := range oldVulnNullable {
		if vuln.Valid {
			oldVuln = append(oldVuln, vuln.VulnerabilityWithAffected)
		}
	}

	changes, err := findVulnerabilityChanges(oldVuln, vulnerabilities)
	if err != nil {
		return nil, err
	}

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	toRemove := []database.VulnerabilityID{}
	toAdd := []database.VulnerabilityWithAffected{}
	for _, change := range changes {
		if change.old != nil {
			toRemove = append(toRemove, database.VulnerabilityID{
				Name:      change.old.Name,
				Namespace: change.old.Namespace.Name,
			})
		}

		if change.new != nil {
			toAdd = append(toAdd, *change.new)
		}
	}

	log.WithField("count", len(toRemove)).Debug("marking vulnerabilities as outdated")
	if err := tx.DeleteVulnerabilities(toRemove); err != nil {
		return nil, err
	}

	log.WithField("count", len(toAdd)).Debug("inserting new vulnerabilities")
	if err := tx.InsertVulnerabilities(toAdd); err != nil {
		return nil, err
	}

	if err := tx.Commit(); err != nil {
		return nil, err
	}

	return changes, nil
}

func updaterEnabled(updaterName string) bool {
	for _, u := range EnabledUpdaters {
		if u == updaterName {
			return true
		}
	}
	return false
}