// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package updater updates the vulnerability database periodically using
// the registered vulnerability fetchers.
package updater

import (
	"math/rand"
	"strconv"
	"sync"
	"time"

	"github.com/coreos/clair/config"
	"github.com/coreos/clair/database"
	"github.com/coreos/clair/utils"
	"github.com/coreos/pkg/capnslog"
	"github.com/pborman/uuid"
	"github.com/prometheus/client_golang/prometheus"
)

const (
	flagName      = "updater/last"
	notesFlagName = "updater/notes"

	lockName            = "updater"
	lockDuration        = refreshLockDuration + time.Minute*2
	refreshLockDuration = time.Minute * 8
)
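
// Note: lockDuration is deliberately two minutes longer than refreshLockDuration,
// so the periodic lock refresh in Run always happens well before the lock expires.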

var (
	log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater")

	promUpdaterErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "clair_updater_errors_total",
		Help: "Number of errors that the updater generated.",
	})

	promUpdaterDurationSeconds = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "clair_updater_duration_seconds",
		Help: "Time it takes to update the vulnerability database.",
	})

	promUpdaterNotesTotal = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "clair_updater_notes_total",
		Help: "Number of notes that the vulnerability fetchers generated.",
	})
)

func init() {
	prometheus.MustRegister(promUpdaterErrorsTotal)
	prometheus.MustRegister(promUpdaterDurationSeconds)
	prometheus.MustRegister(promUpdaterNotesTotal)
}

// Run updates the vulnerability database at regular intervals.
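// A database lock ensures that only one updater instance works on the
// vulnerability database at a time, and the loop exits when the Stopper fires.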
func Run(config *config.UpdaterConfig, datastore database.Datastore, st *utils.Stopper) {
	defer st.End()

	// Do not run the updater if there is no config or if the interval is 0.
	if config == nil || config.Interval == 0 {
		log.Infof("updater service is disabled.")
		return
	}

	whoAmI := uuid.New()
	log.Infof("updater service started. lock identifier: %s", whoAmI)

	for {
		var stop bool

		// Determine if this is the first update and define the next update time.
		// The next update time is (last update time + interval) or now if this is the first update.
		nextUpdate := time.Now().UTC()
		lastUpdate, firstUpdate, err := getLastUpdate(datastore)
		if err != nil {
			log.Errorf("an error occurred while getting the last update time")
			nextUpdate = nextUpdate.Add(config.Interval)
		} else if !firstUpdate {
			nextUpdate = lastUpdate.Add(config.Interval)
		}

		// If the next update time is in the past, then try to update.
		if nextUpdate.Before(time.Now().UTC()) {
			// Attempt to get a lock on the update.
			log.Debug("attempting to obtain update lock")
			hasLock, hasLockUntil := datastore.Lock(lockName, whoAmI, lockDuration, false)
			if hasLock {
				// Launch the update in a new goroutine.
				doneC := make(chan bool, 1)
				go func() {
					Update(datastore, firstUpdate)
					doneC <- true
				}()
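
				// Wait for the update to finish, refreshing the lock periodically
				// and watching for a stop request in the meantime.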
				for done := false; !done && !stop; {
					select {
					case <-doneC:
						done = true
					case <-time.After(refreshLockDuration):
						// Refresh the lock until the update is done.
						datastore.Lock(lockName, whoAmI, lockDuration, true)
					case <-st.Chan():
						stop = true
					}
				}

				// Unlock the update.
				datastore.Unlock(lockName, whoAmI)

				if stop {
					break
				}
				continue
			} else {
				lockOwner, lockExpiration, err := datastore.FindLock(lockName)
				if err != nil {
					log.Debug("update lock is already taken")
					nextUpdate = hasLockUntil
				} else {
					log.Debugf("update lock is already taken by %s until %v", lockOwner, lockExpiration)
					nextUpdate = lockExpiration
				}
			}
		}

		// Sleep, but remain stoppable until approximately the next update time.
		now := time.Now().UTC()
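		// rand.ExpFloat64() is exponentially distributed with a mean of 1, so the
		// division by 0.5 adds on average about two seconds of jitter.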
		waitUntil := nextUpdate.Add(time.Duration(rand.ExpFloat64()/0.5) * time.Second)
		log.Debugf("next update attempt scheduled for %v.", waitUntil)
		if !waitUntil.Before(now) {
			if !st.Sleep(waitUntil.Sub(time.Now())) {
				break
			}
		}
	}

	// Clean resources.
	for _, metadataFetcher := range metadataFetchers {
		metadataFetcher.Clean()
	}
	for _, fetcher := range fetchers {
		fetcher.Clean()
	}

	log.Info("updater service stopped")
}

// Update fetches all the vulnerabilities from the registered fetchers, upserts
// them into the database and then sends notifications.
func Update(datastore database.Datastore, firstUpdate bool) {
	defer setUpdaterDuration(time.Now())

	log.Info("updating vulnerabilities")

	// Fetch updates.
	status, vulnerabilities, flags, notes := fetch(datastore)

	// Insert vulnerabilities.
	log.Tracef("inserting %d vulnerabilities for update", len(vulnerabilities))
	err := datastore.InsertVulnerabilities(vulnerabilities, !firstUpdate)
	if err != nil {
		promUpdaterErrorsTotal.Inc()
		log.Errorf("an error occurred when inserting vulnerabilities for update: %s", err)
		return
	}
	vulnerabilities = nil

	// Update flags.
	for flagName, flagValue := range flags {
		datastore.InsertKeyValue(flagName, flagValue)
	}

	// Log notes.
	for _, note := range notes {
		log.Warningf("fetcher note: %s", note)
	}
	promUpdaterNotesTotal.Set(float64(len(notes)))

	// Update the last successful update time if every fetcher worked properly.
	if status {
		datastore.InsertKeyValue(flagName, strconv.FormatInt(time.Now().UTC().Unix(), 10))
	}

	log.Info("update finished")
}
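
// setUpdaterDuration records the time spent by an update in the Prometheus
// gauge; it is meant to be deferred with the update's start time.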
func setUpdaterDuration(start time.Time) {
	promUpdaterDurationSeconds.Set(time.Since(start).Seconds())
}

// fetch gets data from the registered fetchers, in parallel.
func fetch(datastore database.Datastore) (bool, []database.Vulnerability, map[string]string, []string) {
	var vulnerabilities []database.Vulnerability
	var notes []string
	status := true
	flags := make(map[string]string)

	// Fetch updates in parallel.
	log.Info("fetching vulnerability updates")
	var responseC = make(chan *FetcherResponse, 0)
	for n, f := range fetchers {
		go func(name string, fetcher Fetcher) {
			response, err := fetcher.FetchUpdate(datastore)
			if err != nil {
				promUpdaterErrorsTotal.Inc()
				log.Errorf("an error occurred when fetching update '%s': %s.", name, err)
				status = false
				responseC <- nil
				return
			}

			responseC <- &response
		}(n, f)
	}

	// Collect results of updates.
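	// Each fetcher goroutine sends exactly one value (nil on failure), so this
	// loop always receives len(fetchers) responses and cannot block forever.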
	for i := 0; i < len(fetchers); i++ {
		resp := <-responseC
		if resp != nil {
			vulnerabilities = append(vulnerabilities, doVulnerabilitiesNamespacing(resp.Vulnerabilities)...)
			notes = append(notes, resp.Notes...)
			if resp.FlagName != "" && resp.FlagValue != "" {
				flags[resp.FlagName] = resp.FlagValue
			}
		}
	}

	close(responseC)
	return status, addMetadata(datastore, vulnerabilities), flags, notes
}

// addMetadata adds metadata to the specified vulnerabilities using the
// registered MetadataFetchers, in parallel.
func addMetadata(datastore database.Datastore, vulnerabilities []database.Vulnerability) []database.Vulnerability {
	if len(metadataFetchers) == 0 {
		return vulnerabilities
	}

	log.Info("adding metadata to vulnerabilities")

	// Wrap vulnerabilities in VulnerabilityWithLock.
	// It ensures that only one metadata fetcher at a time can modify the Metadata map.
	vulnerabilitiesWithLocks := make([]*VulnerabilityWithLock, 0, len(vulnerabilities))
	for i := 0; i < len(vulnerabilities); i++ {
		vulnerabilitiesWithLocks = append(vulnerabilitiesWithLocks, &VulnerabilityWithLock{
			Vulnerability: &vulnerabilities[i],
		})
	}

	var wg sync.WaitGroup
	wg.Add(len(metadataFetchers))

	for n, f := range metadataFetchers {
		go func(name string, metadataFetcher MetadataFetcher) {
			defer wg.Done()

			// Load the metadata fetcher.
			if err := metadataFetcher.Load(datastore); err != nil {
				promUpdaterErrorsTotal.Inc()
				log.Errorf("an error occurred when loading metadata fetcher '%s': %s.", name, err)
				return
			}

			// Add metadata to each vulnerability.
			for _, vulnerability := range vulnerabilitiesWithLocks {
				metadataFetcher.AddMetadata(vulnerability)
			}

			metadataFetcher.Unload()
		}(n, f)
	}

	wg.Wait()

	return vulnerabilities
}
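
// getLastUpdate returns the time of the last successful update, a boolean that
// is true when no update has been recorded yet, and any datastore error.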
func getLastUpdate(datastore database.Datastore) (time.Time, bool, error) {
	lastUpdateTSS, err := datastore.GetKeyValue(flagName)
	if err != nil {
		return time.Time{}, false, err
	}

	if lastUpdateTSS == "" {
		// This is the first update.
		return time.Time{}, true, nil
	}

	lastUpdateTS, err := strconv.ParseInt(lastUpdateTSS, 10, 64)
	if err != nil {
		return time.Time{}, false, err
	}

	return time.Unix(lastUpdateTS, 0).UTC(), false, nil
}

// doVulnerabilitiesNamespacing takes Vulnerabilities that don't have a Namespace and splits them
// into multiple vulnerabilities that have a Namespace and only contain the FixedIn
// FeatureVersions corresponding to their Namespace.
//
// It helps simplify fetchers that share the same metadata about a Vulnerability regardless
// of their actual namespace (i.e. the same vulnerability information for every version of a distro).
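//
// For example, a vulnerability whose FixedIn entries span the (hypothetical)
// namespaces "debian:8" and "debian:9" is split into two vulnerabilities, one
// per namespace, each keeping only the FixedIn entries of its own namespace.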
func doVulnerabilitiesNamespacing(vulnerabilities []database.Vulnerability) []database.Vulnerability {
	vulnerabilitiesMap := make(map[string]*database.Vulnerability)

	for _, v := range vulnerabilities {
		featureVersions := v.FixedIn
		v.FixedIn = []database.FeatureVersion{}

		for _, fv := range featureVersions {
			index := fv.Feature.Namespace.Name + ":" + v.Name

			if vulnerability, ok := vulnerabilitiesMap[index]; !ok {
				newVulnerability := v
				newVulnerability.Namespace.Name = fv.Feature.Namespace.Name
				newVulnerability.FixedIn = []database.FeatureVersion{fv}

				vulnerabilitiesMap[index] = &newVulnerability
			} else {
				vulnerability.FixedIn = append(vulnerability.FixedIn, fv)
			}
		}
	}

	// Convert map into a slice.
	var response []database.Vulnerability
	for _, vulnerability := range vulnerabilitiesMap {
		response = append(response, *vulnerability)
	}

	return response
}