clair/updater/updater.go

// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package updater updates the vulnerability database periodically using
// the registered vulnerability fetchers.
package updater

import (
	"math/rand"
	"strconv"
	"time"

	"github.com/coreos/clair/config"
	"github.com/coreos/clair/database"
	"github.com/coreos/clair/utils"
	"github.com/coreos/pkg/capnslog"
	"github.com/pborman/uuid"
	"github.com/prometheus/client_golang/prometheus"
)

const (
	flagName      = "updater/last"
	notesFlagName = "updater/notes"
	lockName      = "updater"

	lockDuration        = refreshLockDuration + time.Minute*2
	refreshLockDuration = time.Minute * 8
)

var (
	log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater")

	promUpdaterErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "clair_updater_errors_total",
		Help: "Number of errors that the updater generated.",
	})

	promUpdaterDurationSeconds = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "clair_updater_duration_seconds",
		Help: "Time it takes to update the vulnerability database.",
	})

	promUpdaterNotesTotal = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "clair_updater_notes_total",
		Help: "Number of notes that the vulnerability fetchers generated.",
	})
)

func init() {
	prometheus.MustRegister(promUpdaterErrorsTotal)
	prometheus.MustRegister(promUpdaterDurationSeconds)
	prometheus.MustRegister(promUpdaterNotesTotal)
}

// Run updates the vulnerability database at regular intervals.
func Run(config *config.UpdaterConfig, datastore database.Datastore, st *utils.Stopper) {
	defer st.End()

	// Do not run the updater if there is no config or if the interval is 0.
	if config == nil || config.Interval == 0 {
		log.Infof("updater service is disabled.")
		return
	}

	whoAmI := uuid.New()
	log.Infof("updater service started. lock identifier: %s", whoAmI)

	for {
		// Set the next update time to (last update time + interval) or now if there
		// is no last update time stored in the database (first update) or if an
		// error occurs.
		var nextUpdate time.Time
		var stop bool
		if lastUpdate := getLastUpdate(datastore); !lastUpdate.IsZero() {
			nextUpdate = lastUpdate.Add(config.Interval)
		} else {
			nextUpdate = time.Now().UTC()
		}

		// If the next update timer is in the past, then try to update.
		if nextUpdate.Before(time.Now().UTC()) {
			// Attempt to get a lock on the update.
			log.Debug("attempting to obtain update lock")
			hasLock, hasLockUntil := datastore.Lock(lockName, whoAmI, lockDuration, false)
			if hasLock {
				// Launch update in a new goroutine.
				doneC := make(chan bool, 1)
				go func() {
					Update(datastore)
					doneC <- true
				}()

				for done := false; !done && !stop; {
					select {
					case <-doneC:
						done = true
					case <-time.After(refreshLockDuration):
						// Refresh the lock until the update is done.
						datastore.Lock(lockName, whoAmI, lockDuration, true)
					case <-st.Chan():
						stop = true
					}
				}

				// Unlock the update.
				datastore.Unlock(lockName, whoAmI)

				if stop {
					break
				}
				continue
			} else {
				lockOwner, lockExpiration, err := datastore.FindLock(lockName)
				if err != nil {
					log.Debug("update lock is already taken")
					nextUpdate = hasLockUntil
				} else {
					log.Debugf("update lock is already taken by %s until %v", lockOwner, lockExpiration)
					nextUpdate = lockExpiration
				}
			}
		}

		// Sleep, but remain stoppable until approximately the next update time.
		now := time.Now().UTC()
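		// Add a small, exponentially distributed random delay (typically a couple of
		// seconds) to the scheduled time, so that concurrent updater instances are
		// unlikely to wake up at exactly the same moment.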
		waitUntil := nextUpdate.Add(time.Duration(rand.ExpFloat64()/0.5) * time.Second)
		log.Debugf("next update attempt scheduled for %v.", waitUntil)
		if !waitUntil.Before(now) {
			if !st.Sleep(waitUntil.Sub(time.Now())) {
				break
			}
		}
	}

	// Clean resources.
	for _, fetcher := range fetchers {
		fetcher.Clean()
	}

	log.Info("updater service stopped")
}

// Update fetches all the vulnerabilities from the registered fetchers, upserts
// them into the database and then sends notifications.
func Update(datastore database.Datastore) {
	defer setUpdaterDuration(time.Now())

	log.Info("updating vulnerabilities")

	// Fetch updates.
	status, vulnerabilities, flags, notes := fetch(datastore)

	// TODO(Quentin-M): Complete information using NVD

	// Insert vulnerabilities.
	log.Tracef("beginning insertion of %d vulnerabilities for update", len(vulnerabilities))
	err := datastore.InsertVulnerabilities(vulnerabilities)
	if err != nil {
		promUpdaterErrorsTotal.Inc()
		log.Errorf("an error occurred when inserting vulnerabilities for update: %s", err)
		return
	}
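	// Drop the reference to the vulnerabilities slice so that the memory it holds
	// can be reclaimed while the rest of the update proceeds.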
	vulnerabilities = nil

	// Update flags.
	for flagName, flagValue := range flags {
		datastore.InsertKeyValue(flagName, flagValue)
	}

	// Log notes.
	for _, note := range notes {
		log.Warningf("fetcher note: %s", note)
	}
	promUpdaterNotesTotal.Set(float64(len(notes)))

	// Update the last successful update time if every fetcher worked properly.
	if status {
		datastore.InsertKeyValue(flagName, strconv.FormatInt(time.Now().UTC().Unix(), 10))
	}

	log.Info("update finished")
}
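
// setUpdaterDuration records how long the last update took in the
// clair_updater_duration_seconds gauge.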
func setUpdaterDuration(start time.Time) {
	promUpdaterDurationSeconds.Set(time.Since(start).Seconds())
}

// fetch gets data from the registered fetchers, in parallel.
func fetch(datastore database.Datastore) (bool, []database.Vulnerability, map[string]string, []string) {
	var vulnerabilities []database.Vulnerability
	var notes []string
	status := true
	flags := make(map[string]string)

	// Fetch updates in parallel.
	var responseC = make(chan *FetcherResponse, 0)
	for n, f := range fetchers {
		go func(name string, fetcher Fetcher) {
			response, err := fetcher.FetchUpdate(datastore)
			if err != nil {
				promUpdaterErrorsTotal.Inc()
				log.Errorf("an error occurred when fetching update '%s': %s.", name, err)
				status = false
				responseC <- nil
				return
			}

			responseC <- &response
		}(n, f)
	}

	// Collect results of updates.
	for i := 0; i < len(fetchers); i++ {
		resp := <-responseC
		if resp != nil {
			vulnerabilities = append(vulnerabilities, doVulnerabilitiesNamespacing(resp.Vulnerabilities)...)
			notes = append(notes, resp.Notes...)
			if resp.FlagName != "" && resp.FlagValue != "" {
				flags[resp.FlagName] = resp.FlagValue
			}
		}
	}

	close(responseC)

	return status, vulnerabilities, flags, notes
}
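
// getLastUpdate returns the time of the last successful update, read from the
// flagName key in the key/value store as a Unix timestamp, or the zero time
// when no update has completed yet or the stored value cannot be parsed.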
func getLastUpdate(datastore database.Datastore) time.Time {
	if lastUpdateTSS, err := datastore.GetKeyValue(flagName); err == nil && lastUpdateTSS != "" {
		if lastUpdateTS, err := strconv.ParseInt(lastUpdateTSS, 10, 64); err == nil {
			return time.Unix(lastUpdateTS, 0).UTC()
		}
	}

	return time.Time{}
}

// doVulnerabilitiesNamespacing takes Vulnerabilities that don't have a Namespace and splits them
// into multiple vulnerabilities that have a Namespace and only contain the FixedIn
// FeatureVersions corresponding to their Namespace.
//
// It helps simplify the fetchers that share the same metadata about a Vulnerability regardless
// of their actual namespace (i.e. the same vulnerability information for every version of a distro).
func doVulnerabilitiesNamespacing(vulnerabilities []database.Vulnerability) []database.Vulnerability {
	vulnerabilitiesMap := make(map[string]*database.Vulnerability)

	for _, v := range vulnerabilities {
		featureVersions := v.FixedIn
		v.FixedIn = []database.FeatureVersion{}

		for _, fv := range featureVersions {
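			// Key each resulting vulnerability by namespace and name, so that all
			// FixedIn FeatureVersions from the same namespace end up grouped together.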
			index := fv.Feature.Namespace.Name + ":" + v.Name

			if vulnerability, ok := vulnerabilitiesMap[index]; !ok {
				newVulnerability := v
				newVulnerability.Namespace.Name = fv.Feature.Namespace.Name
				newVulnerability.FixedIn = []database.FeatureVersion{fv}

				vulnerabilitiesMap[index] = &newVulnerability
			} else {
				vulnerability.FixedIn = append(vulnerability.FixedIn, fv)
			}
		}
	}

	// Convert map into a slice.
	var response []database.Vulnerability
	for _, vulnerability := range vulnerabilitiesMap {
		response = append(response, *vulnerability)
	}

	return response
}