Converted to structured logging using logrus

Changed the logging backend from capnslog to logrus to emit JSON-structured log messages.

Finishes issue #383.
Sida Chen 2017-05-04 13:21:25 -04:00
parent d0ca4d1fe6
commit 9306e99368
30 changed files with 227 additions and 173 deletions
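
The conversion follows a single pattern throughout the diff: each package-level capnslog logger is deleted, and printf-style calls are replaced with logrus entries that carry their variables as structured fields. A minimal before/after sketch of that pattern (the `name` and `err` values below are hypothetical, not taken from this commit):

package main

import (
	"errors"

	log "github.com/sirupsen/logrus"
)

func main() {
	name := "debian"                        // hypothetical updater name
	err := errors.New("connection refused") // hypothetical failure

	// Before (capnslog): log.Errorf("could not fetch update '%s': %s", name, err)
	// After (logrus): the same information travels as machine-parseable fields.
	log.WithError(err).WithField("package", name).Error("could not fetch update")
}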

View File

@@ -23,7 +23,7 @@ import (
 "strconv"
 "time"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
 "github.com/tylerb/graceful"
 "github.com/coreos/clair/database"
@@ -32,8 +32,6 @@ import (
 const timeoutResponse = `{"Error":{"Message":"Clair failed to respond within the configured timeout window.","Type":"Timeout"}}`
-var log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")
 // Config is the configuration for the API service.
 type Config struct {
 Port int
@@ -48,14 +46,14 @@ func Run(cfg *Config, store database.Datastore, st *stopper.Stopper) {
 // Do not run the API service if there is no config.
 if cfg == nil {
-log.Infof("main API service is disabled.")
+log.Info("main API service is disabled.")
 return
 }
-log.Infof("starting main API on port %d.", cfg.Port)
+log.WithField("port", cfg.Port).Info("starting main API")
 tlsConfig, err := tlsClientConfig(cfg.CAFile)
 if err != nil {
-log.Fatalf("could not initialize client cert authentication: %s\n", err)
+log.WithError(err).Fatal("could not initialize client cert authentication")
 }
 if tlsConfig != nil {
 log.Info("main API configured with client certificate authentication")
@@ -81,10 +79,10 @@ func RunHealth(cfg *Config, store database.Datastore, st *stopper.Stopper) {
 // Do not run the API service if there is no config.
 if cfg == nil {
-log.Infof("health API service is disabled.")
+log.Info("health API service is disabled.")
 return
 }
-log.Infof("starting health API on port %d.", cfg.HealthPort)
+log.WithField("port", cfg.HealthPort).Info("starting health API")
 srv := &graceful.Server{
 Timeout: 10 * time.Second, // Interrupt health checks when stopping

View File

@@ -19,6 +19,7 @@ import (
 "strings"
 "github.com/julienschmidt/httprouter"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/api/v1"
 "github.com/coreos/clair/database"
@@ -52,7 +53,7 @@ func (rtr router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 return
 }
-log.Infof("%d %s %s %s", http.StatusNotFound, r.Method, r.RequestURI, r.RemoteAddr)
+log.WithFields(log.Fields{"status": http.StatusNotFound, "method": r.Method, "request uri": r.RequestURI, "remote addr": r.RemoteAddr}).Info("Served HTTP request")
 http.NotFound(w, r)
 }

View File

@@ -21,15 +21,12 @@ import (
 "fmt"
 "time"
-"github.com/coreos/pkg/capnslog"
 "github.com/fernet/fernet-go"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/versionfmt"
 )
-var log = capnslog.NewPackageLogger("github.com/coreos/clair", "v1")
 type Error struct {
 Message string `json:"Message,omitempty"`
 }

View File

@@ -22,6 +22,7 @@ import (
 "github.com/julienschmidt/httprouter"
 "github.com/prometheus/client_golang/prometheus"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 )
@@ -53,7 +54,7 @@ func httpHandler(h handler, ctx *context) httprouter.Handle {
 WithLabelValues(route, statusStr).
 Observe(float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond))
-log.Infof("%s \"%s %s\" %s (%s)", r.RemoteAddr, r.Method, r.RequestURI, statusStr, time.Since(start))
+log.WithFields(log.Fields{"remote addr": r.RemoteAddr, "method": r.Method, "request uri": r.RequestURI, "status": statusStr, "elapsed time": time.Since(start)}).Info("Handled HTTP request")
 }
 }

View File

@@ -24,6 +24,7 @@ import (
 "github.com/julienschmidt/httprouter"
 "github.com/prometheus/client_golang/prometheus"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair"
 "github.com/coreos/clair/database"
@@ -90,7 +91,7 @@ func writeResponse(w http.ResponseWriter, r *http.Request, status int, resp inte
 case *json.MarshalerError, *json.UnsupportedTypeError, *json.UnsupportedValueError:
 panic("v1: failed to marshal response: " + err.Error())
 default:
-log.Warningf("failed to write response: %s", err.Error())
+log.WithError(err).Warning("failed to write response")
 }
 }
 }

View File

@@ -25,12 +25,13 @@ import (
 "syscall"
 "time"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair"
 "github.com/coreos/clair/api"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/imagefmt"
+"github.com/coreos/clair/pkg/formatter"
 "github.com/coreos/clair/pkg/stopper"
 // Register database driver.
@@ -56,8 +57,6 @@ import (
 _ "github.com/coreos/clair/ext/vulnsrc/ubuntu"
 )
-var log = capnslog.NewPackageLogger("github.com/coreos/clair/cmd/clair", "main")
 func waitForSignals(signals ...os.Signal) {
 interrupts := make(chan os.Signal, 1)
 signal.Notify(interrupts, signals...)
@@ -67,12 +66,12 @@ func waitForSignals(signals ...os.Signal) {
 func startCPUProfiling(path string) *os.File {
 f, err := os.Create(path)
 if err != nil {
-log.Fatalf("failed to create profile file: %s", err)
+log.WithError(err).Fatal("failed to create profile file")
 }
 err = pprof.StartCPUProfile(f)
 if err != nil {
-log.Fatalf("failed to start CPU profiling: %s", err)
+log.WithError(err).Fatal("failed to start CPU profiling")
 }
 log.Info("started CPU profiling")
@@ -131,20 +130,22 @@ func main() {
 for _, bin := range []string{"git", "bzr", "rpm", "xz"} {
 _, err := exec.LookPath(bin)
 if err != nil {
-log.Fatalf("failed to find dependency: %s", bin)
+log.WithError(err).WithField("dependency", bin).Fatal("failed to find dependency")
 }
 }
 // Load configuration
 config, err := LoadConfig(*flagConfigPath)
 if err != nil {
-log.Fatalf("failed to load configuration: %s", err)
+log.WithError(err).Fatal("failed to load configuration")
 }
 // Initialize logging system
-logLevel, err := capnslog.ParseLevel(strings.ToUpper(*flagLogLevel))
-capnslog.SetGlobalLogLevel(logLevel)
-capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false))
+logLevel, err := log.ParseLevel(strings.ToUpper(*flagLogLevel))
+log.SetLevel(logLevel)
+log.SetOutput(os.Stdout)
+log.SetFormatter(&formatter.JSONExtendedFormatter{ShowLn: true})
 // Enable CPU Profiling if specified
 if *flagCPUProfilePath != "" {

View File

@@ -18,6 +18,8 @@ import (
 "database/sql"
 "time"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/pkg/commonerr"
 )

View File

@@ -20,6 +20,7 @@ import (
 "time"
 "github.com/guregu/null/zero"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/pkg/commonerr"
@@ -92,11 +93,11 @@ func (pgSQL *pgSQL) FindLayer(name string, withFeatures, withVulnerabilities boo
 _, err = tx.Exec(disableHashJoin)
 if err != nil {
-log.Warningf("FindLayer: could not disable hash join: %s", err)
+log.WithError(err).Warningf("FindLayer: could not disable hash join")
 }
 _, err = tx.Exec(disableMergeJoin)
 if err != nil {
-log.Warningf("FindLayer: could not disable merge join: %s", err)
+log.WithError(err).Warningf("FindLayer: could not disable merge join")
 }
 t = time.Now()
@@ -164,7 +165,7 @@ func getLayerFeatureVersions(tx *sql.Tx, layerID int) ([]database.FeatureVersion
 case "del":
 delete(mapFeatureVersions, fv.ID)
 default:
-log.Warningf("unknown Layer_diff_FeatureVersion's modification: %s", modification)
+log.WithField("modification", modification).Warning("unknown Layer_diff_FeatureVersion's modification")
 return featureVersions, database.ErrInconsistent
 }
 }

View File

@@ -17,6 +17,8 @@ package pgsql
 import (
 "time"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/pkg/commonerr"
 )

View File

@@ -18,10 +18,12 @@ import (
 "database/sql"
 "time"
-"github.com/coreos/clair/database"
-"github.com/coreos/clair/pkg/commonerr"
 "github.com/guregu/null/zero"
 "github.com/pborman/uuid"
+log "github.com/sirupsen/logrus"
+"github.com/coreos/clair/database"
+"github.com/coreos/clair/pkg/commonerr"
 )
 // do it in tx so we won't insert/update a vuln without notification and vice-versa.
@@ -177,7 +179,7 @@ func (pgSQL *pgSQL) loadLayerIntroducingVulnerability(vulnerability *database.Vu
 _, err = tx.Exec(disableHashJoin)
 if err != nil {
-log.Warningf("searchNotificationLayerIntroducingVulnerability: could not disable hash join: %s", err)
+log.WithError(err).Warning("searchNotificationLayerIntroducingVulnerability: could not disable hash join")
 }
 // We do `defer observeQueryTime` here because we don't want to observe invalid calls.

View File

@@ -25,11 +25,11 @@ import (
 "gopkg.in/yaml.v2"
-"github.com/coreos/pkg/capnslog"
 "github.com/hashicorp/golang-lru"
 "github.com/lib/pq"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/remind101/migrate"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/database/pgsql/migrations"
@@ -37,8 +37,6 @@ import (
 )
 var (
-log = capnslog.NewPackageLogger("github.com/coreos/clair", "pgsql")
 promErrorsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
 Name: "clair_pgsql_errors_total",
 Help: "Number of errors that PostgreSQL requests generated.",
@@ -283,7 +281,7 @@ func handleError(desc string, err error) error {
 return commonerr.ErrNotFound
 }
-log.Errorf("%s: %v", desc, err)
+log.WithError(err).WithField("Description", desc).Error("Handled Database Error")
 promErrorsTotal.WithLabelValues(desc).Inc()
 if _, o := err.(*pq.Error); o || err == sql.ErrTxDone || strings.HasPrefix(err.Error(), "sql:") {

View File

@@ -20,10 +20,12 @@ import (
 "reflect"
 "time"
+"github.com/guregu/null/zero"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/versionfmt"
 "github.com/coreos/clair/pkg/commonerr"
-"github.com/guregu/null/zero"
 )
 // compareStringLists returns the strings that are present in X but not in Y.

View File

@@ -19,7 +19,7 @@ import (
 "bufio"
 "bytes"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/featurefmt"
@@ -28,8 +28,6 @@ import (
 "github.com/coreos/clair/pkg/tarutil"
 )
-var log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/featurefmt/apk")
 func init() {
 featurefmt.RegisterLister("apk", &lister{})
 }
@@ -62,7 +60,7 @@ func (l lister) ListFeatures(files tarutil.FilesMap) ([]database.FeatureVersion,
 version := string(line[2:])
 err := versionfmt.Valid(dpkg.ParserName, version)
 if err != nil {
-log.Warningf("could not parse package version '%s': %s. skipping", version, err.Error())
+log.WithError(err).WithField("version", version).Warning("could not parse package version. skipping")
 } else {
 ipkg.Version = version
 }

View File

@@ -20,7 +20,7 @@ import (
 "regexp"
 "strings"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/featurefmt"
@@ -30,8 +30,6 @@ import (
 )
 var (
-log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/featurefmt/dpkg")
 dpkgSrcCaptureRegexp = regexp.MustCompile(`Source: (?P<name>[^\s]*)( \((?P<version>.*)\))?`)
 dpkgSrcCaptureRegexpNames = dpkgSrcCaptureRegexp.SubexpNames()
 )
@@ -79,7 +77,7 @@ func (l lister) ListFeatures(files tarutil.FilesMap) ([]database.FeatureVersion,
 version := md["version"]
 err = versionfmt.Valid(dpkg.ParserName, version)
 if err != nil {
-log.Warningf("could not parse package version '%s': %s. skipping", string(line[1]), err.Error())
+log.WithError(err).WithField("version", string(line[1])).Warning("could not parse package version. skipping")
 } else {
 pkg.Version = version
 }
@@ -93,7 +91,7 @@ func (l lister) ListFeatures(files tarutil.FilesMap) ([]database.FeatureVersion,
 version := strings.TrimPrefix(line, "Version: ")
 err = versionfmt.Valid(dpkg.ParserName, version)
 if err != nil {
-log.Warningf("could not parse package version '%s': %s. skipping", string(line[1]), err.Error())
+log.WithError(err).WithField("version", string(line[1])).Warning("could not parse package version. skipping")
 } else {
 pkg.Version = version
 }

View File

@@ -22,7 +22,7 @@ import (
 "os/exec"
 "strings"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/featurefmt"
@@ -32,8 +32,6 @@ import (
 "github.com/coreos/clair/pkg/tarutil"
 )
-var log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/featurefmt/rpm")
 type lister struct{}
 func init() {
@@ -53,20 +51,20 @@ func (l lister) ListFeatures(files tarutil.FilesMap) ([]database.FeatureVersion,
 tmpDir, err := ioutil.TempDir(os.TempDir(), "rpm")
 defer os.RemoveAll(tmpDir)
 if err != nil {
-log.Errorf("could not create temporary folder for RPM detection: %s", err)
+log.WithError(err).Error("could not create temporary folder for RPM detection")
 return []database.FeatureVersion{}, commonerr.ErrFilesystem
 }
 err = ioutil.WriteFile(tmpDir+"/Packages", f, 0700)
 if err != nil {
-log.Errorf("could not create temporary file for RPM detection: %s", err)
+log.WithError(err).Error("could not create temporary file for RPM detection")
 return []database.FeatureVersion{}, commonerr.ErrFilesystem
 }
 // Extract binary package names because RHSA refers to binary package names.
 out, err := exec.Command("rpm", "--dbpath", tmpDir, "-qa", "--qf", "%{NAME} %{EPOCH}:%{VERSION}-%{RELEASE}\n").CombinedOutput()
 if err != nil {
-log.Errorf("could not query RPM: %s. output: %s", err, string(out))
+log.WithError(err).WithField("output", string(out)).Error("could not query RPM")
 // Do not bubble up because we probably won't be able to fix it,
 // the database must be corrupted
 return []database.FeatureVersion{}, nil
@@ -90,7 +88,7 @@ func (l lister) ListFeatures(files tarutil.FilesMap) ([]database.FeatureVersion,
 version := strings.Replace(line[1], "(none):", "", -1)
 err := versionfmt.Valid(rpm.ParserName, version)
 if err != nil {
-log.Warningf("could not parse package version '%s': %s. skipping", line[1], err.Error())
+log.WithError(err).WithField("version", line[1]).Warning("could not parse package version. skipping")
 continue
 }

View File

@@ -20,7 +20,7 @@ import (
 "sync"
 "testing"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
 "github.com/stretchr/testify/assert"
 "github.com/coreos/clair/database"
@@ -28,8 +28,6 @@ import (
 )
 var (
-log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/featurens")
 detectorsM sync.RWMutex
 detectors = make(map[string]Detector)
 )
@@ -79,12 +77,12 @@ func Detect(files tarutil.FilesMap) (*database.Namespace, error) {
 for name, detector := range detectors {
 namespace, err := detector.Detect(files)
 if err != nil {
-log.Warningf("failed while attempting to detect namespace %s: %s", name, err)
+log.WithError(err).WithField("name", name).Warning("failed while attempting to detect namespace")
 return nil, err
 }
 if namespace != nil {
-log.Debugf("detected namespace %s: %#v", name, namespace)
+log.WithFields(log.Fields{"name": name, "namespace": namespace.Name}).Debug("detected namespace")
 return namespace, nil
 }
 }

View File

@@ -30,9 +30,10 @@ import (
 "strings"
 "sync"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/pkg/commonerr"
 "github.com/coreos/clair/pkg/tarutil"
-"github.com/coreos/pkg/capnslog"
 )
 var (
@@ -43,8 +44,6 @@ var (
 // when pulling layers, verified in default.
 insecureTLS = false
-log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/imagefmt")
 extractorsM sync.RWMutex
 extractors = make(map[string]Extractor)
 )
@@ -127,13 +126,13 @@ func Extract(format, path string, headers map[string]string, toExtract []string)
 client := &http.Client{Transport: tr}
 r, err := client.Do(request)
 if err != nil {
-log.Warningf("could not download layer: %s", err)
+log.WithError(err).Warning("could not download layer")
 return nil, ErrCouldNotFindLayer
 }
 // Fail if we don't receive a 2xx HTTP status code.
 if math.Floor(float64(r.StatusCode/100)) != 2 {
-log.Warningf("could not download layer: got status code %d, expected 2XX", r.StatusCode)
+log.WithField("status code", r.StatusCode).Warning("could not download layer: expected 2XX")
 return nil, ErrCouldNotFindLayer
 }

View File

@@ -24,14 +24,10 @@ import (
 "sync"
 "time"
-"github.com/coreos/pkg/capnslog"
 "github.com/coreos/clair/database"
 )
 var (
-log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/notification")
 sendersM sync.RWMutex
 senders = make(map[string]Sender)
 )

View File

@@ -19,8 +19,6 @@ package versionfmt
 import (
 "errors"
 "sync"
-"github.com/coreos/pkg/capnslog"
 )
 const (
@@ -40,8 +38,6 @@ var (
 // but should return an error in the case where the version is invalid.
 ErrInvalidVersion = errors.New("invalid version")
-nlog = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/versionfmt")
 parsersM sync.Mutex
 parsers = make(map[string]Parser)
 )

View File

@@ -30,7 +30,7 @@ import (
 "strings"
 "time"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/vulnmdsrc"
@@ -42,9 +42,9 @@ const (
 dataFeedMetaURL string = "http://static.nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-%s.meta"
 appenderName string = "NVD"
-)
-var log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/vulnmdsrc/nvd")
+logDataFeedName string = "data feed name"
+)
 type appender struct {
 localPath string
@@ -90,7 +91,7 @@ func (a *appender) BuildCache(datastore database.Datastore) error {
 for dataFeedName, dataFeedReader := range dataFeedReaders {
 var nvd nvd
 if err = xml.NewDecoder(dataFeedReader).Decode(&nvd); err != nil {
-log.Errorf("could not decode NVD data feed '%s': %s", dataFeedName, err)
+log.WithError(err).WithField(logDataFeedName, dataFeedName).Error("could not decode NVD data feed")
 return commonerr.ErrCouldNotParse
 }
@@ -134,7 +134,7 @@ func getDataFeeds(dataFeedHashes map[string]string, localPath string) (map[strin
 for _, dataFeedName := range dataFeedNames {
 hash, err := getHashFromMetaURL(fmt.Sprintf(dataFeedMetaURL, dataFeedName))
 if err != nil {
-log.Warningf("could get get NVD data feed hash '%s': %s", dataFeedName, err)
+log.WithError(err).WithField(logDataFeedName, dataFeedName).Warning("could not get NVD data feed hash")
 // It's not a big deal, no need interrupt, we're just going to download it again then.
 continue
@@ -163,14 +163,14 @@ func getDataFeeds(dataFeedHashes map[string]string, localPath string) (map[strin
 // Download data feed.
 r, err := http.Get(fmt.Sprintf(dataFeedURL, dataFeedName))
 if err != nil {
-log.Errorf("could not download NVD data feed file '%s': %s", dataFeedName, err)
+log.WithError(err).WithField(logDataFeedName, dataFeedName).Error("could not download NVD data feed")
 return dataFeedReaders, dataFeedHashes, commonerr.ErrCouldNotDownload
 }
 // Un-gzip it.
 gr, err := gzip.NewReader(r.Body)
 if err != nil {
-log.Errorf("could not read NVD data feed file '%s': %s", dataFeedName, err)
+log.WithError(err).WithField(logDataFeedName, dataFeedName).Error("could not read NVD data feed")
 return dataFeedReaders, dataFeedHashes, commonerr.ErrCouldNotDownload
 }
@@ -188,7 +188,7 @@ func getDataFeeds(dataFeedHashes map[string]string, localPath string) (map[strin
 }
 dataFeedReaders[dataFeedName] = nrc
-log.Warningf("could not store NVD data feed to filesystem: %s", err)
+log.WithError(err).Warning("could not store NVD data feed to filesystem")
 }
 }
 }

View File

@@ -17,6 +17,8 @@ package nvd
 import (
 "fmt"
 "strings"
+log "github.com/sirupsen/logrus"
 )
 type nvd struct {
@@ -90,7 +92,7 @@ func addVec(str *string, vec, val string) {
 if let, ok := vectorValuesToLetters[val]; ok {
 *str = fmt.Sprintf("%s%s:%s/", *str, vec, let)
 } else {
-log.Warningf("unknown value '%v' for CVSSv2 vector '%s'", val, vec)
+log.WithFields(log.Fields{"value": val, "vector": vec}).Warning("unknown value for CVSSv2 vector")
 }
 }
 }

View File

@@ -24,10 +24,9 @@ import (
 "path/filepath"
 "strings"
+log "github.com/sirupsen/logrus"
 "gopkg.in/yaml.v2"
-"github.com/coreos/pkg/capnslog"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/versionfmt"
 "github.com/coreos/clair/ext/versionfmt/dpkg"
@@ -42,8 +41,6 @@ const (
 nvdURLPrefix = "https://cve.mitre.org/cgi-bin/cvename.cgi?name="
 )
-var log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/vulnsrc/alpine")
 func init() {
 vulnsrc.RegisterUpdater("alpine", &updater{})
 }
@@ -53,7 +50,7 @@ type updater struct {
 }
 func (u *updater) Update(db database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
-log.Info("fetching Alpine vulnerabilities")
+log.WithField("package", "Alpine").Info("Start fetching vulnerabilities")
 // Pull the master branch.
 var commit string
@@ -75,7 +72,7 @@ func (u *updater) Update(db database.Datastore) (resp vulnsrc.UpdateResponse, er
 // Short-circuit if there have been no updates.
 if commit == dbCommit {
-log.Debug("no alpine update")
+log.WithField("package", "alpine").Debug("no update")
 return
 }
@@ -187,7 +184,7 @@ func (u *updater) pullRepository() (commit string, err error) {
 cmd.Dir = u.repositoryLocalPath
 if out, err := cmd.CombinedOutput(); err != nil {
 u.Clean()
-log.Errorf("could not pull alpine-secdb repository: %s. output: %s", err, out)
+log.WithError(err).WithField("output", string(out)).Error("could not pull alpine-secdb repository")
 return "", commonerr.ErrCouldNotDownload
 }
 } else {
@@ -238,7 +235,7 @@ func parseYAML(r io.Reader) (vulns []database.Vulnerability, err error) {
 for version, vulnStrs := range pkg.Fixes {
 err := versionfmt.Valid(dpkg.ParserName, version)
 if err != nil {
-log.Warningf("could not parse package version '%s': %s. skipping", version, err.Error())
+log.WithError(err).WithField("version", version).Warning("could not parse package version. skipping")
 continue
 }

View File

@@ -25,7 +25,7 @@ import (
 "net/http"
 "strings"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/versionfmt"
@@ -40,8 +40,6 @@ const (
 updaterFlag = "debianUpdater"
 )
-var log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/vulnsrc/debian")
 type jsonData map[string]map[string]jsonVuln
 type jsonVuln struct {
@@ -62,12 +60,12 @@ func init() {
 }
 func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
-log.Info("fetching Debian vulnerabilities")
+log.WithField("package", "Debian").Info("Start fetching vulnerabilities")
 // Download JSON.
 r, err := http.Get(url)
 if err != nil {
-log.Errorf("could not download Debian's update: %s", err)
+log.WithError(err).Error("could not download Debian's update")
 return resp, commonerr.ErrCouldNotDownload
 }
@@ -108,14 +106,14 @@ func buildResponse(jsonReader io.Reader, latestKnownHash string) (resp vulnsrc.U
 var data jsonData
 err = json.NewDecoder(teedJSONReader).Decode(&data)
 if err != nil {
-log.Errorf("could not unmarshal Debian's JSON: %s", err)
+log.WithError(err).Error("could not unmarshal Debian's JSON")
 return resp, commonerr.ErrCouldNotParse
 }
 // Calculate the hash and skip updating if the hash has been seen before.
 hash = hex.EncodeToString(jsonSHA.Sum(nil))
 if latestKnownHash == hash {
-log.Debug("no Debian update")
+log.WithField("package", "Debian").Debug("no update")
 return resp, nil
 }
@@ -185,7 +183,7 @@ func parseDebianJSON(data *jsonData) (vulnerabilities []database.Vulnerability,
 // "fixed_version" (if affected).
 err = versionfmt.Valid(dpkg.ParserName, releaseNode.FixedVersion)
 if err != nil {
-log.Warningf("could not parse package version '%s': %s. skipping", releaseNode.FixedVersion, err.Error())
+log.WithError(err).WithField("version", version).Warning("could not parse package version. skipping")
 continue
 }
 version = releaseNode.FixedVersion
@@ -252,7 +250,7 @@ func SeverityFromUrgency(urgency string) database.Severity {
 return database.HighSeverity
 default:
-log.Warningf("could not determine vulnerability severity from: %s", urgency)
+log.WithField("urgency", urgency).Warning("could not determine vulnerability severity from urgency")
 return database.UnknownSeverity
 }
 }

View File

@@ -25,7 +25,9 @@ import (
 "strconv"
 "strings"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
+"fmt"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/versionfmt"
@@ -48,8 +50,6 @@ var (
 }
 elsaRegexp = regexp.MustCompile(`com.oracle.elsa-(\d+).xml`)
-log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/vulnsrc/oracle")
 )
 type oval struct {
@@ -116,8 +116,7 @@ func compareELSA(left, right int) int {
 }
 func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
-log.Info("fetching Oracle Linux vulnerabilities")
+log.WithField("package", "Oracle Linux").Info("Start fetching vulnerabilities")
 // Get the first ELSA we have to manage.
 flagValue, err := datastore.GetKeyValue(updaterFlag)
 if err != nil {
@@ -132,7 +131,7 @@ func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateRespo
 // Fetch the update list.
 r, err := http.Get(ovalURI)
 if err != nil {
-log.Errorf("could not download Oracle's update list: %s", err)
+log.WithError(err).Error("could not download Oracle's update list")
 return resp, commonerr.ErrCouldNotDownload
 }
 defer r.Body.Close()
@@ -155,7 +154,7 @@ func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateRespo
 // Download the ELSA's XML file.
 r, err := http.Get(ovalURI + elsaFilePrefix + strconv.Itoa(elsa) + ".xml")
 if err != nil {
-log.Errorf("could not download Oracle's update file: %s", err)
+log.WithError(err).Error("could not download Oracle's update list")
 return resp, commonerr.ErrCouldNotDownload
 }
@@ -176,7 +175,7 @@ func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateRespo
 resp.FlagName = updaterFlag
 resp.FlagValue = strconv.Itoa(largest(elsaList))
 } else {
-log.Debug("no Oracle Linux update.")
+log.WithField("package", "Oracle Linux").Debug("no update")
 }
 return resp, nil
@@ -198,7 +197,7 @@ func parseELSA(ovalReader io.Reader) (vulnerabilities []database.Vulnerability,
 var ov oval
 err = xml.NewDecoder(ovalReader).Decode(&ov)
 if err != nil {
-log.Errorf("could not decode Oracle's XML: %s", err)
+log.WithError(err).Error("could not decode Oracle's XML")
 err = commonerr.ErrCouldNotParse
 return
 }
@@ -318,7 +317,7 @@ func toFeatureVersions(criteria criteria) []database.FeatureVersion {
 const prefixLen = len("Oracle Linux ")
 osVersion, err = strconv.Atoi(strings.TrimSpace(c.Comment[prefixLen : prefixLen+strings.Index(c.Comment[prefixLen:], " ")]))
 if err != nil {
-log.Warningf("could not parse Oracle Linux release version from: '%s'.", c.Comment)
+log.WithError(err).WithField("comment", c.Comment).Warning("could not parse Oracle Linux release version from comment")
 }
 } else if strings.Contains(c.Comment, " is earlier than ") {
 const prefixLen = len(" is earlier than ")
@@ -326,7 +325,7 @@ func toFeatureVersions(criteria criteria) []database.FeatureVersion {
 version := c.Comment[strings.Index(c.Comment, " is earlier than ")+prefixLen:]
 err := versionfmt.Valid(rpm.ParserName, version)
 if err != nil {
-log.Warningf("could not parse package version '%s': %s. skipping", version, err.Error())
+log.WithError(err).WithField("version", version).Warning("could not parse package version. skipping")
 } else {
 featureVersion.Version = version
 }
@@ -339,7 +338,7 @@ func toFeatureVersions(criteria criteria) []database.FeatureVersion {
 if featureVersion.Feature.Namespace.Name != "" && featureVersion.Feature.Name != "" && featureVersion.Version != "" {
 featureVersionParameters[featureVersion.Feature.Namespace.Name+":"+featureVersion.Feature.Name] = featureVersion
 } else {
-log.Warningf("could not determine a valid package from criterions: %v", criterions)
+log.WithField("criterions", fmt.Sprintf("%v", criterions)).Warning("could not determine a valid package from criterions")
 }
 }
@@ -388,7 +387,7 @@ func severity(def definition) database.Severity {
 case "critical":
 return database.CriticalSeverity
 default:
-log.Warningf("could not determine vulnerability severity from: %s.", def.Severity)
+log.WithField("severity", def.Severity).Warning("could not determine vulnerability severity")
 return database.UnknownSeverity
 }
 }

View File

@@ -19,13 +19,14 @@ package rhel
 import (
 "bufio"
 "encoding/xml"
+"fmt"
 "io"
 "net/http"
 "regexp"
 "strconv"
 "strings"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/versionfmt"
@@ -53,8 +54,6 @@ var (
 }
 rhsaRegexp = regexp.MustCompile(`com.redhat.rhsa-(\d+).xml`)
-log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/vulnsrc/rhel")
 )
 type oval struct {
@@ -90,8 +89,7 @@ func init() {
 }
 func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
-log.Info("fetching RHEL vulnerabilities")
+log.WithField("package", "RHEL").Info("Start fetching vulnerabilities")
 // Get the first RHSA we have to manage.
 flagValue, err := datastore.GetKeyValue(updaterFlag)
 if err != nil {
@@ -105,7 +103,7 @@ func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateRespo
 // Fetch the update list.
 r, err := http.Get(ovalURI)
 if err != nil {
-log.Errorf("could not download RHEL's update list: %s", err)
+log.WithError(err).Error("could not download RHEL's update list")
 return resp, commonerr.ErrCouldNotDownload
 }
@@ -127,7 +125,7 @@ func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateRespo
 // Download the RHSA's XML file.
 r, err := http.Get(ovalURI + rhsaFilePrefix + strconv.Itoa(rhsa) + ".xml")
 if err != nil {
-log.Errorf("could not download RHEL's update file: %s", err)
+log.WithError(err).Error("could not download RHEL's update list")
 return resp, commonerr.ErrCouldNotDownload
 }
@@ -148,7 +146,7 @@ func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateRespo
 resp.FlagName = updaterFlag
 resp.FlagValue = strconv.Itoa(rhsaList[len(rhsaList)-1])
 } else {
-log.Debug("no Red Hat update.")
+log.WithField("package", "Red Hat").Debug("no update")
 }
 return resp, nil
@@ -161,7 +159,7 @@ func parseRHSA(ovalReader io.Reader) (vulnerabilities []database.Vulnerability,
 var ov oval
 err = xml.NewDecoder(ovalReader).Decode(&ov)
 if err != nil {
-log.Errorf("could not decode RHEL's XML: %s", err)
+log.WithError(err).Error("could not decode RHEL's XML")
 err = commonerr.ErrCouldNotParse
 return
 }
@@ -281,7 +279,7 @@ func toFeatureVersions(criteria criteria) []database.FeatureVersion {
 const prefixLen = len("Red Hat Enterprise Linux ")
 osVersion, err = strconv.Atoi(strings.TrimSpace(c.Comment[prefixLen : prefixLen+strings.Index(c.Comment[prefixLen:], " ")]))
 if err != nil {
-log.Warningf("could not parse Red Hat release version from: '%s'.", c.Comment)
+log.WithField("criterion comment", c.Comment).Warning("could not parse Red Hat release version from criterion comment")
 }
 } else if strings.Contains(c.Comment, " is earlier than ") {
 const prefixLen = len(" is earlier than ")
@@ -289,7 +287,7 @@ func toFeatureVersions(criteria criteria) []database.FeatureVersion {
 version := c.Comment[strings.Index(c.Comment, " is earlier than ")+prefixLen:]
 err := versionfmt.Valid(rpm.ParserName, version)
 if err != nil {
-log.Warningf("could not parse package version '%s': %s. skipping", version, err.Error())
+log.WithError(err).WithField("version", version).Warning("could not parse package version. skipping")
 } else {
 featureVersion.Version = version
 featureVersion.Feature.Namespace.VersionFormat = rpm.ParserName
@@ -307,7 +305,7 @@ func toFeatureVersions(criteria criteria) []database.FeatureVersion {
 if featureVersion.Feature.Namespace.Name != "" && featureVersion.Feature.Name != "" && featureVersion.Version != "" {
 featureVersionParameters[featureVersion.Feature.Namespace.Name+":"+featureVersion.Feature.Name] = featureVersion
 } else {
-log.Warningf("could not determine a valid package from criterions: %v", criterions)
+log.WithField("criterions", fmt.Sprintf("%v", criterions)).Warning("could not determine a valid package from criterions")
 }
 }

View File

@@ -28,7 +28,7 @@ import (
 "strconv"
 "strings"
-"github.com/coreos/pkg/capnslog"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/versionfmt"
@@ -74,8 +74,6 @@ var (
 affectsCaptureRegexp = regexp.MustCompile(`(?P<release>.*)_(?P<package>.*): (?P<status>[^\s]*)( \(+(?P<note>[^()]*)\)+)?`)
 affectsCaptureRegexpNames = affectsCaptureRegexp.SubexpNames()
-log = capnslog.NewPackageLogger("github.com/coreos/clair", "ext/vulnsrc/ubuntu")
 )
 type updater struct {
@@ -87,7 +85,7 @@ func init() {
 }
 func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
-log.Info("fetching Ubuntu vulnerabilities")
+log.WithField("package", "Ubuntu").Info("Start fetching vulnerabilities")
 // Pull the bzr repository.
 if err = u.pullRepository(); err != nil {
@@ -176,7 +174,7 @@ func (u *updater) pullRepository() (err error) {
 cmd := exec.Command("bzr", "branch", "--use-existing-dir", trackerRepository, ".")
 cmd.Dir = u.repositoryLocalPath
 if out, err := cmd.CombinedOutput(); err != nil {
-log.Errorf("could not branch Ubuntu repository: %s. output: %s", err, out)
+log.WithError(err).WithField("output", string(out)).Error("could not branch Ubuntu repository")
 return commonerr.ErrCouldNotDownload
 }
@@ -188,8 +186,7 @@ func (u *updater) pullRepository() (err error) {
 cmd.Dir = u.repositoryLocalPath
 if out, err := cmd.CombinedOutput(); err != nil {
 os.RemoveAll(u.repositoryLocalPath)
-log.Errorf("could not pull Ubuntu repository: %s. output: %s", err, out)
+log.WithError(err).WithField("output", string(out)).Error("could not pull Ubuntu repository")
 return commonerr.ErrCouldNotDownload
 }
@@ -201,13 +198,13 @@ func getRevisionNumber(pathToRepo string) (int, error) {
 cmd.Dir = pathToRepo
 out, err := cmd.CombinedOutput()
 if err != nil {
-log.Errorf("could not get Ubuntu repository's revision number: %s. output: %s", err, out)
+log.WithError(err).WithField("output", string(out)).Error("could not get Ubuntu repository's revision number")
 return 0, commonerr.ErrCouldNotDownload
 }
 revno, err := strconv.Atoi(strings.TrimSpace(string(out)))
 if err != nil {
-log.Errorf("could not parse Ubuntu repository's revision number: %s. output: %s", err, out)
+log.WithError(err).WithField("output", string(out)).Error("could not parse Ubuntu repository's revision number")
 return 0, commonerr.ErrCouldNotDownload
 }
@@ -222,14 +219,14 @@ func collectModifiedVulnerabilities(revision int, dbRevision, repositoryLocalPat
 for _, folder := range []string{"active", "retired"} {
 d, err := os.Open(repositoryLocalPath + "/" + folder)
 if err != nil {
-log.Errorf("could not open Ubuntu vulnerabilities repository's folder: %s", err)
+log.WithError(err).Error("could not open Ubuntu vulnerabilities repository's folder")
 return nil, vulnsrc.ErrFilesystem
 }
 // Get the FileInfo of all the files in the directory.
 names, err := d.Readdirnames(-1)
 if err != nil {
-log.Errorf("could not read Ubuntu vulnerabilities repository's folder:: %s.", err)
+log.WithError(err).Error("could not read Ubuntu vulnerabilities repository's folder")
 return nil, vulnsrc.ErrFilesystem
 }
@@ -254,7 +251,7 @@ func collectModifiedVulnerabilities(revision int, dbRevision, repositoryLocalPat
 // Handle an up to date database.
 dbRevisionInt, _ := strconv.Atoi(dbRevision)
 if revision == dbRevisionInt {
-log.Debug("no Ubuntu update")
+log.WithField("package", "Ubuntu").Debug("no update")
 return modifiedCVE, nil
 }
@@ -263,7 +260,7 @@ func collectModifiedVulnerabilities(revision int, dbRevision, repositoryLocalPat
 cmd.Dir = repositoryLocalPath
 out, err := cmd.CombinedOutput()
 if err != nil {
-log.Errorf("could not get Ubuntu vulnerabilities repository logs: %s. output: %s", err, out)
+log.WithError(err).WithField("output", string(out)).Error("could not get Ubuntu vulnerabilities repository logs")
 return nil, commonerr.ErrCouldNotDownload
 }
@@ -361,7 +358,7 @@ func parseUbuntuCVE(fileContent io.Reader) (vulnerability database.Vulnerability
 var err error
 err = versionfmt.Valid(dpkg.ParserName, md["note"])
 if err != nil {
-log.Warningf("could not parse package version '%s': %s. skipping", md["note"], err)
+log.WithError(err).WithField("version", md["note"]).Warning("could not parse package version. skipping")
 }
 version = md["note"]
 }

View File

@@ -20,6 +20,7 @@ import (
 "github.com/coreos/pkg/timeutil"
 "github.com/pborman/uuid"
 "github.com/prometheus/client_golang/prometheus"
+log "github.com/sirupsen/logrus"
 "github.com/coreos/clair/database"
 "github.com/coreos/clair/ext/notification"
@@ -32,6 +33,9 @@ const (
 notifierMaxBackOff = 15 * time.Minute
 notifierLockRefreshDuration = time.Minute * 2
 notifierLockDuration = time.Minute*8 + notifierLockRefreshDuration
+logSenderName = "sender name"
+logNotiName = "notification name"
 )
 var (
@@ -59,23 +63,23 @@ func RunNotifier(config *notification.Config, datastore database.Datastore, stop
 // Configure registered notifiers.
 for senderName, sender := range notification.Senders() {
 if configured, err := sender.Configure(config); configured {
-log.Infof("sender '%s' configured\n", senderName)
+log.WithField(logSenderName, senderName).Info("sender configured")
 } else {
 notification.UnregisterSender(senderName)
 if err != nil {
-log.Errorf("could not configure notifier '%s': %s", senderName, err)
+log.WithError(err).WithField(logSenderName, senderName).Error("could not configure notifier")
 }
 }
 }
 // Do not run the updater if there is no notifier enabled.
 if len(notification.Senders()) == 0 {
-log.Infof("notifier service is disabled")
+log.Info("notifier service is disabled")
 return
 }
 whoAmI := uuid.New()
-log.Infof("notifier service started. lock identifier: %s\n", whoAmI)
+log.WithField("lock identifier", whoAmI).Info("notifier service started")
 for running := true; running; {
 // Find task.
@@ -123,7 +127,7 @@ func findTask(datastore database.Datastore, renotifyInterval time.Duration, whoA
 if err != nil {
 // There is no notification or an error occurred.
 if err != commonerr.ErrNotFound {
-log.Warningf("could not get notification to send: %s", err)
+log.WithError(err).Warning("could not get notification to send")
 }
 // Wait.
@@ -136,7 +140,7 @@ func findTask(datastore database.Datastore, renotifyInterval time.Duration, whoA
 // Lock the notification.
 if hasLock, _ := datastore.Lock(notification.Name, whoAmI, notifierLockDuration, false); hasLock {
-log.Infof("found and locked a notification: %s", notification.Name)
+log.WithField(logNotiName, notification.Name).Info("found and locked a notification")
 return &notification
 }
 }
@@ -150,13 +154,13 @@ func handleTask(n database.VulnerabilityNotification, st *stopper.Stopper, maxAt
 for {
 // Max attempts exceeded.
 if attempts >= maxAttempts {
-log.Infof("giving up on sending notification '%s' via sender '%s': max attempts exceeded (%d)\n", n.Name, senderName, maxAttempts)
+log.WithFields(log.Fields{logNotiName: n.Name, logSenderName: senderName, "max attempts": maxAttempts}).Info("giving up on sending notification : max attempts exceeded")
 return false, false
 }
 // Backoff.
 if backOff > 0 {
-log.Infof("waiting %v before retrying to send notification '%s' via sender '%s' (Attempt %d / %d)\n", backOff, n.Name, senderName, attempts+1, maxAttempts)
+log.WithFields(log.Fields{"duration": backOff, logNotiName: n.Name, logSenderName: senderName, "attempts": attempts + 1, "max attempts": maxAttempts}).Info("waiting before retrying to send notification")
 if !st.Sleep(backOff) {
 return false, true
 }
@@ -166,7 +170,7 @@ func handleTask(n database.VulnerabilityNotification, st *stopper.Stopper, maxAt
 if err := sender.Send(n); err != nil {
 // Send failed; increase attempts/backoff and retry.
 promNotifierBackendErrorsTotal.WithLabelValues(senderName).Inc()
-log.Errorf("could not send notification '%s' via notifier '%s': %v", n.Name, senderName, err)
+log.WithError(err).WithFields(log.Fields{logSenderName: senderName, logNotiName: n.Name}).Error("could not send notification via notifier")
 backOff = timeutil.ExpBackoff(backOff, notifierMaxBackOff)
 attempts++
 continue
@@ -177,6 +181,6 @@ func handleTask(n database.VulnerabilityNotification, st *stopper.Stopper, maxAt
 }
 }
-log.Infof("successfully sent notification '%s'\n", n.Name)
+log.WithField(logNotiName, n.Name).Info("successfully sent notification")
 return true, false
 }

View File

@ -0,0 +1,74 @@
package formatter
import (
"encoding/json"
"fmt"
"runtime"
"strings"
"github.com/sirupsen/logrus"
)
// JSONExtendedFormatter formats log information to JSON format with time and line number in file
type JSONExtendedFormatter struct {
ShowLn bool
}
func (f *JSONExtendedFormatter) Format(entry *logrus.Entry) ([]byte, error) {
// Because entry.Data is not concurrent write safe, we need to copy the dictionary
data := make(logrus.Fields, len(entry.Data)+4)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/Sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
}
}
if f.ShowLn {
var (
filename = "???"
filepath string
line int
ok = true
)
// worst case is O(call stack size)
for depth := 3; ok; depth++ {
_, filepath, line, ok = runtime.Caller(depth)
if !ok {
line = 0
filename = "???"
break
} else if !strings.Contains(filepath, "logrus") {
if line < 0 {
line = 0
}
slash := strings.LastIndex(filepath, "/")
if slash >= 0 {
filename = filepath[slash+1:]
} else {
filename = filepath
}
break
}
}
data["Location"] = fmt.Sprintf("%s:%d", filename, line)
}
now := entry.Time
ts := now.Format("2006-01-02 15:04:05")
ms := now.Nanosecond() / 1000
data["Time"] = fmt.Sprintf("%s.%06d", ts, ms)
data["Event"] = entry.Message
data["Level"] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
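A minimal usage sketch for the formatter above (not part of this file; the import path and wiring are assumptions for illustration). Installing it on the global logrus logger emits one JSON object per entry, and ShowLn adds the Location field computed by the runtime.Caller loop:

package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/coreos/clair/pkg/formatter" // assumed import path for the new package
)

func main() {
	// Install the extended JSON formatter; ShowLn enables the "Location" field.
	log.SetFormatter(&formatter.JSONExtendedFormatter{ShowLn: true})

	log.WithField("port", 6060).Info("starting main API")
	// Possible output (single line):
	// {"Event":"starting main API","Level":"info","Location":"main.go:13","Time":"2017-05-04 13:21:25.000123","port":6060}
}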

View File

@ -20,9 +20,9 @@ import (
"sync" "sync"
"time" "time"
"github.com/coreos/pkg/capnslog"
"github.com/pborman/uuid" "github.com/pborman/uuid"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"github.com/coreos/clair/database" "github.com/coreos/clair/database"
"github.com/coreos/clair/ext/vulnmdsrc" "github.com/coreos/clair/ext/vulnmdsrc"
@ -38,8 +38,6 @@ const (
) )
var ( var (
log = capnslog.NewPackageLogger("github.com/coreos/clair", "clair")
promUpdaterErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{ promUpdaterErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{
Name: "clair_updater_errors_total", Name: "clair_updater_errors_total",
Help: "Numbers of errors that the updater generated.", Help: "Numbers of errors that the updater generated.",
@ -74,12 +72,12 @@ func RunUpdater(config *UpdaterConfig, datastore database.Datastore, st *stopper
// Do not run the updater if there is no config or if the interval is 0. // Do not run the updater if there is no config or if the interval is 0.
if config == nil || config.Interval == 0 { if config == nil || config.Interval == 0 {
log.Infof("updater service is disabled.") log.Info("updater service is disabled.")
return return
} }
whoAmI := uuid.New() whoAmI := uuid.New()
log.Infof("updater service started. lock identifier: %s", whoAmI) log.WithField("lock identifier", whoAmI).Info("updater service started")
for { for {
var stop bool var stop bool
@ -89,7 +87,7 @@ func RunUpdater(config *UpdaterConfig, datastore database.Datastore, st *stopper
nextUpdate := time.Now().UTC() nextUpdate := time.Now().UTC()
lastUpdate, firstUpdate, err := getLastUpdate(datastore) lastUpdate, firstUpdate, err := getLastUpdate(datastore)
if err != nil { if err != nil {
log.Errorf("an error occured while getting the last update time") log.WithError(err).Error("an error occured while getting the last update time")
nextUpdate = nextUpdate.Add(config.Interval) nextUpdate = nextUpdate.Add(config.Interval)
} else if firstUpdate == false { } else if firstUpdate == false {
nextUpdate = lastUpdate.Add(config.Interval) nextUpdate = lastUpdate.Add(config.Interval)
@ -133,7 +131,7 @@ func RunUpdater(config *UpdaterConfig, datastore database.Datastore, st *stopper
log.Debug("update lock is already taken") log.Debug("update lock is already taken")
nextUpdate = hasLockUntil nextUpdate = hasLockUntil
} else { } else {
log.Debugf("update lock is already taken by %s until %v", lockOwner, lockExpiration) log.WithFields(log.Fields{"lock owner": lockOwner, "lock expiration": lockExpiration}).Debug("update lock is already taken")
nextUpdate = lockExpiration nextUpdate = lockExpiration
} }
} }
@ -142,7 +140,7 @@ func RunUpdater(config *UpdaterConfig, datastore database.Datastore, st *stopper
// Sleep, but remain stoppable until approximately the next update time. // Sleep, but remain stoppable until approximately the next update time.
now := time.Now().UTC() now := time.Now().UTC()
waitUntil := nextUpdate.Add(time.Duration(rand.ExpFloat64()/0.5) * time.Second) waitUntil := nextUpdate.Add(time.Duration(rand.ExpFloat64()/0.5) * time.Second)
log.Debugf("next update attempt scheduled for %v.", waitUntil) log.WithField("scheduled time", waitUntil).Debug("next update attempt scheduled")
if !waitUntil.Before(now) { if !waitUntil.Before(now) {
if !st.Sleep(waitUntil.Sub(time.Now())) { if !st.Sleep(waitUntil.Sub(time.Now())) {
break break
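As a side note on the scheduling hunk above, here is a self-contained sketch (not part of the commit) of the jitter it adds: rand.ExpFloat64 returns an exponentially distributed value with mean 1, so dividing by 0.5 yields a mean of roughly two seconds, truncated to whole seconds by the Duration conversion.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	nextUpdate := time.Now().UTC()
	// Exponential jitter with a mean of ~2s before truncation to whole seconds.
	jitter := time.Duration(rand.ExpFloat64()/0.5) * time.Second
	fmt.Println("next update attempt scheduled for", nextUpdate.Add(jitter))
}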
@ -172,11 +170,11 @@ func update(datastore database.Datastore, firstUpdate bool) {
status, vulnerabilities, flags, notes := fetch(datastore) status, vulnerabilities, flags, notes := fetch(datastore)
// Insert vulnerabilities. // Insert vulnerabilities.
log.Tracef("inserting %d vulnerabilities for update", len(vulnerabilities)) log.WithField("count", len(vulnerabilities)).Debug("inserting vulnerabilities for update")
err := datastore.InsertVulnerabilities(vulnerabilities, !firstUpdate) err := datastore.InsertVulnerabilities(vulnerabilities, !firstUpdate)
if err != nil { if err != nil {
promUpdaterErrorsTotal.Inc() promUpdaterErrorsTotal.Inc()
log.Errorf("an error occured when inserting vulnerabilities for update: %s", err) log.WithError(err).Error("an error occured when inserting vulnerabilities for update")
return return
} }
vulnerabilities = nil vulnerabilities = nil
@ -188,7 +186,7 @@ func update(datastore database.Datastore, firstUpdate bool) {
// Log notes. // Log notes.
for _, note := range notes { for _, note := range notes {
log.Warningf("fetcher note: %s", note) log.WithField("note", note).Warning("fetcher note")
} }
promUpdaterNotesTotal.Set(float64(len(notes))) promUpdaterNotesTotal.Set(float64(len(notes)))
@ -219,14 +217,14 @@ func fetch(datastore database.Datastore) (bool, []database.Vulnerability, map[st
response, err := u.Update(datastore) response, err := u.Update(datastore)
if err != nil { if err != nil {
promUpdaterErrorsTotal.Inc() promUpdaterErrorsTotal.Inc()
log.Errorf("an error occured when fetching update '%s': %s.", name, err) log.WithError(err).WithField("updater name", name).Error("an error occured when fetching update")
status = false status = false
responseC <- nil responseC <- nil
return return
} }
responseC <- &response responseC <- &response
log.Infof("finished fetching %s", name) log.WithField("updater name", name).Info("finished fetching")
}(n, u) }(n, u)
} }
@ -273,7 +271,7 @@ func addMetadata(datastore database.Datastore, vulnerabilities []database.Vulner
// Build up a metadata cache. // Build up a metadata cache.
if err := appender.BuildCache(datastore); err != nil { if err := appender.BuildCache(datastore); err != nil {
promUpdaterErrorsTotal.Inc() promUpdaterErrorsTotal.Inc()
log.Errorf("an error occured when loading metadata fetcher '%s': %s.", name, err) log.WithError(err).WithField("appender name", name).Error("an error occured when loading metadata fetcher")
return return
} }

View File

@ -17,6 +17,8 @@ package clair
import ( import (
"regexp" "regexp"
log "github.com/sirupsen/logrus"
"github.com/coreos/clair/database" "github.com/coreos/clair/database"
"github.com/coreos/clair/ext/featurefmt" "github.com/coreos/clair/ext/featurefmt"
"github.com/coreos/clair/ext/featurens" "github.com/coreos/clair/ext/featurens"
@ -29,6 +31,7 @@ const (
// Version (integer) represents the worker version. // Version (integer) represents the worker version.
// Increased each time the engine changes. // Increased each time the engine changes.
Version = 3 Version = 3
logLayerName = "layer"
) )
var ( var (
@ -67,8 +70,7 @@ func ProcessLayer(datastore database.Datastore, imageFormat, name, parentName, p
return commonerr.NewBadRequestError("could not process a layer which does not have a format") return commonerr.NewBadRequestError("could not process a layer which does not have a format")
} }
log.Debugf("layer %s: processing (Location: %s, Engine version: %d, Parent: %s, Format: %s)", log.WithFields(log.Fields{logLayerName: name, "path": cleanURL(path), "engine version": Version, "parent layer": parentName, "format": imageFormat}).Debug("processing layer")
name, cleanURL(path), Version, parentName, imageFormat)
// Check to see if the layer is already in the database. // Check to see if the layer is already in the database.
layer, err := datastore.FindLayer(name, false, false) layer, err := datastore.FindLayer(name, false, false)
@ -88,8 +90,7 @@ func ProcessLayer(datastore database.Datastore, imageFormat, name, parentName, p
return err return err
} }
if err == commonerr.ErrNotFound { if err == commonerr.ErrNotFound {
log.Warningf("layer %s: the parent layer (%s) is unknown. it must be processed first", name, log.WithFields(log.Fields{logLayerName: name, "parent layer": parentName}).Warning("the parent layer is unknown. it must be processed first")
parentName)
return ErrParentUnknown return ErrParentUnknown
} }
layer.Parent = &parent layer.Parent = &parent
@ -97,13 +98,10 @@ func ProcessLayer(datastore database.Datastore, imageFormat, name, parentName, p
} else { } else {
// The layer is already in the database, check if we need to update it. // The layer is already in the database, check if we need to update it.
if layer.EngineVersion >= Version { if layer.EngineVersion >= Version {
log.Debugf(`layer %s: layer content has already been processed in the past with engine %d. log.WithFields(log.Fields{logLayerName: name, "past engine version": layer.EngineVersion, "current engine version": Version}).Debug("layer content has already been processed in the past with the same or a newer engine. skipping analysis")
Current engine is %d. skipping analysis`, name, layer.EngineVersion, Version)
return nil return nil
} }
log.WithFields(log.Fields{logLayerName: name, "past engine version": layer.EngineVersion, "current engine version": Version}).Debug("layer content has already been processed in the past with older engine. analyzing again")
log.Debugf(`layer %s: layer content has been analyzed in the past with engine %d. Current
engine is %d. analyzing again`, name, layer.EngineVersion, Version)
} }
// Analyze the content. // Analyze the content.
@ -121,7 +119,7 @@ func detectContent(imageFormat, name, path string, headers map[string]string, pa
totalRequiredFiles := append(featurefmt.RequiredFilenames(), featurens.RequiredFilenames()...) totalRequiredFiles := append(featurefmt.RequiredFilenames(), featurens.RequiredFilenames()...)
files, err := imagefmt.Extract(imageFormat, path, headers, totalRequiredFiles) files, err := imagefmt.Extract(imageFormat, path, headers, totalRequiredFiles)
if err != nil { if err != nil {
log.Errorf("layer %s: failed to extract data from %s: %s", name, cleanURL(path), err) log.WithError(err).WithFields(log.Fields{logLayerName: name, "path": cleanURL(path)}).Error("failed to extract data from path")
return return
} }
@ -136,7 +134,7 @@ func detectContent(imageFormat, name, path string, headers map[string]string, pa
return return
} }
if len(featureVersions) > 0 { if len(featureVersions) > 0 {
log.Debugf("layer %s: detected %d features", name, len(featureVersions)) log.WithFields(log.Fields{logLayerName: name, "feature count": len(featureVersions)}).Debug("detected features")
} }
return return
@ -148,7 +146,7 @@ func detectNamespace(name string, files tarutil.FilesMap, parent *database.Layer
return return
} }
if namespace != nil { if namespace != nil {
log.Debugf("layer %s: detected namespace %q", name, namespace.Name) log.WithFields(log.Fields{logLayerName: name, "detected namespace": namespace.Name}).Debug("detected namespace")
return return
} }
@ -156,7 +154,7 @@ func detectNamespace(name string, files tarutil.FilesMap, parent *database.Layer
if parent != nil { if parent != nil {
namespace = parent.Namespace namespace = parent.Namespace
if namespace != nil { if namespace != nil {
log.Debugf("layer %s: detected namespace %q (from parent)", name, namespace.Name) log.WithFields(log.Fields{logLayerName: name, "detected namespace": namespace.Name}).Debug("detected namespace (from parent)")
return return
} }
} }
@ -210,7 +208,7 @@ func detectFeatureVersions(name string, files tarutil.FilesMap, namespace *datab
continue continue
} }
log.Warningf("Namespace unknown for feature %s %s, in layer %s", feature.Feature.Name, feature.Version, name) log.WithFields(log.Fields{"feature name": feature.Feature.Name, "feature version": feature.Version, logLayerName: name}).Warning("Namespace unknown")
err = ErrUnsupported err = ErrUnsupported
return return
} }