commit
548a2be262
@ -15,10 +15,14 @@ install:
|
||||
script:
|
||||
- go test -v $(go list ./... | grep -v /vendor/)
|
||||
|
||||
services:
|
||||
- postgresql
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- rpm
|
||||
postgresql: "9.4"
|
||||
|
||||
notifications:
|
||||
email: false
|
||||
|
@ -7,8 +7,6 @@ RUN apt-get update && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
RUN mkdir /db
|
||||
VOLUME /db
|
||||
VOLUME /config
|
||||
|
||||
EXPOSE 6060 6061
|
||||
@ -18,6 +16,5 @@ WORKDIR /go/src/github.com/coreos/clair/
|
||||
|
||||
ENV GO15VENDOREXPERIMENT 1
|
||||
RUN go install -v github.com/coreos/clair/cmd/clair
|
||||
RUN go test $(go list ./... | grep -v /vendor/) # https://github.com/golang/go/issues/11659
|
||||
|
||||
ENTRYPOINT ["clair"]
|
||||
|
140
Godeps/Godeps.json
generated
140
Godeps/Godeps.json
generated
@ -1,74 +1,124 @@
|
||||
{
|
||||
"ImportPath": "github.com/coreos/clair",
|
||||
"GoVersion": "go1.5.1",
|
||||
"GoVersion": "go1.5",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "github.com/alecthomas/template",
|
||||
"Rev": "b867cc6ab45cece8143cfcc6fc9c77cf3f2c23c0"
|
||||
"ImportPath": "bitbucket.org/liamstask/goose/lib/goose",
|
||||
"Rev": "8488cc47d90c8a502b1c41a462a6d9cc8ee0a895"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/alecthomas/units",
|
||||
"Rev": "6b4e7dc5e3143b85ea77909c72caf89416fc2915"
|
||||
"ImportPath": "github.com/beorn7/perks/quantile",
|
||||
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/barakmich/glog",
|
||||
"Rev": "fafcb6128a8a2e6360ff034091434d547397d54a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/boltdb/bolt",
|
||||
"Comment": "v1.0-98-gafceb31",
|
||||
"Rev": "afceb316b96ea97cbac6d23afbdf69543d80748a"
|
||||
"ImportPath": "github.com/codegangsta/negroni",
|
||||
"Comment": "v0.1-70-gc7477ad",
|
||||
"Rev": "c7477ad8e330bef55bf1ebe300cf8aa67c492d1b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-systemd/journal",
|
||||
"Comment": "v3-15-gcfa48f3",
|
||||
"Rev": "cfa48f34d8dc4ff58f9b48725181a09f9092dc3c"
|
||||
"Comment": "v4-34-g4f14f6d",
|
||||
"Rev": "4f14f6deef2da87e4aa59e6c1c1f3e02ba44c5e1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/pkg/capnslog",
|
||||
"Rev": "42a8c3b1a6f917bb8346ef738f32712a7ca0ede7"
|
||||
"Rev": "2c77715c4df99b5420ffcae14ead08f52104065d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/pkg/timeutil",
|
||||
"Rev": "42a8c3b1a6f917bb8346ef738f32712a7ca0ede7"
|
||||
"Rev": "2c77715c4df99b5420ffcae14ead08f52104065d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gogo/protobuf/proto",
|
||||
"Rev": "58bbd41c1a2d1b7154f5d99a8d0d839b3093301a"
|
||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cayley",
|
||||
"Rev": "582c4e1ca46943f2cf09c73bd12a83a6959057c9"
|
||||
"ImportPath": "github.com/fernet/fernet-go",
|
||||
"Rev": "1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-sql-driver/mysql",
|
||||
"Comment": "v1.2-125-gd512f20",
|
||||
"Rev": "d512f204a577a4ab037a1816604c48c9c13210be"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "5fc2294e655b78ed8a02082d37808d46c17d7e64"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/guregu/null/zero",
|
||||
"Comment": "v3-3-g79c5bd3",
|
||||
"Rev": "79c5bd36b615db4c06132321189f579c8a5fca98"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/golang-lru",
|
||||
"Rev": "5c7531c003d8bf158b0fe5063649a2f41a822146"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/julienschmidt/httprouter",
|
||||
"Comment": "v1.1",
|
||||
"Rev": "8c199fb6259ffc1af525cc3ad52ee60ba8359669"
|
||||
"Comment": "v1.1-14-g21439ef",
|
||||
"Rev": "21439ef4d70ba4f3e2a5ed9249e7b03af4019b40"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/kylelemons/go-gypsy/yaml",
|
||||
"Comment": "go.weekly.2011-11-02-19-g42fc2c7",
|
||||
"Rev": "42fc2c7ee9b8bd0ff636cd2d7a8c0a49491044c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/lib/pq",
|
||||
"Comment": "go1.0-cutoff-56-gdc50b6a",
|
||||
"Rev": "dc50b6ad2d3ee836442cf3389009c7cd1e64bb43"
|
||||
"Comment": "go1.0-cutoff-63-g11fc39a",
|
||||
"Rev": "11fc39a580a008f1f39bb3d11d984fb34ed778d9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattn/go-sqlite3",
|
||||
"Comment": "v1.1.0-30-g5510da3",
|
||||
"Rev": "5510da399572b4962c020184bb291120c0a412e2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
||||
"Rev": "d0c3fe89de86839aecf2e0579c40ba3bb336a453"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pborman/uuid",
|
||||
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
|
||||
"Rev": "dee7705ef7b324f27ceb85a121c61f2c2e8ce988"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pmezard/go-difflib/difflib",
|
||||
"Rev": "e8554b8641db39598be7f6342874b958f12ae1d4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_golang/prometheus",
|
||||
"Comment": "0.7.0-68-g67994f1",
|
||||
"Rev": "67994f177195311c3ea3d4407ed0175e34a4256f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_model/go",
|
||||
"Comment": "model-0.0.2-12-gfa8ad6f",
|
||||
"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/expfmt",
|
||||
"Rev": "dba5e39d4516169e840def50e507ef5f21b985f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
|
||||
"Rev": "dba5e39d4516169e840def50e507ef5f21b985f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/model",
|
||||
"Rev": "dba5e39d4516169e840def50e507ef5f21b985f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/procfs",
|
||||
"Rev": "406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/testify/assert",
|
||||
"Comment": "v1.0-17-g089c718",
|
||||
"Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
|
||||
"Rev": "315fcfb05d4d46d4354b313d146ef688dda272a9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/gosnappy/snappy",
|
||||
"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
|
||||
"Comment": "v1.0-91-g5b9da39",
|
||||
"Rev": "5b9da39b66e8e994455c2525c4421c8cc00a7f93"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tylerb/graceful",
|
||||
@ -76,13 +126,23 @@
|
||||
"Rev": "48afeb21e2fcbcff0f30bd5ad6b97747b0fae38e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/netutil",
|
||||
"Rev": "7654728e381988afd88e58cabfd6363a5ea91810"
|
||||
"ImportPath": "github.com/ziutek/mymysql/godrv",
|
||||
"Comment": "v1.5.4-13-g75ce5fb",
|
||||
"Rev": "75ce5fbba34b1912a3641adbd58cf317d7315821"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/mgo.v2",
|
||||
"Comment": "r2015.05.29",
|
||||
"Rev": "01ee097136da162d1dd3c9b44fbdf3abf4fd6552"
|
||||
"ImportPath": "github.com/ziutek/mymysql/mysql",
|
||||
"Comment": "v1.5.4-13-g75ce5fb",
|
||||
"Rev": "75ce5fbba34b1912a3641adbd58cf317d7315821"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ziutek/mymysql/native",
|
||||
"Comment": "v1.5.4-13-g75ce5fb",
|
||||
"Rev": "75ce5fbba34b1912a3641adbd58cf317d7315821"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/netutil",
|
||||
"Rev": "1d7a0b2100da090d8b02afcfb42f97e2c77e71a4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/yaml.v2",
|
||||
|
151
README.md
151
README.md
@ -1,89 +1,130 @@
|
||||
Clair
|
||||
=====
|
||||
# Clair
|
||||
|
||||
[](https://travis-ci.org/coreos/clair)
|
||||
[](https://travis-ci.org/coreos/clair)
|
||||
[](https://quay.io/repository/coreos/clair)
|
||||
[](https://goreportcard.com/report/coreos/clair)
|
||||
[](https://godoc.org/github.com/chihaya/chihaya)
|
||||
[](http://webchat.freenode.net/?channels=clair)
|
||||
|
||||
Clair is a container vulnerability analysis service. It provides a list of vulnerabilities that threaten a container, and can notify users when new vulnerabilities that affect existing containers become known.
|
||||
Clair is an open source project for the static analysis of vulnerabilities in [appc] and [docker] containers.
|
||||
|
||||
We named the project « Clair », which in French means *clear*, *bright*, *transparent*, because we believe that it enables users to have a clear insight into the security of their container infrastructure.
|
||||
Vulnerability data is continuously imported from a known set of sources and correlated with the indexed contents of container images in order to produce lists of vulnerabilities that threaten a container.
|
||||
When vulnerability data changes upstream, the previous state and new state of the vulnerability along with the images they affect can be sent via webhook to a configured endpoint.
|
||||
New data sources can be [added programmatically] at compile-time or data can be injected via HTTP API at runtime.
|
||||
|
||||
## Why should I use Clair?
|
||||
Our goal is to enable a more transparent view of the security of container-based infrastructure.
|
||||
Thus, the project was named `Clair` after the French term which translates to *clear*, *bright*, *transparent*.
|
||||
|
||||
Clair is a single-binary server that exposes a JSON HTTP API. It does not require any in-container monitoring agent, nor any other container modifications. It has been designed to perform massive analysis on the [Quay.io Container Registry](https://quay.io).
|
||||
[appc]: https://github.com/appc/spec
|
||||
[docker]: https://github.com/docker/docker/blob/master/image/spec/v1.md
|
||||
[added programmatically]: #custom-data-sources
|
||||
|
||||
Whether you host a container registry, a continuous-integration system, or build anywhere from dozens to thousands of containers, you can benefit from Clair. More generally, if you consider that container security matters (and, honestly, you should), you should give it a try.
|
||||
## Common Use Cases
|
||||
|
||||
## How do I run Clair?
|
||||
### Manual Auditing
|
||||
|
||||
Refer to the documentation [here](docs/Run.md "How to run Clair") for a detailed overview of how to run Clair.
|
||||
You're building an application and want to depend on a third-party container image that you found by searching the internet.
|
||||
To make sure that you do not knowingly introduce a new vulnerability into your production service, you decide to scan the container for vulnerabilities.
|
||||
You `docker pull` the container to your development machine and start an instance of Clair.
|
||||
Once it finishes updating, you use the [local image analysis tool] to analyze the container.
|
||||
You realize this container is vulnerable to many critical CVEs, so you decide to use another one.
|
||||
|
||||
## How Clair Detects Vulnerabilities
|
||||
[local image analysis tool]: https://github.com/coreos/clair/tree/master/contrib/analyze-local-images
|
||||
|
||||
Clair analyzes each container layer once, and does not execute the container to perform its examination. The scanning engine extracts all required data to detect known vulnerabilities, and caches layer data for examination against vulnerabilities discovered in the future.
|
||||
### Container Registry Integration
|
||||
|
||||
Detecting vulnerabilities can be achieved with several techniques. One option is to compute hashes of binaries. These are presented on a layer and then compared with a database. However, building this database would become tricky considering the number of different packages and library versions.
|
||||
Your company has a continuous-integration pipeline and you want to stop deployments if they introduce a dangerous vulnerability.
|
||||
A developer merges some code into the master branch of your codebase.
|
||||
The first step of your continuous-integration pipeline automates the testing and building of your container and pushes a new container to your container registry.
|
||||
Your container registry notifies Clair which causes the download and indexing of the images for the new container.
|
||||
Clair detects some vulnerabilities and sends a webhook to your continuous deployment tool to prevent this vulnerable build from seeing the light of day.
|
||||
|
||||
To detect vulnerabilities, Clair instead takes advantage of common package managers, which quickly and comprehensively provide lists of installed binary and source packages. Package lists are extracted for each layer that composes your container image: the difference between the layer’s package list and its parent one is stored. This method is efficient in its use of storage, and allows Clair to scan each layer only once, though that layer may be used in many container images. Coupled with vulnerability databases such as the Debian’s Security Bug Tracker, Clair is able to tell which vulnerabilities threaten a container, and which layer and package introduced them.
|
||||
## Hello Heartbleed
|
||||
|
||||
### Requirements
|
||||
|
||||
### Graph
|
||||
An instance of [PostgreSQL] 9.4+ is required.
|
||||
All instructions assume the user has already setup this instance.
|
||||
During the first run, Clair will bootstrap its database with vulnerability data from its data sources.
|
||||
This can take several minutes.
|
||||
|
||||
Internally, Clair implements a [graph structure to store and query layer data](docs/Model.md). The non-exhaustive example graph below corresponds to the following `Dockerfile`.
|
||||
[PostgreSQL]: http://postgresql.org
|
||||
|
||||
```
|
||||
1. MAINTAINER Quentin Machu <quentin.machu@coreos.com>
|
||||
2. FROM ubuntu:trusty
|
||||
3. RUN apt−get update && apt−get upgrade −y
|
||||
4. EXPOSE 22
|
||||
5. CMD ["/usr/sbin/sshd", "-D"]
|
||||
### Docker
|
||||
|
||||
The easiest way to get an instance of Clair running is to simply pull down the latest copy from Quay.
|
||||
|
||||
```sh
|
||||
$ mkdir $HOME/clair_config
|
||||
$ curl -L https://raw.githubusercontent.com/coreos/clair/config.example.yaml -o $HOME/clair_config/config.yaml
|
||||
$ $EDITOR $HOME/clair_config/config.yaml # Add the URI for your postgres database
|
||||
$ docker run quay.io/coreos/clair -p 6060-6061:6060-6061 -v $HOME/clair_config:/config -config=config.yaml
|
||||
```
|
||||
|
||||

|
||||
### Source
|
||||
|
||||
The above image shows five layers represented by the purple nodes, associated with their IDs and parents. Because the second layer imports *Ubuntu Trusty* in the container, Clair can detect the operating system and some packages, depicted in green (we only show one here for the sake of simplicity). The third layer upgrades packages, so the graph reflects that this layer removes the previous version and installs the new one. Finally, the graph knows about a vulnerability, drawn in red, which is fixed by a particular package. Note that two synthetic package versions exist (0 and ∞): they ensure database consistency during parallel modification. ∞ also allows us to define very easily that a vulnerability is not yet fixed; thus, it affects every package version.
|
||||
To build Clair, you need to latest stable version of [Go] and a working [Go environment].
|
||||
|
||||
Querying this particular graph will tell us that our image is not vulnerable at all because none of the successor versions of its only package fix any vulnerability. However, an image based on the second layer could be vulnerable.
|
||||
[Go]: https://github.com/golang/go/releases
|
||||
[Go environment]: https://golang.org/doc/code.html
|
||||
|
||||
### Architecture
|
||||
```sh
|
||||
$ go get github.com/coreos/clair
|
||||
$ go install github.com/coreos/clair/cmd/clair
|
||||
$ $EDITOR config.yaml # Add the URI for your postgres database
|
||||
$ ./$GOBIN/clair -config=config.yaml
|
||||
```
|
||||
|
||||
Clair is divided into X main modules (which represent Go packages):
|
||||
## Architecture
|
||||
|
||||
- **api** defines how users interact with Clair and exposes a [documented HTTP API](docs/API.md).
|
||||
- **worker** extracts useful informations from layers and store everything in the database.
|
||||
- **updater** periodically updates Clair's vulnerability database from known vulnerability sources.
|
||||
- **notifier** dispatches [notifications](docs/Notifications.md) about vulnerable containers when vulnerabilities are released or updated.
|
||||
- **database** persists layers informations and vulnerabilities in [Cayley graph database](https://github.com/google/cayley).
|
||||
- **health** summarizes health checks of every Clair's services.
|
||||
### At a glance
|
||||
|
||||
Multiple backend databases are supported, a testing deployment would use an in-memory storage while a production deployment should use [Bolt](https://github.com/boltdb/bolt) (single-instance deployment) or PostgreSQL (distributed deployment, probably behind a load-balancer). To learn more about how to run Clair, take a look at the [doc](docs/Run.md).
|
||||

|
||||
|
||||
#### Detectors & Fetchers
|
||||
### Documentation
|
||||
|
||||
Clair currently supports three operating systems and their package managers, which we believe are the most common ones: *Debian* (dpkg), *Ubuntu* (dpkg), *CentOS* (rpm).
|
||||
Documentation can be found in a README.md file located in the directory of the component.
|
||||
|
||||
Supporting an operating system implies that we are able to extract the operating system's name and version from a layer and the list of package it has. This is done inside the *worker/detectors* package and extending that is straightforward.
|
||||
- [Notifier](https://github.com/coreos/clair/blob/master/notifier/README.md)
|
||||
- [v1 API](https://github.com/coreos/clair/blob/master/api/v1/README.md)
|
||||
|
||||
All of this is useless if no vulnerability is known for any of these packages. The *updater/fetchers* package defines trusted sources of vulnerabilities, how to fetch them and parse them. For now, Clair uses three databases, one for each supported operating system:
|
||||
- [Debian Security Bug Tracker](https://security-tracker.debian.org/tracker/)
|
||||
- [Ubuntu CVE Tracker](https://launchpad.net/ubuntu-cve-tracker)
|
||||
- [Red Hat Security Data](https://www.redhat.com/security/data/metrics/)
|
||||
### Vulnerability Analysis
|
||||
|
||||
Using these distro-specific sources gives us confidence that Clair can take into consideration *all* the different package implementations and backports without ever reporting anything possibly inaccurate.
|
||||
There are two major ways to perform analysis of programs: [Static Analysis] and [Dynamic Analysis].
|
||||
Clair has been designed to perform *static analysis*; containers never need to be executed.
|
||||
Rather, the filesystem of the container image is inspected and *features* are indexed into a database.
|
||||
Features are anything that when present could be an indication of a vulnerability (e.g. the presence of a file or an installed software package).
|
||||
By indexing the features of an image into the database, images only need to be rescanned when new features are added.
|
||||
|
||||
# Coming Soon
|
||||
[Static Analysis]: https://en.wikipedia.org/wiki/Static_program_analysis
|
||||
[Dynamic Analysis]: https://en.wikipedia.org/wiki/Dynamic_program_analysis
|
||||
|
||||
- Improved performances.
|
||||
- Extended detection system
|
||||
- More package managers
|
||||
- Generic features such as detecting presence/absence of files
|
||||
- ...
|
||||
- Expose more informations about vulnerability
|
||||
- Access vector
|
||||
- Acess complexity
|
||||
- ...
|
||||
### Data Sources
|
||||
|
||||
# Related links
|
||||
| Data Source | Versions | Format |
|
||||
|-------------------------------|--------------------------------------------------------|--------|
|
||||
| [Debian Security Bug Tracker] | 6, 7, 8, unstable | [dpkg] |
|
||||
| [Ubuntu CVE Tracker] | 12.04, 12.10, 13.04, 14.04, 14.10, 15.04, 15.10, 16.04 | [dpkg] |
|
||||
| [Red Hat Security Data] | 5, 6, 7 | [rpm] |
|
||||
|
||||
- Talk @ ContainerDays NYC 2015 [[Slides]](https://docs.google.com/presentation/d/1toUKgqLyy1b-pZlDgxONLduiLmt2yaLR0GliBB7b3L0/pub?start=false&loop=false&slide=id.p) [[Video]](https://www.youtube.com/watch?v=PA3oBAgjnkU)
|
||||
- [Quay](https://quay.io): First container registry using Clair.
|
||||
[Debian Security Bug Tracker]: https://security-tracker.debian.org/tracker
|
||||
[Ubuntu CVE Tracker]: https://launchpad.net/ubuntu-cve-tracker
|
||||
[Red Hat Security Data]: https://www.redhat.com/security/data/metrics
|
||||
[dpkg]: https://en.wikipedia.org/wiki/dpkg
|
||||
[rpm]: http://www.rpm.org
|
||||
|
||||
|
||||
### Custom Data Sources
|
||||
|
||||
In addition to the default data sources, Clair has been designed in a way that allows extension without forking the project.
|
||||
*Fetchers*, which are Go packages that implement the fetching of upstream vulnerability data, are registered in [init()] similar to drivers for Go's standard [database/sql] package.
|
||||
A fetcher can live in its own repository and custom versions of clair can contain a small patch that adds the import statements of the desired fetchers in `main.go`.
|
||||
|
||||
[init()]: https://golang.org/doc/effective_go.html#init
|
||||
[database/sql]: https://godoc.org/database/sql
|
||||
|
||||
## Related Links
|
||||
|
||||
- [Talk](https://www.youtube.com/watch?v=PA3oBAgjnkU) and [Slides](https://docs.google.com/presentation/d/1toUKgqLyy1b-pZlDgxONLduiLmt2yaLR0GliBB7b3L0/pub?start=false&loop=false&slide=id.p) @ ContainerDays NYC 2015
|
||||
- [Quay](https://quay.io): the first container registry to integrate with Clair
|
||||
- [Dockyard](https://github.com/containerops/dockyard): an open source container registry with Clair integration
|
||||
|
25
api/api.go
25
api/api.go
@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package api provides a RESTful HTTP API, enabling external apps to interact
|
||||
// with clair.
|
||||
package api
|
||||
|
||||
import (
|
||||
@ -25,18 +23,19 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/tylerb/graceful"
|
||||
|
||||
"github.com/coreos/clair/api/context"
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
const timeoutResponse = `{"Error":{"Message":"Clair failed to respond within the configured timeout window.","Type":"Timeout"}}`
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")
|
||||
|
||||
// Run launches the main API, which exposes every possible interactions
|
||||
// with clair.
|
||||
func Run(config *config.APIConfig, st *utils.Stopper) {
|
||||
func Run(config *config.APIConfig, ctx *context.RouteContext, st *utils.Stopper) {
|
||||
defer st.End()
|
||||
|
||||
// Do not run the API service if there is no config.
|
||||
@ -60,16 +59,16 @@ func Run(config *config.APIConfig, st *utils.Stopper) {
|
||||
Server: &http.Server{
|
||||
Addr: ":" + strconv.Itoa(config.Port),
|
||||
TLSConfig: tlsConfig,
|
||||
Handler: NewVersionRouter(config.Timeout),
|
||||
Handler: http.TimeoutHandler(newAPIHandler(ctx), config.Timeout, timeoutResponse),
|
||||
},
|
||||
}
|
||||
|
||||
listenAndServeWithStopper(srv, st, config.CertFile, config.KeyFile)
|
||||
|
||||
log.Info("main API stopped")
|
||||
}
|
||||
|
||||
// RunHealth launches the Health API, which only exposes a method to fetch
|
||||
// Clair's health without any security or authentication mechanism.
|
||||
func RunHealth(config *config.APIConfig, st *utils.Stopper) {
|
||||
func RunHealth(config *config.APIConfig, ctx *context.RouteContext, st *utils.Stopper) {
|
||||
defer st.End()
|
||||
|
||||
// Do not run the API service if there is no config.
|
||||
@ -84,10 +83,12 @@ func RunHealth(config *config.APIConfig, st *utils.Stopper) {
|
||||
NoSignalHandling: true, // We want to use our own Stopper
|
||||
Server: &http.Server{
|
||||
Addr: ":" + strconv.Itoa(config.HealthPort),
|
||||
Handler: NewHealthRouter(),
|
||||
Handler: http.TimeoutHandler(newHealthHandler(ctx), config.Timeout, timeoutResponse),
|
||||
},
|
||||
}
|
||||
|
||||
listenAndServeWithStopper(srv, st, "", "")
|
||||
|
||||
log.Info("health API stopped")
|
||||
}
|
||||
|
||||
@ -108,9 +109,11 @@ func listenAndServeWithStopper(srv *graceful.Server, st *utils.Stopper, certFile
|
||||
err = srv.ListenAndServe()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// tlsClientConfig initializes a *tls.Config using the given CA. The resulting
|
||||
|
64
api/context/context.go
Normal file
64
api/context/context.go
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")
|
||||
|
||||
promResponseDurationMilliseconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Name: "clair_api_response_duration_milliseconds",
|
||||
Help: "The duration of time it takes to receieve and write a response to an API request",
|
||||
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
|
||||
}, []string{"route", "code"})
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(promResponseDurationMilliseconds)
|
||||
}
|
||||
|
||||
type Handler func(http.ResponseWriter, *http.Request, httprouter.Params, *RouteContext) (route string, status int)
|
||||
|
||||
func HTTPHandler(handler Handler, ctx *RouteContext) httprouter.Handle {
|
||||
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
start := time.Now()
|
||||
route, status := handler(w, r, p, ctx)
|
||||
statusStr := strconv.Itoa(status)
|
||||
if status == 0 {
|
||||
statusStr = "???"
|
||||
}
|
||||
utils.PrometheusObserveTimeMilliseconds(promResponseDurationMilliseconds.WithLabelValues(route, statusStr), start)
|
||||
|
||||
log.Infof("%s \"%s %s\" %s (%s)", r.RemoteAddr, r.Method, r.RequestURI, statusStr, time.Since(start))
|
||||
}
|
||||
}
|
||||
|
||||
type RouteContext struct {
|
||||
Store database.Datastore
|
||||
Config *config.APIConfig
|
||||
}
|
@ -1,55 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package logic implements all the available API methods.
|
||||
// Every methods are documented in docs/API.md.
|
||||
package logic
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
|
||||
"github.com/coreos/clair/health"
|
||||
httputils "github.com/coreos/clair/utils/http"
|
||||
"github.com/coreos/clair/worker"
|
||||
)
|
||||
|
||||
// Version is an integer representing the API version.
|
||||
const Version = 1
|
||||
|
||||
// GETVersions returns API and Engine versions.
|
||||
func GETVersions(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||
httputils.WriteHTTP(w, http.StatusOK, struct {
|
||||
APIVersion string
|
||||
EngineVersion string
|
||||
}{
|
||||
APIVersion: strconv.Itoa(Version),
|
||||
EngineVersion: strconv.Itoa(worker.Version),
|
||||
})
|
||||
}
|
||||
|
||||
// GETHealth sums up the health of all the registered services.
|
||||
func GETHealth(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||
globalHealth, statuses := health.Healthcheck()
|
||||
|
||||
httpStatus := http.StatusOK
|
||||
if !globalHealth {
|
||||
httpStatus = http.StatusServiceUnavailable
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, httpStatus, statuses)
|
||||
return
|
||||
}
|
@ -1,378 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logic
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
httputils "github.com/coreos/clair/utils/http"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/coreos/clair/worker"
|
||||
)
|
||||
|
||||
// POSTLayersParameters represents the expected parameters for POSTLayers.
|
||||
type POSTLayersParameters struct {
|
||||
ID, Path, ParentID, ImageFormat string
|
||||
}
|
||||
|
||||
// POSTLayers analyzes a layer and returns the engine version that has been used
|
||||
// for the analysis.
|
||||
func POSTLayers(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||
var parameters POSTLayersParameters
|
||||
if s, err := httputils.ParseHTTPBody(r, ¶meters); err != nil {
|
||||
httputils.WriteHTTPError(w, s, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Process data.
|
||||
if err := worker.Process(parameters.ID, parameters.ParentID, parameters.Path, parameters.ImageFormat); err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get engine version and return.
|
||||
httputils.WriteHTTP(w, http.StatusCreated, struct{ Version string }{Version: strconv.Itoa(worker.Version)})
|
||||
}
|
||||
|
||||
// DELETELayers deletes the specified layer and any child layers that are
|
||||
// dependent on the specified layer.
|
||||
func DELETELayers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
err := database.DeleteLayer(p.ByName("id"))
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusNoContent, nil)
|
||||
}
|
||||
|
||||
// GETLayersOS returns the operating system of a layer if it exists.
|
||||
// It uses not only the specified layer but also its parent layers if necessary.
|
||||
// An empty OS string is returned if no OS has been detected.
|
||||
func GETLayersOS(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find layer.
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent, database.FieldLayerOS})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get OS.
|
||||
os, err := layer.OperatingSystem()
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusOK, struct{ OS string }{OS: os})
|
||||
}
|
||||
|
||||
// GETLayersParent returns the parent ID of a layer if it exists.
|
||||
// An empty ID string is returned if the layer has no parent.
|
||||
func GETLayersParent(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find layer
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get layer's parent.
|
||||
parent, err := layer.Parent([]string{database.FieldLayerID})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
ID := ""
|
||||
if parent != nil {
|
||||
ID = parent.ID
|
||||
}
|
||||
httputils.WriteHTTP(w, http.StatusOK, struct{ ID string }{ID: ID})
|
||||
}
|
||||
|
||||
// GETLayersPackages returns the complete list of packages that a layer has
|
||||
// if it exists.
|
||||
func GETLayersPackages(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find layer
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent, database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer's packages.
|
||||
packagesNodes, err := layer.AllPackages()
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
packages := []*database.Package{}
|
||||
if len(packagesNodes) > 0 {
|
||||
packages, err = database.FindAllPackagesByNodes(packagesNodes, []string{database.FieldPackageOS, database.FieldPackageName, database.FieldPackageVersion})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusOK, struct{ Packages []*database.Package }{Packages: packages})
|
||||
}
|
||||
|
||||
// GETLayersPackagesDiff returns the list of packages that a layer installs and
|
||||
// removes if it exists.
|
||||
func GETLayersPackagesDiff(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find layer.
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer's packages.
|
||||
installedPackages, removedPackages := make([]*database.Package, 0), make([]*database.Package, 0)
|
||||
if len(layer.InstalledPackagesNodes) > 0 {
|
||||
installedPackages, err = database.FindAllPackagesByNodes(layer.InstalledPackagesNodes, []string{database.FieldPackageOS, database.FieldPackageName, database.FieldPackageVersion})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
if len(layer.RemovedPackagesNodes) > 0 {
|
||||
removedPackages, err = database.FindAllPackagesByNodes(layer.RemovedPackagesNodes, []string{database.FieldPackageOS, database.FieldPackageName, database.FieldPackageVersion})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusOK, struct{ InstalledPackages, RemovedPackages []*database.Package }{InstalledPackages: installedPackages, RemovedPackages: removedPackages})
|
||||
}
|
||||
|
||||
// GETLayersVulnerabilities returns the complete list of vulnerabilities that
|
||||
// a layer has if it exists.
|
||||
func GETLayersVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Get minumum priority parameter.
|
||||
minimumPriority := types.Priority(r.URL.Query().Get("minimumPriority"))
|
||||
if minimumPriority == "" {
|
||||
minimumPriority = "High" // Set default priority to High
|
||||
} else if !minimumPriority.IsValid() {
|
||||
httputils.WriteHTTPError(w, 0, cerrors.NewBadRequestError("invalid priority"))
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent, database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer's packages.
|
||||
packagesNodes, err := layer.AllPackages()
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find vulnerabilities.
|
||||
vulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(packagesNodes, minimumPriority, []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription, database.FieldVulnerabilityCausedByPackage})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusOK, struct{ Vulnerabilities []*database.Vulnerability }{Vulnerabilities: vulnerabilities})
|
||||
}
|
||||
|
||||
// GETLayersVulnerabilitiesDiff returns the list of vulnerabilities that a layer
|
||||
// adds and removes if it exists.
|
||||
func GETLayersVulnerabilitiesDiff(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Get minumum priority parameter.
|
||||
minimumPriority := types.Priority(r.URL.Query().Get("minimumPriority"))
|
||||
if minimumPriority == "" {
|
||||
minimumPriority = "High" // Set default priority to High
|
||||
} else if !minimumPriority.IsValid() {
|
||||
httputils.WriteHTTPError(w, 0, cerrors.NewBadRequestError("invalid priority"))
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer.
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Selected fields for vulnerabilities.
|
||||
selectedFields := []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription, database.FieldVulnerabilityCausedByPackage}
|
||||
|
||||
// Find vulnerabilities for installed packages.
|
||||
addedVulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(layer.InstalledPackagesNodes, minimumPriority, selectedFields)
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find vulnerabilities for removed packages.
|
||||
removedVulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(layer.RemovedPackagesNodes, minimumPriority, selectedFields)
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove vulnerabilities which appears both in added and removed lists (eg. case of updated packages but still vulnerable).
|
||||
for ia, a := range addedVulnerabilities {
|
||||
for ir, r := range removedVulnerabilities {
|
||||
if a.ID == r.ID {
|
||||
addedVulnerabilities = append(addedVulnerabilities[:ia], addedVulnerabilities[ia+1:]...)
|
||||
removedVulnerabilities = append(removedVulnerabilities[:ir], removedVulnerabilities[ir+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusOK, struct{ Adds, Removes []*database.Vulnerability }{Adds: addedVulnerabilities, Removes: removedVulnerabilities})
|
||||
}
|
||||
|
||||
// POSTBatchLayersVulnerabilitiesParameters represents the expected parameters
// for POSTBatchLayersVulnerabilities.
type POSTBatchLayersVulnerabilitiesParameters struct {
	// LayersIDs lists the identifiers of the layers to inspect.
	// The handler rejects requests where this list is empty.
	LayersIDs []string
}
|
||||
|
||||
// POSTBatchLayersVulnerabilities returns the complete list of vulnerabilities
|
||||
// that the provided layers have, if they all exist.
|
||||
func POSTBatchLayersVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Parse body
|
||||
var parameters POSTBatchLayersVulnerabilitiesParameters
|
||||
if s, err := httputils.ParseHTTPBody(r, ¶meters); err != nil {
|
||||
httputils.WriteHTTPError(w, s, err)
|
||||
return
|
||||
}
|
||||
if len(parameters.LayersIDs) == 0 {
|
||||
httputils.WriteHTTPError(w, http.StatusBadRequest, errors.New("at least one LayerID query parameter must be provided"))
|
||||
return
|
||||
}
|
||||
|
||||
// Get minumum priority parameter.
|
||||
minimumPriority := types.Priority(r.URL.Query().Get("minimumPriority"))
|
||||
if minimumPriority == "" {
|
||||
minimumPriority = "High" // Set default priority to High
|
||||
} else if !minimumPriority.IsValid() {
|
||||
httputils.WriteHTTPError(w, 0, cerrors.NewBadRequestError("invalid priority"))
|
||||
return
|
||||
}
|
||||
|
||||
response := make(map[string]interface{})
|
||||
// For each LayerID parameter
|
||||
for _, layerID := range parameters.LayersIDs {
|
||||
// Find layer
|
||||
layer, err := database.FindOneLayerByID(layerID, []string{database.FieldLayerParent, database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer's packages.
|
||||
packagesNodes, err := layer.AllPackages()
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find vulnerabilities.
|
||||
vulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(packagesNodes, minimumPriority, []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription, database.FieldVulnerabilityCausedByPackage})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
response[layerID] = struct{ Vulnerabilities []*database.Vulnerability }{Vulnerabilities: vulnerabilities}
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusOK, response)
|
||||
}
|
||||
|
||||
// getSuccessorsFromPackagesNodes returns the node list of packages that have
|
||||
// versions following the versions of the provided packages.
|
||||
func getSuccessorsFromPackagesNodes(packagesNodes []string) ([]string, error) {
|
||||
if len(packagesNodes) == 0 {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
// Get packages.
|
||||
packages, err := database.FindAllPackagesByNodes(packagesNodes, []string{database.FieldPackageNextVersion})
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
// Find all packages' successors.
|
||||
var packagesNextVersions []string
|
||||
for _, pkg := range packages {
|
||||
nextVersions, err := pkg.NextVersions([]string{})
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
for _, version := range nextVersions {
|
||||
packagesNextVersions = append(packagesNextVersions, version.Node)
|
||||
}
|
||||
}
|
||||
|
||||
return packagesNextVersions, nil
|
||||
}
|
||||
|
||||
// getVulnerabilitiesFromLayerPackagesNodes returns the list of vulnerabilities
|
||||
// affecting the provided package nodes, filtered by Priority.
|
||||
func getVulnerabilitiesFromLayerPackagesNodes(packagesNodes []string, minimumPriority types.Priority, selectedFields []string) ([]*database.Vulnerability, error) {
|
||||
if len(packagesNodes) == 0 {
|
||||
return []*database.Vulnerability{}, nil
|
||||
}
|
||||
|
||||
// Get successors of the packages.
|
||||
packagesNextVersions, err := getSuccessorsFromPackagesNodes(packagesNodes)
|
||||
if err != nil {
|
||||
return []*database.Vulnerability{}, err
|
||||
}
|
||||
if len(packagesNextVersions) == 0 {
|
||||
return []*database.Vulnerability{}, nil
|
||||
}
|
||||
|
||||
// Find vulnerabilities fixed in these successors.
|
||||
vulnerabilities, err := database.FindAllVulnerabilitiesByFixedIn(packagesNextVersions, selectedFields)
|
||||
if err != nil {
|
||||
return []*database.Vulnerability{}, err
|
||||
}
|
||||
|
||||
// Filter vulnerabilities depending on their priority and remove duplicates.
|
||||
filteredVulnerabilities := []*database.Vulnerability{}
|
||||
seen := map[string]struct{}{}
|
||||
for _, v := range vulnerabilities {
|
||||
if minimumPriority.Compare(v.Priority) <= 0 {
|
||||
if _, alreadySeen := seen[v.ID]; !alreadySeen {
|
||||
filteredVulnerabilities = append(filteredVulnerabilities, v)
|
||||
seen[v.ID] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return filteredVulnerabilities, nil
|
||||
}
|
@ -1,248 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logic
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
httputils "github.com/coreos/clair/utils/http"
|
||||
)
|
||||
|
||||
// GETVulnerabilities returns a vulnerability identified by an ID if it exists.
|
||||
func GETVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find vulnerability.
|
||||
vulnerability, err := database.FindOneVulnerability(p.ByName("id"), []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription, database.FieldVulnerabilityFixedIn})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusOK, abstractVulnerability)
|
||||
}
|
||||
|
||||
// POSTVulnerabilities manually inserts a vulnerability into the database if it
|
||||
// does not exist yet.
|
||||
func POSTVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
var parameters *database.AbstractVulnerability
|
||||
if s, err := httputils.ParseHTTPBody(r, ¶meters); err != nil {
|
||||
httputils.WriteHTTPError(w, s, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that the vulnerability does not exist.
|
||||
vulnerability, err := database.FindOneVulnerability(parameters.ID, []string{})
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
if vulnerability != nil {
|
||||
httputils.WriteHTTPError(w, 0, cerrors.NewBadRequestError("vulnerability already exists"))
|
||||
return
|
||||
}
|
||||
|
||||
// Insert packages.
|
||||
packages := database.AbstractPackagesToPackages(parameters.AffectedPackages)
|
||||
err = database.InsertPackages(packages)
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
var pkgNodes []string
|
||||
for _, p := range packages {
|
||||
pkgNodes = append(pkgNodes, p.Node)
|
||||
}
|
||||
|
||||
// Insert vulnerability.
|
||||
notifications, err := database.InsertVulnerabilities([]*database.Vulnerability{parameters.ToVulnerability(pkgNodes)})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Insert notifications.
|
||||
err = database.InsertNotifications(notifications, database.GetDefaultNotificationWrapper())
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusCreated, nil)
|
||||
}
|
||||
|
||||
// PUTVulnerabilities updates a vulnerability if it exists.
|
||||
func PUTVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
var parameters *database.AbstractVulnerability
|
||||
if s, err := httputils.ParseHTTPBody(r, ¶meters); err != nil {
|
||||
httputils.WriteHTTPError(w, s, err)
|
||||
return
|
||||
}
|
||||
parameters.ID = p.ByName("id")
|
||||
|
||||
// Ensure that the vulnerability exists.
|
||||
_, err := database.FindOneVulnerability(parameters.ID, []string{})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Insert packages.
|
||||
packages := database.AbstractPackagesToPackages(parameters.AffectedPackages)
|
||||
err = database.InsertPackages(packages)
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
var pkgNodes []string
|
||||
for _, p := range packages {
|
||||
pkgNodes = append(pkgNodes, p.Node)
|
||||
}
|
||||
|
||||
// Insert vulnerability.
|
||||
notifications, err := database.InsertVulnerabilities([]*database.Vulnerability{parameters.ToVulnerability(pkgNodes)})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Insert notifications.
|
||||
err = database.InsertNotifications(notifications, database.GetDefaultNotificationWrapper())
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusCreated, nil)
|
||||
}
|
||||
|
||||
// DELVulnerabilities deletes a vulnerability if it exists.
|
||||
func DELVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
err := database.DeleteVulnerability(p.ByName("id"))
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusNoContent, nil)
|
||||
}
|
||||
|
||||
// GETVulnerabilitiesIntroducingLayers returns the list of layers that
|
||||
// introduces a given vulnerability, if it exists.
|
||||
// To clarify, it does not return the list of every layers that have
|
||||
// the vulnerability.
|
||||
func GETVulnerabilitiesIntroducingLayers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find vulnerability to verify that it exists.
|
||||
_, err := database.FindOneVulnerability(p.ByName("id"), []string{})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
layers, err := database.FindAllLayersIntroducingVulnerability(p.ByName("id"), []string{database.FieldLayerID})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
layersIDs := []string{}
|
||||
for _, l := range layers {
|
||||
layersIDs = append(layersIDs, l.ID)
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusOK, struct{ IntroducingLayersIDs []string }{IntroducingLayersIDs: layersIDs})
|
||||
}
|
||||
|
||||
// POSTVulnerabilitiesAffectedLayersParameters represents the expected
// parameters for POSTVulnerabilitiesAffectedLayers.
type POSTVulnerabilitiesAffectedLayersParameters struct {
	// LayersIDs lists the identifiers of the layers to check against the
	// vulnerability; the handler rejects requests where this list is empty.
	LayersIDs []string
}
|
||||
|
||||
// POSTVulnerabilitiesAffectedLayers returns whether the specified layers
|
||||
// (by their IDs) are vulnerable to the given Vulnerability or not.
|
||||
func POSTVulnerabilitiesAffectedLayers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Parse body.
|
||||
var parameters POSTBatchLayersVulnerabilitiesParameters
|
||||
if s, err := httputils.ParseHTTPBody(r, ¶meters); err != nil {
|
||||
httputils.WriteHTTPError(w, s, err)
|
||||
return
|
||||
}
|
||||
if len(parameters.LayersIDs) == 0 {
|
||||
httputils.WriteHTTPError(w, http.StatusBadRequest, errors.New("getting the entire list of affected layers is not supported yet: at least one LayerID query parameter must be provided"))
|
||||
return
|
||||
}
|
||||
|
||||
// Find vulnerability.
|
||||
vulnerability, err := database.FindOneVulnerability(p.ByName("id"), []string{database.FieldVulnerabilityFixedIn})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Save the fixed in nodes into a map for fast check.
|
||||
fixedInPackagesMap := make(map[string]struct{})
|
||||
for _, fixedInNode := range vulnerability.FixedInNodes {
|
||||
fixedInPackagesMap[fixedInNode] = struct{}{}
|
||||
}
|
||||
|
||||
response := make(map[string]interface{})
|
||||
// For each LayerID parameter.
|
||||
for _, layerID := range parameters.LayersIDs {
|
||||
// Find layer
|
||||
layer, err := database.FindOneLayerByID(layerID, []string{database.FieldLayerParent, database.FieldLayerPackages, database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer's packages.
|
||||
packagesNodes, err := layer.AllPackages()
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get successors packages of layer' packages.
|
||||
successors, err := getSuccessorsFromPackagesNodes(packagesNodes)
|
||||
if err != nil {
|
||||
httputils.WriteHTTPError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Determine if the layer is vulnerable by verifying if one of the successors
|
||||
// of its packages are fixed by the vulnerability.
|
||||
vulnerable := false
|
||||
for _, p := range successors {
|
||||
if _, fixed := fixedInPackagesMap[p]; fixed {
|
||||
vulnerable = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
response[layerID] = struct{ Vulnerable bool }{Vulnerable: vulnerable}
|
||||
}
|
||||
|
||||
httputils.WriteHTTP(w, http.StatusOK, response)
|
||||
}
|
@ -17,81 +17,54 @@ package api
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/api/logic"
|
||||
"github.com/coreos/clair/api/wrappers"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
|
||||
"github.com/coreos/clair/api/context"
|
||||
"github.com/coreos/clair/api/v1"
|
||||
)
|
||||
|
||||
// VersionRouter is an HTTP router that forwards requests to the appropriate
|
||||
// router depending on the API version specified in the requested URI.
|
||||
type VersionRouter map[string]*httprouter.Router
|
||||
// router is an HTTP router that forwards requests to the appropriate sub-router
|
||||
// depending on the API version specified in the request URI.
|
||||
type router map[string]*httprouter.Router
|
||||
|
||||
// NewVersionRouter instantiates a VersionRouter and every sub-routers that are
|
||||
// necessary to handle supported API versions.
|
||||
func NewVersionRouter(to time.Duration) *VersionRouter {
|
||||
return &VersionRouter{
|
||||
"/v1": NewRouterV1(to),
|
||||
}
|
||||
// Let's hope we never have more than 99 API versions.
|
||||
const apiVersionLength = len("v99")
|
||||
|
||||
func newAPIHandler(ctx *context.RouteContext) http.Handler {
|
||||
router := make(router)
|
||||
router["/v1"] = v1.NewRouter(ctx)
|
||||
return router
|
||||
}
|
||||
|
||||
// ServeHTTP forwards requests to the appropriate router depending on the API
|
||||
// version specified in the requested URI and remove the version information
|
||||
// from the request URL.Path, without modifying the request's RequestURI.
|
||||
func (vs VersionRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
func (rtr router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
urlStr := r.URL.String()
|
||||
var version string
|
||||
if len(urlStr) >= 3 {
|
||||
version = urlStr[:3]
|
||||
if len(urlStr) >= apiVersionLength {
|
||||
version = urlStr[:apiVersionLength]
|
||||
}
|
||||
if router, _ := vs[version]; router != nil {
|
||||
|
||||
if router, _ := rtr[version]; router != nil {
|
||||
// Remove the version number from the request path to let the router do its
|
||||
// job but do not update the RequestURI
|
||||
r.URL.Path = strings.Replace(r.URL.Path, version, "", 1)
|
||||
router.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("%s %d %s %s", http.StatusNotFound, r.Method, r.RequestURI, r.RemoteAddr)
|
||||
http.NotFound(w, r)
|
||||
}
|
||||
|
||||
// NewRouterV1 creates a new router for the API (Version 1)
|
||||
func NewRouterV1(to time.Duration) *httprouter.Router {
|
||||
func newHealthHandler(ctx *context.RouteContext) http.Handler {
|
||||
router := httprouter.New()
|
||||
wrap := func(fn httprouter.Handle) httprouter.Handle {
|
||||
return wrappers.Log(wrappers.TimeOut(to, fn))
|
||||
router.GET("/health", context.HTTPHandler(getHealth, ctx))
|
||||
return router
|
||||
}
|
||||
|
||||
func getHealth(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
if ctx.Store.Ping() {
|
||||
return "health", http.StatusOK
|
||||
}
|
||||
|
||||
// General
|
||||
router.GET("/versions", wrap(logic.GETVersions))
|
||||
router.GET("/health", wrap(logic.GETHealth))
|
||||
|
||||
// Layers
|
||||
router.POST("/layers", wrap(logic.POSTLayers))
|
||||
router.DELETE("/layers/:id", wrap(logic.DELETELayers))
|
||||
router.GET("/layers/:id/os", wrap(logic.GETLayersOS))
|
||||
router.GET("/layers/:id/parent", wrap(logic.GETLayersParent))
|
||||
router.GET("/layers/:id/packages", wrap(logic.GETLayersPackages))
|
||||
router.GET("/layers/:id/packages/diff", wrap(logic.GETLayersPackagesDiff))
|
||||
router.GET("/layers/:id/vulnerabilities", wrap(logic.GETLayersVulnerabilities))
|
||||
router.GET("/layers/:id/vulnerabilities/diff", wrap(logic.GETLayersVulnerabilitiesDiff))
|
||||
// # Batch version of "/layers/:id/vulnerabilities"
|
||||
router.POST("/batch/layers/vulnerabilities", wrap(logic.POSTBatchLayersVulnerabilities))
|
||||
|
||||
// Vulnerabilities
|
||||
router.POST("/vulnerabilities", wrap(logic.POSTVulnerabilities))
|
||||
router.PUT("/vulnerabilities/:id", wrap(logic.PUTVulnerabilities))
|
||||
router.GET("/vulnerabilities/:id", wrap(logic.GETVulnerabilities))
|
||||
router.DELETE("/vulnerabilities/:id", wrap(logic.DELVulnerabilities))
|
||||
router.GET("/vulnerabilities/:id/introducing-layers", wrap(logic.GETVulnerabilitiesIntroducingLayers))
|
||||
router.POST("/vulnerabilities/:id/affected-layers", wrap(logic.POSTVulnerabilitiesAffectedLayers))
|
||||
|
||||
return router
|
||||
}
|
||||
|
||||
// NewHealthRouter creates a new router that only serve the Health function on /
|
||||
func NewHealthRouter() *httprouter.Router {
|
||||
router := httprouter.New()
|
||||
router.GET("/", logic.GETHealth)
|
||||
return router
|
||||
return "health", http.StatusInternalServerError
|
||||
}
|
||||
|
576
api/v1/README.md
Normal file
576
api/v1/README.md
Normal file
@ -0,0 +1,576 @@
|
||||
# Clair v1 API
|
||||
|
||||
- [Error Handling](#error-handling)
|
||||
- [Layers](#layers)
|
||||
- [POST](#post-layers)
|
||||
  - [GET](#get-layersname)
|
||||
- [DELETE](#delete-layersname)
|
||||
- [Namespaces](#namespaces)
|
||||
- [GET](#get-namespaces)
|
||||
- [Vulnerabilities](#vulnerabilities)
|
||||
- [POST](#post-namespacesnamevulnerabilities)
|
||||
- [GET](#get-namespacesnsnamevulnerabilitiesvulnname)
|
||||
- [PUT](#put-namespacesnsnamevulnerabilitiesvulnname)
|
||||
- [DELETE](#delete-namespacesnsnamevulnerabilitiesvulnname)
|
||||
- [Fixes](#fixes)
|
||||
- [GET](#get-namespacesnsnamevulnerabilitiesvulnnamefixes)
|
||||
- [PUT](#put-namespacesnsnamevulnerabilitiesvulnnamefixesfeaturename)
|
||||
- [DELETE](#delete-namespacesnsnamevulnerabilitiesvulnnamefixesfeaturename)
|
||||
- [Notifications](#notifications)
|
||||
- [GET](#get-notificationsname)
|
||||
- [DELETE](#delete-notificationname)
|
||||
|
||||
## Error Handling
|
||||
|
||||
###### Description
|
||||
|
||||
Every route can optionally provide an `Error` property on the response object.
|
||||
The HTTP status code of the response should indicate what type of failure occurred and how the client should react.
|
||||
|
||||
###### Client Retry Behavior
|
||||
|
||||
| Code | Name | Retry Behavior |
|
||||
|------|-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| 400 | Bad Request | The body of the request is invalid. The request either must be changed before being retried or depends on another request being processed before it. |
|
||||
| 404 | Not Found | The requested resource could not be found. The request must be changed before being retried. |
|
||||
| 422 | Unprocessable Entity | The request body is valid, but unsupported. This request should never be retried. |
|
||||
| 500 | Internal Server Error | The server encountered an error while processing the request. This request should be retried without change. |
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 400 Bad Request
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Error": {
|
||||
"Message": "example error message"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Layers
|
||||
|
||||
#### POST /layers
|
||||
|
||||
###### Description
|
||||
|
||||
The POST route for the Layers resource performs the indexing of a Layer from the provided path and displays the provided Layer with an updated `IndexByVersion` property.
|
||||
This request blocks for the entire duration of the downloading and indexing of the layer.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
POST http://localhost:6060/v1/layers HTTP/1.1
|
||||
|
||||
{
|
||||
"Layer": {
|
||||
"Name": "523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6",
|
||||
"Path": "/mnt/layers/523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6/layer.tar",
|
||||
"ParentName": "140f9bdfeb9784cf8730e9dab5dd12fbd704151cf555ac8cae650451794e5ac2",
|
||||
"Format": "Docker"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 201 Created
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Layer": {
|
||||
"Name": "523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6",
|
||||
"Path": "/mnt/layers/523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6/layer.tar",
|
||||
"ParentName": "140f9bdfeb9784cf8730e9dab5dd12fbd704151cf555ac8cae650451794e5ac2",
|
||||
"Format": "Docker",
|
||||
"IndexedByVersion": 1
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### GET /layers/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Layers resource displays a Layer and optionally all of its features and vulnerabilities.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|-----------------|------|----------|-------------------------------------------------------------------------------|
|
||||
| features | bool | optional | Displays the list of features indexed in this layer and all of its parents. |
|
||||
| vulnerabilities | bool | optional | Displays the list of vulnerabilities along with the features described above. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```
|
||||
GET http://localhost:6060/v1/layers/17675ec01494d651e1ccf81dc9cf63959ebfeed4f978fddb1666b6ead008ed52?features&vulnerabilities HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Layer": {
|
||||
"Name": "17675ec01494d651e1ccf81dc9cf63959ebfeed4f978fddb1666b6ead008ed52",
|
||||
"Namespace": "debian:8",
|
||||
"ParentName": "140f9bdfeb9784cf8730e9dab5dd12fbd704151cf555ac8cae650451794e5ac2",
|
||||
"IndexedByVersion": 1,
|
||||
"Features": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"Namespace": "debian:8",
|
||||
"Version": "8.23-4",
|
||||
"Vulnerabilities": [
|
||||
{
|
||||
"Name": "CVE-2014-9471",
|
||||
"Namespace": "debian:8",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Severity": "Low",
|
||||
"FixedBy": "9.23-5"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### DELETE /layers/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Layers resource removes a Layer and all of its children from the database.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/layers/17675ec01494d651e1ccf81dc9cf63959ebfeed4f978fddb1666b6ead008ed52 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
||||
|
||||
|
||||
## Namespaces
|
||||
|
||||
#### GET /namespaces
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Namespaces resource displays a list of namespaces currently being managed.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/namespaces HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Namespaces": [
|
||||
"debian:8",
|
||||
"debian:9"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Vulnerabilities
|
||||
|
||||
#### POST /namespaces/`:name`/vulnerabilities
|
||||
|
||||
###### Description
|
||||
|
||||
The POST route for the Vulnerabilities resource creates a new Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
POST /namespaces/debian%3A8/vulnerabilities HTTP/1.1
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"Namespace": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"Namespace": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 201 Created
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"Namespace": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"Namespace": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### GET /namespaces/`:nsName`/vulnerabilities/`:vulnName`
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Vulnerabilities resource displays the current data for a given vulnerability and optionally the features that fix it.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|---------|------|----------|------------------------------------------------------------|
|
||||
| fixedIn | bool | optional | Displays the list of features that fix this vulnerability. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET /namespaces/debian%3A8/vulnerabilities/CVE-2014-9471?fixedIn HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"Namespace": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"Namespace": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### PUT /namespaces/`:nsName`/vulnerabilities/`:vulnName`
|
||||
|
||||
###### Description
|
||||
|
||||
The PUT route for the Vulnerabilities resource updates a given Vulnerability.
|
||||
The "FixedIn" property of the Vulnerability must be empty or missing.
|
||||
Fixes should be managed by the Fixes resource.
|
||||
If this vulnerability was inserted by a Fetcher, changes may be lost when the Fetcher updates.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
PUT http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"Namespace": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"Namespace": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
#### DELETE /namespaces/`:nsName`/vulnerabilities/`:vulnName`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Vulnerabilities resource deletes a given Vulnerability.
|
||||
If this vulnerability was inserted by a Fetcher, it may be re-inserted when the Fetcher updates.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
||||
|
||||
## Fixes
|
||||
|
||||
#### GET /namespaces/`:nsName`/vulnerabilities/`:vulnName`/fixes
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Fixes resource displays the list of Features that fix the given Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471/fixes HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Features": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"Namespace": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### PUT /namespaces/`:nsName`/vulnerabilities/`:vulnName`/fixes/`:featureName`
|
||||
|
||||
###### Description
|
||||
|
||||
The PUT route for the Fixes resource updates a Feature that is the fix for a given Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
PUT http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471/fixes/coreutils HTTP/1.1
|
||||
|
||||
{
|
||||
"Feature": {
|
||||
"Name": "coreutils",
|
||||
"Namespace": "debian:8",
|
||||
"Version": "4.24-9"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Feature": {
|
||||
"Name": "coreutils",
|
||||
"Namespace": "debian:8",
|
||||
"Version": "4.24-9"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### DELETE /namespaces/`:nsName`/vulnerabilities/`:vulnName`/fixes/`:featureName`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Fixes resource removes a Feature as fix for the given Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471/fixes/coreutils
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
||||
|
||||
## Notifications
|
||||
|
||||
#### GET /notifications/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Notifications resource displays a notification that a Vulnerability has been updated.
|
||||
This route supports simultaneous pagination for both the `Old` and `New` Vulnerabilities' `LayersIntroducingVulnerability` property which can be extremely long.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|-------|--------|----------|---------------------------------------------------------------------------------------------------------------|
|
||||
| page | string | optional | Displays the specific page of the "LayersIntroducingVulnerability" property on New and Old vulnerabilities. |
|
||||
| limit | int | optional | Limits the amount of results in the "LayersIntroducingVulnerability" property on New and Old vulnerabilities. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/notifications/ec45ec87-bfc8-4129-a1c3-d2b82622175a?limit=2 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Notification": {
|
||||
"Name": "ec45ec87-bfc8-4129-a1c3-d2b82622175a",
|
||||
"Created": "1456247389",
|
||||
"Notified": "1456246708",
|
||||
"Limit": 2,
|
||||
"Page": "gAAAAABWzJaC2JCH6Apr_R1f2EkjGdibnrKOobTcYXBWl6t0Cw6Q04ENGIymB6XlZ3Zi0bYt2c-2cXe43fvsJ7ECZhZz4P8C8F9efr_SR0HPiejzQTuG0qAzeO8klogFfFjSz2peBvgP",
|
||||
"NextPage": "gAAAAABWzJaCTyr6QXP2aYsCwEZfWIkU2GkNplSMlTOhLJfiR3LorBv8QYgEIgyOvZRmHQEzJKvkI6TP2PkRczBkcD17GE89btaaKMqEX14yHDgyfQvdasW1tj3-5bBRt0esKi9ym5En",
|
||||
"New": {
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-TEST",
|
||||
"Namespace": "debian:8",
|
||||
"Description": "New CVE",
|
||||
"Severity": "Low",
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "grep",
|
||||
"Namespace": "debian:8",
|
||||
"Version": "2.25"
|
||||
}
|
||||
]
|
||||
},
|
||||
"LayersIntroducingVulnerability": [
|
||||
"3b59c795b34670618fbcace4dac7a27c5ecec156812c9e2c90d3f4be1916b12d.9673fdf7-b81a-4b3e-acf8-e551ef155449",
|
||||
"523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6"
|
||||
]
|
||||
},
|
||||
"Old": {
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-TEST",
|
||||
"Namespace": "debian:8",
|
||||
"Description": "New CVE",
|
||||
"Severity": "Low",
|
||||
"FixedIn": []
|
||||
},
|
||||
"LayersIntroducingVulnerability": [
|
||||
"3b59c795b34670618fbcace4dac7a27c5ecec156812c9e2c90d3f4be1916b12d.9673fdf7-b81a-4b3e-acf8-e551ef155449",
|
||||
"523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### DELETE /notification/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The delete route for the Notifications resource marks a Notification as read.
|
||||
The time at which this Notification was marked as read can be seen in the `Notified` property of the response GET route for Notification.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/notification/ec45ec87-bfc8-4129-a1c3-d2b82622175a HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
318
api/v1/models.go
Normal file
318
api/v1/models.go
Normal file
@ -0,0 +1,318 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/fernet/fernet-go"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "v1")
|
||||
|
||||
type Error struct {
|
||||
Message string `json:"Layer`
|
||||
}
|
||||
|
||||
// Layer is the API (JSON) representation of a container image layer.
type Layer struct {
	// Name is the layer's unique identifier (e.g. its digest).
	Name string `json:"Name,omitempty"`
	// Namespace is the detected OS namespace (e.g. "debian:8"), if known.
	Namespace string `json:"Namespace,omitempty"`
	// Path is the location the layer tarball can be fetched from.
	Path string `json:"Path,omitempty"`
	// ParentName identifies the layer's direct parent, if any.
	ParentName string `json:"ParentName,omitempty"`
	// Format is the layer format (e.g. "Docker").
	Format string `json:"Format,omitempty"`
	// IndexedByVersion records the worker engine version that indexed the
	// layer (copied from the database model's EngineVersion).
	IndexedByVersion int `json:"IndexedByVersion,omitempty"`
	// Features lists the features detected in this layer, optionally with
	// their vulnerabilities (see LayerFromDatabaseModel).
	Features []Feature `json:"Features,omitempty"`
}
|
||||
|
||||
func LayerFromDatabaseModel(dbLayer database.Layer, withFeatures, withVulnerabilities bool) Layer {
|
||||
layer := Layer{
|
||||
Name: dbLayer.Name,
|
||||
IndexedByVersion: dbLayer.EngineVersion,
|
||||
}
|
||||
|
||||
if dbLayer.Parent != nil {
|
||||
layer.ParentName = dbLayer.Parent.Name
|
||||
}
|
||||
|
||||
if dbLayer.Namespace != nil {
|
||||
layer.Namespace = dbLayer.Namespace.Name
|
||||
}
|
||||
|
||||
if withFeatures || withVulnerabilities && dbLayer.Features != nil {
|
||||
for _, dbFeatureVersion := range dbLayer.Features {
|
||||
feature := Feature{
|
||||
Name: dbFeatureVersion.Feature.Name,
|
||||
Namespace: dbFeatureVersion.Feature.Namespace.Name,
|
||||
Version: dbFeatureVersion.Version.String(),
|
||||
AddedBy: dbFeatureVersion.AddedBy.Name,
|
||||
}
|
||||
|
||||
for _, dbVuln := range dbFeatureVersion.AffectedBy {
|
||||
vuln := Vulnerability{
|
||||
Name: dbVuln.Name,
|
||||
Namespace: dbVuln.Namespace.Name,
|
||||
Description: dbVuln.Description,
|
||||
Link: dbVuln.Link,
|
||||
Severity: string(dbVuln.Severity),
|
||||
Metadata: dbVuln.Metadata,
|
||||
}
|
||||
|
||||
if dbVuln.FixedBy != types.MaxVersion {
|
||||
vuln.FixedBy = dbVuln.FixedBy.String()
|
||||
}
|
||||
feature.Vulnerabilities = append(feature.Vulnerabilities, vuln)
|
||||
}
|
||||
layer.Features = append(layer.Features, feature)
|
||||
}
|
||||
}
|
||||
|
||||
return layer
|
||||
}
|
||||
|
||||
// Vulnerability is the API representation of a vulnerability.
type Vulnerability struct {
	// Name is the vulnerability identifier (e.g. "CVE-2014-9471").
	Name string `json:"Name,omitempty"`
	// Namespace is the namespace the vulnerability applies to (e.g. "debian:8").
	Namespace string `json:"Namespace,omitempty"`
	// Description is a human-readable summary of the vulnerability.
	Description string `json:"Description,omitempty"`
	// Link points at an authoritative source of information.
	Link string `json:"Link,omitempty"`
	// Severity is the priority assigned to the vulnerability (e.g. "Low");
	// it must be a valid types.Priority to convert back to a database model.
	Severity string `json:"Severity,omitempty"`
	// Metadata carries arbitrary source-specific data (e.g. NVD CVSS scores).
	Metadata map[string]interface{} `json:"Metadata,omitempty"`
	// FixedBy is the version fixing the vulnerability for a given feature;
	// empty when no fix exists yet (types.MaxVersion sentinel).
	FixedBy string `json:"FixedBy,omitempty"`
	// FixedIn lists the feature versions that resolve the vulnerability.
	FixedIn []Feature `json:"FixedIn,omitempty"`
}
|
||||
|
||||
func (v Vulnerability) DatabaseModel() (database.Vulnerability, error) {
|
||||
severity := types.Priority(v.Severity)
|
||||
if !severity.IsValid() {
|
||||
return database.Vulnerability{}, errors.New("Invalid severity")
|
||||
}
|
||||
|
||||
var dbFeatures []database.FeatureVersion
|
||||
for _, feature := range v.FixedIn {
|
||||
dbFeature, err := feature.DatabaseModel()
|
||||
if err != nil {
|
||||
return database.Vulnerability{}, err
|
||||
}
|
||||
|
||||
dbFeatures = append(dbFeatures, dbFeature)
|
||||
}
|
||||
|
||||
return database.Vulnerability{
|
||||
Name: v.Name,
|
||||
Namespace: database.Namespace{Name: v.Namespace},
|
||||
Description: v.Description,
|
||||
Link: v.Link,
|
||||
Severity: severity,
|
||||
Metadata: v.Metadata,
|
||||
FixedIn: dbFeatures,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func VulnerabilityFromDatabaseModel(dbVuln database.Vulnerability, withFixedIn bool) Vulnerability {
|
||||
vuln := Vulnerability{
|
||||
Name: dbVuln.Name,
|
||||
Namespace: dbVuln.Namespace.Name,
|
||||
Description: dbVuln.Description,
|
||||
Link: dbVuln.Link,
|
||||
Severity: string(dbVuln.Severity),
|
||||
Metadata: dbVuln.Metadata,
|
||||
}
|
||||
|
||||
if withFixedIn {
|
||||
for _, dbFeatureVersion := range dbVuln.FixedIn {
|
||||
vuln.FixedIn = append(vuln.FixedIn, FeatureFromDatabaseModel(dbFeatureVersion))
|
||||
}
|
||||
}
|
||||
|
||||
return vuln
|
||||
}
|
||||
|
||||
// Feature is the API representation of a package/feature present in a layer.
type Feature struct {
	// Name is the feature's name (e.g. "coreutils").
	Name string `json:"Name,omitempty"`
	// Namespace is the namespace the feature belongs to (e.g. "debian:8").
	Namespace string `json:"Namespace,omitempty"`
	// Version is the installed version; the literal "None" maps to
	// types.MaxVersion (see FeatureFromDatabaseModel / DatabaseModel).
	Version string `json:"Version,omitempty"`
	// Vulnerabilities lists the vulnerabilities affecting this feature version.
	Vulnerabilities []Vulnerability `json:"Vulnerabilities,omitempty"`
	// AddedBy names the layer that introduced this feature.
	AddedBy string `json:"AddedBy,omitempty"`
}
|
||||
|
||||
func FeatureFromDatabaseModel(dbFeatureVersion database.FeatureVersion) Feature {
|
||||
versionStr := dbFeatureVersion.Version.String()
|
||||
if versionStr == types.MaxVersion.String() {
|
||||
versionStr = "None"
|
||||
}
|
||||
|
||||
return Feature{
|
||||
Name: dbFeatureVersion.Feature.Name,
|
||||
Namespace: dbFeatureVersion.Feature.Namespace.Name,
|
||||
Version: versionStr,
|
||||
AddedBy: dbFeatureVersion.AddedBy.Name,
|
||||
}
|
||||
}
|
||||
|
||||
func (f Feature) DatabaseModel() (database.FeatureVersion, error) {
|
||||
var version types.Version
|
||||
if f.Version == "None" {
|
||||
version = types.MaxVersion
|
||||
} else {
|
||||
var err error
|
||||
version, err = types.NewVersion(f.Version)
|
||||
if err != nil {
|
||||
return database.FeatureVersion{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Name: f.Name,
|
||||
Namespace: database.Namespace{Name: f.Namespace},
|
||||
},
|
||||
Version: version,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Notification is the API representation of a vulnerability-change
// notification, with pagination over the (potentially very long) lists of
// layers introducing the old and new vulnerability.
type Notification struct {
	// Name uniquely identifies the notification.
	Name string `json:"Name,omitempty"`
	// Created is the Unix timestamp (rendered as a string) of creation;
	// empty when the database time is zero.
	Created string `json:"Created,omitempty"`
	// Notified is the Unix timestamp (string) it was marked as read.
	Notified string `json:"Notified,omitempty"`
	// Deleted is the Unix timestamp (string) it was deleted.
	Deleted string `json:"Deleted,omitempty"`
	// Limit is the maximum number of entries per page.
	Limit int `json:"Limit,omitempty"`
	// Page is the opaque (encrypted) token identifying the current page.
	Page string `json:"Page,omitempty"`
	// NextPage is the opaque token for the next page; empty on the last page.
	NextPage string `json:"NextPage,omitempty"`
	// Old holds the previous state of the vulnerability, if any.
	Old *VulnerabilityWithLayers `json:"Old,omitempty"`
	// New holds the updated state of the vulnerability, if any.
	New *VulnerabilityWithLayers `json:"New,omitempty"`
}
|
||||
|
||||
func NotificationFromDatabaseModel(dbNotification database.VulnerabilityNotification, limit int, pageToken string, nextPage database.VulnerabilityNotificationPageNumber, key string) Notification {
|
||||
var oldVuln *VulnerabilityWithLayers
|
||||
if dbNotification.OldVulnerability != nil {
|
||||
v := VulnerabilityWithLayersFromDatabaseModel(*dbNotification.OldVulnerability)
|
||||
oldVuln = &v
|
||||
}
|
||||
|
||||
var newVuln *VulnerabilityWithLayers
|
||||
if dbNotification.NewVulnerability != nil {
|
||||
v := VulnerabilityWithLayersFromDatabaseModel(*dbNotification.NewVulnerability)
|
||||
newVuln = &v
|
||||
}
|
||||
|
||||
var nextPageStr string
|
||||
if nextPage != database.NoVulnerabilityNotificationPage {
|
||||
nextPageStr = pageNumberToToken(nextPage, key)
|
||||
}
|
||||
|
||||
var created, notified, deleted string
|
||||
if !dbNotification.Created.IsZero() {
|
||||
created = fmt.Sprintf("%d", dbNotification.Created.Unix())
|
||||
}
|
||||
if !dbNotification.Notified.IsZero() {
|
||||
notified = fmt.Sprintf("%d", dbNotification.Notified.Unix())
|
||||
}
|
||||
if !dbNotification.Deleted.IsZero() {
|
||||
deleted = fmt.Sprintf("%d", dbNotification.Deleted.Unix())
|
||||
}
|
||||
|
||||
// TODO(jzelinskie): implement "changed" key
|
||||
fmt.Println(dbNotification.Deleted.IsZero())
|
||||
return Notification{
|
||||
Name: dbNotification.Name,
|
||||
Created: created,
|
||||
Notified: notified,
|
||||
Deleted: deleted,
|
||||
Limit: limit,
|
||||
Page: pageToken,
|
||||
NextPage: nextPageStr,
|
||||
Old: oldVuln,
|
||||
New: newVuln,
|
||||
}
|
||||
}
|
||||
|
||||
// VulnerabilityWithLayers pairs a vulnerability with the names of the layers
// that introduce it; used in notification payloads.
type VulnerabilityWithLayers struct {
	// Vulnerability is the full API vulnerability, including its fixes.
	Vulnerability *Vulnerability `json:"Vulnerability,omitempty"`
	// LayersIntroducingVulnerability lists the names of the layers that
	// introduce the vulnerability.
	LayersIntroducingVulnerability []string `json:"LayersIntroducingVulnerability,omitempty"`
}
|
||||
|
||||
func VulnerabilityWithLayersFromDatabaseModel(dbVuln database.Vulnerability) VulnerabilityWithLayers {
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, true)
|
||||
|
||||
var layers []string
|
||||
for _, layer := range dbVuln.LayersIntroducingVulnerability {
|
||||
layers = append(layers, layer.Name)
|
||||
}
|
||||
|
||||
return VulnerabilityWithLayers{
|
||||
Vulnerability: &vuln,
|
||||
LayersIntroducingVulnerability: layers,
|
||||
}
|
||||
}
|
||||
|
||||
// LayerEnvelope wraps a Layer (or an Error) in API responses.
type LayerEnvelope struct {
	Layer *Layer `json:"Layer,omitempty"`
	Error *Error `json:"Error,omitempty"`
}

// NamespaceEnvelope wraps the list of known namespaces (or an Error).
type NamespaceEnvelope struct {
	Namespaces *[]string `json:"Namespaces,omitempty"`
	Error *Error `json:"Error,omitempty"`
}

// VulnerabilityEnvelope wraps a Vulnerability (or an Error).
type VulnerabilityEnvelope struct {
	Vulnerability *Vulnerability `json:"Vulnerability,omitempty"`
	Error *Error `json:"Error,omitempty"`
}

// NotificationEnvelope wraps a Notification (or an Error).
type NotificationEnvelope struct {
	Notification *Notification `json:"Notification,omitempty"`
	Error *Error `json:"Error,omitempty"`
}

// FeatureEnvelope wraps a single Feature, a list of Features, or an Error.
type FeatureEnvelope struct {
	Feature *Feature `json:"Feature,omitempty"`
	Features *[]Feature `json:"Features,omitempty"`
	Error *Error `json:"Error,omitempty"`
}
|
||||
|
||||
func tokenToPageNumber(token, key string) (database.VulnerabilityNotificationPageNumber, error) {
|
||||
k, _ := fernet.DecodeKey(key)
|
||||
msg := fernet.VerifyAndDecrypt([]byte(token), time.Hour, []*fernet.Key{k})
|
||||
if msg == nil {
|
||||
return database.VulnerabilityNotificationPageNumber{}, errors.New("invalid or expired pagination token")
|
||||
}
|
||||
|
||||
page := database.VulnerabilityNotificationPageNumber{}
|
||||
err := json.NewDecoder(bytes.NewBuffer(msg)).Decode(&page)
|
||||
return page, err
|
||||
}
|
||||
|
||||
func pageNumberToToken(page database.VulnerabilityNotificationPageNumber, key string) string {
|
||||
var buf bytes.Buffer
|
||||
err := json.NewEncoder(&buf).Encode(page)
|
||||
if err != nil {
|
||||
log.Fatal("failed to encode VulnerabilityNotificationPageNumber")
|
||||
}
|
||||
|
||||
k, _ := fernet.DecodeKey(key)
|
||||
tokenBytes, err := fernet.EncryptAndSign(buf.Bytes(), k)
|
||||
if err != nil {
|
||||
log.Fatal("failed to encrypt VulnerabilityNotificationpageNumber")
|
||||
}
|
||||
|
||||
return string(tokenBytes)
|
||||
}
|
55
api/v1/router.go
Normal file
55
api/v1/router.go
Normal file
@ -0,0 +1,55 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package v1 implements the first version of the Clair API.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/julienschmidt/httprouter"
|
||||
|
||||
"github.com/coreos/clair/api/context"
|
||||
)
|
||||
|
||||
// NewRouter creates an HTTP router for version 1 of the Clair API.
// Every handler is wrapped with context.HTTPHandler so it receives the
// shared RouteContext alongside the request.
func NewRouter(ctx *context.RouteContext) *httprouter.Router {
	router := httprouter.New()

	// Layers: index, inspect, and delete container image layers.
	router.POST("/layers", context.HTTPHandler(postLayer, ctx))
	router.GET("/layers/:layerName", context.HTTPHandler(getLayer, ctx))
	router.DELETE("/layers/:layerName", context.HTTPHandler(deleteLayer, ctx))

	// Namespaces: list the namespaces currently managed.
	router.GET("/namespaces", context.HTTPHandler(getNamespaces, ctx))

	// Vulnerabilities: create/read/update/delete within a namespace.
	router.POST("/namespaces/:namespaceName/vulnerabilities", context.HTTPHandler(postVulnerability, ctx))
	router.GET("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName", context.HTTPHandler(getVulnerability, ctx))
	router.PUT("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName", context.HTTPHandler(putVulnerability, ctx))
	router.DELETE("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName", context.HTTPHandler(deleteVulnerability, ctx))

	// Fixes: manage the feature versions that fix a vulnerability.
	router.GET("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName/fixes", context.HTTPHandler(getFixes, ctx))
	router.PUT("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName/fixes/:fixName", context.HTTPHandler(putFix, ctx))
	router.DELETE("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName/fixes/:fixName", context.HTTPHandler(deleteFix, ctx))

	// Notifications: read and mark-as-read vulnerability notifications.
	router.GET("/notifications/:notificationName", context.HTTPHandler(getNotification, ctx))
	router.DELETE("/notifications/:notificationName", context.HTTPHandler(deleteNotification, ctx))

	// Metrics endpoint.
	router.GET("/metrics", context.HTTPHandler(getMetrics, ctx))

	return router
}
|
430
api/v1/routes.go
Normal file
430
api/v1/routes.go
Normal file
@ -0,0 +1,430 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/coreos/clair/api/context"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/worker"
|
||||
)
|
||||
|
||||
const (
	// Route identifiers reported to Prometheus so that metrics can be
	// broken down per API route.
	postLayerRoute           = "v1/postLayer"
	getLayerRoute            = "v1/getLayer"
	deleteLayerRoute         = "v1/deleteLayer"
	getNamespacesRoute       = "v1/getNamespaces"
	postVulnerabilityRoute   = "v1/postVulnerability"
	getVulnerabilityRoute    = "v1/getVulnerability"
	putVulnerabilityRoute    = "v1/putVulnerability"
	deleteVulnerabilityRoute = "v1/deleteVulnerability"
	getFixesRoute            = "v1/getFixes"
	putFixRoute              = "v1/putFix"
	deleteFixRoute           = "v1/deleteFix"
	getNotificationRoute     = "v1/getNotification"
	deleteNotificationRoute  = "v1/deleteNotification"
	getMetricsRoute          = "v1/getMetrics"

	// maxBodySize restricts client request bodies to 1MiB (2^20 bytes).
	maxBodySize int64 = 1048576

	// statusUnprocessableEntity represents the 422 (Unprocessable Entity) status code, which means
	// the server understands the content type of the request entity
	// (hence a 415(Unsupported Media Type) status code is inappropriate), and the syntax of the
	// request entity is correct (thus a 400 (Bad Request) status code is inappropriate) but was
	// unable to process the contained instructions.
	statusUnprocessableEntity = 422
)
|
||||
|
||||
func decodeJSON(r *http.Request, v interface{}) error {
|
||||
defer r.Body.Close()
|
||||
return json.NewDecoder(io.LimitReader(r.Body, maxBodySize)).Decode(v)
|
||||
}
|
||||
|
||||
func writeResponse(w http.ResponseWriter, r *http.Request, status int, resp interface{}) {
|
||||
// Headers must be written before the response.
|
||||
header := w.Header()
|
||||
header.Set("Content-Type", "application/json;charset=utf-8")
|
||||
header.Set("Server", "clair")
|
||||
|
||||
// Gzip the response if the client supports it.
|
||||
var writer io.Writer = w
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
gzipWriter := gzip.NewWriter(w)
|
||||
defer gzipWriter.Close()
|
||||
writer = gzipWriter
|
||||
|
||||
header.Set("Content-Encoding", "gzip")
|
||||
}
|
||||
|
||||
// Write the response.
|
||||
w.WriteHeader(status)
|
||||
err := json.NewEncoder(writer).Encode(resp)
|
||||
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *json.MarshalerError, *json.UnsupportedTypeError, *json.UnsupportedValueError:
|
||||
panic("v1: failed to marshal response: " + err.Error())
|
||||
default:
|
||||
log.Warningf("failed to write response: %s", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// postLayer handles POST /layers: it decodes a LayerEnvelope from the
// request body and asks the worker to download and index the layer.
// It returns the route identifier (for metrics) and the HTTP status sent.
func postLayer(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
	request := LayerEnvelope{}
	err := decodeJSON(r, &request)
	if err != nil {
		writeResponse(w, r, http.StatusBadRequest, LayerEnvelope{Error: &Error{err.Error()}})
		return postLayerRoute, http.StatusBadRequest
	}

	if request.Layer == nil {
		writeResponse(w, r, http.StatusBadRequest, LayerEnvelope{Error: &Error{"failed to provide layer"}})
		return postLayerRoute, http.StatusBadRequest
	}

	// Download, extract and analyze the layer.
	err = worker.Process(ctx.Store, request.Layer.Name, request.Layer.ParentName, request.Layer.Path, request.Layer.Format)
	if err != nil {
		// Extraction/format problems are the client's fault: the request is
		// well-formed but the layer cannot be processed (422).
		if err == utils.ErrCouldNotExtract ||
			err == utils.ErrExtractedFileTooBig ||
			err == worker.ErrUnsupported {
			writeResponse(w, r, statusUnprocessableEntity, LayerEnvelope{Error: &Error{err.Error()}})
			return postLayerRoute, statusUnprocessableEntity
		}

		// Validation errors raised by the worker map to 400.
		if _, badreq := err.(*cerrors.ErrBadRequest); badreq {
			writeResponse(w, r, http.StatusBadRequest, LayerEnvelope{Error: &Error{err.Error()}})
			return postLayerRoute, http.StatusBadRequest
		}

		// Anything else is an internal failure.
		writeResponse(w, r, http.StatusInternalServerError, LayerEnvelope{Error: &Error{err.Error()}})
		return postLayerRoute, http.StatusInternalServerError
	}

	// Echo the layer back, stamped with the engine version that indexed it.
	writeResponse(w, r, http.StatusCreated, LayerEnvelope{Layer: &Layer{
		Name:             request.Layer.Name,
		ParentName:       request.Layer.ParentName,
		Path:             request.Layer.Path,
		Format:           request.Layer.Format,
		IndexedByVersion: worker.Version,
	}})
	return postLayerRoute, http.StatusCreated
}
|
||||
|
||||
func getLayer(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
_, withFeatures := r.URL.Query()["features"]
|
||||
_, withVulnerabilities := r.URL.Query()["vulnerabilities"]
|
||||
|
||||
dbLayer, err := ctx.Store.FindLayer(p.ByName("layerName"), withFeatures, withVulnerabilities)
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return getLayerRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return getLayerRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
layer := LayerFromDatabaseModel(dbLayer, withFeatures, withVulnerabilities)
|
||||
|
||||
writeResponse(w, r, http.StatusOK, LayerEnvelope{Layer: &layer})
|
||||
return getLayerRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteLayer(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteLayer(p.ByName("layerName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteLayerRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteLayerRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteLayerRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getNamespaces(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
dbNamespaces, err := ctx.Store.ListNamespaces()
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, NamespaceEnvelope{Error: &Error{err.Error()}})
|
||||
return getNamespacesRoute, http.StatusInternalServerError
|
||||
}
|
||||
var namespaces []string
|
||||
for _, dbNamespace := range dbNamespaces {
|
||||
namespaces = append(namespaces, dbNamespace.Name)
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusOK, NamespaceEnvelope{Namespaces: &namespaces})
|
||||
return getNamespacesRoute, http.StatusOK
|
||||
}
|
||||
|
||||
// postVulnerability handles POST /namespaces/:namespaceName/vulnerabilities:
// it decodes a vulnerability from the request body and inserts it into the
// database. Returns the route identifier (for metrics) and the HTTP status.
func postVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
	request := VulnerabilityEnvelope{}
	err := decodeJSON(r, &request)
	if err != nil {
		writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
		return postVulnerabilityRoute, http.StatusBadRequest
	}

	if request.Vulnerability == nil {
		writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"failed to provide vulnerability"}})
		return postVulnerabilityRoute, http.StatusBadRequest
	}

	// Convert the API representation into the database model; this also
	// validates the payload (e.g. severity, version formats).
	vuln, err := request.Vulnerability.DatabaseModel()
	if err != nil {
		writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
		return postVulnerabilityRoute, http.StatusBadRequest
	}

	err = ctx.Store.InsertVulnerabilities([]database.Vulnerability{vuln}, true)
	if err != nil {
		switch err.(type) {
		case *cerrors.ErrBadRequest:
			writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
			return postVulnerabilityRoute, http.StatusBadRequest
		default:
			writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
			return postVulnerabilityRoute, http.StatusInternalServerError
		}
	}

	// Echo the vulnerability back to the client.
	writeResponse(w, r, http.StatusCreated, VulnerabilityEnvelope{Vulnerability: request.Vulnerability})
	return postVulnerabilityRoute, http.StatusCreated
}
|
||||
|
||||
func getVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
_, withFixedIn := r.URL.Query()["fixedIn"]
|
||||
|
||||
dbVuln, err := ctx.Store.FindVulnerability(p.ByName("namespaceName"), p.ByName("vulnerabilityName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return getVulnerabilityRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return getVulnerabilityRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, withFixedIn)
|
||||
|
||||
writeResponse(w, r, http.StatusOK, VulnerabilityEnvelope{Vulnerability: &vuln})
|
||||
return getVulnerabilityRoute, http.StatusOK
|
||||
}
|
||||
|
||||
// putVulnerability handles PUT
// /namespaces/:namespaceName/vulnerabilities/:vulnerabilityName: it upserts
// the vulnerability designated by the URL. The payload must not carry
// FixedIn entries; fixes are managed through the dedicated /fixes
// sub-resource.
func putVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
	request := VulnerabilityEnvelope{}
	err := decodeJSON(r, &request)
	if err != nil {
		writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
		return putVulnerabilityRoute, http.StatusBadRequest
	}

	if request.Vulnerability == nil {
		writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"failed to provide vulnerability"}})
		return putVulnerabilityRoute, http.StatusBadRequest
	}

	// Fixes must go through the /fixes sub-resource, not this endpoint.
	if len(request.Vulnerability.FixedIn) != 0 {
		writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"Vulnerability.FixedIn must be empty"}})
		return putVulnerabilityRoute, http.StatusBadRequest
	}

	vuln, err := request.Vulnerability.DatabaseModel()
	if err != nil {
		writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
		return putVulnerabilityRoute, http.StatusBadRequest
	}

	// The URL, not the payload, is authoritative for the identity of the
	// vulnerability being updated.
	vuln.Namespace.Name = p.ByName("namespaceName")
	vuln.Name = p.ByName("vulnerabilityName")

	err = ctx.Store.InsertVulnerabilities([]database.Vulnerability{vuln}, true)
	if err != nil {
		switch err.(type) {
		case *cerrors.ErrBadRequest:
			writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
			return putVulnerabilityRoute, http.StatusBadRequest
		default:
			writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
			return putVulnerabilityRoute, http.StatusInternalServerError
		}
	}

	writeResponse(w, r, http.StatusOK, VulnerabilityEnvelope{Vulnerability: request.Vulnerability})
	return putVulnerabilityRoute, http.StatusOK
}
|
||||
|
||||
func deleteVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteVulnerability(p.ByName("namespaceName"), p.ByName("vulnerabilityName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteVulnerabilityRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteVulnerabilityRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteVulnerabilityRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getFixes(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
dbVuln, err := ctx.Store.FindVulnerability(p.ByName("namespaceName"), p.ByName("vulnerabilityName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return getFixesRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return getFixesRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, true)
|
||||
writeResponse(w, r, http.StatusOK, FeatureEnvelope{Features: &vuln.FixedIn})
|
||||
return getFixesRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func putFix(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
request := FeatureEnvelope{}
|
||||
err := decodeJSON(r, &request)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Feature == nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{"failed to provide feature"}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Feature.Name != p.ByName("fixName") {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{"feature name in URL and JSON do not match"}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
dbFix, err := request.Feature.DatabaseModel()
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
err = ctx.Store.InsertVulnerabilityFixes(p.ByName("vulnerabilityNamespace"), p.ByName("vulnerabilityName"), []database.FeatureVersion{dbFix})
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *cerrors.ErrBadRequest:
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
default:
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusNotFound
|
||||
}
|
||||
writeResponse(w, r, http.StatusInternalServerError, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusInternalServerError
|
||||
}
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusOK, FeatureEnvelope{Feature: request.Feature})
|
||||
return putFixRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteFix(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteVulnerabilityFix(p.ByName("vulnerabilityNamespace"), p.ByName("vulnerabilityName"), p.ByName("fixName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteFixRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteFixRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteFixRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getNotification(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
query := r.URL.Query()
|
||||
|
||||
limitStrs, limitExists := query["limit"]
|
||||
if !limitExists {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"must provide limit query parameter"}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
limit, err := strconv.Atoi(limitStrs[0])
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"invalid limit format: " + err.Error()}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
var pageToken string
|
||||
page := database.VulnerabilityNotificationFirstPage
|
||||
pageStrs, pageExists := query["page"]
|
||||
if pageExists {
|
||||
page, err = tokenToPageNumber(pageStrs[0], ctx.Config.PaginationKey)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"invalid page format: " + err.Error()}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
pageToken = pageStrs[0]
|
||||
} else {
|
||||
pageToken = pageNumberToToken(page, ctx.Config.PaginationKey)
|
||||
}
|
||||
|
||||
dbNotification, nextPage, err := ctx.Store.GetNotification(p.ByName("notificationName"), limit, page)
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteNotificationRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return getNotificationRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
notification := NotificationFromDatabaseModel(dbNotification, limit, pageToken, nextPage, ctx.Config.PaginationKey)
|
||||
|
||||
writeResponse(w, r, http.StatusOK, NotificationEnvelope{Notification: ¬ification})
|
||||
return getNotificationRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteNotification(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteNotification(p.ByName("notificationName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteNotificationRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteNotificationRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteNotificationRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getMetrics(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
prometheus.Handler().ServeHTTP(w, r)
|
||||
return getMetricsRoute, 0
|
||||
}
|
@ -1,72 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package wrappers contains httprouter.Handle wrappers that are used in the API.
|
||||
package wrappers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")
|
||||
|
||||
type logWriter struct {
|
||||
http.ResponseWriter
|
||||
status int
|
||||
size int
|
||||
}
|
||||
|
||||
func (lw *logWriter) Header() http.Header {
|
||||
return lw.ResponseWriter.Header()
|
||||
}
|
||||
|
||||
func (lw *logWriter) Write(b []byte) (int, error) {
|
||||
if !lw.Written() {
|
||||
lw.WriteHeader(http.StatusOK)
|
||||
}
|
||||
size, err := lw.ResponseWriter.Write(b)
|
||||
lw.size += size
|
||||
return size, err
|
||||
}
|
||||
|
||||
func (lw *logWriter) WriteHeader(s int) {
|
||||
lw.status = s
|
||||
lw.ResponseWriter.WriteHeader(s)
|
||||
}
|
||||
|
||||
func (lw *logWriter) Size() int {
|
||||
return lw.size
|
||||
}
|
||||
|
||||
func (lw *logWriter) Written() bool {
|
||||
return lw.status != 0
|
||||
}
|
||||
|
||||
func (lw *logWriter) Status() int {
|
||||
return lw.status
|
||||
}
|
||||
|
||||
// Log wraps a http.HandlerFunc and logs the API call
|
||||
func Log(fn httprouter.Handle) httprouter.Handle {
|
||||
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
lw := &logWriter{ResponseWriter: w}
|
||||
start := time.Now()
|
||||
fn(lw, r, p)
|
||||
log.Infof("%d %s %s (%s)", lw.Status(), r.Method, r.RequestURI, time.Since(start))
|
||||
}
|
||||
}
|
@ -1,101 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package wrappers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
|
||||
httputils "github.com/coreos/clair/utils/http"
|
||||
)
|
||||
|
||||
// ErrHandlerTimeout is returned on ResponseWriter Write calls
|
||||
// in handlers which have timed out.
|
||||
var ErrHandlerTimeout = errors.New("http: Handler timeout")
|
||||
|
||||
type timeoutWriter struct {
|
||||
http.ResponseWriter
|
||||
|
||||
mu sync.Mutex
|
||||
timedOut bool
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
func (tw *timeoutWriter) Header() http.Header {
|
||||
return tw.ResponseWriter.Header()
|
||||
}
|
||||
|
||||
func (tw *timeoutWriter) Write(p []byte) (int, error) {
|
||||
tw.mu.Lock()
|
||||
defer tw.mu.Unlock()
|
||||
tw.wroteHeader = true // implicitly at least
|
||||
if tw.timedOut {
|
||||
return 0, ErrHandlerTimeout
|
||||
}
|
||||
return tw.ResponseWriter.Write(p)
|
||||
}
|
||||
|
||||
func (tw *timeoutWriter) WriteHeader(status int) {
|
||||
tw.mu.Lock()
|
||||
defer tw.mu.Unlock()
|
||||
if tw.timedOut || tw.wroteHeader {
|
||||
return
|
||||
}
|
||||
tw.wroteHeader = true
|
||||
tw.ResponseWriter.WriteHeader(status)
|
||||
}
|
||||
|
||||
// TimeOut wraps a http.HandlerFunc and ensure that a response is given under
|
||||
// the specified duration.
|
||||
//
|
||||
// If the handler takes longer than the time limit, the wrapper responds with
|
||||
// a Service Unavailable error, an error message and the handler response which
|
||||
// may come later is ignored.
|
||||
//
|
||||
// After a timeout, any write the handler to its ResponseWriter will return
|
||||
// ErrHandlerTimeout.
|
||||
//
|
||||
// If the duration is 0, the wrapper does nothing.
|
||||
func TimeOut(d time.Duration, fn httprouter.Handle) httprouter.Handle {
|
||||
if d == 0 {
|
||||
return fn
|
||||
}
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
done := make(chan bool)
|
||||
tw := &timeoutWriter{ResponseWriter: w}
|
||||
|
||||
go func() {
|
||||
fn(tw, r, p)
|
||||
done <- true
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case <-time.After(d):
|
||||
tw.mu.Lock()
|
||||
defer tw.mu.Unlock()
|
||||
if !tw.wroteHeader {
|
||||
httputils.WriteHTTPError(tw.ResponseWriter, http.StatusServiceUnavailable, ErrHandlerTimeout)
|
||||
}
|
||||
tw.timedOut = true
|
||||
}
|
||||
}
|
||||
}
|
15
clair.go
15
clair.go
@ -23,8 +23,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/api"
|
||||
"github.com/coreos/clair/api/context"
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/database/pgsql"
|
||||
"github.com/coreos/clair/notifier"
|
||||
"github.com/coreos/clair/updater"
|
||||
"github.com/coreos/clair/utils"
|
||||
@ -40,25 +41,25 @@ func Boot(config *config.Config) {
|
||||
st := utils.NewStopper()
|
||||
|
||||
// Open database
|
||||
err := database.Open(config.Database)
|
||||
db, err := pgsql.Open(config.Database)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer database.Close()
|
||||
defer db.Close()
|
||||
|
||||
// Start notifier
|
||||
st.Begin()
|
||||
go notifier.Run(config.Notifier, st)
|
||||
go notifier.Run(config.Notifier, db, st)
|
||||
|
||||
// Start API
|
||||
st.Begin()
|
||||
go api.Run(config.API, st)
|
||||
go api.Run(config.API, &context.RouteContext{db, config.API}, st)
|
||||
st.Begin()
|
||||
go api.RunHealth(config.API, st)
|
||||
go api.RunHealth(config.API, &context.RouteContext{db, config.API}, st)
|
||||
|
||||
// Start updater
|
||||
st.Begin()
|
||||
go updater.Run(config.Updater, st)
|
||||
go updater.Run(config.Updater, db, st)
|
||||
|
||||
// Wait for interruption and shutdown gracefully.
|
||||
waitForSignals(os.Interrupt)
|
||||
|
@ -27,10 +27,22 @@ import (
|
||||
|
||||
// Register components
|
||||
_ "github.com/coreos/clair/notifier/notifiers"
|
||||
_ "github.com/coreos/clair/updater/fetchers"
|
||||
_ "github.com/coreos/clair/worker/detectors/data"
|
||||
_ "github.com/coreos/clair/worker/detectors/os"
|
||||
_ "github.com/coreos/clair/worker/detectors/packages"
|
||||
|
||||
_ "github.com/coreos/clair/updater/fetchers/debian"
|
||||
_ "github.com/coreos/clair/updater/fetchers/rhel"
|
||||
_ "github.com/coreos/clair/updater/fetchers/ubuntu"
|
||||
_ "github.com/coreos/clair/updater/metadata_fetchers/nvd"
|
||||
|
||||
_ "github.com/coreos/clair/worker/detectors/data/aci"
|
||||
_ "github.com/coreos/clair/worker/detectors/data/docker"
|
||||
|
||||
_ "github.com/coreos/clair/worker/detectors/feature/dpkg"
|
||||
_ "github.com/coreos/clair/worker/detectors/feature/rpm"
|
||||
|
||||
_ "github.com/coreos/clair/worker/detectors/namespace/aptsources"
|
||||
_ "github.com/coreos/clair/worker/detectors/namespace/lsbrelease"
|
||||
_ "github.com/coreos/clair/worker/detectors/namespace/osrelease"
|
||||
_ "github.com/coreos/clair/worker/detectors/namespace/redhatrelease"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair/cmd/clair", "main")
|
||||
|
@ -1,41 +1,72 @@
|
||||
# The values specified here are the default values that Clair uses if no configuration file
|
||||
# is specified or if the keys are not defined.
|
||||
# Copyright 2015 clair authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# The values specified here are the default values that Clair uses if no configuration file is specified or if the keys are not defined.
|
||||
---
|
||||
database:
|
||||
# Database backend.
|
||||
#
|
||||
# Possible values are "bolt", "leveldb", "memstore", "mongo", "sql".
|
||||
#
|
||||
# When running multiple instances is not desired, using BoltDB backend is the best choice as it is
|
||||
# lightning fast. However, using PostgreSQL enables running multiple instances concurrently.
|
||||
# The default is just an ephemeral database.
|
||||
type: memstore
|
||||
# Path to the database.
|
||||
#
|
||||
# Can be a file or a connection string.
|
||||
path:
|
||||
# PostgreSQL Connection string
|
||||
# http://www.postgresql.org/docs/9.4/static/libpq-connect.html
|
||||
source:
|
||||
|
||||
# Number of elements kept in the cache
|
||||
# Values unlikely to change (e.g. namespaces) are cached in order to prevent needless roundtrips to the database.
|
||||
cacheSize: 16384
|
||||
|
||||
api:
|
||||
# Port on which the main API and the health API will listen on.
|
||||
# API server port
|
||||
port: 6060
|
||||
|
||||
# Health server port
|
||||
# This is an unencrypted endpoint useful for load balancers to check the healthiness of the clair server.
|
||||
healthport: 6061
|
||||
# Maximum time that API requests may take before they time-out with a HTTP 503 error.
|
||||
|
||||
# Deadline before an API request will respond with a 503
|
||||
timeout: 900s
|
||||
# Paths to certificates to secure the main API with TLS and client certificate auth.
|
||||
|
||||
# 32-byte URL-safe base64 key used to encrypt pagination tokens
|
||||
# If one is not provided, it will be generated.
|
||||
# Multiple clair instances in the same cluster need the same value.
|
||||
paginationKey:
|
||||
|
||||
# Optional PKI configuration
|
||||
# If you want to easily generate client certificates and CAs, try the following projects:
|
||||
# https://github.com/coreos/etcd-ca
|
||||
# https://github.com/cloudflare/cfssl
|
||||
cafile:
|
||||
keyfile:
|
||||
certfile:
|
||||
|
||||
updater:
|
||||
# Frequency at which the vulnerability updater will run.
|
||||
# Use 0 to disable the updater entirely.
|
||||
# Frequency the database will be updated with vulnerabilities from the default data sources
|
||||
# The value 0 disables the updater entirely.
|
||||
interval: 2h
|
||||
|
||||
notifier:
|
||||
# How many attempts will the notifier do when a notifier backend fails.
|
||||
# Number of attempts before the notification is marked as failed to be sent
|
||||
attempts: 3
|
||||
# Configuration for HTTP notifier
|
||||
|
||||
# Duration before a failed notification is retried
|
||||
renotifyInterval: 2h
|
||||
|
||||
http:
|
||||
# Endpoint that will receive notifications with POST requests.
|
||||
# Optional endpoint that will receive notifications via POST requests
|
||||
endpoint:
|
||||
# Server name and path to certificates to call the endpoint securely with TLS and client certificate auth.
|
||||
|
||||
# Optional PKI configuration
|
||||
# If you want to easily generate client certificates and CAs, try the following projects:
|
||||
# https://github.com/coreos/etcd-ca
|
||||
# https://github.com/cloudflare/cfssl
|
||||
servername:
|
||||
cafile:
|
||||
keyfile:
|
||||
|
@ -19,6 +19,7 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/fernet/fernet-go"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
@ -33,8 +34,8 @@ type Config struct {
|
||||
// DatabaseConfig is the configuration used to specify how Clair connects
|
||||
// to a database.
|
||||
type DatabaseConfig struct {
|
||||
Type string
|
||||
Path string
|
||||
Source string
|
||||
CacheSize int
|
||||
}
|
||||
|
||||
// UpdaterConfig is the configuration for the Updater service.
|
||||
@ -45,6 +46,7 @@ type UpdaterConfig struct {
|
||||
// NotifierConfig is the configuration for the Notifier service and its registered notifiers.
|
||||
type NotifierConfig struct {
|
||||
Attempts int
|
||||
RenotifyInterval time.Duration
|
||||
Params map[string]interface{} `yaml:",inline"`
|
||||
}
|
||||
|
||||
@ -53,13 +55,14 @@ type APIConfig struct {
|
||||
Port int
|
||||
HealthPort int
|
||||
Timeout time.Duration
|
||||
PaginationKey string
|
||||
CertFile, KeyFile, CAFile string
|
||||
}
|
||||
|
||||
// DefaultConfig is a configuration that can be used as a fallback value.
|
||||
var DefaultConfig = Config{
|
||||
Database: &DatabaseConfig{
|
||||
Type: "memstore",
|
||||
CacheSize: 16384,
|
||||
},
|
||||
Updater: &UpdaterConfig{
|
||||
Interval: 1 * time.Hour,
|
||||
@ -71,6 +74,7 @@ var DefaultConfig = Config{
|
||||
},
|
||||
Notifier: &NotifierConfig{
|
||||
Attempts: 5,
|
||||
RenotifyInterval: 2 * time.Hour,
|
||||
},
|
||||
}
|
||||
|
||||
@ -94,5 +98,22 @@ func Load(path string) (config *Config, err error) {
|
||||
}
|
||||
|
||||
err = yaml.Unmarshal(d, config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if config.API.PaginationKey == "" {
|
||||
var key fernet.Key
|
||||
if err = key.Generate(); err != nil {
|
||||
return
|
||||
}
|
||||
config.API.PaginationKey = key.Encode()
|
||||
} else {
|
||||
_, err = fernet.DecodeKey(config.API.PaginationKey)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -22,38 +22,32 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/api/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
postLayerURI = "/v1/layers"
|
||||
getLayerVulnerabilitiesURI = "/v1/layers/%s/vulnerabilities?minimumPriority=%s"
|
||||
getLayerFeaturesURI = "/v1/layers/%s?vulnerabilities"
|
||||
httpPort = 9279
|
||||
)
|
||||
|
||||
type APIVulnerabilitiesResponse struct {
|
||||
Vulnerabilities []APIVulnerability
|
||||
}
|
||||
|
||||
type APIVulnerability struct {
|
||||
ID, Link, Priority, Description, CausedByPackage string
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Parse command-line arguments.
|
||||
endpoint := flag.String("endpoint", "http://127.0.0.1:6060", "Address to Clair API")
|
||||
myAddress := flag.String("my-address", "127.0.0.1", "Address from the point of view of Clair")
|
||||
minimumPriority := flag.String("minimum-priority", "Low", "Minimum vulnerability vulnerability to show")
|
||||
|
||||
flag.Usage = func() {
|
||||
fmt.Fprintf(os.Stderr, "Usage: %s [options] image-id\n\nOptions:\n", os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
flag.Parse()
|
||||
|
||||
if len(flag.Args()) != 1 {
|
||||
@ -62,46 +56,41 @@ func main() {
|
||||
}
|
||||
imageName := flag.Args()[0]
|
||||
|
||||
// Save image
|
||||
// Save image.
|
||||
fmt.Printf("Saving %s\n", imageName)
|
||||
path, err := save(imageName)
|
||||
defer os.RemoveAll(path)
|
||||
if err != nil {
|
||||
log.Fatalf("- Could not save image: %s\n", err)
|
||||
fmt.Printf("- Could not save image: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Retrieve history
|
||||
// Retrieve history.
|
||||
fmt.Println("Getting image's history")
|
||||
layerIDs, err := historyFromManifest(path)
|
||||
if err != nil {
|
||||
layerIDs, err = historyFromCommand(imageName)
|
||||
}
|
||||
if err != nil || len(layerIDs) == 0 {
|
||||
log.Fatalf("- Could not get image's history: %s\n", err)
|
||||
fmt.Printf("- Could not get image's history: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Setup a simple HTTP server if Clair is not local
|
||||
// Setup a simple HTTP server if Clair is not local.
|
||||
if !strings.Contains(*endpoint, "127.0.0.1") && !strings.Contains(*endpoint, "localhost") {
|
||||
go func(path string) {
|
||||
allowedHost := strings.TrimPrefix(*endpoint, "http://")
|
||||
portIndex := strings.Index(allowedHost, ":")
|
||||
if portIndex >= 0 {
|
||||
allowedHost = allowedHost[:portIndex]
|
||||
}
|
||||
|
||||
fmt.Printf("Setting up HTTP server (allowing: %s)\n", allowedHost)
|
||||
|
||||
err := http.ListenAndServe(":"+strconv.Itoa(httpPort), restrictedFileServer(path, allowedHost))
|
||||
if err != nil {
|
||||
log.Fatalf("- An error occurs with the HTTP Server: %s\n", err)
|
||||
}
|
||||
}(path)
|
||||
go listenHTTP(path, allowedHost)
|
||||
|
||||
path = "http://" + *myAddress + ":" + strconv.Itoa(httpPort)
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Analyze layers
|
||||
// Analyze layers.
|
||||
fmt.Printf("Analyzing %d layers\n", len(layerIDs))
|
||||
for i := 0; i < len(layerIDs); i++ {
|
||||
fmt.Printf("- Analyzing %s\n", layerIDs[i])
|
||||
@ -113,26 +102,62 @@ func main() {
|
||||
err = analyzeLayer(*endpoint, path+"/"+layerIDs[i]+"/layer.tar", layerIDs[i], "")
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("- Could not analyze layer: %s\n", err)
|
||||
fmt.Printf("- Could not analyze layer: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Get vulnerabilities
|
||||
// Get vulnerabilities.
|
||||
fmt.Println("Getting image's vulnerabilities")
|
||||
vulnerabilities, err := getVulnerabilities(*endpoint, layerIDs[len(layerIDs)-1], *minimumPriority)
|
||||
layer, err := getLayer(*endpoint, layerIDs[len(layerIDs)-1])
|
||||
if err != nil {
|
||||
log.Fatalf("- Could not get vulnerabilities: %s\n", err)
|
||||
fmt.Printf("- Could not get layer information: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if len(vulnerabilities) == 0 {
|
||||
fmt.Println("Bravo, your image looks SAFE !")
|
||||
|
||||
// Print report.
|
||||
fmt.Printf("\n# Clair report for image %s (%s)\n", imageName, time.Now().UTC())
|
||||
|
||||
if len(layer.Features) == 0 {
|
||||
fmt.Println("No feature has been detected on the image.")
|
||||
fmt.Println("This usually means that the image isn't supported by Clair.")
|
||||
os.Exit(0)
|
||||
}
|
||||
for _, vulnerability := range vulnerabilities {
|
||||
fmt.Printf("- # %s\n", vulnerability.ID)
|
||||
fmt.Printf(" - Priority: %s\n", vulnerability.Priority)
|
||||
|
||||
isSafe := true
|
||||
for _, feature := range layer.Features {
|
||||
fmt.Printf("## Feature: %s %s (%s)\n", feature.Name, feature.Version, feature.Namespace)
|
||||
|
||||
if len(feature.Vulnerabilities) > 0 {
|
||||
isSafe = false
|
||||
|
||||
fmt.Printf(" - Added by: %s\n", feature.AddedBy)
|
||||
|
||||
for _, vulnerability := range feature.Vulnerabilities {
|
||||
fmt.Printf("### (%s) %s\n", vulnerability.Severity, vulnerability.Name)
|
||||
|
||||
if vulnerability.Description != "" {
|
||||
fmt.Printf(" - Link: %s\n", vulnerability.Link)
|
||||
fmt.Printf(" - Package: %s\n", vulnerability.CausedByPackage)
|
||||
}
|
||||
|
||||
if vulnerability.Link != "" {
|
||||
fmt.Printf(" - Description: %s\n", vulnerability.Description)
|
||||
}
|
||||
|
||||
if vulnerability.FixedBy != "" {
|
||||
fmt.Printf(" - Fixed version: %s\n", vulnerability.FixedBy)
|
||||
}
|
||||
|
||||
if len(vulnerability.Metadata) > 0 {
|
||||
fmt.Printf(" - Metadata: %+v\n", vulnerability.Metadata)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if isSafe {
|
||||
fmt.Println("\nBravo, your image looks SAFE !")
|
||||
}
|
||||
}
|
||||
|
||||
func save(imageName string) (string, error) {
|
||||
@ -227,8 +252,37 @@ func historyFromCommand(imageName string) ([]string, error) {
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
func analyzeLayer(endpoint, path, layerID, parentLayerID string) error {
|
||||
payload := struct{ ID, Path, ParentID, ImageFormat string }{ID: layerID, Path: path, ParentID: parentLayerID, ImageFormat: "Docker"}
|
||||
func listenHTTP(path, allowedHost string) {
|
||||
fmt.Printf("Setting up HTTP server (allowing: %s)\n", allowedHost)
|
||||
|
||||
restrictedFileServer := func(path, allowedHost string) http.Handler {
|
||||
fc := func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Host == allowedHost {
|
||||
http.FileServer(http.Dir(path)).ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(403)
|
||||
}
|
||||
return http.HandlerFunc(fc)
|
||||
}
|
||||
|
||||
err := http.ListenAndServe(":"+strconv.Itoa(httpPort), restrictedFileServer(path, allowedHost))
|
||||
if err != nil {
|
||||
fmt.Printf("- An error occurs with the HTTP server: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func analyzeLayer(endpoint, path, layerName, parentLayerName string) error {
|
||||
payload := v1.LayerEnvelope{
|
||||
Layer: &v1.Layer{
|
||||
Name: layerName,
|
||||
Path: path,
|
||||
ParentName: parentLayerName,
|
||||
Format: "Docker",
|
||||
},
|
||||
}
|
||||
|
||||
jsonPayload, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -249,40 +303,31 @@ func analyzeLayer(endpoint, path, layerID, parentLayerID string) error {
|
||||
|
||||
if response.StatusCode != 201 {
|
||||
body, _ := ioutil.ReadAll(response.Body)
|
||||
return fmt.Errorf("Got response %d with message %s", response.StatusCode, string(body))
|
||||
return fmt.Errorf("- Got response %d with message %s", response.StatusCode, string(body))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getVulnerabilities(endpoint, layerID, minimumPriority string) ([]APIVulnerability, error) {
|
||||
response, err := http.Get(endpoint + fmt.Sprintf(getLayerVulnerabilitiesURI, layerID, minimumPriority))
|
||||
func getLayer(endpoint, layerID string) (v1.Layer, error) {
|
||||
response, err := http.Get(endpoint + fmt.Sprintf(getLayerFeaturesURI, layerID))
|
||||
if err != nil {
|
||||
return []APIVulnerability{}, err
|
||||
return v1.Layer{}, err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
if response.StatusCode != 200 {
|
||||
body, _ := ioutil.ReadAll(response.Body)
|
||||
return []APIVulnerability{}, fmt.Errorf("Got response %d with message %s", response.StatusCode, string(body))
|
||||
err := fmt.Errorf("- Got response %d with message %s", response.StatusCode, string(body))
|
||||
return v1.Layer{}, err
|
||||
}
|
||||
|
||||
var apiResponse APIVulnerabilitiesResponse
|
||||
err = json.NewDecoder(response.Body).Decode(&apiResponse)
|
||||
if err != nil {
|
||||
return []APIVulnerability{}, err
|
||||
var apiResponse v1.LayerEnvelope
|
||||
if err = json.NewDecoder(response.Body).Decode(&apiResponse); err != nil {
|
||||
return v1.Layer{}, err
|
||||
} else if apiResponse.Error != nil {
|
||||
return v1.Layer{}, errors.New(apiResponse.Error.Message)
|
||||
}
|
||||
|
||||
return apiResponse.Vulnerabilities, nil
|
||||
}
|
||||
|
||||
func restrictedFileServer(path, allowedHost string) http.Handler {
|
||||
fc := func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Host == allowedHost {
|
||||
http.FileServer(http.Dir(path)).ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(403)
|
||||
}
|
||||
return http.HandlerFunc(fc)
|
||||
return *apiResponse.Layer, nil
|
||||
}
|
||||
|
@ -12,184 +12,138 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package database implements every database models and the functions that
|
||||
// manipulate them.
|
||||
// Package database defines the Clair's models and a common interface for database implementations.
|
||||
package database
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/barakmich/glog"
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/health"
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/google/cayley/graph/path"
|
||||
|
||||
// Load all supported backends.
|
||||
_ "github.com/google/cayley/graph/bolt"
|
||||
_ "github.com/google/cayley/graph/leveldb"
|
||||
_ "github.com/google/cayley/graph/memstore"
|
||||
_ "github.com/google/cayley/graph/mongo"
|
||||
_ "github.com/google/cayley/graph/sql"
|
||||
)
|
||||
|
||||
const (
|
||||
// fieldIs is the graph predicate defining the type of an entity.
|
||||
fieldIs = "is"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "database")
|
||||
|
||||
// ErrTransaction is an error that occurs when a database transaction fails.
|
||||
ErrTransaction = errors.New("database: transaction failed (concurrent modification?)")
|
||||
// ErrBackendException is an error that occurs when the database backend does
|
||||
// not work properly (ie. unreachable).
|
||||
ErrBackendException = errors.New("database: could not query backend")
|
||||
ErrBackendException = errors.New("database: an error occured when querying the backend")
|
||||
|
||||
// ErrInconsistent is an error that occurs when a database consistency check
|
||||
// fails (ie. when an entity which is supposed to be unique is detected twice)
|
||||
ErrInconsistent = errors.New("database: inconsistent database")
|
||||
|
||||
// ErrCantOpen is an error that occurs when the database could not be opened
|
||||
ErrCantOpen = errors.New("database: could not open database")
|
||||
|
||||
store *cayley.Handle
|
||||
)
|
||||
|
||||
func init() {
|
||||
health.RegisterHealthchecker("database", Healthcheck)
|
||||
}
|
||||
|
||||
// Open opens a Cayley database, creating it if necessary and return its handle
|
||||
func Open(config *config.DatabaseConfig) error {
|
||||
if store != nil {
|
||||
log.Errorf("could not open database at %s : a database is already opened", config.Path)
|
||||
return ErrCantOpen
|
||||
}
|
||||
if config.Type != "memstore" && config.Path == "" {
|
||||
log.Errorf("could not open database : no path provided.")
|
||||
return ErrCantOpen
|
||||
}
|
||||
|
||||
var err error
|
||||
options := make(graph.Options)
|
||||
|
||||
switch config.Type {
|
||||
case "bolt", "leveldb":
|
||||
if _, err := os.Stat(config.Path); os.IsNotExist(err) {
|
||||
log.Infof("database at %s does not exist yet, creating it", config.Path)
|
||||
|
||||
err = graph.InitQuadStore(config.Type, config.Path, options)
|
||||
if err != nil && err != graph.ErrDatabaseExists {
|
||||
log.Errorf("could not create database at %s : %s", config.Path, err)
|
||||
return ErrCantOpen
|
||||
}
|
||||
}
|
||||
case "sql":
|
||||
// Replaces the PostgreSQL's slow COUNT query with a fast estimator.
|
||||
// Ref: https://wiki.postgresql.org/wiki/Count_estimate
|
||||
options["use_estimates"] = true
|
||||
|
||||
err := graph.InitQuadStore(config.Type, config.Path, options)
|
||||
if err != nil && err != graph.ErrDatabaseExists {
|
||||
log.Errorf("could not create database at %s : %s", config.Path, err)
|
||||
return ErrCantOpen
|
||||
}
|
||||
}
|
||||
|
||||
store, err = cayley.NewGraph(config.Type, config.Path, options)
|
||||
if err != nil {
|
||||
log.Errorf("could not open database at %s : %s", config.Path, err)
|
||||
return ErrCantOpen
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes a Cayley database
|
||||
func Close() {
|
||||
if store != nil {
|
||||
store.Close()
|
||||
store = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Healthcheck simply adds and then remove a quad in Cayley to ensure it is working
|
||||
// It returns true when everything is ok
|
||||
func Healthcheck() health.Status {
|
||||
var err error
|
||||
if store != nil {
|
||||
t := cayley.NewTransaction()
|
||||
q := cayley.Triple("cayley", "is", "healthy")
|
||||
t.AddQuad(q)
|
||||
t.RemoveQuad(q)
|
||||
glog.SetStderrThreshold("FATAL") // TODO REMOVE ME
|
||||
err = store.ApplyTransaction(t)
|
||||
glog.SetStderrThreshold("ERROR") // TODO REMOVE ME
|
||||
}
|
||||
|
||||
return health.Status{IsEssential: true, IsHealthy: err == nil, Details: nil}
|
||||
}
|
||||
|
||||
// toValue returns a single value from a path
|
||||
// If the path does not lead to a value, an empty string is returned
|
||||
// If the path leads to multiple values or if a database error occurs, an empty string and an error are returned
|
||||
func toValue(p *path.Path) (string, error) {
|
||||
var value string
|
||||
found := false
|
||||
|
||||
it, _ := p.BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
if found {
|
||||
log.Error("failed query in toValue: used on an iterator containing multiple values")
|
||||
return "", ErrInconsistent
|
||||
}
|
||||
|
||||
if it.Result() != nil {
|
||||
value = store.NameOf(it.Result())
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in toValue: %s", it.Err())
|
||||
return "", ErrBackendException
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// toValues returns multiple values from a path
|
||||
// If the path does not lead to any value, an empty array is returned
|
||||
// If a database error occurs, an empty array and an error are returned
|
||||
func toValues(p *path.Path) ([]string, error) {
|
||||
var values []string
|
||||
|
||||
it, _ := p.BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
if it.Result() != nil {
|
||||
values = append(values, store.NameOf(it.Result()))
|
||||
}
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in toValues: %s", it.Err())
|
||||
return []string{}, ErrBackendException
|
||||
}
|
||||
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// saveFields appends cayley's Save method to a path for each field in
|
||||
// selectedFields, except the ones that appears also in exceptFields
|
||||
func saveFields(p *path.Path, selectedFields []string, exceptFields []string) {
|
||||
for _, selectedField := range selectedFields {
|
||||
if utils.Contains(selectedField, exceptFields) {
|
||||
continue
|
||||
}
|
||||
p = p.Save(selectedField, selectedField)
|
||||
}
|
||||
// Datastore is the interface that describes a database backend implementation.
|
||||
type Datastore interface {
|
||||
// # Namespace
|
||||
// ListNamespaces returns the entire list of known Namespaces.
|
||||
ListNamespaces() ([]Namespace, error)
|
||||
|
||||
// # Layer
|
||||
// InsertLayer stores a Layer in the database.
|
||||
// A Layer is uniquely identified by its Name. The Name and EngineVersion fields are mandatory.
|
||||
// If a Parent is specified, it is expected that it has been retrieved using FindLayer.
|
||||
// If a Layer that already exists is inserted and the EngineVersion of the given Layer is higher
|
||||
// than the stored one, the stored Layer should be updated.
|
||||
// The function has to be idempotent, inserting a layer that already exists shouln'd return an
|
||||
// error.
|
||||
InsertLayer(Layer) error
|
||||
|
||||
// FindLayer retrieves a Layer from the database.
|
||||
// withFeatures specifies whether the Features field should be filled. When withVulnerabilities is
|
||||
// true, the Features field should be filled and their AffectedBy fields should contain every
|
||||
// vulnerabilities that affect them.
|
||||
FindLayer(name string, withFeatures, withVulnerabilities bool) (Layer, error)
|
||||
|
||||
// DeleteLayer deletes a Layer from the database and every layers that are based on it,
|
||||
// recursively.
|
||||
DeleteLayer(name string) error
|
||||
|
||||
// # Vulnerability
|
||||
// InsertVulnerabilities stores the given Vulnerabilities in the database, updating them if
|
||||
// necessary. A vulnerability is uniquely identified by its Namespace and its Name.
|
||||
// The FixedIn field may only contain a partial list of Features that are affected by the
|
||||
// Vulnerability, along with the version in which the vulnerability is fixed. It is the
|
||||
// responsability of the implementation to update the list properly. A version equals to
|
||||
// types.MinVersion means that the given Feature is not being affected by the Vulnerability at
|
||||
// all and thus, should be removed from the list. It is important that Features should be unique
|
||||
// in the FixedIn list. For example, it doesn't make sense to have two `openssl` Feature listed as
|
||||
// a Vulnerability can only be fixed in one Version. This is true because Vulnerabilities and
|
||||
// Features are Namespaced (i.e. specific to one operating system).
|
||||
// Each vulnerability insertion or update has to create a Notification that will contain the
|
||||
// old and the updated Vulnerability, unless createNotification equals to true.
|
||||
InsertVulnerabilities(vulnerabilities []Vulnerability, createNotification bool) error
|
||||
|
||||
// FindVulnerability retrieves a Vulnerability from the database, including the FixedIn list.
|
||||
FindVulnerability(namespaceName, name string) (Vulnerability, error)
|
||||
|
||||
// DeleteVulnerability removes a Vulnerability from the database.
|
||||
// It has to create a Notification that will contain the old Vulnerability.
|
||||
DeleteVulnerability(namespaceName, name string) error
|
||||
|
||||
// InsertVulnerabilityFixes adds new FixedIn Feature or update the Versions of existing ones to
|
||||
// the specified Vulnerability in the database.
|
||||
// It has has to create a Notification that will contain the old and the updated Vulnerability.
|
||||
InsertVulnerabilityFixes(vulnerabilityNamespace, vulnerabilityName string, fixes []FeatureVersion) error
|
||||
|
||||
// DeleteVulnerabilityFix removes a FixedIn Feature from the specified Vulnerability in the
|
||||
// database. It can be used to store the fact that a Vulnerability no longer affects the given
|
||||
// Feature in any Version.
|
||||
// It has has to create a Notification that will contain the old and the updated Vulnerability.
|
||||
DeleteVulnerabilityFix(vulnerabilityNamespace, vulnerabilityName, featureName string) error
|
||||
|
||||
// # Notification
|
||||
// GetAvailableNotification returns the Name, Created, Notified and Deleted fields of a
|
||||
// Notification that should be handled. The renotify interval defines how much time after being
|
||||
// marked as Notified by SetNotificationNotified, a Notification that hasn't been deleted should
|
||||
// be returned again by this function. A Notification for which there is a valid Lock with the
|
||||
// same Name should not be returned.
|
||||
GetAvailableNotification(renotifyInterval time.Duration) (VulnerabilityNotification, error)
|
||||
|
||||
// GetNotification returns a Notification, including its OldVulnerability and NewVulnerability
|
||||
// fields. On these Vulnerabilities, LayersIntroducingVulnerability should be filled with
|
||||
// every Layer that introduces the Vulnerability (i.e. adds at least one affected FeatureVersion).
|
||||
// The Limit and page parameters are used to paginate LayersIntroducingVulnerability. The first
|
||||
// given page should be VulnerabilityNotificationFirstPage. The function will then return the next
|
||||
// availage page. If there is no more page, NoVulnerabilityNotificationPage has to be returned.
|
||||
GetNotification(name string, limit int, page VulnerabilityNotificationPageNumber) (VulnerabilityNotification, VulnerabilityNotificationPageNumber, error)
|
||||
|
||||
// SetNotificationNotified marks a Notification as notified and thus, makes it unavailable for
|
||||
// GetAvailableNotification, until the renotify duration is elapsed.
|
||||
SetNotificationNotified(name string) error
|
||||
|
||||
// DeleteNotification marks a Notification as deleted, and thus, makes it unavailable for
|
||||
// GetAvailableNotification.
|
||||
DeleteNotification(name string) error
|
||||
|
||||
// # Key/Value
|
||||
// InsertKeyValue stores or updates a simple key/value pair in the database.
|
||||
InsertKeyValue(key, value string) error
|
||||
|
||||
// GetKeyValue retrieves a value from the database from the given key.
|
||||
// It returns an empty string if there is no such key.
|
||||
GetKeyValue(key string) (string, error)
|
||||
|
||||
// # Lock
|
||||
// Lock creates or renew a Lock in the database with the given name, owner and duration.
|
||||
// After the specified duration, the Lock expires by itself if it hasn't been unlocked, and thus,
|
||||
// let other users create a Lock with the same name. However, the owner can renew its Lock by
|
||||
// setting renew to true. Lock should not block, it should instead returns whether the Lock has
|
||||
// been successfully acquired/renewed. If it's the case, the expiration time of that Lock is
|
||||
// returned as well.
|
||||
Lock(name string, owner string, duration time.Duration, renew bool) (bool, time.Time)
|
||||
|
||||
// Unlock releases an existing Lock.
|
||||
Unlock(name, owner string)
|
||||
|
||||
// FindLock returns the owner of a Lock specified by the name, and its experation time if it
|
||||
// exists.
|
||||
FindLock(name string) (string, time.Time, error)
|
||||
|
||||
// # Miscellaneous
|
||||
// Ping returns the health status of the database.
|
||||
Ping() bool
|
||||
|
||||
// Close closes the database and free any allocated resource.
|
||||
Close()
|
||||
}
|
||||
|
@ -1,86 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/google/cayley"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestHealthcheck(t *testing.T) {
|
||||
Open(&config.DatabaseConfig{Type: "memstore"})
|
||||
defer Close()
|
||||
|
||||
b := Healthcheck()
|
||||
assert.True(t, b.IsHealthy, "Healthcheck failed")
|
||||
}
|
||||
|
||||
func TestToValue(t *testing.T) {
|
||||
Open(&config.DatabaseConfig{Type: "memstore"})
|
||||
defer Close()
|
||||
|
||||
// toValue()
|
||||
v, err := toValue(cayley.StartPath(store, "tests").Out("are"))
|
||||
assert.Nil(t, err, "toValue should work even if the requested path leads to nothing")
|
||||
assert.Equal(t, "", v, "toValue should return an empty string if the requested path leads to nothing")
|
||||
|
||||
store.AddQuad(cayley.Triple("tests", "are", "awesome"))
|
||||
v, err = toValue(cayley.StartPath(store, "tests").Out("are"))
|
||||
assert.Nil(t, err, "toValue should have worked")
|
||||
assert.Equal(t, "awesome", v, "toValue did not return the expected value")
|
||||
|
||||
store.AddQuad(cayley.Triple("tests", "are", "running"))
|
||||
v, err = toValue(cayley.StartPath(store, "tests").Out("are"))
|
||||
assert.NotNil(t, err, "toValue should return an error and an empty string if the path leads to multiple values")
|
||||
assert.Equal(t, "", v, "toValue should return an error and an empty string if the path leads to multiple values")
|
||||
|
||||
// toValues()
|
||||
vs, err := toValues(cayley.StartPath(store, "CoreOS").Out(fieldIs))
|
||||
assert.Nil(t, err, "toValues should work even if the requested path leads to nothing")
|
||||
assert.Len(t, vs, 0, "toValue should return an empty array if the requested path leads to nothing")
|
||||
words := []string{"powerful", "lightweight"}
|
||||
for i, word := range words {
|
||||
store.AddQuad(cayley.Triple("CoreOS", fieldIs, word))
|
||||
v, err := toValues(cayley.StartPath(store, "CoreOS").Out(fieldIs))
|
||||
assert.Nil(t, err, "toValues should have worked")
|
||||
assert.Len(t, v, i+1, "toValues did not return the right amount of values")
|
||||
for _, e := range words[:i+1] {
|
||||
assert.Contains(t, v, e, "toValues did not return the values we expected")
|
||||
}
|
||||
}
|
||||
|
||||
// toValue(s)() and empty values
|
||||
store.AddQuad(cayley.Triple("bob", "likes", ""))
|
||||
v, err = toValue(cayley.StartPath(store, "bob").Out("likes"))
|
||||
assert.Nil(t, err, "toValue should work even if the requested path leads to nothing")
|
||||
assert.Equal(t, "", v, "toValue should return an empty string if the requested path leads to nothing")
|
||||
|
||||
store.AddQuad(cayley.Triple("bob", "likes", "running"))
|
||||
v, err = toValue(cayley.StartPath(store, "bob").Out("likes"))
|
||||
assert.NotNil(t, err, "toValue should return an error and an empty string if the path leads to multiple values")
|
||||
assert.Equal(t, "", v, "toValue should return an error and an empty string if the path leads to multiple values")
|
||||
|
||||
store.AddQuad(cayley.Triple("bob", "likes", "swimming"))
|
||||
va, err := toValues(cayley.StartPath(store, "bob").Out("likes"))
|
||||
assert.Nil(t, err, "toValues should have worked")
|
||||
if assert.Len(t, va, 3, "toValues should have returned 2 values") {
|
||||
assert.Contains(t, va, "running")
|
||||
assert.Contains(t, va, "swimming")
|
||||
assert.Contains(t, va, "")
|
||||
}
|
||||
}
|
@ -1,63 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/google/cayley"
|
||||
)
|
||||
|
||||
const (
|
||||
fieldFlagValue = "value"
|
||||
flagNodePrefix = "flag"
|
||||
)
|
||||
|
||||
// UpdateFlag creates a flag or update an existing flag's value
|
||||
func UpdateFlag(name, value string) error {
|
||||
if name == "" || value == "" {
|
||||
log.Warning("could not insert a flag which has an empty name or value")
|
||||
return cerrors.NewBadRequestError("could not insert a flag which has an empty name or value")
|
||||
}
|
||||
|
||||
// Initialize transaction
|
||||
t := cayley.NewTransaction()
|
||||
|
||||
// Get current flag value
|
||||
currentValue, err := GetFlagValue(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Build transaction
|
||||
name = flagNodePrefix + ":" + name
|
||||
if currentValue != "" {
|
||||
t.RemoveQuad(cayley.Triple(name, fieldFlagValue, currentValue))
|
||||
}
|
||||
t.AddQuad(cayley.Triple(name, fieldFlagValue, value))
|
||||
|
||||
// Apply transaction
|
||||
if err = store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (UpdateFlag): %s", err)
|
||||
return ErrTransaction
|
||||
}
|
||||
|
||||
// Return
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetFlagValue returns the value of the flag given by its name (or an empty string if the flag does not exist)
|
||||
func GetFlagValue(name string) (string, error) {
|
||||
return toValue(cayley.StartPath(store, flagNodePrefix+":"+name).Out(fieldFlagValue))
|
||||
}
|
@ -1,49 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFlag(t *testing.T) {
|
||||
Open(&config.DatabaseConfig{Type: "memstore"})
|
||||
defer Close()
|
||||
|
||||
// Get non existing flag
|
||||
f, err := GetFlagValue("test")
|
||||
assert.Nil(t, err, "GetFlagValue should have worked")
|
||||
assert.Empty(t, "", f, "Getting a non-existing flag should return an empty string")
|
||||
|
||||
// Try to insert invalid flags
|
||||
assert.Error(t, UpdateFlag("test", ""), "It should not accept a flag with an empty name or value")
|
||||
assert.Error(t, UpdateFlag("", "test"), "It should not accept a flag with an empty name or value")
|
||||
assert.Error(t, UpdateFlag("", ""), "It should not accept a flag with an empty name or value")
|
||||
|
||||
// Insert a flag and verify its value
|
||||
assert.Nil(t, UpdateFlag("test", "test1"))
|
||||
f, err = GetFlagValue("test")
|
||||
assert.Nil(t, err, "GetFlagValue should have worked")
|
||||
assert.Equal(t, "test1", f, "GetFlagValue did not return the expected value")
|
||||
|
||||
// Update a flag and verify its value
|
||||
assert.Nil(t, UpdateFlag("test", "test2"))
|
||||
f, err = GetFlagValue("test")
|
||||
assert.Nil(t, err, "GetFlagValue should have worked")
|
||||
assert.Equal(t, "test2", f, "GetFlagValue did not return the expected value")
|
||||
}
|
@ -1,432 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/google/cayley/graph/path"
|
||||
)
|
||||
|
||||
const (
	// Selectable fields for layer queries.
	FieldLayerID            = "id"
	FieldLayerParent        = "parent"
	FieldLayerSuccessors    = "successors"
	FieldLayerOS            = "os"
	FieldLayerEngineVersion = "engineVersion"
	FieldLayerPackages      = "adds/removes"

	// These fields are not selectable and are for internal use only.
	fieldLayerIsValue           = "layer"
	fieldLayerInstalledPackages = "adds"
	fieldLayerRemovedPackages   = "removes"
)

// FieldLayerAll lists every selectable layer field.
var FieldLayerAll = []string{FieldLayerID, FieldLayerParent, FieldLayerSuccessors, FieldLayerOS, FieldLayerPackages, FieldLayerEngineVersion}

// Layer represents an unique container layer
type Layer struct {
	// Node is the graph node name, derived from ID (see GetNode).
	Node string `json:"-"`
	ID   string
	// ParentNode is the node of the layer this layer builds on, "" for a base layer.
	ParentNode string `json:"-"`
	// SuccessorsNodes are the nodes of layers that declare this layer as parent.
	SuccessorsNodes []string `json:"-"`
	OS              string
	// InstalledPackagesNodes / RemovedPackagesNodes hold only the packages this
	// layer itself adds or removes, not the cumulative set (see AllPackages).
	InstalledPackagesNodes []string `json:"-"`
	RemovedPackagesNodes   []string `json:"-"`
	EngineVersion          int
}
|
||||
|
||||
// GetNode returns the node name of a Layer
|
||||
// Requires the key field: ID
|
||||
func (l *Layer) GetNode() string {
|
||||
return fieldLayerIsValue + ":" + utils.Hash(l.ID)
|
||||
}
|
||||
|
||||
// InsertLayer insert a single layer in the database
//
// ID, and EngineVersion fields are required.
// ParentNode, OS, InstalledPackagesNodes and RemovedPackagesNodes are optional,
// SuccessorsNodes is unnecessary.
//
// The ID MUST be unique for two different layers.
//
// If the Layer already exists, nothing is done, except if the provided engine
// version is higher than the existing one, in which case, the OS,
// InstalledPackagesNodes and RemovedPackagesNodes fields will be replaced.
//
// The layer should only contains the newly installed/removed packages
// There is no safeguard that prevents from marking a package as newly installed
// while it has already been installed in one of its parent.
func InsertLayer(layer *Layer) error {
	// Verify parameters
	if layer.ID == "" {
		log.Warning("could not insert a layer which has an empty ID")
		return cerrors.NewBadRequestError("could not insert a layer which has an empty ID")
	}

	// Create required data structures
	t := cayley.NewTransaction()
	layer.Node = layer.GetNode()

	// Try to find an existing layer (ErrNotFound simply means this is a create)
	existingLayer, err := FindOneLayerByNode(layer.Node, FieldLayerAll)
	if err != nil && err != cerrors.ErrNotFound {
		return err
	}

	if existingLayer != nil && existingLayer.EngineVersion >= layer.EngineVersion {
		// The layer exists and has an equal or higher engine version, do nothing
		return nil
	}

	if existingLayer == nil {
		// Create case: add permanent nodes (identity and parent never change)
		t.AddQuad(cayley.Triple(layer.Node, fieldIs, fieldLayerIsValue))
		t.AddQuad(cayley.Triple(layer.Node, FieldLayerID, layer.ID))
		t.AddQuad(cayley.Triple(layer.Node, FieldLayerParent, layer.ParentNode))
	} else {
		// Update case: remove everything before we add updated data
		t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerOS, existingLayer.OS))
		for _, pkg := range existingLayer.InstalledPackagesNodes {
			t.RemoveQuad(cayley.Triple(layer.Node, fieldLayerInstalledPackages, pkg))
		}
		for _, pkg := range existingLayer.RemovedPackagesNodes {
			t.RemoveQuad(cayley.Triple(layer.Node, fieldLayerRemovedPackages, pkg))
		}
		t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerEngineVersion, strconv.Itoa(existingLayer.EngineVersion)))
	}

	// Add OS/Packages (common to both create and update)
	t.AddQuad(cayley.Triple(layer.Node, FieldLayerOS, layer.OS))
	for _, pkg := range layer.InstalledPackagesNodes {
		t.AddQuad(cayley.Triple(layer.Node, fieldLayerInstalledPackages, pkg))
	}
	for _, pkg := range layer.RemovedPackagesNodes {
		t.AddQuad(cayley.Triple(layer.Node, fieldLayerRemovedPackages, pkg))
	}
	t.AddQuad(cayley.Triple(layer.Node, FieldLayerEngineVersion, strconv.Itoa(layer.EngineVersion)))

	// Apply transaction
	if err = store.ApplyTransaction(t); err != nil {
		log.Errorf("failed transaction (InsertLayer): %s", err)
		return ErrTransaction
	}

	return nil
}
|
||||
|
||||
// DeleteLayer deletes the specified layer and any child layers that are
|
||||
// dependent on the specified layer.
|
||||
func DeleteLayer(ID string) error {
|
||||
layer, err := FindOneLayerByID(ID, []string{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return deleteLayerTreeFrom(layer.Node, nil)
|
||||
}
|
||||
|
||||
func deleteLayerTreeFrom(node string, t *graph.Transaction) error {
|
||||
// Determine if that function call is the root call of the recursivity
|
||||
// And create transaction if its the case.
|
||||
root := (t == nil)
|
||||
if root {
|
||||
t = cayley.NewTransaction()
|
||||
}
|
||||
|
||||
// Find layer.
|
||||
layer, err := FindOneLayerByNode(node, FieldLayerAll)
|
||||
if err != nil {
|
||||
// Ignore missing layer.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove all successor layers.
|
||||
for _, succNode := range layer.SuccessorsNodes {
|
||||
deleteLayerTreeFrom(succNode, t)
|
||||
}
|
||||
|
||||
// Remove layer.
|
||||
t.RemoveQuad(cayley.Triple(layer.Node, fieldIs, fieldLayerIsValue))
|
||||
t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerID, layer.ID))
|
||||
t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerParent, layer.ParentNode))
|
||||
t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerOS, layer.OS))
|
||||
t.RemoveQuad(cayley.Triple(layer.Node, FieldLayerEngineVersion, strconv.Itoa(layer.EngineVersion)))
|
||||
for _, pkg := range layer.InstalledPackagesNodes {
|
||||
t.RemoveQuad(cayley.Triple(layer.Node, fieldLayerInstalledPackages, pkg))
|
||||
}
|
||||
for _, pkg := range layer.RemovedPackagesNodes {
|
||||
t.RemoveQuad(cayley.Triple(layer.Node, fieldLayerRemovedPackages, pkg))
|
||||
}
|
||||
|
||||
// Apply transaction if root call.
|
||||
if root {
|
||||
if err = store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (deleteLayerTreeFrom): %s", err)
|
||||
return ErrTransaction
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FindOneLayerByID finds and returns a single layer having the given ID,
|
||||
// selecting the specified fields and hardcoding its ID
|
||||
func FindOneLayerByID(ID string, selectedFields []string) (*Layer, error) {
|
||||
t := &Layer{ID: ID}
|
||||
l, err := FindOneLayerByNode(t.GetNode(), selectedFields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.ID = ID
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// FindOneLayerByNode finds and returns a single package by its node, selecting the specified fields
|
||||
func FindOneLayerByNode(node string, selectedFields []string) (*Layer, error) {
|
||||
l, err := toLayers(cayley.StartPath(store, node).Has(fieldIs, fieldLayerIsValue), selectedFields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(l) == 1 {
|
||||
return l[0], nil
|
||||
}
|
||||
if len(l) > 1 {
|
||||
log.Errorf("found multiple layers with identical node [Node: %s]", node)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
|
||||
return nil, cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
// FindAllLayersByAddedPackageNodes finds and returns all layers that add the
|
||||
// given packages (by their nodes), selecting the specified fields
|
||||
func FindAllLayersByAddedPackageNodes(nodes []string, selectedFields []string) ([]*Layer, error) {
|
||||
layers, err := toLayers(cayley.StartPath(store, nodes...).In(fieldLayerInstalledPackages), selectedFields)
|
||||
if err != nil {
|
||||
return []*Layer{}, err
|
||||
}
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
// FindAllLayersByPackageNode finds and returns all layers that have the given package (by its node), selecting the specified fields
|
||||
// func FindAllLayersByPackageNode(node string, only map[string]struct{}) ([]*Layer, error) {
|
||||
// var layers []*Layer
|
||||
//
|
||||
// // We need the successors field
|
||||
// if only != nil {
|
||||
// only[FieldLayerSuccessors] = struct{}{}
|
||||
// }
|
||||
//
|
||||
// // Get all the layers which remove the package
|
||||
// layersNodesRemoving, err := toValues(cayley.StartPath(store, node).In(fieldLayerRemovedPackages).Has(fieldIs, fieldLayerIsValue))
|
||||
// if err != nil {
|
||||
// return []*Layer{}, err
|
||||
// }
|
||||
// layersNodesRemovingMap := make(map[string]struct{})
|
||||
// for _, l := range layersNodesRemoving {
|
||||
// layersNodesRemovingMap[l] = struct{}{}
|
||||
// }
|
||||
//
|
||||
// layersToBrowse, err := toLayers(cayley.StartPath(store, node).In(fieldLayerInstalledPackages).Has(fieldIs, fieldLayerIsValue), only)
|
||||
// if err != nil {
|
||||
// return []*Layer{}, err
|
||||
// }
|
||||
// for len(layersToBrowse) > 0 {
|
||||
// var newLayersToBrowse []*Layer
|
||||
// for _, layerToBrowse := range layersToBrowse {
|
||||
// if _, layerRemovesPackage := layersNodesRemovingMap[layerToBrowse.Node]; !layerRemovesPackage {
|
||||
// layers = append(layers, layerToBrowse)
|
||||
// successors, err := layerToBrowse.Successors(only)
|
||||
// if err != nil {
|
||||
// return []*Layer{}, err
|
||||
// }
|
||||
// newLayersToBrowse = append(newLayersToBrowse, successors...)
|
||||
// }
|
||||
// layersToBrowse = newLayersToBrowse
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// return layers, nil
|
||||
// }
|
||||
|
||||
// toLayers converts a path leading to one or multiple layers to Layer structs,
|
||||
// selecting the specified fields
|
||||
func toLayers(path *path.Path, selectedFields []string) ([]*Layer, error) {
|
||||
var layers []*Layer
|
||||
|
||||
saveFields(path, selectedFields, []string{FieldLayerSuccessors, FieldLayerPackages, fieldLayerInstalledPackages, fieldLayerRemovedPackages})
|
||||
it, _ := path.BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
layer := Layer{Node: store.NameOf(it.Result())}
|
||||
for _, selectedField := range selectedFields {
|
||||
switch selectedField {
|
||||
case FieldLayerID:
|
||||
layer.ID = store.NameOf(tags[FieldLayerID])
|
||||
case FieldLayerParent:
|
||||
layer.ParentNode = store.NameOf(tags[FieldLayerParent])
|
||||
case FieldLayerSuccessors:
|
||||
var err error
|
||||
layer.SuccessorsNodes, err = toValues(cayley.StartPath(store, layer.Node).In(FieldLayerParent))
|
||||
if err != nil {
|
||||
log.Errorf("could not get successors of layer %s: %s.", layer.Node, err.Error())
|
||||
return nil, err
|
||||
}
|
||||
case FieldLayerOS:
|
||||
layer.OS = store.NameOf(tags[FieldLayerOS])
|
||||
case FieldLayerPackages:
|
||||
var err error
|
||||
it, _ := cayley.StartPath(store, layer.Node).OutWithTags([]string{"predicate"}, fieldLayerInstalledPackages, fieldLayerRemovedPackages).BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
predicate := store.NameOf(tags["predicate"])
|
||||
if predicate == fieldLayerInstalledPackages {
|
||||
layer.InstalledPackagesNodes = append(layer.InstalledPackagesNodes, store.NameOf(it.Result()))
|
||||
} else if predicate == fieldLayerRemovedPackages {
|
||||
layer.RemovedPackagesNodes = append(layer.RemovedPackagesNodes, store.NameOf(it.Result()))
|
||||
}
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("could not get installed/removed packages of layer %s: %s.", layer.Node, it.Err())
|
||||
return nil, err
|
||||
}
|
||||
case FieldLayerEngineVersion:
|
||||
layer.EngineVersion, _ = strconv.Atoi(store.NameOf(tags[FieldLayerEngineVersion]))
|
||||
default:
|
||||
panic("unknown selectedField")
|
||||
}
|
||||
}
|
||||
layers = append(layers, &layer)
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in toLayers: %s", it.Err())
|
||||
return []*Layer{}, ErrBackendException
|
||||
}
|
||||
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
// Successors find and returns all layers that define l as their parent,
|
||||
// selecting the specified fields
|
||||
// It requires that FieldLayerSuccessors field has been selected on l
|
||||
// func (l *Layer) Successors(selectedFields []string) ([]*Layer, error) {
|
||||
// if len(l.SuccessorsNodes) == 0 {
|
||||
// return []*Layer{}, nil
|
||||
// }
|
||||
//
|
||||
// return toLayers(cayley.StartPath(store, l.SuccessorsNodes...), only)
|
||||
// }
|
||||
|
||||
// Parent find and returns the parent layer of l, selecting the specified fields
|
||||
// It requires that FieldLayerParent field has been selected on l
|
||||
func (l *Layer) Parent(selectedFields []string) (*Layer, error) {
|
||||
if l.ParentNode == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
parent, err := toLayers(cayley.StartPath(store, l.ParentNode), selectedFields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(parent) == 1 {
|
||||
return parent[0], nil
|
||||
}
|
||||
if len(parent) > 1 {
|
||||
log.Errorf("found multiple layers when getting parent layer of layer %s", l.ParentNode)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Sublayers find and returns all layers that compose l, selecting the specified
|
||||
// fields
|
||||
// It requires that FieldLayerParent field has been selected on l
|
||||
// The base image comes first, and l is last
|
||||
// func (l *Layer) Sublayers(selectedFields []string) ([]*Layer, error) {
|
||||
// var sublayers []*Layer
|
||||
//
|
||||
// // We need the parent field
|
||||
// if only != nil {
|
||||
// only[FieldLayerParent] = struct{}{}
|
||||
// }
|
||||
//
|
||||
// parent, err := l.Parent(only)
|
||||
// if err != nil {
|
||||
// return []*Layer{}, err
|
||||
// }
|
||||
// if parent != nil {
|
||||
// parentSublayers, err := parent.Sublayers(only)
|
||||
// if err != nil {
|
||||
// return []*Layer{}, err
|
||||
// }
|
||||
// sublayers = append(sublayers, parentSublayers...)
|
||||
// }
|
||||
//
|
||||
// sublayers = append(sublayers, l)
|
||||
//
|
||||
// return sublayers, nil
|
||||
// }
|
||||
|
||||
// AllPackages computes the full list of packages that l has and return them as
|
||||
// nodes.
|
||||
// It requires that FieldLayerParent, FieldLayerContentInstalledPackages,
|
||||
// FieldLayerContentRemovedPackages fields has been selected on l
|
||||
func (l *Layer) AllPackages() ([]string, error) {
|
||||
var allPackages []string
|
||||
|
||||
parent, err := l.Parent([]string{FieldLayerParent, FieldLayerPackages})
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
if parent != nil {
|
||||
allPackages, err = parent.AllPackages()
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return append(utils.CompareStringLists(allPackages, l.RemovedPackagesNodes), l.InstalledPackagesNodes...), nil
|
||||
}
|
||||
|
||||
// OperatingSystem tries to find the Operating System of a layer using its
|
||||
// parents.
|
||||
// It requires that FieldLayerParent and FieldLayerOS fields has been
|
||||
// selected on l
|
||||
func (l *Layer) OperatingSystem() (string, error) {
|
||||
if l.OS != "" {
|
||||
return l.OS, nil
|
||||
}
|
||||
|
||||
// Try from the parent
|
||||
parent, err := l.Parent([]string{FieldLayerParent, FieldLayerOS})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if parent != nil {
|
||||
return parent.OperatingSystem()
|
||||
}
|
||||
return "", nil
|
||||
}
|
@ -1,178 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestInvalidLayers tries to insert invalid layers
|
||||
func TestInvalidLayers(t *testing.T) {
|
||||
Open(&config.DatabaseConfig{Type: "memstore"})
|
||||
defer Close()
|
||||
|
||||
assert.Error(t, InsertLayer(&Layer{ID: ""})) // No ID
|
||||
}
|
||||
|
||||
// TestLayerSimple inserts a single layer and ensures it can be retrieved and
// that methods works
func TestLayerSimple(t *testing.T) {
	Open(&config.DatabaseConfig{Type: "memstore"})
	defer Close()

	// Insert a layer and find it back
	l1 := &Layer{ID: "l1", OS: "os1", InstalledPackagesNodes: []string{"p1", "p2"}, EngineVersion: 1}
	if assert.Nil(t, InsertLayer(l1)) {
		fl1, err := FindOneLayerByID(l1.ID, FieldLayerAll)
		if assert.Nil(t, err) && assert.NotNil(t, fl1) {
			// Saved = found
			assert.True(t, layerEqual(l1, fl1), "layers are not equal, expected %v, have %s", l1, fl1)

			// No parent: l1 is a base layer.
			p, err := fl1.Parent(FieldLayerAll)
			assert.Nil(t, err)
			assert.Nil(t, p)

			// AllPackages() should report exactly the two installed packages.
			pk, err := fl1.AllPackages()
			assert.Nil(t, err)
			if assert.Len(t, pk, 2) {
				assert.Contains(t, pk, l1.InstalledPackagesNodes[0])
				assert.Contains(t, pk, l1.InstalledPackagesNodes[1])
			}
			// OS()
			o, err := fl1.OperatingSystem()
			assert.Nil(t, err)
			assert.Equal(t, l1.OS, o)
		}

		// FindAllLayersByAddedPackageNodes: only p1 matches, so only l1 returns.
		al1, err := FindAllLayersByAddedPackageNodes([]string{"p1", "p3"}, FieldLayerAll)
		if assert.Nil(t, err) && assert.Len(t, al1, 1) {
			assert.Equal(t, al1[0].Node, l1.Node)
		}

		// Delete, then verify the layer is gone.
		if assert.Nil(t, DeleteLayer(l1.ID)) {
			_, err := FindOneLayerByID(l1.ID, FieldLayerAll)
			assert.Equal(t, cerrors.ErrNotFound, err)
		}
	}
}
|
||||
|
||||
// TestLayerTree inserts a tree of layers and ensure that the tree logic works
func TestLayerTree(t *testing.T) {
	Open(&config.DatabaseConfig{Type: "memstore"})
	defer Close()

	// Build a chain l1 -> l2 -> l3 that forks into l4a and l4b.
	var layers []*Layer
	layers = append(layers, &Layer{ID: "l1"})
	layers = append(layers, &Layer{ID: "l2", ParentNode: layers[0].GetNode(), OS: "os2", InstalledPackagesNodes: []string{"p1", "p2"}})
	layers = append(layers, &Layer{ID: "l3", ParentNode: layers[1].GetNode()}) // Repeat an empty layer archive (l1)
	layers = append(layers, &Layer{ID: "l4a", ParentNode: layers[2].GetNode(), InstalledPackagesNodes: []string{"p3"}, RemovedPackagesNodes: []string{"p1", "p4"}}) // p4 does not exist and thus can't actually be removed
	layers = append(layers, &Layer{ID: "l4b", ParentNode: layers[2].GetNode(), InstalledPackagesNodes: []string{}, RemovedPackagesNodes: []string{"p2", "p1"}})

	// Insert every layer and read each one back.
	var flayers []*Layer
	ok := true
	for _, l := range layers {
		ok = ok && assert.Nil(t, InsertLayer(l))

		fl, err := FindOneLayerByID(l.ID, FieldLayerAll)
		ok = ok && assert.Nil(t, err)
		ok = ok && assert.NotNil(t, fl)
		flayers = append(flayers, fl)
	}
	if assert.True(t, ok) {
		// Start testing

		// l4a
		// Parent()
		fl4ap, err := flayers[3].Parent(FieldLayerAll)
		assert.Nil(t, err, "l4a should has l3 as parent")
		if assert.NotNil(t, fl4ap, "l4a should has l3 as parent") {
			assert.Equal(t, "l3", fl4ap.ID, "l4a should has l3 as parent")
		}

		// OS()
		fl4ao, err := flayers[3].OperatingSystem()
		assert.Nil(t, err, "l4a should inherits its OS from l2")
		assert.Equal(t, "os2", fl4ao, "l4a should inherits its OS from l2")
		// AllPackages(): p1 removed by l4a, p2 inherited from l2, p3 added by l4a.
		fl4apkg, err := flayers[3].AllPackages()
		assert.Nil(t, err)
		if assert.Len(t, fl4apkg, 2) {
			assert.Contains(t, fl4apkg, "p2")
			assert.Contains(t, fl4apkg, "p3")
		}

		// l4b
		// AllPackages(): l4b removes everything it inherits and adds nothing.
		fl4bpkg, err := flayers[4].AllPackages()
		assert.Nil(t, err)
		assert.Len(t, fl4bpkg, 0)

		// Delete a layer in the middle of the tree: l2 and everything below must go.
		if assert.Nil(t, DeleteLayer(flayers[1].ID)) {
			for _, l := range layers[1:] {
				_, err := FindOneLayerByID(l.ID, FieldLayerAll)
				assert.Equal(t, cerrors.ErrNotFound, err)
			}
		}
	}
}
|
||||
|
||||
// TestLayerUpdate verifies that re-inserting a layer only replaces its content
// when the new engine version is strictly higher than the stored one.
func TestLayerUpdate(t *testing.T) {
	Open(&config.DatabaseConfig{Type: "memstore"})
	defer Close()

	l1 := &Layer{ID: "l1", OS: "os1", InstalledPackagesNodes: []string{"p1", "p2"}, RemovedPackagesNodes: []string{"p3", "p4"}, EngineVersion: 1}
	if assert.Nil(t, InsertLayer(l1)) {
		// Do not update layer content if the engine versions are equals
		l1b := &Layer{ID: "l1", OS: "os2", InstalledPackagesNodes: []string{"p1"}, RemovedPackagesNodes: []string{""}, EngineVersion: 1}
		if assert.Nil(t, InsertLayer(l1b)) {
			// The stored layer must still match the original l1, not l1b.
			fl1b, err := FindOneLayerByID(l1.ID, FieldLayerAll)
			if assert.Nil(t, err) && assert.NotNil(t, fl1b) {
				assert.True(t, layerEqual(l1, fl1b), "layer contents are not equal, expected %v, have %s", l1, fl1b)
			}
		}

		// Update the layer content with new data and a higher engine version
		l1c := &Layer{ID: "l1", OS: "os2", InstalledPackagesNodes: []string{"p1", "p5"}, RemovedPackagesNodes: []string{"p6", "p7"}, EngineVersion: 2}
		if assert.Nil(t, InsertLayer(l1c)) {
			// This time the stored layer must reflect l1c.
			fl1c, err := FindOneLayerByID(l1c.ID, FieldLayerAll)
			if assert.Nil(t, err) && assert.NotNil(t, fl1c) {
				assert.True(t, layerEqual(l1c, fl1c), "layer contents are not equal, expected %v, have %s", l1c, fl1c)
			}
		}
	}
}
|
||||
|
||||
func layerEqual(expected, actual *Layer) bool {
|
||||
eq := true
|
||||
eq = eq && expected.Node == actual.Node
|
||||
eq = eq && expected.ID == actual.ID
|
||||
eq = eq && expected.ParentNode == actual.ParentNode
|
||||
eq = eq && expected.OS == actual.OS
|
||||
eq = eq && expected.EngineVersion == actual.EngineVersion
|
||||
eq = eq && len(utils.CompareStringLists(actual.SuccessorsNodes, expected.SuccessorsNodes)) == 0 && len(utils.CompareStringLists(expected.SuccessorsNodes, actual.SuccessorsNodes)) == 0
|
||||
eq = eq && len(utils.CompareStringLists(actual.RemovedPackagesNodes, expected.RemovedPackagesNodes)) == 0 && len(utils.CompareStringLists(expected.RemovedPackagesNodes, actual.RemovedPackagesNodes)) == 0
|
||||
eq = eq && len(utils.CompareStringLists(actual.InstalledPackagesNodes, expected.InstalledPackagesNodes)) == 0 && len(utils.CompareStringLists(expected.InstalledPackagesNodes, actual.InstalledPackagesNodes)) == 0
|
||||
return eq
|
||||
}
|
163
database/lock.go
163
database/lock.go
@ -1,163 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/barakmich/glog"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/google/cayley/graph/path"
|
||||
)
|
||||
|
||||
// Predicates used to persist a lock as quads on its name node.
const (
	fieldLockLocked      = "locked" // marker predicate: its presence means the node is locked
	fieldLockLockedValue = "locked" // value stored under the marker predicate
	fieldLockLockedBy    = "locked_by"
	fieldLockLockedUntil = "locked_until" // expiration as a Unix-timestamp string
)
|
||||
|
||||
// Lock tries to set a temporary lock in the database.
// If a lock already exists with the given name/owner, then the lock is renewed
//
// Lock does not block; instead, it returns true and the lock's expiration time
// if the lock has been successfully acquired, or false otherwise.
func Lock(name string, duration time.Duration, owner string) (bool, time.Time) {
	// Drop expired locks first so a stale lock cannot block acquisition.
	pruneLocks()

	until := time.Now().Add(duration)
	untilString := strconv.FormatInt(until.Unix(), 10)

	// Try to get the expiration time of a lock with the same name/owner
	currentExpiration, err := toValue(cayley.StartPath(store, name).Has(fieldLockLockedBy, owner).Out(fieldLockLockedUntil))
	if err == nil && currentExpiration != "" {
		// Renew our lock
		if currentExpiration == untilString {
			// The stored expiration already equals the requested one; nothing to do.
			return true, until
		}

		t := cayley.NewTransaction()
		t.RemoveQuad(cayley.Triple(name, fieldLockLockedUntil, currentExpiration))
		t.AddQuad(cayley.Triple(name, fieldLockLockedUntil, untilString))
		// It is not necessary to verify if the lock is ours again in the transaction
		// because if someone took it, the lock's current expiration probably changed and the transaction will fail
		return store.ApplyTransaction(t) == nil, until
	}

	t := cayley.NewTransaction()
	t.AddQuad(cayley.Triple(name, fieldLockLocked, fieldLockLockedValue)) // Necessary to make the transaction fails if the lock already exists (and has not been pruned)
	t.AddQuad(cayley.Triple(name, fieldLockLockedUntil, untilString))
	t.AddQuad(cayley.Triple(name, fieldLockLockedBy, owner))

	// NOTE(review): the glog threshold is lowered around ApplyTransaction,
	// presumably to silence the expected failure when someone else already
	// holds the lock — confirm before changing.
	glog.SetStderrThreshold("FATAL")
	success := store.ApplyTransaction(t) == nil
	glog.SetStderrThreshold("ERROR")

	return success, until
}
|
||||
|
||||
// Unlock unlocks a lock specified by its name if I own it
func Unlock(name, owner string) {
	// Count how many lock records matched so inconsistencies can be reported.
	unlocked := 0
	it, _ := cayley.StartPath(store, name).Has(fieldLockLocked, fieldLockLockedValue).Has(fieldLockLockedBy, owner).Save(fieldLockLockedUntil, fieldLockLockedUntil).BuildIterator().Optimize()
	defer it.Close()
	for cayley.RawNext(it) {
		tags := make(map[string]graph.Value)
		it.TagResults(tags)

		// Remove the three quads that together form the lock.
		t := cayley.NewTransaction()
		t.RemoveQuad(cayley.Triple(name, fieldLockLocked, fieldLockLockedValue))
		t.RemoveQuad(cayley.Triple(name, fieldLockLockedUntil, store.NameOf(tags[fieldLockLockedUntil])))
		t.RemoveQuad(cayley.Triple(name, fieldLockLockedBy, owner))
		err := store.ApplyTransaction(t)
		if err != nil {
			log.Errorf("failed transaction (Unlock): %s", err)
		}

		unlocked++
	}
	if it.Err() != nil {
		log.Errorf("failed query in Unlock: %s", it.Err())
	}
	if unlocked > 1 {
		// We should never see this, it would mean that our database doesn't ensure quad uniqueness
		// and that the entire lock system is jeopardized.
		log.Errorf("found inconsistency in Unlock: matched %d times a locked named: %s", unlocked, name)
	}
}
|
||||
|
||||
// LockInfo returns the owner of a lock specified by its name and its
|
||||
// expiration time
|
||||
func LockInfo(name string) (string, time.Time, error) {
|
||||
it, _ := cayley.StartPath(store, name).Has(fieldLockLocked, fieldLockLockedValue).Save(fieldLockLockedUntil, fieldLockLockedUntil).Save(fieldLockLockedBy, fieldLockLockedBy).BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
tt, _ := strconv.ParseInt(store.NameOf(tags[fieldLockLockedUntil]), 10, 64)
|
||||
return store.NameOf(tags[fieldLockLockedBy]), time.Unix(tt, 0), nil
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in LockInfo: %s", it.Err())
|
||||
return "", time.Time{}, ErrBackendException
|
||||
}
|
||||
|
||||
return "", time.Time{}, cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
// pruneLocks removes every expired locks from the database
|
||||
func pruneLocks() {
|
||||
now := time.Now()
|
||||
|
||||
// Delete every expired locks
|
||||
it, _ := cayley.StartPath(store, "locked").In("locked").Save(fieldLockLockedUntil, fieldLockLockedUntil).Save(fieldLockLockedBy, fieldLockLockedBy).BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
n := store.NameOf(it.Result())
|
||||
t := store.NameOf(tags[fieldLockLockedUntil])
|
||||
o := store.NameOf(tags[fieldLockLockedBy])
|
||||
tt, _ := strconv.ParseInt(t, 10, 64)
|
||||
|
||||
if now.Unix() > tt {
|
||||
log.Debugf("lock %s owned by %s has expired.", n, o)
|
||||
|
||||
tr := cayley.NewTransaction()
|
||||
tr.RemoveQuad(cayley.Triple(n, fieldLockLocked, fieldLockLockedValue))
|
||||
tr.RemoveQuad(cayley.Triple(n, fieldLockLockedUntil, t))
|
||||
tr.RemoveQuad(cayley.Triple(n, fieldLockLockedBy, o))
|
||||
err := store.ApplyTransaction(tr)
|
||||
if err != nil {
|
||||
log.Errorf("failed transaction (pruneLocks): %s", err)
|
||||
continue
|
||||
}
|
||||
log.Debugf("lock %s has been successfully pruned.", n)
|
||||
}
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in Unlock: %s", it.Err())
|
||||
}
|
||||
}
|
||||
|
||||
// getLockedNodes returns every nodes that are currently locked
|
||||
func getLockedNodes() *path.Path {
|
||||
return cayley.StartPath(store, "locked").In("locked")
|
||||
}
|
119
database/models.go
Normal file
119
database/models.go
Normal file
@ -0,0 +1,119 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/utils/types"
|
||||
)
|
||||
|
||||
// Model is embedded by every database entity in this package and carries
// its database primary key.
// ID is only meant to be used by database implementations and should never be used for anything else.
type Model struct {
	ID int
}
|
||||
|
||||
// Layer describes an image layer together with its relations: its parent
// layer, its detected namespace, and the feature versions it contains.
type Layer struct {
	Model

	Name          string
	EngineVersion int
	Parent        *Layer     // nil when the layer has no parent
	Namespace     *Namespace // nil when no namespace was detected
	Features      []FeatureVersion
}
|
||||
|
||||
// Namespace identifies the space (e.g. an OS release) in which features
// and vulnerabilities are scoped.
type Namespace struct {
	Model

	Name string
}
|
||||
|
||||
// Feature is a versionless piece of software (e.g. a package name) that
// belongs to exactly one Namespace.
type Feature struct {
	Model

	Name      string
	Namespace Namespace
}
|
||||
|
||||
// FeatureVersion is a specific version of a Feature, together with the
// vulnerabilities known to affect it.
type FeatureVersion struct {
	Model

	Feature    Feature
	Version    types.Version
	AffectedBy []Vulnerability

	// For output purposes. Only make sense when the feature version is in the context of an image.
	AddedBy Layer
}
|
||||
|
||||
// Vulnerability describes a security defect scoped to a Namespace, the
// feature versions that fix it, and the layers that introduce it.
type Vulnerability struct {
	Model

	Name      string
	Namespace Namespace

	Description string
	Link        string
	Severity    types.Priority

	// Metadata holds arbitrary per-source metadata, persisted as JSON.
	Metadata MetadataMap

	FixedIn                        []FeatureVersion
	LayersIntroducingVulnerability []Layer

	// For output purposes. Only make sense when the vulnerability
	// is already about a specific Feature/FeatureVersion.
	FixedBy types.Version `json:",omitempty"`
}
|
||||
|
||||
// MetadataMap stores arbitrary vulnerability metadata keyed by source name.
// It implements sql.Scanner and driver.Valuer so it can be persisted as a
// JSON-encoded database column.
type MetadataMap map[string]interface{}

// Scan implements sql.Scanner: it decodes a JSON []byte column value into mm.
// Values that are not []byte (e.g. NULL) are silently ignored, leaving mm unchanged.
func (mm *MetadataMap) Scan(value interface{}) error {
	val, ok := value.([]byte)
	if !ok {
		return nil
	}
	return json.Unmarshal(val, mm)
}

// Value implements driver.Valuer: it encodes mm as a JSON string.
func (mm *MetadataMap) Value() (driver.Value, error) {
	// FIX: the result variable was previously named `json`, shadowing the
	// encoding/json package inside this function.
	data, err := json.Marshal(*mm)
	return string(data), err
}
|
||||
|
||||
// VulnerabilityNotification records a change to a vulnerability (creation
// or update) along with its lifecycle timestamps.
type VulnerabilityNotification struct {
	Model

	Name string

	Created  time.Time
	Notified time.Time
	Deleted  time.Time

	// OldVulnerability is nil for a newly created vulnerability;
	// NewVulnerability is nil for a deleted one.
	OldVulnerability *Vulnerability
	NewVulnerability *Vulnerability
}
|
||||
|
||||
// VulnerabilityNotificationPageNumber is a pagination cursor over the two
// vulnerability sides of a notification.
type VulnerabilityNotificationPageNumber struct {
	// -1 means that we reached the end already.
	OldVulnerability int
	NewVulnerability int
}

// VulnerabilityNotificationFirstPage is the cursor for the first page.
var VulnerabilityNotificationFirstPage = VulnerabilityNotificationPageNumber{0, 0}

// NoVulnerabilityNotificationPage marks that both sides are exhausted.
var NoVulnerabilityNotificationPage = VulnerabilityNotificationPageNumber{-1, -1}
|
@ -15,7 +15,6 @@
|
||||
package database
|
||||
|
||||
// DebianReleasesMapping translates Debian code names and class names to version numbers
|
||||
// TODO That should probably be stored in the database or in a file
|
||||
var DebianReleasesMapping = map[string]string{
|
||||
// Code names
|
||||
"squeeze": "6",
|
||||
@ -32,7 +31,6 @@ var DebianReleasesMapping = map[string]string{
|
||||
}
|
||||
|
||||
// UbuntuReleasesMapping translates Ubuntu code names to version numbers
|
||||
// TODO That should probably be stored in the database or in a file
|
||||
var UbuntuReleasesMapping = map[string]string{
|
||||
"precise": "12.04",
|
||||
"quantal": "12.10",
|
||||
@ -40,6 +38,6 @@ var UbuntuReleasesMapping = map[string]string{
|
||||
"trusty": "14.04",
|
||||
"utopic": "14.10",
|
||||
"vivid": "15.04",
|
||||
"vivid/ubuntu-core": "15.04-core",
|
||||
"wily": "15.10",
|
||||
"xenial": "16.04",
|
||||
}
|
@ -1,409 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/pborman/uuid"
|
||||
)
|
||||
|
||||
const (
	// maxNotifications is the number of notifications that InsertNotifications
	// will accept at the same time. Above this number, notifications are ignored.
	maxNotifications = 100

	// Quad predicate names used to store notifications in the graph.
	fieldNotificationIsValue = "notification"
	fieldNotificationType    = "type"
	fieldNotificationData    = "data"
	fieldNotificationIsSent  = "isSent"
)
|
||||
|
||||
// A Notification defines an interface to a message that can be sent by a
// notifier.Notifier.
// A NotificationWrapper has to be used to convert it into a NotificationWrap,
// which can be stored in the database.
type Notification interface {
	// GetName returns the explicit (humanly meaningful) name of a notification.
	GetName() string
	// GetType returns the type of a notification, which is used by a
	// NotificationWrapper to determine the concrete type of a Notification.
	GetType() string
	// GetContent returns the content of the notification.
	GetContent() (interface{}, error)
}
|
||||
|
||||
// NotificationWrapper is an interface defined how to convert a Notification to
// a NotificationWrap object and vice-versa.
type NotificationWrapper interface {
	// Wrap packs a Notification instance into a new NotificationWrap.
	Wrap(n Notification) (*NotificationWrap, error)
	// Unwrap unpacks an instance of NotificationWrap into a new Notification.
	Unwrap(nw *NotificationWrap) (Notification, error)
}
|
||||
|
||||
// A NotificationWrap wraps a Notification into something that can be stored in
// the database. A NotificationWrapper has to be used to convert it into a
// Notification.
type NotificationWrap struct {
	Type string // concrete Notification type name, used to rebuild it
	Data string // serialized notification payload
}
|
||||
|
||||
// DefaultWrapper is an implementation of NotificationWrapper that supports
// NewVulnerabilityNotification notifications.
// It serializes notifications to JSON.
type DefaultWrapper struct{}
|
||||
|
||||
func (w *DefaultWrapper) Wrap(n Notification) (*NotificationWrap, error) {
|
||||
data, err := json.Marshal(n)
|
||||
if err != nil {
|
||||
log.Warningf("could not marshal notification [ID: %s, Type: %s]: %s", n.GetName(), n.GetType(), err)
|
||||
return nil, cerrors.NewBadRequestError("could not marshal notification with DefaultWrapper")
|
||||
}
|
||||
|
||||
return &NotificationWrap{Type: n.GetType(), Data: string(data)}, nil
|
||||
}
|
||||
|
||||
func (w *DefaultWrapper) Unwrap(nw *NotificationWrap) (Notification, error) {
|
||||
var v Notification
|
||||
|
||||
// Create struct depending on the type
|
||||
switch nw.Type {
|
||||
case "NewVulnerabilityNotification":
|
||||
v = &NewVulnerabilityNotification{}
|
||||
case "VulnerabilityPriorityIncreasedNotification":
|
||||
v = &VulnerabilityPriorityIncreasedNotification{}
|
||||
case "VulnerabilityPackageChangedNotification":
|
||||
v = &VulnerabilityPackageChangedNotification{}
|
||||
default:
|
||||
log.Warningf("could not unwrap notification [Type: %s]: unknown type for DefaultWrapper", nw.Type)
|
||||
return nil, cerrors.NewBadRequestError("could not unwrap notification")
|
||||
}
|
||||
|
||||
// Unmarshal notification
|
||||
err := json.Unmarshal([]byte(nw.Data), v)
|
||||
if err != nil {
|
||||
log.Warningf("could not unmarshal notification with DefaultWrapper [Type: %s]: %s", nw.Type, err)
|
||||
return nil, cerrors.NewBadRequestError("could not unmarshal notification")
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// GetDefaultNotificationWrapper returns the default wrapper
|
||||
func GetDefaultNotificationWrapper() NotificationWrapper {
|
||||
return &DefaultWrapper{}
|
||||
}
|
||||
|
||||
// A NewVulnerabilityNotification is a notification that informs about a new
// vulnerability and contains all the layers that introduce that vulnerability
type NewVulnerabilityNotification struct {
	VulnerabilityID string // identifier of the newly added vulnerability
}

// GetName returns the vulnerability identifier, which names this notification.
func (n *NewVulnerabilityNotification) GetName() string {
	return n.VulnerabilityID
}

// GetType returns the type string a NotificationWrapper uses to rebuild this notification.
func (n *NewVulnerabilityNotification) GetType() string {
	return "NewVulnerabilityNotification"
}
|
||||
|
||||
// GetContent builds the payload for a new-vulnerability notification: the
// abstract vulnerability itself plus the IDs of every layer introducing it.
func (n *NewVulnerabilityNotification) GetContent() (interface{}, error) {
	// This notification is about a new vulnerability
	// Returns the list of layers that introduce this vulnerability

	// Find vulnerability.
	vulnerability, err := FindOneVulnerability(n.VulnerabilityID, []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn})
	if err != nil {
		return []byte{}, err
	}
	abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
	if err != nil {
		return []byte{}, err
	}

	layers, err := FindAllLayersIntroducingVulnerability(n.VulnerabilityID, []string{FieldLayerID})

	if err != nil {
		return []byte{}, err
	}

	layersIDs := []string{} // empty slice, not null
	for _, l := range layers {
		layersIDs = append(layersIDs, l.ID)
	}

	// Anonymous struct so the payload serializes with exactly these two fields.
	return struct {
		Vulnerability        *AbstractVulnerability
		IntroducingLayersIDs []string
	}{
		Vulnerability:        abstractVulnerability,
		IntroducingLayersIDs: layersIDs,
	}, nil
}
|
||||
|
||||
// A VulnerabilityPriorityIncreasedNotification is a notification that informs
// about the fact that the priority of a vulnerability increased
// vulnerability and contains all the layers that introduce that vulnerability.
type VulnerabilityPriorityIncreasedNotification struct {
	VulnerabilityID          string
	OldPriority, NewPriority types.Priority // priority before and after the change
}

// GetName returns the vulnerability identifier, which names this notification.
func (n *VulnerabilityPriorityIncreasedNotification) GetName() string {
	return n.VulnerabilityID
}

// GetType returns the type string a NotificationWrapper uses to rebuild this notification.
func (n *VulnerabilityPriorityIncreasedNotification) GetType() string {
	return "VulnerabilityPriorityIncreasedNotification"
}
|
||||
|
||||
// GetContent builds the payload for a priority-increase notification: the
// abstract vulnerability, the old and new priorities, and the IDs of every
// layer introducing the vulnerability.
func (n *VulnerabilityPriorityIncreasedNotification) GetContent() (interface{}, error) {
	// Returns the list of layers that introduce this vulnerability
	// And both the old and new priorities

	// Find vulnerability.
	vulnerability, err := FindOneVulnerability(n.VulnerabilityID, []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn})
	if err != nil {
		return []byte{}, err
	}
	abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
	if err != nil {
		return []byte{}, err
	}

	layers, err := FindAllLayersIntroducingVulnerability(n.VulnerabilityID, []string{FieldLayerID})

	if err != nil {
		return []byte{}, err
	}

	layersIDs := []string{} // empty slice, not null
	for _, l := range layers {
		layersIDs = append(layersIDs, l.ID)
	}

	// Anonymous struct so the payload serializes with exactly these fields.
	return struct {
		Vulnerability            *AbstractVulnerability
		OldPriority, NewPriority types.Priority
		IntroducingLayersIDs     []string
	}{
		Vulnerability:            abstractVulnerability,
		OldPriority:              n.OldPriority,
		NewPriority:              n.NewPriority,
		IntroducingLayersIDs:     layersIDs,
	}, nil
}
|
||||
|
||||
// A VulnerabilityPackageChangedNotification is a notification that informs that
// an existing vulnerability's fixed package list has been updated and may not
// affect some layers anymore or may affect new layers.
type VulnerabilityPackageChangedNotification struct {
	VulnerabilityID string
	// Graph node identifiers of the fixed-in packages that were added/removed.
	AddedFixedInNodes, RemovedFixedInNodes []string
}

// GetName returns the vulnerability identifier, which names this notification.
func (n *VulnerabilityPackageChangedNotification) GetName() string {
	return n.VulnerabilityID
}

// GetType returns the type string a NotificationWrapper uses to rebuild this notification.
func (n *VulnerabilityPackageChangedNotification) GetType() string {
	return "VulnerabilityPackageChangedNotification"
}
|
||||
|
||||
func (n *VulnerabilityPackageChangedNotification) GetContent() (interface{}, error) {
|
||||
// Returns the removed and added packages as well as the layers that
|
||||
// introduced the vulnerability in the past but don't anymore because of the
|
||||
// removed packages and the layers that now introduce the vulnerability
|
||||
// because of the added packages
|
||||
|
||||
// Find vulnerability.
|
||||
vulnerability, err := FindOneVulnerability(n.VulnerabilityID, []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
// First part of the answer : added/removed packages
|
||||
addedPackages, err := FindAllPackagesByNodes(n.AddedFixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackagePreviousVersion})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
removedPackages, err := FindAllPackagesByNodes(n.RemovedFixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackagePreviousVersion})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
// Second part of the answer
|
||||
var addedPackagesPreviousVersions []string
|
||||
for _, pkg := range addedPackages {
|
||||
previousVersions, err := pkg.PreviousVersions([]string{})
|
||||
if err != nil {
|
||||
return []*Layer{}, err
|
||||
}
|
||||
for _, version := range previousVersions {
|
||||
addedPackagesPreviousVersions = append(addedPackagesPreviousVersions, version.Node)
|
||||
}
|
||||
}
|
||||
var removedPackagesPreviousVersions []string
|
||||
for _, pkg := range removedPackages {
|
||||
previousVersions, err := pkg.PreviousVersions([]string{})
|
||||
if err != nil {
|
||||
return []*Layer{}, err
|
||||
}
|
||||
for _, version := range previousVersions {
|
||||
removedPackagesPreviousVersions = append(removedPackagesPreviousVersions, version.Node)
|
||||
}
|
||||
}
|
||||
|
||||
newIntroducingLayers, err := FindAllLayersByAddedPackageNodes(addedPackagesPreviousVersions, []string{FieldLayerID})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
formerIntroducingLayers, err := FindAllLayersByAddedPackageNodes(removedPackagesPreviousVersions, []string{FieldLayerID})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
newIntroducingLayersIDs := []string{} // empty slice, not null
|
||||
for _, l := range newIntroducingLayers {
|
||||
newIntroducingLayersIDs = append(newIntroducingLayersIDs, l.ID)
|
||||
}
|
||||
formerIntroducingLayersIDs := []string{} // empty slice, not null
|
||||
for _, l := range formerIntroducingLayers {
|
||||
formerIntroducingLayersIDs = append(formerIntroducingLayersIDs, l.ID)
|
||||
}
|
||||
|
||||
// Remove layers which appears both in new and former lists (eg. case of updated packages but still vulnerable)
|
||||
filteredNewIntroducingLayersIDs := utils.CompareStringLists(newIntroducingLayersIDs, formerIntroducingLayersIDs)
|
||||
filteredFormerIntroducingLayersIDs := utils.CompareStringLists(formerIntroducingLayersIDs, newIntroducingLayersIDs)
|
||||
|
||||
return struct {
|
||||
Vulnerability *AbstractVulnerability
|
||||
AddedAffectedPackages, RemovedAffectedPackages []*AbstractPackage
|
||||
NewIntroducingLayersIDs, FormerIntroducingLayerIDs []string
|
||||
}{
|
||||
Vulnerability: abstractVulnerability,
|
||||
AddedAffectedPackages: PackagesToAbstractPackages(addedPackages),
|
||||
RemovedAffectedPackages: PackagesToAbstractPackages(removedPackages),
|
||||
NewIntroducingLayersIDs: filteredNewIntroducingLayersIDs,
|
||||
FormerIntroducingLayerIDs: filteredFormerIntroducingLayersIDs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InsertNotifications stores multiple Notification in the database
// It uses the given NotificationWrapper to convert these notifications to
// something that can be stored in the database.
// Batches larger than maxNotifications are dropped (logged, not an error).
func InsertNotifications(notifications []Notification, wrapper NotificationWrapper) error {
	if len(notifications) == 0 {
		return nil
	}

	// Do not send notifications if there are too many of them (first update for example)
	if len(notifications) > maxNotifications {
		log.Noticef("Ignoring %d notifications", len(notifications))
		return nil
	}

	// Initialize transaction
	t := cayley.NewTransaction()

	// Iterate over all the vulnerabilities we need to insert
	for _, notification := range notifications {
		// Wrap notification
		wrappedNotification, err := wrapper.Wrap(notification)
		if err != nil {
			return err
		}

		// Each notification gets a fresh UUID-based node with its type, data
		// and an isSent=false marker.
		node := fieldNotificationIsValue + ":" + uuid.New()
		t.AddQuad(cayley.Triple(node, fieldIs, fieldNotificationIsValue))
		t.AddQuad(cayley.Triple(node, fieldNotificationType, wrappedNotification.Type))
		t.AddQuad(cayley.Triple(node, fieldNotificationData, wrappedNotification.Data))
		t.AddQuad(cayley.Triple(node, fieldNotificationIsSent, strconv.FormatBool(false)))
	}

	// Apply transaction
	if err := store.ApplyTransaction(t); err != nil {
		log.Errorf("failed transaction (InsertNotifications): %s", err)
		return ErrTransaction
	}

	return nil
}
|
||||
|
||||
// FindOneNotificationToSend finds and returns a notification that is not sent
// yet and not locked. Returns nil if there is none.
// The returned string is the graph node identifier of the notification.
func FindOneNotificationToSend(wrapper NotificationWrapper) (string, Notification, error) {
	// Query unsent notifications, excluding currently locked nodes.
	it, _ := cayley.StartPath(store, fieldNotificationIsValue).In(fieldIs).Has(fieldNotificationIsSent, strconv.FormatBool(false)).Except(getLockedNodes()).Save(fieldNotificationType, fieldNotificationType).Save(fieldNotificationData, fieldNotificationData).BuildIterator().Optimize()
	defer it.Close()
	for cayley.RawNext(it) {
		tags := make(map[string]graph.Value)
		it.TagResults(tags)

		// Rebuild the concrete notification from its stored type and data.
		notification, err := wrapper.Unwrap(&NotificationWrap{Type: store.NameOf(tags[fieldNotificationType]), Data: store.NameOf(tags[fieldNotificationData])})
		if err != nil {
			return "", nil, err
		}

		return store.NameOf(it.Result()), notification, nil
	}
	if it.Err() != nil {
		log.Errorf("failed query in FindOneNotificationToSend: %s", it.Err())
		return "", nil, ErrBackendException
	}

	return "", nil, nil
}
|
||||
|
||||
// CountNotificationsToSend returns the number of pending notifications
|
||||
// Note that it also count the locked notifications.
|
||||
func CountNotificationsToSend() (int, error) {
|
||||
c := 0
|
||||
|
||||
it, _ := cayley.StartPath(store, fieldNotificationIsValue).In(fieldIs).Has(fieldNotificationIsSent, strconv.FormatBool(false)).BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
c = c + 1
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in CountNotificationsToSend: %s", it.Err())
|
||||
return 0, ErrBackendException
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// MarkNotificationAsSent marks a notification as sent.
|
||||
func MarkNotificationAsSent(node string) {
|
||||
// Initialize transaction
|
||||
t := cayley.NewTransaction()
|
||||
|
||||
t.RemoveQuad(cayley.Triple(node, fieldNotificationIsSent, strconv.FormatBool(false)))
|
||||
t.AddQuad(cayley.Triple(node, fieldNotificationIsSent, strconv.FormatBool(true)))
|
||||
|
||||
// Apply transaction
|
||||
store.ApplyTransaction(t)
|
||||
}
|
@ -1,145 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type TestWrapper struct{}
|
||||
|
||||
func (w *TestWrapper) Wrap(n Notification) (*NotificationWrap, error) {
|
||||
data, err := json.Marshal(n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &NotificationWrap{Type: n.GetType(), Data: string(data)}, nil
|
||||
}
|
||||
|
||||
func (w *TestWrapper) Unwrap(nw *NotificationWrap) (Notification, error) {
|
||||
var v Notification
|
||||
|
||||
switch nw.Type {
|
||||
case "ntest1":
|
||||
v = &NotificationTest1{}
|
||||
case "ntest2":
|
||||
v = &NotificationTest2{}
|
||||
default:
|
||||
return nil, fmt.Errorf("Could not Unwrap NotificationWrapper [Type: %s, Data: %s]: Unknown notification type.", nw.Type, nw.Data)
|
||||
}
|
||||
|
||||
err := json.Unmarshal([]byte(nw.Data), v)
|
||||
return v, err
|
||||
}
|
||||
|
||||
// NotificationTest1 is a trivial Notification implementation used by the tests.
type NotificationTest1 struct {
	Test1 string
}

// GetName returns the notification's payload string as its name.
func (n NotificationTest1) GetName() string {
	return n.Test1
}

// GetType identifies this notification as type "ntest1".
func (n NotificationTest1) GetType() string {
	return "ntest1"
}

// GetContent returns the payload wrapped in an anonymous struct.
func (n NotificationTest1) GetContent() (interface{}, error) {
	content := struct{ Test1 string }{Test1: n.Test1}
	return content, nil
}
|
||||
|
||||
// NotificationTest2 is a second trivial Notification implementation used by the tests.
type NotificationTest2 struct {
	Test2 string
}

// GetName returns the notification's payload string as its name.
func (n NotificationTest2) GetName() string {
	return n.Test2
}

// GetType identifies this notification as type "ntest2".
func (n NotificationTest2) GetType() string {
	return "ntest2"
}

// GetContent returns the payload wrapped in an anonymous struct.
func (n NotificationTest2) GetContent() (interface{}, error) {
	content := struct{ Test2 string }{Test2: n.Test2}
	return content, nil
}
|
||||
|
||||
// TestNotification exercises the notification lifecycle end-to-end against an
// in-memory store: insert, count, fetch, mark-as-sent, and lock exclusion.
func TestNotification(t *testing.T) {
	Open(&config.DatabaseConfig{Type: "memstore"})
	defer Close()

	wrapper := &TestWrapper{}

	// Insert two notifications of different types
	n1 := &NotificationTest1{Test1: "test1"}
	n2 := &NotificationTest2{Test2: "test2"}
	err := InsertNotifications([]Notification{n1, n2}, &TestWrapper{})
	assert.Nil(t, err)

	// Count notifications to send
	c, err := CountNotificationsToSend()
	assert.Nil(t, err)
	assert.Equal(t, 2, c)

	foundN1 := false
	foundN2 := false

	// Select the first one.
	// The fetch order is not guaranteed, so record which one came back.
	node, n, err := FindOneNotificationToSend(wrapper)
	assert.Nil(t, err)
	if assert.NotNil(t, n) {
		if reflect.DeepEqual(n1, n) {
			foundN1 = true
		} else if reflect.DeepEqual(n2, n) {
			foundN2 = true
		} else {
			assert.Fail(t, "did not find any expected notification")
			return
		}
	}

	// Mark the first one as sent
	MarkNotificationAsSent(node)

	// Count notifications to send
	c, err = CountNotificationsToSend()
	assert.Nil(t, err)
	assert.Equal(t, 1, c)

	// Select again: the remaining notification must be the other one.
	node, n, err = FindOneNotificationToSend(wrapper)
	assert.Nil(t, err)
	if foundN1 {
		assert.Equal(t, n2, n)
	} else if foundN2 {
		assert.Equal(t, n1, n)
	}

	// Lock the second one
	Lock(node, time.Minute, "TestNotification")

	// Select again: locked notifications must be excluded, so nothing is found.
	_, n, err = FindOneNotificationToSend(wrapper)
	assert.Nil(t, err)
	assert.Equal(t, nil, n)
}
|
@ -1,448 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/google/cayley/graph/path"
|
||||
)
|
||||
|
||||
const (
	// Selectable fields of a Package graph node.
	FieldPackageOS              = "os"
	FieldPackageName            = "name"
	FieldPackageVersion         = "version"
	FieldPackageNextVersion     = "nextVersion"
	FieldPackagePreviousVersion = "previousVersion"

	// This field is not selectable and is for internal use only.
	fieldPackageIsValue = "package"
)

// FieldPackageAll lists every selectable Package field.
var FieldPackageAll = []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackageNextVersion, FieldPackagePreviousVersion}
|
||||
|
||||
// Package represents a package
// Packages of a same branch (OS + Name) form a linked list ordered by version;
// the Node fields below are the graph identifiers tying that list together.
type Package struct {
	Node                string `json:"-"` // graph node identifier, set on insertion
	OS                  string
	Name                string
	Version             types.Version
	NextVersionNode     string `json:"-"`
	PreviousVersionNode string `json:"-"`
}
|
||||
|
||||
// GetNode returns an unique identifier for the graph node
|
||||
// Requires the key fields: OS, Name, Version
|
||||
func (p *Package) GetNode() string {
|
||||
return fieldPackageIsValue + ":" + utils.Hash(p.Key())
|
||||
}
|
||||
|
||||
// Key returns an unique string defining p
|
||||
// Requires the key fields: OS, Name, Version
|
||||
func (p *Package) Key() string {
|
||||
return p.OS + ":" + p.Name + ":" + p.Version.String()
|
||||
}
|
||||
|
||||
// Branch returns an unique string defined the Branch of p (os, name)
|
||||
// Requires the key fields: OS, Name
|
||||
func (p *Package) Branch() string {
|
||||
return p.OS + ":" + p.Name
|
||||
}
|
||||
|
||||
// AbstractPackage is a package that abstract types.MaxVersion by modifying
// using a AllVersion boolean field and renaming Version to BeforeVersion
// which makes more sense for an usage with a Vulnerability
type AbstractPackage struct {
	OS   string
	Name string

	// AllVersions is true when the package stands for every version
	// (i.e. its concrete Version was types.MaxVersion).
	AllVersions   bool
	BeforeVersion types.Version
}
|
||||
|
||||
// PackagesToAbstractPackages converts several Packages to AbstractPackages
|
||||
func PackagesToAbstractPackages(packages []*Package) (abstractPackages []*AbstractPackage) {
|
||||
for _, p := range packages {
|
||||
ap := &AbstractPackage{OS: p.OS, Name: p.Name}
|
||||
if p.Version != types.MaxVersion {
|
||||
ap.BeforeVersion = p.Version
|
||||
} else {
|
||||
ap.AllVersions = true
|
||||
}
|
||||
abstractPackages = append(abstractPackages, ap)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AbstractPackagesToPackages converts several AbstractPackages to Packages
|
||||
func AbstractPackagesToPackages(abstractPackages []*AbstractPackage) (packages []*Package) {
|
||||
for _, ap := range abstractPackages {
|
||||
p := &Package{OS: ap.OS, Name: ap.Name}
|
||||
if ap.AllVersions {
|
||||
p.Version = types.MaxVersion
|
||||
} else {
|
||||
p.Version = ap.BeforeVersion
|
||||
}
|
||||
packages = append(packages, p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// InsertPackages inserts several packages in the database in one transaction
// Packages are stored in linked lists, one per Branch. Each linked list has a start package and an end package defined with types.MinVersion/types.MaxVersion versions
//
// OS, Name and Version fields have to be specified.
// If the insertion is successfull, the Node field is filled and represents the graph node identifier.
//
// Note: one cayley transaction is applied per input package, so a failure
// midway leaves earlier packages committed.
func InsertPackages(packageParameters []*Package) error {
	if len(packageParameters) == 0 {
		return nil
	}

	// Verify parameters: every package must carry an OS, a Name and a
	// parseable Version before we touch the graph.
	for _, pkg := range packageParameters {
		if pkg.OS == "" || pkg.Name == "" || pkg.Version.String() == "" {
			log.Warningf("could not insert an incomplete package [OS: %s, Name: %s, Version: %s]", pkg.OS, pkg.Name, pkg.Version)
			return cerrors.NewBadRequestError("could not insert an incomplete package")
		}
	}

	// Iterate over all the packages we need to insert
	for _, packageParameter := range packageParameters {
		t := cayley.NewTransaction()

		// Is the package already existing ?
		// If so, we only need to propagate its node identifier back to the
		// caller-supplied struct and skip the insertion.
		pkg, err := FindOnePackage(packageParameter.OS, packageParameter.Name, packageParameter.Version, []string{})
		if err != nil && err != cerrors.ErrNotFound {
			return err
		}
		if pkg != nil {
			packageParameter.Node = pkg.Node
			continue
		}

		// Get all packages of the same branch (both from local cache and database)
		branchPackages, err := FindAllPackagesByBranch(packageParameter.OS, packageParameter.Name, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackageNextVersion})
		if err != nil {
			return err
		}

		if len(branchPackages) == 0 {
			// The branch does not exist yet.
			// We have to create the whole linked list: a start package
			// (MinVersion), an end package (MaxVersion), and — unless the
			// inserted package IS one of those sentinels — the package itself
			// in between.
			insertingStartPackage := packageParameter.Version == types.MinVersion
			insertingEndPackage := packageParameter.Version == types.MaxVersion

			// Create and insert a end package
			endPackage := &Package{
				OS:      packageParameter.OS,
				Name:    packageParameter.Name,
				Version: types.MaxVersion,
			}
			endPackage.Node = endPackage.GetNode()

			t.AddQuad(cayley.Triple(endPackage.Node, fieldIs, fieldPackageIsValue))
			t.AddQuad(cayley.Triple(endPackage.Node, FieldPackageOS, endPackage.OS))
			t.AddQuad(cayley.Triple(endPackage.Node, FieldPackageName, endPackage.Name))
			t.AddQuad(cayley.Triple(endPackage.Node, FieldPackageVersion, endPackage.Version.String()))
			// The end package terminates the list: its next-version link is empty.
			t.AddQuad(cayley.Triple(endPackage.Node, FieldPackageNextVersion, ""))

			// Create the inserted package if it is different than a start/end package
			var newPackage *Package
			if !insertingStartPackage && !insertingEndPackage {
				newPackage = &Package{
					OS:      packageParameter.OS,
					Name:    packageParameter.Name,
					Version: packageParameter.Version,
				}
				newPackage.Node = newPackage.GetNode()

				t.AddQuad(cayley.Triple(newPackage.Node, fieldIs, fieldPackageIsValue))
				t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageOS, newPackage.OS))
				t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageName, newPackage.Name))
				t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageVersion, newPackage.Version.String()))
				t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageNextVersion, endPackage.Node))

				packageParameter.Node = newPackage.Node
			}

			// Create and insert a start package
			startPackage := &Package{
				OS:      packageParameter.OS,
				Name:    packageParameter.Name,
				Version: types.MinVersion,
			}
			startPackage.Node = startPackage.GetNode()

			t.AddQuad(cayley.Triple(startPackage.Node, fieldIs, fieldPackageIsValue))
			t.AddQuad(cayley.Triple(startPackage.Node, FieldPackageOS, startPackage.OS))
			t.AddQuad(cayley.Triple(startPackage.Node, FieldPackageName, startPackage.Name))
			t.AddQuad(cayley.Triple(startPackage.Node, FieldPackageVersion, startPackage.Version.String()))
			// Link start -> (new package | end package) depending on whether a
			// middle package was created above.
			if !insertingStartPackage && !insertingEndPackage {
				t.AddQuad(cayley.Triple(startPackage.Node, FieldPackageNextVersion, newPackage.Node))
			} else {
				t.AddQuad(cayley.Triple(startPackage.Node, FieldPackageNextVersion, endPackage.Node))
			}

			// Set package node
			if insertingEndPackage {
				packageParameter.Node = endPackage.Node
			} else if insertingStartPackage {
				packageParameter.Node = startPackage.Node
			}
		} else {
			// The branch already exists:
			// splice the new package into the existing linked list between its
			// version-ordered predecessor and successor.

			// Create the package
			newPackage := &Package{OS: packageParameter.OS, Name: packageParameter.Name, Version: packageParameter.Version}
			newPackage.Node = "package:" + utils.Hash(newPackage.Key())
			packageParameter.Node = newPackage.Node

			t.AddQuad(cayley.Triple(newPackage.Node, fieldIs, fieldPackageIsValue))
			t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageOS, newPackage.OS))
			t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageName, newPackage.Name))
			t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageVersion, newPackage.Version.String()))

			// Sort branchPackages by version (including the new package)
			branchPackages = append(branchPackages, newPackage)
			sort.Sort(ByVersion(branchPackages))

			// Find my prec/succ GraphID in the sorted slice now
			newPackageKey := newPackage.Key()
			var pred, succ *Package
			var found bool
			for _, p := range branchPackages {
				equal := p.Key() == newPackageKey
				if !equal && !found {
					pred = p
				} else if found {
					succ = p
					break
				} else if equal {
					found = true
					continue
				}
			}
			// Both neighbors must exist because every branch is bounded by the
			// MinVersion/MaxVersion sentinel packages.
			if pred == nil || succ == nil {
				log.Warningf("could not find any package predecessor/successor of: [OS: %s, Name: %s, Version: %s].", packageParameter.OS, packageParameter.Name, packageParameter.Version)
				return cerrors.NewBadRequestError("could not find package predecessor/successor")
			}

			// Link the new packages with the branch:
			// replace pred -> succ by pred -> new -> succ.
			t.RemoveQuad(cayley.Triple(pred.Node, FieldPackageNextVersion, succ.Node))

			pred.NextVersionNode = newPackage.Node
			t.AddQuad(cayley.Triple(pred.Node, FieldPackageNextVersion, newPackage.Node))

			newPackage.NextVersionNode = succ.Node
			t.AddQuad(cayley.Triple(newPackage.Node, FieldPackageNextVersion, succ.Node))
		}

		// Apply transaction
		if err := store.ApplyTransaction(t); err != nil {
			log.Errorf("failed transaction (InsertPackages): %s", err)
			return ErrTransaction
		}
	}

	// Return
	return nil
}
|
||||
|
||||
// FindOnePackage finds and returns a single package having the given OS, name and version, selecting the specified fields
//
// It returns cerrors.ErrNotFound when no package matches, and ErrInconsistent
// when more than one node carries the same (OS, name, version) identity.
func FindOnePackage(OS, name string, version types.Version, selectedFields []string) (*Package, error) {
	// The node identifier is deterministic in (OS, name, version), so a point
	// lookup on the computed node is sufficient.
	packageParameter := Package{OS: OS, Name: name, Version: version}
	p, err := toPackages(cayley.StartPath(store, packageParameter.GetNode()).Has(fieldIs, fieldPackageIsValue), selectedFields)

	if err != nil {
		return nil, err
	}
	if len(p) == 1 {
		return p[0], nil
	}
	if len(p) > 1 {
		log.Errorf("found multiple packages with identical data [OS: %s, Name: %s, Version: %s]", OS, name, version)
		return nil, ErrInconsistent
	}
	return nil, cerrors.ErrNotFound
}
|
||||
|
||||
// FindAllPackagesByNodes finds and returns all packages given by their nodes, selecting the specified fields
|
||||
func FindAllPackagesByNodes(nodes []string, selectedFields []string) ([]*Package, error) {
|
||||
if len(nodes) == 0 {
|
||||
return []*Package{}, nil
|
||||
}
|
||||
|
||||
return toPackages(cayley.StartPath(store, nodes...).Has(fieldIs, fieldPackageIsValue), selectedFields)
|
||||
}
|
||||
|
||||
// FindAllPackagesByBranch finds and returns all packages that belong to the given Branch, selecting the specified fields
//
// A branch is identified by the (OS, name) pair: the query walks backwards
// from the name value through FieldPackageName and filters on the OS.
func FindAllPackagesByBranch(OS, name string, selectedFields []string) ([]*Package, error) {
	return toPackages(cayley.StartPath(store, name).In(FieldPackageName).Has(FieldPackageOS, OS), selectedFields)
}
|
||||
|
||||
// toPackages converts a path leading to one or multiple packages to Package structs, selecting the specified fields
//
// Unknown field names panic; FieldPackagePreviousVersion is resolved with an
// extra per-package reverse lookup rather than a tag (see saveFields call).
func toPackages(path *path.Path, selectedFields []string) ([]*Package, error) {
	var packages []*Package
	var err error

	// Tag every selected field on the path, except PreviousVersion which has
	// no forward quad and must be resolved separately below.
	saveFields(path, selectedFields, []string{FieldPackagePreviousVersion})
	it, _ := path.BuildIterator().Optimize()
	defer it.Close()
	for cayley.RawNext(it) {
		tags := make(map[string]graph.Value)
		it.TagResults(tags)

		pkg := Package{Node: store.NameOf(it.Result())}
		for _, selectedField := range selectedFields {
			switch selectedField {
			case FieldPackageOS:
				pkg.OS = store.NameOf(tags[FieldPackageOS])
			case FieldPackageName:
				pkg.Name = store.NameOf(tags[FieldPackageName])
			case FieldPackageVersion:
				// A malformed stored version is only logged, not fatal: the
				// package is still returned with a zero Version.
				pkg.Version, err = types.NewVersion(store.NameOf(tags[FieldPackageVersion]))
				if err != nil {
					log.Warningf("could not parse version of package %s: %s", pkg.Node, err.Error())
				}
			case FieldPackageNextVersion:
				pkg.NextVersionNode = store.NameOf(tags[FieldPackageNextVersion])
			case FieldPackagePreviousVersion:
				// Reverse edge: the predecessor is whichever package points at
				// us through FieldPackageNextVersion.
				pkg.PreviousVersionNode, err = toValue(cayley.StartPath(store, pkg.Node).In(FieldPackageNextVersion))
				if err != nil {
					log.Warningf("could not get previousVersion on package %s: %s.", pkg.Node, err.Error())
					return []*Package{}, ErrInconsistent
				}
			default:
				panic("unknown selectedField")
			}
		}
		packages = append(packages, &pkg)
	}
	if it.Err() != nil {
		log.Errorf("failed query in toPackages: %s", it.Err())
		return []*Package{}, ErrBackendException
	}

	return packages, nil
}
|
||||
|
||||
// NextVersion find and returns the package of the same branch that has a higher version number, selecting the specified fields
|
||||
// It requires that FieldPackageNextVersion field has been selected on p
|
||||
func (p *Package) NextVersion(selectedFields []string) (*Package, error) {
|
||||
if p.NextVersionNode == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
v, err := FindAllPackagesByNodes([]string{p.NextVersionNode}, selectedFields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(v) != 1 {
|
||||
log.Errorf("found multiple packages when getting next version of package %s", p.Node)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
return v[0], nil
|
||||
}
|
||||
|
||||
// NextVersions find and returns all the packages of the same branch that have
|
||||
// a higher version number, selecting the specified fields
|
||||
// It requires that FieldPackageNextVersion field has been selected on p
|
||||
// The immediate higher version is listed first, and the special end-of-Branch package is last, p is not listed
|
||||
func (p *Package) NextVersions(selectedFields []string) ([]*Package, error) {
|
||||
var nextVersions []*Package
|
||||
|
||||
if !utils.Contains(FieldPackageNextVersion, selectedFields) {
|
||||
selectedFields = append(selectedFields, FieldPackageNextVersion)
|
||||
}
|
||||
|
||||
nextVersion, err := p.NextVersion(selectedFields)
|
||||
if err != nil {
|
||||
return []*Package{}, err
|
||||
}
|
||||
if nextVersion != nil {
|
||||
nextVersions = append(nextVersions, nextVersion)
|
||||
|
||||
nextNextVersions, err := nextVersion.NextVersions(selectedFields)
|
||||
if err != nil {
|
||||
return []*Package{}, err
|
||||
}
|
||||
nextVersions = append(nextVersions, nextNextVersions...)
|
||||
}
|
||||
|
||||
return nextVersions, nil
|
||||
}
|
||||
|
||||
// PreviousVersion find and returns the package of the same branch that has an
|
||||
// immediate lower version number, selecting the specified fields
|
||||
// It requires that FieldPackagePreviousVersion field has been selected on p
|
||||
func (p *Package) PreviousVersion(selectedFields []string) (*Package, error) {
|
||||
if p.PreviousVersionNode == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
v, err := FindAllPackagesByNodes([]string{p.PreviousVersionNode}, selectedFields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(v) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(v) != 1 {
|
||||
log.Errorf("found multiple packages when getting previous version of package %s", p.Node)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
return v[0], nil
|
||||
}
|
||||
|
||||
// PreviousVersions find and returns all the packages of the same branch that
|
||||
// have a lower version number, selecting the specified fields
|
||||
// It requires that FieldPackageNextVersion field has been selected on p
|
||||
// The immediate lower version is listed first, and the special start-of-Branch
|
||||
// package is last, p is not listed
|
||||
func (p *Package) PreviousVersions(selectedFields []string) ([]*Package, error) {
|
||||
var previousVersions []*Package
|
||||
|
||||
if !utils.Contains(FieldPackagePreviousVersion, selectedFields) {
|
||||
selectedFields = append(selectedFields, FieldPackagePreviousVersion)
|
||||
}
|
||||
|
||||
previousVersion, err := p.PreviousVersion(selectedFields)
|
||||
if err != nil {
|
||||
return []*Package{}, err
|
||||
}
|
||||
if previousVersion != nil {
|
||||
previousVersions = append(previousVersions, previousVersion)
|
||||
|
||||
previousPreviousVersions, err := previousVersion.PreviousVersions(selectedFields)
|
||||
if err != nil {
|
||||
return []*Package{}, err
|
||||
}
|
||||
previousVersions = append(previousVersions, previousPreviousVersions...)
|
||||
}
|
||||
|
||||
return previousVersions, nil
|
||||
}
|
||||
|
||||
// ByVersion implements sort.Interface for []*Package based on the Version field
// It uses github.com/quentin-m/dpkgcomp internally and makes use of types.MinVersion/types.MaxVersion
type ByVersion []*Package

func (p ByVersion) Len() int      { return len(p) }
func (p ByVersion) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

// Less orders packages by ascending version; the MinVersion/MaxVersion
// sentinels therefore sort first and last respectively.
func (p ByVersion) Less(i, j int) bool { return p[i].Version.Compare(p[j].Version) < 0 }
|
@ -1,194 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestPackage exercises the package linked-list storage end to end against an
// in-memory store: rejection of incomplete packages, single insertion,
// branch sentinel creation, multi-insert with duplicates, next/previous
// traversal, and version simplification.
func TestPackage(t *testing.T) {
	Open(&config.DatabaseConfig{Type: "memstore"})
	defer Close()

	// Try to insert invalid packages
	for _, invalidPkg := range []*Package{
		&Package{OS: "", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")},
		&Package{OS: "testOS", Name: "", Version: types.NewVersionUnsafe("1.0")},
		&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("")},
		&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("bad version")},
		&Package{OS: "", Name: "", Version: types.NewVersionUnsafe("")},
	} {
		err := InsertPackages([]*Package{invalidPkg})
		assert.Error(t, err)
	}

	// Insert a package
	pkg1 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
	err := InsertPackages([]*Package{pkg1})
	if assert.Nil(t, err) {
		// Find the inserted package and verify its content
		pkg1b, err := FindOnePackage(pkg1.OS, pkg1.Name, pkg1.Version, FieldPackageAll)
		if assert.Nil(t, err) && assert.NotNil(t, pkg1b) {
			assert.Equal(t, pkg1.Node, pkg1b.Node)
			assert.Equal(t, pkg1.OS, pkg1b.OS)
			assert.Equal(t, pkg1.Name, pkg1b.Name)
			assert.Equal(t, pkg1.Version, pkg1b.Version)
		}

		// Find packages from the inserted branch and verify their content
		// (the first one should be a start package, the second one the inserted one and the third one the end package)
		pkgs1c, err := FindAllPackagesByBranch(pkg1.OS, pkg1.Name, FieldPackageAll)
		if assert.Nil(t, err) && assert.Equal(t, 3, len(pkgs1c)) {
			sort.Sort(ByVersion(pkgs1c))

			assert.Equal(t, pkg1.OS, pkgs1c[0].OS)
			assert.Equal(t, pkg1.Name, pkgs1c[0].Name)
			assert.Equal(t, types.MinVersion, pkgs1c[0].Version)

			assert.Equal(t, pkg1.OS, pkgs1c[1].OS)
			assert.Equal(t, pkg1.Name, pkgs1c[1].Name)
			assert.Equal(t, pkg1.Version, pkgs1c[1].Version)

			assert.Equal(t, pkg1.OS, pkgs1c[2].OS)
			assert.Equal(t, pkg1.Name, pkgs1c[2].Name)
			assert.Equal(t, types.MaxVersion, pkgs1c[2].Version)
		}
	}

	// Insert multiple packages in the same branch, one in another branch, insert local duplicates and database duplicates as well
	pkg2 := []*Package{
		&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("0.8")},
		&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("0.9")},
		&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}, // Already present in the database
		&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.1")},
		&Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}, // Another branch
		&Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}, // Local duplicates
	}
	nbInSameBranch := 4 + 2 // (start/end packages)

	// Shuffle to make sure insertion order does not matter.
	err = InsertPackages(shuffle(pkg2))
	if assert.Nil(t, err) {
		// Find packages from the inserted branch, verify their order and NextVersion / PreviousVersion
		pkgs2b, err := FindAllPackagesByBranch("testOS", "testpkg1", FieldPackageAll)
		if assert.Nil(t, err) && assert.Equal(t, nbInSameBranch, len(pkgs2b)) {
			sort.Sort(ByVersion(pkgs2b))

			for i := 0; i < nbInSameBranch; i = i + 1 {
				if i == 0 {
					assert.Equal(t, types.MinVersion, pkgs2b[0].Version)
				} else if i < nbInSameBranch-2 {
					assert.Equal(t, pkg2[i].Version, pkgs2b[i+1].Version)

					nv, err := pkgs2b[i+1].NextVersion(FieldPackageAll)
					assert.Nil(t, err)
					assert.Equal(t, pkgs2b[i+2], nv)

					if i > 0 {
						pv, err := pkgs2b[i].PreviousVersion(FieldPackageAll)
						assert.Nil(t, err)
						assert.Equal(t, pkgs2b[i-1], pv)
					} else {
						pv, err := pkgs2b[i].PreviousVersion(FieldPackageAll)
						assert.Nil(t, err)
						assert.Nil(t, pv)
					}
				} else {
					assert.Equal(t, types.MaxVersion, pkgs2b[nbInSameBranch-1].Version)

					nv, err := pkgs2b[nbInSameBranch-1].NextVersion(FieldPackageAll)
					assert.Nil(t, err)
					assert.Nil(t, nv)

					pv, err := pkgs2b[i].PreviousVersion(FieldPackageAll)
					assert.Nil(t, err)
					assert.Equal(t, pkgs2b[i-1], pv)
				}
			}

			// NextVersions
			nv, err := pkgs2b[0].NextVersions(FieldPackageAll)
			if assert.Nil(t, err) && assert.Len(t, nv, nbInSameBranch-1) {
				for i := 0; i < nbInSameBranch-1; i = i + 1 {
					if i < nbInSameBranch-2 {
						assert.Equal(t, pkg2[i].Version, nv[i].Version)
					} else {
						assert.Equal(t, types.MaxVersion, nv[i].Version)
					}
				}
			}

			// PreviousVersions
			pv, err := pkgs2b[nbInSameBranch-1].PreviousVersions(FieldPackageAll)
			if assert.Nil(t, err) && assert.Len(t, pv, nbInSameBranch-1) {
				for i := 0; i < len(pv); i = i + 1 {
					assert.Equal(t, pkgs2b[len(pkgs2b)-i-2], pv[i])
				}
			}
		}

		// Verify that the one we added which was already present in the database has the same node value (meaning that we just fetched it actually)
		assert.Contains(t, pkg2, pkg1)
	}

	// Insert duplicated latest packages directly, ensure only one is actually inserted. Then insert another package in the branch and ensure that its next version is the latest one
	pkg3a := &Package{OS: "testOS", Name: "testpkg3", Version: types.MaxVersion}
	pkg3b := &Package{OS: "testOS", Name: "testpkg3", Version: types.MaxVersion}
	pkg3c := &Package{OS: "testOS", Name: "testpkg3", Version: types.MaxVersion}
	err1 := InsertPackages([]*Package{pkg3a, pkg3b})
	err2 := InsertPackages([]*Package{pkg3c})
	if assert.Nil(t, err1) && assert.Nil(t, err2) {
		assert.Equal(t, pkg3a, pkg3b)
		assert.Equal(t, pkg3b, pkg3c)
	}
	pkg4 := Package{OS: "testOS", Name: "testpkg3", Version: types.NewVersionUnsafe("1.0")}
	InsertPackages([]*Package{&pkg4})
	pkgs34, _ := FindAllPackagesByBranch("testOS", "testpkg3", FieldPackageAll)
	if assert.Len(t, pkgs34, 3) {
		sort.Sort(ByVersion(pkgs34))
		assert.Equal(t, pkg4.Node, pkgs34[1].Node)
		assert.Equal(t, pkg3a.Node, pkgs34[2].Node)
		assert.Equal(t, pkg3a.Node, pkgs34[1].NextVersionNode)
	}

	// Insert two identical packages but with "different" versions
	// The second version should be simplified to the first one
	// Therefore, we should just have three packages (the inserted one and the start/end packages of the branch)
	InsertPackages([]*Package{&Package{OS: "testOS", Name: "testdirtypkg", Version: types.NewVersionUnsafe("0.1")}})
	InsertPackages([]*Package{&Package{OS: "testOS", Name: "testdirtypkg", Version: types.NewVersionUnsafe("0:0.1")}})
	dirtypkgs, err := FindAllPackagesByBranch("testOS", "testdirtypkg", FieldPackageAll)
	assert.Nil(t, err)
	assert.Len(t, dirtypkgs, 3)
}
|
||||
|
||||
func shuffle(packageParameters []*Package) []*Package {
|
||||
rand.Seed(int64(time.Now().Nanosecond()))
|
||||
|
||||
sPackage := make([]*Package, len(packageParameters))
|
||||
copy(sPackage, packageParameters)
|
||||
|
||||
for i := len(sPackage) - 1; i > 0; i-- {
|
||||
j := rand.Intn(i)
|
||||
sPackage[i], sPackage[j] = sPackage[j], sPackage[i]
|
||||
}
|
||||
|
||||
return sPackage
|
||||
}
|
158
database/pgsql/complex_test.go
Normal file
158
database/pgsql/complex_test.go
Normal file
@ -0,0 +1,158 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/pborman/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
numVulnerabilities = 100
|
||||
numFeatureVersions = 100
|
||||
)
|
||||
|
||||
// TestRaceAffects inserts FeatureVersions and Vulnerabilities concurrently and
// then verifies that the Vulnerability_Affects_FeatureVersion join table ends
// up consistent regardless of interleaving.
func TestRaceAffects(t *testing.T) {
	datastore, err := OpenForTest("RaceAffects", false)
	if err != nil {
		t.Error(err)
		return
	}
	defer datastore.Close()

	// Insert the Feature on which we'll work.
	feature := database.Feature{
		Namespace: database.Namespace{Name: "TestRaceAffectsFeatureNamespace1"},
		Name:      "TestRaceAffecturesFeature1",
	}
	_, err = datastore.insertFeature(feature)
	if err != nil {
		t.Error(err)
		return
	}

	// Initialize random generator and enforce max procs.
	rand.Seed(time.Now().UnixNano())
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Generate FeatureVersions.
	featureVersions := make([]database.FeatureVersion, numFeatureVersions)
	for i := 0; i < numFeatureVersions; i++ {
		version := rand.Intn(numFeatureVersions)

		featureVersions[i] = database.FeatureVersion{
			Feature: feature,
			Version: types.NewVersionUnsafe(strconv.Itoa(version)),
		}
	}

	// Generate vulnerabilities.
	// They are mapped by fixed version, which will make verification really easy afterwards.
	vulnerabilities := make(map[int][]database.Vulnerability)
	for i := 0; i < numVulnerabilities; i++ {
		version := rand.Intn(numFeatureVersions) + 1

		// if _, ok := vulnerabilities[version]; !ok {
		//  vulnerabilities[version] = make([]database.Vulnerability)
		// }

		vulnerability := database.Vulnerability{
			Name:      uuid.New(),
			Namespace: feature.Namespace,
			FixedIn: []database.FeatureVersion{
				database.FeatureVersion{
					Feature: feature,
					Version: types.NewVersionUnsafe(strconv.Itoa(version)),
				},
			},
			Severity: types.Unknown,
		}

		vulnerabilities[version] = append(vulnerabilities[version], vulnerability)
	}

	// Insert featureversions and vulnerabilities in parallel.
	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		defer wg.Done()
		for _, vulnerabilitiesM := range vulnerabilities {
			for _, vulnerability := range vulnerabilitiesM {
				err = datastore.InsertVulnerabilities([]database.Vulnerability{vulnerability}, true)
				assert.Nil(t, err)
			}
		}
		fmt.Println("finished to insert vulnerabilities")
	}()

	go func() {
		defer wg.Done()
		for i := 0; i < len(featureVersions); i++ {
			featureVersions[i].ID, err = datastore.insertFeatureVersion(featureVersions[i])
			assert.Nil(t, err)
		}
		fmt.Println("finished to insert featureVersions")
	}()

	wg.Wait()

	// Verify consistency now.
	var actualAffectedNames []string
	var expectedAffectedNames []string

	for _, featureVersion := range featureVersions {
		featureVersionVersion, _ := strconv.Atoi(featureVersion.Version.String())

		// Get actual affects.
		rows, err := datastore.Query(searchComplexTestFeatureVersionAffects,
			featureVersion.ID)
		assert.Nil(t, err)
		// NOTE(review): `defer` inside a loop — every rows handle stays open
		// until the test returns (the explicit rows.Close() below mitigates
		// this on the happy path). Consider extracting the loop body into a
		// helper so the defer fires per iteration.
		defer rows.Close()

		var vulnName string
		for rows.Next() {
			err = rows.Scan(&vulnName)
			if !assert.Nil(t, err) {
				continue
			}
			actualAffectedNames = append(actualAffectedNames, vulnName)
		}
		if assert.Nil(t, rows.Err()) {
			rows.Close()
		}

		// Get expected affects.
		for i := numVulnerabilities; i > featureVersionVersion; i-- {
			for _, vulnerability := range vulnerabilities[i] {
				expectedAffectedNames = append(expectedAffectedNames, vulnerability.Name)
			}
		}

		// Compare both directions so the two lists are exactly equal as sets.
		assert.Len(t, utils.CompareStringLists(expectedAffectedNames, actualAffectedNames), 0)
		assert.Len(t, utils.CompareStringLists(actualAffectedNames, expectedAffectedNames), 0)
	}
}
|
223
database/pgsql/feature.go
Normal file
223
database/pgsql/feature.go
Normal file
@ -0,0 +1,223 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
)
|
||||
|
||||
// insertFeature finds or creates the given Feature (and its Namespace) and
// returns its database ID. Results are memoized in the optional in-memory
// cache keyed by "feature:<namespace>:<name>".
func (pgSQL *pgSQL) insertFeature(feature database.Feature) (int, error) {
	if feature.Name == "" {
		return 0, cerrors.NewBadRequestError("could not find/insert invalid Feature")
	}

	// Do cache lookup.
	if pgSQL.cache != nil {
		promCacheQueriesTotal.WithLabelValues("feature").Inc()
		id, found := pgSQL.cache.Get("feature:" + feature.Namespace.Name + ":" + feature.Name)
		if found {
			promCacheHitsTotal.WithLabelValues("feature").Inc()
			return id.(int), nil
		}
	}

	// We do `defer observeQueryTime` here because we don't want to observe cached features.
	defer observeQueryTime("insertFeature", "all", time.Now())

	// Find or create Namespace.
	namespaceID, err := pgSQL.insertNamespace(feature.Namespace)
	if err != nil {
		return 0, err
	}

	// Find or create Feature.
	// soiFeature is a select-or-insert query returning the row ID either way.
	var id int
	err = pgSQL.QueryRow(soiFeature, feature.Name, namespaceID).Scan(&id)
	if err != nil {
		return 0, handleError("soiFeature", err)
	}

	if pgSQL.cache != nil {
		pgSQL.cache.Add("feature:"+feature.Namespace.Name+":"+feature.Name, id)
	}

	return id, nil
}
|
||||
|
||||
func (pgSQL *pgSQL) insertFeatureVersion(featureVersion database.FeatureVersion) (id int, err error) {
|
||||
if featureVersion.Version.String() == "" {
|
||||
return 0, cerrors.NewBadRequestError("could not find/insert invalid FeatureVersion")
|
||||
}
|
||||
|
||||
// Do cache lookup.
|
||||
cacheIndex := "featureversion:" + featureVersion.Feature.Namespace.Name + ":" + featureVersion.Feature.Name + ":" + featureVersion.Version.String()
|
||||
if pgSQL.cache != nil {
|
||||
promCacheQueriesTotal.WithLabelValues("featureversion").Inc()
|
||||
id, found := pgSQL.cache.Get(cacheIndex)
|
||||
if found {
|
||||
promCacheHitsTotal.WithLabelValues("featureversion").Inc()
|
||||
return id.(int), nil
|
||||
}
|
||||
}
|
||||
|
||||
// We do `defer observeQueryTime` here because we don't want to observe cached featureversions.
|
||||
defer observeQueryTime("insertFeatureVersion", "all", time.Now())
|
||||
|
||||
// Find or create Feature first.
|
||||
t := time.Now()
|
||||
featureID, err := pgSQL.insertFeature(featureVersion.Feature)
|
||||
observeQueryTime("insertFeatureVersion", "insertFeature", t)
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
featureVersion.Feature.ID = featureID
|
||||
|
||||
// Begin transaction.
|
||||
tx, err := pgSQL.Begin()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, handleError("insertFeatureVersion.Begin()", err)
|
||||
}
|
||||
|
||||
// Lock Vulnerability_Affects_FeatureVersion exclusively.
|
||||
// We want to prevent InsertVulnerability to modify it.
|
||||
promConcurrentLockVAFV.Inc()
|
||||
defer promConcurrentLockVAFV.Dec()
|
||||
t = time.Now()
|
||||
_, err = tx.Exec(lockVulnerabilityAffects)
|
||||
observeQueryTime("insertFeatureVersion", "lock", t)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, handleError("insertFeatureVersion.lockVulnerabilityAffects", err)
|
||||
}
|
||||
|
||||
// Find or create FeatureVersion.
|
||||
var newOrExisting string
|
||||
|
||||
t = time.Now()
|
||||
err = tx.QueryRow(soiFeatureVersion, featureID, &featureVersion.Version).
|
||||
Scan(&newOrExisting, &featureVersion.ID)
|
||||
observeQueryTime("insertFeatureVersion", "soiFeatureVersion", t)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, handleError("soiFeatureVersion", err)
|
||||
}
|
||||
|
||||
if newOrExisting == "exi" {
|
||||
// That featureVersion already exists, return its id.
|
||||
tx.Commit()
|
||||
|
||||
if pgSQL.cache != nil {
|
||||
pgSQL.cache.Add(cacheIndex, featureVersion.ID)
|
||||
}
|
||||
|
||||
return featureVersion.ID, nil
|
||||
}
|
||||
|
||||
// Link the new FeatureVersion with every vulnerabilities that affect it, by inserting in
|
||||
// Vulnerability_Affects_FeatureVersion.
|
||||
t = time.Now()
|
||||
err = linkFeatureVersionToVulnerabilities(tx, featureVersion)
|
||||
observeQueryTime("insertFeatureVersion", "linkFeatureVersionToVulnerabilities", t)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Commit transaction.
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
return 0, handleError("insertFeatureVersion.Commit()", err)
|
||||
}
|
||||
|
||||
if pgSQL.cache != nil {
|
||||
pgSQL.cache.Add(cacheIndex, featureVersion.ID)
|
||||
}
|
||||
|
||||
return featureVersion.ID, nil
|
||||
}
|
||||
|
||||
// TODO(Quentin-M): Batch me
|
||||
func (pgSQL *pgSQL) insertFeatureVersions(featureVersions []database.FeatureVersion) ([]int, error) {
|
||||
IDs := make([]int, 0, len(featureVersions))
|
||||
|
||||
for i := 0; i < len(featureVersions); i++ {
|
||||
id, err := pgSQL.insertFeatureVersion(featureVersions[i])
|
||||
if err != nil {
|
||||
return IDs, err
|
||||
}
|
||||
IDs = append(IDs, id)
|
||||
}
|
||||
|
||||
return IDs, nil
|
||||
}
|
||||
|
||||
type vulnerabilityAffectsFeatureVersion struct {
|
||||
vulnerabilityID int
|
||||
fixedInID int
|
||||
fixedInVersion types.Version
|
||||
}
|
||||
|
||||
func linkFeatureVersionToVulnerabilities(tx *sql.Tx, featureVersion database.FeatureVersion) error {
|
||||
// Select every vulnerability and the fixed version that affect this Feature.
|
||||
// TODO(Quentin-M): LIMIT
|
||||
rows, err := tx.Query(searchVulnerabilityFixedInFeature, featureVersion.Feature.ID)
|
||||
if err != nil {
|
||||
return handleError("searchVulnerabilityFixedInFeature", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var affects []vulnerabilityAffectsFeatureVersion
|
||||
for rows.Next() {
|
||||
var affect vulnerabilityAffectsFeatureVersion
|
||||
|
||||
err := rows.Scan(&affect.fixedInID, &affect.vulnerabilityID, &affect.fixedInVersion)
|
||||
if err != nil {
|
||||
return handleError("searchVulnerabilityFixedInFeature.Scan()", err)
|
||||
}
|
||||
|
||||
if featureVersion.Version.Compare(affect.fixedInVersion) < 0 {
|
||||
// The version of the FeatureVersion we are inserting is lower than the fixed version on this
|
||||
// Vulnerability, thus, this FeatureVersion is affected by it.
|
||||
affects = append(affects, affect)
|
||||
}
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return handleError("searchVulnerabilityFixedInFeature.Rows()", err)
|
||||
}
|
||||
rows.Close()
|
||||
|
||||
// Insert into Vulnerability_Affects_FeatureVersion.
|
||||
for _, affect := range affects {
|
||||
// TODO(Quentin-M): Batch me.
|
||||
_, err := tx.Exec(insertVulnerabilityAffectsFeatureVersion, affect.vulnerabilityID,
|
||||
featureVersion.ID, affect.fixedInID)
|
||||
if err != nil {
|
||||
return handleError("insertVulnerabilityAffectsFeatureVersion", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
102
database/pgsql/feature_test.go
Normal file
102
database/pgsql/feature_test.go
Normal file
@ -0,0 +1,102 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestInsertFeature(t *testing.T) {
|
||||
datastore, err := OpenForTest("InsertFeature", false)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
defer datastore.Close()
|
||||
|
||||
// Invalid Feature.
|
||||
id0, err := datastore.insertFeature(database.Feature{})
|
||||
assert.NotNil(t, err)
|
||||
assert.Zero(t, id0)
|
||||
|
||||
id0, err = datastore.insertFeature(database.Feature{
|
||||
Namespace: database.Namespace{},
|
||||
Name: "TestInsertFeature0",
|
||||
})
|
||||
assert.NotNil(t, err)
|
||||
assert.Zero(t, id0)
|
||||
|
||||
// Insert Feature and ensure we can find it.
|
||||
feature := database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertFeatureNamespace1"},
|
||||
Name: "TestInsertFeature1",
|
||||
}
|
||||
id1, err := datastore.insertFeature(feature)
|
||||
assert.Nil(t, err)
|
||||
id2, err := datastore.insertFeature(feature)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, id1, id2)
|
||||
|
||||
// Insert invalid FeatureVersion.
|
||||
for _, invalidFeatureVersion := range []database.FeatureVersion{
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{},
|
||||
Version: types.NewVersionUnsafe("1.0"),
|
||||
},
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{},
|
||||
Name: "TestInsertFeature2",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("1.0"),
|
||||
},
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertFeatureNamespace2"},
|
||||
Name: "TestInsertFeature2",
|
||||
},
|
||||
Version: types.NewVersionUnsafe(""),
|
||||
},
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertFeatureNamespace2"},
|
||||
Name: "TestInsertFeature2",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("bad version"),
|
||||
},
|
||||
} {
|
||||
id3, err := datastore.insertFeatureVersion(invalidFeatureVersion)
|
||||
assert.Error(t, err)
|
||||
assert.Zero(t, id3)
|
||||
}
|
||||
|
||||
// Insert FeatureVersion and ensure we can find it.
|
||||
featureVersion := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertFeatureNamespace1"},
|
||||
Name: "TestInsertFeature1",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("2:3.0-imba"),
|
||||
}
|
||||
id4, err := datastore.insertFeatureVersion(featureVersion)
|
||||
assert.Nil(t, err)
|
||||
id5, err := datastore.insertFeatureVersion(featureVersion)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, id4, id5)
|
||||
}
|
83
database/pgsql/keyvalue.go
Normal file
83
database/pgsql/keyvalue.go
Normal file
@ -0,0 +1,83 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
)
|
||||
|
||||
// InsertKeyValue stores (or updates) a single key / value tuple.
|
||||
func (pgSQL *pgSQL) InsertKeyValue(key, value string) (err error) {
|
||||
if key == "" || value == "" {
|
||||
log.Warning("could not insert a flag which has an empty name or value")
|
||||
return cerrors.NewBadRequestError("could not insert a flag which has an empty name or value")
|
||||
}
|
||||
|
||||
defer observeQueryTime("InsertKeyValue", "all", time.Now())
|
||||
|
||||
// Upsert.
|
||||
//
|
||||
// Note: UPSERT works only on >= PostgreSQL 9.5 which is not yet supported by AWS RDS.
|
||||
// The best solution is currently the use of http://dba.stackexchange.com/a/13477
|
||||
// but the key/value storage doesn't need to be super-efficient and super-safe at the
|
||||
// moment so we can just use a client-side solution with transactions, based on
|
||||
// http://postgresql.org/docs/current/static/plpgsql-control-structures.html.
|
||||
// TODO(Quentin-M): Enable Upsert as soon as 9.5 is stable.
|
||||
|
||||
for {
|
||||
// First, try to update.
|
||||
r, err := pgSQL.Exec(updateKeyValue, value, key)
|
||||
if err != nil {
|
||||
return handleError("updateKeyValue", err)
|
||||
}
|
||||
if n, _ := r.RowsAffected(); n > 0 {
|
||||
// Updated successfully.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Try to insert the key.
|
||||
// If someone else inserts the same key concurrently, we could get a unique-key violation error.
|
||||
_, err = pgSQL.Exec(insertKeyValue, key, value)
|
||||
if err != nil {
|
||||
if isErrUniqueViolation(err) {
|
||||
// Got unique constraint violation, retry.
|
||||
continue
|
||||
}
|
||||
return handleError("insertKeyValue", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// GetValue reads a single key / value tuple and returns an empty string if the key doesn't exist.
|
||||
func (pgSQL *pgSQL) GetKeyValue(key string) (string, error) {
|
||||
defer observeQueryTime("GetKeyValue", "all", time.Now())
|
||||
|
||||
var value string
|
||||
err := pgSQL.QueryRow(searchKeyValue, key).Scan(&value)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return "", nil
|
||||
}
|
||||
if err != nil {
|
||||
return "", handleError("searchKeyValue", err)
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
52
database/pgsql/keyvalue_test.go
Normal file
52
database/pgsql/keyvalue_test.go
Normal file
@ -0,0 +1,52 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestKeyValue(t *testing.T) {
|
||||
datastore, err := OpenForTest("KeyValue", false)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
defer datastore.Close()
|
||||
|
||||
// Get non-existing key/value
|
||||
f, err := datastore.GetKeyValue("test")
|
||||
assert.Nil(t, err)
|
||||
assert.Empty(t, "", f)
|
||||
|
||||
// Try to insert invalid key/value.
|
||||
assert.Error(t, datastore.InsertKeyValue("test", ""))
|
||||
assert.Error(t, datastore.InsertKeyValue("", "test"))
|
||||
assert.Error(t, datastore.InsertKeyValue("", ""))
|
||||
|
||||
// Insert and verify.
|
||||
assert.Nil(t, datastore.InsertKeyValue("test", "test1"))
|
||||
f, err = datastore.GetKeyValue("test")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "test1", f)
|
||||
|
||||
// Update and verify.
|
||||
assert.Nil(t, datastore.InsertKeyValue("test", "test2"))
|
||||
f, err = datastore.GetKeyValue("test")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "test2", f)
|
||||
}
|
405
database/pgsql/layer.go
Normal file
405
database/pgsql/layer.go
Normal file
@ -0,0 +1,405 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/guregu/null/zero"
|
||||
)
|
||||
|
||||
func (pgSQL *pgSQL) FindLayer(name string, withFeatures, withVulnerabilities bool) (database.Layer, error) {
|
||||
subquery := "all"
|
||||
if withFeatures {
|
||||
subquery += "/features"
|
||||
} else if withVulnerabilities {
|
||||
subquery += "/features+vulnerabilities"
|
||||
}
|
||||
defer observeQueryTime("FindLayer", subquery, time.Now())
|
||||
|
||||
// Find the layer
|
||||
var layer database.Layer
|
||||
var parentID zero.Int
|
||||
var parentName zero.String
|
||||
var namespaceID zero.Int
|
||||
var namespaceName sql.NullString
|
||||
|
||||
t := time.Now()
|
||||
err := pgSQL.QueryRow(searchLayer, name).
|
||||
Scan(&layer.ID, &layer.Name, &layer.EngineVersion, &parentID, &parentName, &namespaceID,
|
||||
&namespaceName)
|
||||
observeQueryTime("FindLayer", "searchLayer", t)
|
||||
|
||||
if err != nil {
|
||||
return layer, handleError("searchLayer", err)
|
||||
}
|
||||
|
||||
if !parentID.IsZero() {
|
||||
layer.Parent = &database.Layer{
|
||||
Model: database.Model{ID: int(parentID.Int64)},
|
||||
Name: parentName.String,
|
||||
}
|
||||
}
|
||||
if !namespaceID.IsZero() {
|
||||
layer.Namespace = &database.Namespace{
|
||||
Model: database.Model{ID: int(namespaceID.Int64)},
|
||||
Name: namespaceName.String,
|
||||
}
|
||||
}
|
||||
|
||||
// Find its features
|
||||
if withFeatures || withVulnerabilities {
|
||||
// Create a transaction to disable hash/merge joins as our experiments have shown that
|
||||
// PostgreSQL 9.4 makes bad planning decisions about:
|
||||
// - joining the layer tree to feature versions and feature
|
||||
// - joining the feature versions to affected/fixed feature version and vulnerabilities
|
||||
// It would for instance do a merge join between affected feature versions (300 rows, estimated
|
||||
// 3000 rows) and fixed in feature version (100k rows). In this case, it is much more
|
||||
// preferred to use a nested loop.
|
||||
tx, err := pgSQL.Begin()
|
||||
if err != nil {
|
||||
return layer, handleError("FindLayer.Begin()", err)
|
||||
}
|
||||
defer tx.Commit()
|
||||
|
||||
_, err = tx.Exec(disableHashJoin)
|
||||
if err != nil {
|
||||
log.Warningf("FindLayer: could not disable hash join: %s", err)
|
||||
}
|
||||
_, err = tx.Exec(disableMergeJoin)
|
||||
if err != nil {
|
||||
log.Warningf("FindLayer: could not disable merge join: %s", err)
|
||||
}
|
||||
|
||||
t = time.Now()
|
||||
featureVersions, err := getLayerFeatureVersions(tx, layer.ID)
|
||||
observeQueryTime("FindLayer", "getLayerFeatureVersions", t)
|
||||
|
||||
if err != nil {
|
||||
return layer, err
|
||||
}
|
||||
|
||||
layer.Features = featureVersions
|
||||
|
||||
if withVulnerabilities {
|
||||
// Load the vulnerabilities that affect the FeatureVersions.
|
||||
t = time.Now()
|
||||
err := loadAffectedBy(tx, layer.Features)
|
||||
observeQueryTime("FindLayer", "loadAffectedBy", t)
|
||||
|
||||
if err != nil {
|
||||
return layer, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return layer, nil
|
||||
}
|
||||
|
||||
// getLayerFeatureVersions returns list of database.FeatureVersion that a database.Layer has.
|
||||
func getLayerFeatureVersions(tx *sql.Tx, layerID int) ([]database.FeatureVersion, error) {
|
||||
var featureVersions []database.FeatureVersion
|
||||
|
||||
// Query.
|
||||
rows, err := tx.Query(searchLayerFeatureVersion, layerID)
|
||||
if err != nil {
|
||||
return featureVersions, handleError("searchLayerFeatureVersion", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
// Scan query.
|
||||
var modification string
|
||||
mapFeatureVersions := make(map[int]database.FeatureVersion)
|
||||
for rows.Next() {
|
||||
var featureVersion database.FeatureVersion
|
||||
|
||||
err = rows.Scan(&featureVersion.ID, &modification, &featureVersion.Feature.Namespace.ID,
|
||||
&featureVersion.Feature.Namespace.Name, &featureVersion.Feature.ID,
|
||||
&featureVersion.Feature.Name, &featureVersion.ID, &featureVersion.Version,
|
||||
&featureVersion.AddedBy.ID, &featureVersion.AddedBy.Name)
|
||||
if err != nil {
|
||||
return featureVersions, handleError("searchLayerFeatureVersion.Scan()", err)
|
||||
}
|
||||
|
||||
// Do transitive closure.
|
||||
switch modification {
|
||||
case "add":
|
||||
mapFeatureVersions[featureVersion.ID] = featureVersion
|
||||
case "del":
|
||||
delete(mapFeatureVersions, featureVersion.ID)
|
||||
default:
|
||||
log.Warningf("unknown Layer_diff_FeatureVersion's modification: %s", modification)
|
||||
return featureVersions, database.ErrInconsistent
|
||||
}
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return featureVersions, handleError("searchLayerFeatureVersion.Rows()", err)
|
||||
}
|
||||
|
||||
// Build result by converting our map to a slice.
|
||||
for _, featureVersion := range mapFeatureVersions {
|
||||
featureVersions = append(featureVersions, featureVersion)
|
||||
}
|
||||
|
||||
return featureVersions, nil
|
||||
}
|
||||
|
||||
// loadAffectedBy returns the list of database.Vulnerability that affect the given
|
||||
// FeatureVersion.
|
||||
func loadAffectedBy(tx *sql.Tx, featureVersions []database.FeatureVersion) error {
|
||||
if len(featureVersions) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct list of FeatureVersion IDs, we will do a single query
|
||||
featureVersionIDs := make([]int, 0, len(featureVersions))
|
||||
for i := 0; i < len(featureVersions); i++ {
|
||||
featureVersionIDs = append(featureVersionIDs, featureVersions[i].ID)
|
||||
}
|
||||
|
||||
rows, err := tx.Query(searchFeatureVersionVulnerability,
|
||||
buildInputArray(featureVersionIDs))
|
||||
if err != nil && err != sql.ErrNoRows {
|
||||
return handleError("searchFeatureVersionVulnerability", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
vulnerabilities := make(map[int][]database.Vulnerability, len(featureVersions))
|
||||
var featureversionID int
|
||||
for rows.Next() {
|
||||
var vulnerability database.Vulnerability
|
||||
err := rows.Scan(&featureversionID, &vulnerability.ID, &vulnerability.Name,
|
||||
&vulnerability.Description, &vulnerability.Link, &vulnerability.Severity,
|
||||
&vulnerability.Metadata, &vulnerability.Namespace.Name, &vulnerability.FixedBy)
|
||||
if err != nil {
|
||||
return handleError("searchFeatureVersionVulnerability.Scan()", err)
|
||||
}
|
||||
vulnerabilities[featureversionID] = append(vulnerabilities[featureversionID], vulnerability)
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return handleError("searchFeatureVersionVulnerability.Rows()", err)
|
||||
}
|
||||
|
||||
// Assign vulnerabilities to every FeatureVersions
|
||||
for i := 0; i < len(featureVersions); i++ {
|
||||
featureVersions[i].AffectedBy = vulnerabilities[featureVersions[i].ID]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Internally, only Feature additions/removals are stored for each layer. If a layer has a parent,
|
||||
// the Feature list will be compared to the parent's Feature list and the difference will be stored.
|
||||
// Note that when the Namespace of a layer differs from its parent, it is expected that several
|
||||
// Feature that were already included a parent will have their Namespace updated as well
|
||||
// (happens when Feature detectors relies on the detected layer Namespace). However, if the listed
|
||||
// Feature has the same Name/Version as its parent, InsertLayer considers that the Feature hasn't
|
||||
// been modified.
|
||||
func (pgSQL *pgSQL) InsertLayer(layer database.Layer) error {
|
||||
tf := time.Now()
|
||||
|
||||
// Verify parameters
|
||||
if layer.Name == "" {
|
||||
log.Warning("could not insert a layer which has an empty Name")
|
||||
return cerrors.NewBadRequestError("could not insert a layer which has an empty Name")
|
||||
}
|
||||
|
||||
// Get a potentially existing layer.
|
||||
existingLayer, err := pgSQL.FindLayer(layer.Name, true, false)
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
return err
|
||||
} else if err == nil {
|
||||
if existingLayer.EngineVersion >= layer.EngineVersion {
|
||||
// The layer exists and has an equal or higher engine verison, do nothing.
|
||||
return nil
|
||||
}
|
||||
|
||||
layer.ID = existingLayer.ID
|
||||
}
|
||||
|
||||
// We do `defer observeQueryTime` here because we don't want to observe existing layers.
|
||||
defer observeQueryTime("InsertLayer", "all", tf)
|
||||
|
||||
// Get parent ID.
|
||||
var parentID zero.Int
|
||||
if layer.Parent != nil {
|
||||
if layer.Parent.ID == 0 {
|
||||
log.Warning("Parent is expected to be retrieved from database when inserting a layer.")
|
||||
return cerrors.NewBadRequestError("Parent is expected to be retrieved from database when inserting a layer.")
|
||||
}
|
||||
|
||||
parentID = zero.IntFrom(int64(layer.Parent.ID))
|
||||
}
|
||||
|
||||
// Find or insert namespace if provided.
|
||||
var namespaceID zero.Int
|
||||
if layer.Namespace != nil {
|
||||
n, err := pgSQL.insertNamespace(*layer.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
namespaceID = zero.IntFrom(int64(n))
|
||||
} else if layer.Namespace == nil && layer.Parent != nil {
|
||||
// Import the Namespace from the parent if it has one and this layer doesn't specify one.
|
||||
if layer.Parent.Namespace != nil {
|
||||
namespaceID = zero.IntFrom(int64(layer.Parent.Namespace.ID))
|
||||
}
|
||||
}
|
||||
|
||||
// Begin transaction.
|
||||
tx, err := pgSQL.Begin()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("InsertLayer.Begin()", err)
|
||||
}
|
||||
|
||||
if layer.ID == 0 {
|
||||
// Insert a new layer.
|
||||
err = tx.QueryRow(insertLayer, layer.Name, layer.EngineVersion, parentID, namespaceID).
|
||||
Scan(&layer.ID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
if isErrUniqueViolation(err) {
|
||||
// Ignore this error, another process collided.
|
||||
return nil
|
||||
}
|
||||
return handleError("insertLayer", err)
|
||||
}
|
||||
} else {
|
||||
// Update an existing layer.
|
||||
_, err = tx.Exec(updateLayer, layer.ID, layer.EngineVersion, namespaceID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("updateLayer", err)
|
||||
}
|
||||
|
||||
// Remove all existing Layer_diff_FeatureVersion.
|
||||
_, err = tx.Exec(removeLayerDiffFeatureVersion, layer.ID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("removeLayerDiffFeatureVersion", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Update Layer_diff_FeatureVersion now.
|
||||
err = pgSQL.updateDiffFeatureVersions(tx, &layer, &existingLayer)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit transaction.
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("InsertLayer.Commit()", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) updateDiffFeatureVersions(tx *sql.Tx, layer, existingLayer *database.Layer) error {
|
||||
// add and del are the FeatureVersion diff we should insert.
|
||||
var add []database.FeatureVersion
|
||||
var del []database.FeatureVersion
|
||||
|
||||
if layer.Parent == nil {
|
||||
// There is no parent, every Features are added.
|
||||
add = append(add, layer.Features...)
|
||||
} else if layer.Parent != nil {
|
||||
// There is a parent, we need to diff the Features with it.
|
||||
|
||||
// Build name:version structures.
|
||||
layerFeaturesMapNV, layerFeaturesNV := createNV(layer.Features)
|
||||
parentLayerFeaturesMapNV, parentLayerFeaturesNV := createNV(layer.Parent.Features)
|
||||
|
||||
// Calculate the added and deleted FeatureVersions name:version.
|
||||
addNV := utils.CompareStringLists(layerFeaturesNV, parentLayerFeaturesNV)
|
||||
delNV := utils.CompareStringLists(parentLayerFeaturesNV, layerFeaturesNV)
|
||||
|
||||
// Fill the structures containing the added and deleted FeatureVersions
|
||||
for _, nv := range addNV {
|
||||
add = append(add, *layerFeaturesMapNV[nv])
|
||||
}
|
||||
for _, nv := range delNV {
|
||||
del = append(del, *parentLayerFeaturesMapNV[nv])
|
||||
}
|
||||
}
|
||||
|
||||
// Insert FeatureVersions in the database.
|
||||
addIDs, err := pgSQL.insertFeatureVersions(add)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
delIDs, err := pgSQL.insertFeatureVersions(del)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Insert diff in the database.
|
||||
if len(addIDs) > 0 {
|
||||
_, err = tx.Exec(insertLayerDiffFeatureVersion, layer.ID, "add", buildInputArray(addIDs))
|
||||
if err != nil {
|
||||
return handleError("insertLayerDiffFeatureVersion.Add", err)
|
||||
}
|
||||
}
|
||||
if len(delIDs) > 0 {
|
||||
_, err = tx.Exec(insertLayerDiffFeatureVersion, layer.ID, "del", buildInputArray(delIDs))
|
||||
if err != nil {
|
||||
return handleError("insertLayerDiffFeatureVersion.Del", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createNV(features []database.FeatureVersion) (map[string]*database.FeatureVersion, []string) {
|
||||
mapNV := make(map[string]*database.FeatureVersion, 0)
|
||||
sliceNV := make([]string, 0, len(features))
|
||||
|
||||
for i := 0; i < len(features); i++ {
|
||||
featureVersion := &features[i]
|
||||
nv := featureVersion.Feature.Name + ":" + featureVersion.Version.String()
|
||||
mapNV[nv] = featureVersion
|
||||
sliceNV = append(sliceNV, nv)
|
||||
}
|
||||
|
||||
return mapNV, sliceNV
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) DeleteLayer(name string) error {
|
||||
defer observeQueryTime("DeleteLayer", "all", time.Now())
|
||||
|
||||
result, err := pgSQL.Exec(removeLayer, name)
|
||||
if err != nil {
|
||||
return handleError("removeLayer", err)
|
||||
}
|
||||
|
||||
affected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return handleError("removeLayer.RowsAffected()", err)
|
||||
}
|
||||
|
||||
if affected <= 0 {
|
||||
return cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
350
database/pgsql/layer_test.go
Normal file
350
database/pgsql/layer_test.go
Normal file
@ -0,0 +1,350 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFindLayer(t *testing.T) {
|
||||
datastore, err := OpenForTest("FindLayer", true)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
defer datastore.Close()
|
||||
|
||||
// Layer-0: no parent, no namespace, no feature, no vulnerability
|
||||
layer, err := datastore.FindLayer("layer-0", false, false)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, layer) {
|
||||
assert.Equal(t, "layer-0", layer.Name)
|
||||
assert.Nil(t, layer.Namespace)
|
||||
assert.Nil(t, layer.Parent)
|
||||
assert.Equal(t, 1, layer.EngineVersion)
|
||||
assert.Len(t, layer.Features, 0)
|
||||
}
|
||||
|
||||
layer, err = datastore.FindLayer("layer-0", true, false)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, layer) {
|
||||
assert.Len(t, layer.Features, 0)
|
||||
}
|
||||
|
||||
// Layer-1: one parent, adds two features, one vulnerability
|
||||
layer, err = datastore.FindLayer("layer-1", false, false)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, layer) {
|
||||
assert.Equal(t, layer.Name, "layer-1")
|
||||
assert.Equal(t, "debian:7", layer.Namespace.Name)
|
||||
if assert.NotNil(t, layer.Parent) {
|
||||
assert.Equal(t, "layer-0", layer.Parent.Name)
|
||||
}
|
||||
assert.Equal(t, 1, layer.EngineVersion)
|
||||
assert.Len(t, layer.Features, 0)
|
||||
}
|
||||
|
||||
layer, err = datastore.FindLayer("layer-1", true, false)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, layer) && assert.Len(t, layer.Features, 2) {
|
||||
for _, featureVersion := range layer.Features {
|
||||
assert.Equal(t, "debian:7", featureVersion.Feature.Namespace.Name)
|
||||
|
||||
switch featureVersion.Feature.Name {
|
||||
case "wechat":
|
||||
assert.Equal(t, types.NewVersionUnsafe("0.5"), featureVersion.Version)
|
||||
case "openssl":
|
||||
assert.Equal(t, types.NewVersionUnsafe("1.0"), featureVersion.Version)
|
||||
default:
|
||||
t.Errorf("unexpected package %s for layer-1", featureVersion.Feature.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
layer, err = datastore.FindLayer("layer-1", true, true)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, layer) && assert.Len(t, layer.Features, 2) {
|
||||
for _, featureVersion := range layer.Features {
|
||||
assert.Equal(t, "debian:7", featureVersion.Feature.Namespace.Name)
|
||||
|
||||
switch featureVersion.Feature.Name {
|
||||
case "wechat":
|
||||
assert.Equal(t, types.NewVersionUnsafe("0.5"), featureVersion.Version)
|
||||
case "openssl":
|
||||
assert.Equal(t, types.NewVersionUnsafe("1.0"), featureVersion.Version)
|
||||
|
||||
if assert.Len(t, featureVersion.AffectedBy, 1) {
|
||||
assert.Equal(t, "debian:7", featureVersion.AffectedBy[0].Namespace.Name)
|
||||
assert.Equal(t, "CVE-OPENSSL-1-DEB7", featureVersion.AffectedBy[0].Name)
|
||||
assert.Equal(t, types.High, featureVersion.AffectedBy[0].Severity)
|
||||
assert.Equal(t, "A vulnerability affecting OpenSSL < 2.0 on Debian 7.0", featureVersion.AffectedBy[0].Description)
|
||||
assert.Equal(t, "http://google.com/#q=CVE-OPENSSL-1-DEB7", featureVersion.AffectedBy[0].Link)
|
||||
assert.Equal(t, types.NewVersionUnsafe("2.0"), featureVersion.AffectedBy[0].FixedBy)
|
||||
}
|
||||
default:
|
||||
t.Errorf("unexpected package %s for layer-1", featureVersion.Feature.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertLayer(t *testing.T) {
|
||||
datastore, err := OpenForTest("InsertLayer", false)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
defer datastore.Close()
|
||||
|
||||
// Insert invalid layer.
|
||||
testInsertLayerInvalid(t, datastore)
|
||||
|
||||
// Insert a layer tree.
|
||||
testInsertLayerTree(t, datastore)
|
||||
|
||||
// Update layer.
|
||||
testInsertLayerUpdate(t, datastore)
|
||||
|
||||
// Delete layer.
|
||||
testInsertLayerDelete(t, datastore)
|
||||
}
|
||||
|
||||
func testInsertLayerInvalid(t *testing.T, datastore database.Datastore) {
|
||||
invalidLayers := []database.Layer{
|
||||
database.Layer{},
|
||||
database.Layer{Name: "layer0", Parent: &database.Layer{}},
|
||||
database.Layer{Name: "layer0", Parent: &database.Layer{Name: "UnknownLayer"}},
|
||||
}
|
||||
|
||||
for _, invalidLayer := range invalidLayers {
|
||||
err := datastore.InsertLayer(invalidLayer)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func testInsertLayerTree(t *testing.T, datastore database.Datastore) {
|
||||
f1 := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertLayerNamespace2"},
|
||||
Name: "TestInsertLayerFeature1",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("1.0"),
|
||||
}
|
||||
f2 := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertLayerNamespace2"},
|
||||
Name: "TestInsertLayerFeature2",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.34"),
|
||||
}
|
||||
f3 := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertLayerNamespace2"},
|
||||
Name: "TestInsertLayerFeature3",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.56"),
|
||||
}
|
||||
f4 := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertLayerNamespace3"},
|
||||
Name: "TestInsertLayerFeature2",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.34"),
|
||||
}
|
||||
f5 := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertLayerNamespace3"},
|
||||
Name: "TestInsertLayerFeature3",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.57"),
|
||||
}
|
||||
f6 := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertLayerNamespace3"},
|
||||
Name: "TestInsertLayerFeature4",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.666"),
|
||||
}
|
||||
|
||||
layers := []database.Layer{
|
||||
database.Layer{
|
||||
Name: "TestInsertLayer1",
|
||||
},
|
||||
database.Layer{
|
||||
Name: "TestInsertLayer2",
|
||||
Parent: &database.Layer{Name: "TestInsertLayer1"},
|
||||
Namespace: &database.Namespace{Name: "TestInsertLayerNamespace1"},
|
||||
},
|
||||
// This layer changes the namespace and adds Features.
|
||||
database.Layer{
|
||||
Name: "TestInsertLayer3",
|
||||
Parent: &database.Layer{Name: "TestInsertLayer2"},
|
||||
Namespace: &database.Namespace{Name: "TestInsertLayerNamespace2"},
|
||||
Features: []database.FeatureVersion{f1, f2, f3},
|
||||
},
|
||||
// This layer covers the case where the last layer doesn't provide any new Feature.
|
||||
database.Layer{
|
||||
Name: "TestInsertLayer4a",
|
||||
Parent: &database.Layer{Name: "TestInsertLayer3"},
|
||||
Features: []database.FeatureVersion{f1, f2, f3},
|
||||
},
|
||||
// This layer covers the case where the last layer provides Features.
|
||||
// It also modifies the Namespace ("upgrade") but keeps some Features not upgraded, their
|
||||
// Namespaces should then remain unchanged.
|
||||
database.Layer{
|
||||
Name: "TestInsertLayer4b",
|
||||
Parent: &database.Layer{Name: "TestInsertLayer3"},
|
||||
Namespace: &database.Namespace{Name: "TestInsertLayerNamespace3"},
|
||||
Features: []database.FeatureVersion{
|
||||
// Deletes TestInsertLayerFeature1.
|
||||
// Keep TestInsertLayerFeature2 (old Namespace should be kept):
|
||||
f4,
|
||||
// Upgrades TestInsertLayerFeature3 (with new Namespace):
|
||||
f5,
|
||||
// Adds TestInsertLayerFeature4:
|
||||
f6,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
retrievedLayers := make(map[string]database.Layer)
|
||||
for _, layer := range layers {
|
||||
if layer.Parent != nil {
|
||||
// Retrieve from database its parent and assign.
|
||||
parent := retrievedLayers[layer.Parent.Name]
|
||||
layer.Parent = &parent
|
||||
}
|
||||
|
||||
err = datastore.InsertLayer(layer)
|
||||
assert.Nil(t, err)
|
||||
|
||||
retrievedLayers[layer.Name], err = datastore.FindLayer(layer.Name, true, false)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
l4a := retrievedLayers["TestInsertLayer4a"]
|
||||
if assert.NotNil(t, l4a.Namespace) {
|
||||
assert.Equal(t, "TestInsertLayerNamespace2", l4a.Namespace.Name)
|
||||
}
|
||||
assert.Len(t, l4a.Features, 3)
|
||||
for _, featureVersion := range l4a.Features {
|
||||
if cmpFV(featureVersion, f1) && cmpFV(featureVersion, f2) && cmpFV(featureVersion, f3) {
|
||||
assert.Error(t, fmt.Errorf("TestInsertLayer4a contains an unexpected package: %#v. Should contain %#v and %#v and %#v.", featureVersion, f1, f2, f3))
|
||||
}
|
||||
}
|
||||
|
||||
l4b := retrievedLayers["TestInsertLayer4b"]
|
||||
if assert.NotNil(t, l4b.Namespace) {
|
||||
assert.Equal(t, "TestInsertLayerNamespace3", l4b.Namespace.Name)
|
||||
}
|
||||
assert.Len(t, l4b.Features, 3)
|
||||
for _, featureVersion := range l4b.Features {
|
||||
if cmpFV(featureVersion, f2) && cmpFV(featureVersion, f5) && cmpFV(featureVersion, f6) {
|
||||
assert.Error(t, fmt.Errorf("TestInsertLayer4a contains an unexpected package: %#v. Should contain %#v and %#v and %#v.", featureVersion, f2, f4, f6))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testInsertLayerUpdate(t *testing.T, datastore database.Datastore) {
|
||||
f7 := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "TestInsertLayerNamespace3"},
|
||||
Name: "TestInsertLayerFeature7",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.01"),
|
||||
}
|
||||
|
||||
l3, _ := datastore.FindLayer("TestInsertLayer3", true, false)
|
||||
l3u := database.Layer{
|
||||
Name: l3.Name,
|
||||
Parent: l3.Parent,
|
||||
Namespace: &database.Namespace{Name: "TestInsertLayerNamespaceUpdated1"},
|
||||
Features: []database.FeatureVersion{f7},
|
||||
}
|
||||
|
||||
l4u := database.Layer{
|
||||
Name: "TestInsertLayer4",
|
||||
Parent: &database.Layer{Name: "TestInsertLayer3"},
|
||||
Features: []database.FeatureVersion{f7},
|
||||
EngineVersion: 2,
|
||||
}
|
||||
|
||||
// Try to re-insert without increasing the EngineVersion.
|
||||
err := datastore.InsertLayer(l3u)
|
||||
assert.Nil(t, err)
|
||||
|
||||
l3uf, err := datastore.FindLayer(l3u.Name, true, false)
|
||||
if assert.Nil(t, err) {
|
||||
assert.Equal(t, l3.Namespace.Name, l3uf.Namespace.Name)
|
||||
assert.Equal(t, l3.EngineVersion, l3uf.EngineVersion)
|
||||
assert.Len(t, l3uf.Features, len(l3.Features))
|
||||
}
|
||||
|
||||
// Update layer l3.
|
||||
// Verify that the Namespace, EngineVersion and FeatureVersions got updated.
|
||||
l3u.EngineVersion = 2
|
||||
err = datastore.InsertLayer(l3u)
|
||||
assert.Nil(t, err)
|
||||
|
||||
l3uf, err = datastore.FindLayer(l3u.Name, true, false)
|
||||
if assert.Nil(t, err) {
|
||||
assert.Equal(t, l3u.Namespace.Name, l3uf.Namespace.Name)
|
||||
assert.Equal(t, l3u.EngineVersion, l3uf.EngineVersion)
|
||||
if assert.Len(t, l3uf.Features, 1) {
|
||||
assert.True(t, cmpFV(l3uf.Features[0], f7), "Updated layer should have %#v but actually have %#v", f7, l3uf.Features[0])
|
||||
}
|
||||
}
|
||||
|
||||
// Update layer l4.
|
||||
// Verify that the Namespace got updated from its new Parent's, and also verify the
|
||||
// EnginVersion and FeatureVersions.
|
||||
l4u.Parent = &l3uf
|
||||
err = datastore.InsertLayer(l4u)
|
||||
assert.Nil(t, err)
|
||||
|
||||
l4uf, err := datastore.FindLayer(l3u.Name, true, false)
|
||||
if assert.Nil(t, err) {
|
||||
assert.Equal(t, l3u.Namespace.Name, l4uf.Namespace.Name)
|
||||
assert.Equal(t, l4u.EngineVersion, l4uf.EngineVersion)
|
||||
if assert.Len(t, l4uf.Features, 1) {
|
||||
assert.True(t, cmpFV(l3uf.Features[0], f7), "Updated layer should have %#v but actually have %#v", f7, l4uf.Features[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testInsertLayerDelete(t *testing.T, datastore database.Datastore) {
|
||||
err := datastore.DeleteLayer("TestInsertLayerX")
|
||||
assert.Equal(t, cerrors.ErrNotFound, err)
|
||||
|
||||
err = datastore.DeleteLayer("TestInsertLayer3")
|
||||
assert.Nil(t, err)
|
||||
|
||||
_, err = datastore.FindLayer("TestInsertLayer3", false, false)
|
||||
assert.Equal(t, cerrors.ErrNotFound, err)
|
||||
|
||||
_, err = datastore.FindLayer("TestInsertLayer4a", false, false)
|
||||
assert.Equal(t, cerrors.ErrNotFound, err)
|
||||
|
||||
_, err = datastore.FindLayer("TestInsertLayer4b", true, false)
|
||||
assert.Equal(t, cerrors.ErrNotFound, err)
|
||||
}
|
||||
|
||||
func cmpFV(a, b database.FeatureVersion) bool {
|
||||
return a.Feature.Name == b.Feature.Name &&
|
||||
a.Feature.Namespace.Name == b.Feature.Namespace.Name &&
|
||||
a.Version.String() == b.Version.String()
|
||||
}
|
105
database/pgsql/lock.go
Normal file
105
database/pgsql/lock.go
Normal file
@ -0,0 +1,105 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
)
|
||||
|
||||
// Lock tries to set a temporary lock in the database.
|
||||
//
|
||||
// Lock does not block, instead, it returns true and its expiration time
|
||||
// is the lock has been successfully acquired or false otherwise
|
||||
func (pgSQL *pgSQL) Lock(name string, owner string, duration time.Duration, renew bool) (bool, time.Time) {
|
||||
if name == "" || owner == "" || duration == 0 {
|
||||
log.Warning("could not create an invalid lock")
|
||||
return false, time.Time{}
|
||||
}
|
||||
|
||||
defer observeQueryTime("Lock", "all", time.Now())
|
||||
|
||||
// Compute expiration.
|
||||
until := time.Now().Add(duration)
|
||||
|
||||
if renew {
|
||||
// Renew lock.
|
||||
r, err := pgSQL.Exec(updateLock, name, owner, until)
|
||||
if err != nil {
|
||||
handleError("updateLock", err)
|
||||
return false, until
|
||||
}
|
||||
if n, _ := r.RowsAffected(); n > 0 {
|
||||
// Updated successfully.
|
||||
return true, until
|
||||
}
|
||||
} else {
|
||||
// Prune locks.
|
||||
pgSQL.pruneLocks()
|
||||
}
|
||||
|
||||
// Lock.
|
||||
_, err := pgSQL.Exec(insertLock, name, owner, until)
|
||||
if err != nil {
|
||||
if !isErrUniqueViolation(err) {
|
||||
handleError("insertLock", err)
|
||||
}
|
||||
return false, until
|
||||
}
|
||||
|
||||
return true, until
|
||||
}
|
||||
|
||||
// Unlock unlocks a lock specified by its name if I own it
|
||||
func (pgSQL *pgSQL) Unlock(name, owner string) {
|
||||
if name == "" || owner == "" {
|
||||
log.Warning("could not delete an invalid lock")
|
||||
return
|
||||
}
|
||||
|
||||
defer observeQueryTime("Unlock", "all", time.Now())
|
||||
|
||||
pgSQL.Exec(removeLock, name, owner)
|
||||
}
|
||||
|
||||
// FindLock returns the owner of a lock specified by its name and its
|
||||
// expiration time.
|
||||
func (pgSQL *pgSQL) FindLock(name string) (string, time.Time, error) {
|
||||
if name == "" {
|
||||
log.Warning("could not find an invalid lock")
|
||||
return "", time.Time{}, cerrors.NewBadRequestError("could not find an invalid lock")
|
||||
}
|
||||
|
||||
defer observeQueryTime("FindLock", "all", time.Now())
|
||||
|
||||
var owner string
|
||||
var until time.Time
|
||||
err := pgSQL.QueryRow(searchLock, name).Scan(&owner, &until)
|
||||
if err != nil {
|
||||
return owner, until, handleError("searchLock", err)
|
||||
}
|
||||
|
||||
return owner, until, nil
|
||||
}
|
||||
|
||||
// pruneLocks removes every expired locks from the database
|
||||
func (pgSQL *pgSQL) pruneLocks() {
|
||||
defer observeQueryTime("pruneLocks", "all", time.Now())
|
||||
|
||||
if _, err := pgSQL.Exec(removeLockExpired); err != nil {
|
||||
handleError("removeLockExpired", err)
|
||||
}
|
||||
}
|
@ -12,46 +12,58 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLock(t *testing.T) {
|
||||
Open(&config.DatabaseConfig{Type: "memstore"})
|
||||
defer Close()
|
||||
datastore, err := OpenForTest("InsertNamespace", false)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
defer datastore.Close()
|
||||
|
||||
var l bool
|
||||
var et time.Time
|
||||
|
||||
// Create a first lock
|
||||
l, _ = Lock("test1", time.Minute, "owner1")
|
||||
// Create a first lock.
|
||||
l, _ = datastore.Lock("test1", "owner1", time.Minute, false)
|
||||
assert.True(t, l)
|
||||
// Try to lock the same lock with another owner
|
||||
l, _ = Lock("test1", time.Minute, "owner2")
|
||||
|
||||
// Try to lock the same lock with another owner.
|
||||
l, _ = datastore.Lock("test1", "owner2", time.Minute, true)
|
||||
assert.False(t, l)
|
||||
// Renew the lock
|
||||
l, _ = Lock("test1", 2*time.Minute, "owner1")
|
||||
|
||||
l, _ = datastore.Lock("test1", "owner2", time.Minute, false)
|
||||
assert.False(t, l)
|
||||
|
||||
// Renew the lock.
|
||||
l, _ = datastore.Lock("test1", "owner1", 2*time.Minute, true)
|
||||
assert.True(t, l)
|
||||
// Unlock and then relock by someone else
|
||||
Unlock("test1", "owner1")
|
||||
l, et = Lock("test1", time.Minute, "owner2")
|
||||
|
||||
// Unlock and then relock by someone else.
|
||||
datastore.Unlock("test1", "owner1")
|
||||
|
||||
l, et = datastore.Lock("test1", "owner2", time.Minute, false)
|
||||
assert.True(t, l)
|
||||
|
||||
// LockInfo
|
||||
o, et2, err := LockInfo("test1")
|
||||
o, et2, err := datastore.FindLock("test1")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "owner2", o)
|
||||
assert.Equal(t, et.Second(), et2.Second())
|
||||
|
||||
// Create a second lock which is actually already expired ...
|
||||
l, _ = Lock("test2", -time.Minute, "owner1")
|
||||
l, _ = datastore.Lock("test2", "owner1", -time.Minute, false)
|
||||
assert.True(t, l)
|
||||
|
||||
// Take over the lock
|
||||
l, _ = Lock("test2", time.Minute, "owner2")
|
||||
l, _ = datastore.Lock("test2", "owner2", time.Minute, false)
|
||||
assert.True(t, l)
|
||||
}
|
174
database/pgsql/migrations/20151222113213_Initial.sql
Normal file
174
database/pgsql/migrations/20151222113213_Initial.sql
Normal file
@ -0,0 +1,174 @@
|
||||
-- Copyright 2015 clair authors
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- +goose Up
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Namespace
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS Namespace (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(128) NULL);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Layer
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS Layer (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(128) NOT NULL UNIQUE,
|
||||
engineversion SMALLINT NOT NULL,
|
||||
parent_id INT NULL REFERENCES Layer ON DELETE CASCADE,
|
||||
namespace_id INT NULL REFERENCES Namespace,
|
||||
created_at TIMESTAMP WITH TIME ZONE);
|
||||
|
||||
CREATE INDEX ON Layer (parent_id);
|
||||
CREATE INDEX ON Layer (namespace_id);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Feature
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS Feature (
|
||||
id SERIAL PRIMARY KEY,
|
||||
namespace_id INT NOT NULL REFERENCES Namespace,
|
||||
name VARCHAR(128) NOT NULL,
|
||||
|
||||
UNIQUE (namespace_id, name));
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table FeatureVersion
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS FeatureVersion (
|
||||
id SERIAL PRIMARY KEY,
|
||||
feature_id INT NOT NULL REFERENCES Feature,
|
||||
version VARCHAR(128) NOT NULL);
|
||||
|
||||
CREATE INDEX ON FeatureVersion (feature_id);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Layer_diff_FeatureVersion
|
||||
-- -----------------------------------------------------
|
||||
CREATE TYPE modification AS ENUM ('add', 'del');
|
||||
|
||||
CREATE TABLE IF NOT EXISTS Layer_diff_FeatureVersion (
|
||||
id SERIAL PRIMARY KEY,
|
||||
layer_id INT NOT NULL REFERENCES Layer ON DELETE CASCADE,
|
||||
featureversion_id INT NOT NULL REFERENCES FeatureVersion,
|
||||
modification modification NOT NULL,
|
||||
|
||||
UNIQUE (layer_id, featureversion_id));
|
||||
|
||||
CREATE INDEX ON Layer_diff_FeatureVersion (layer_id);
|
||||
CREATE INDEX ON Layer_diff_FeatureVersion (featureversion_id);
|
||||
CREATE INDEX ON Layer_diff_FeatureVersion (featureversion_id, layer_id);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Vulnerability
|
||||
-- -----------------------------------------------------
|
||||
CREATE TYPE severity AS ENUM ('Unknown', 'Negligible', 'Low', 'Medium', 'High', 'Critical', 'Defcon1');
|
||||
|
||||
CREATE TABLE IF NOT EXISTS Vulnerability (
|
||||
id SERIAL PRIMARY KEY,
|
||||
namespace_id INT NOT NULL REFERENCES Namespace,
|
||||
name VARCHAR(128) NOT NULL,
|
||||
description TEXT NULL,
|
||||
link VARCHAR(128) NULL,
|
||||
severity severity NOT NULL,
|
||||
metadata TEXT NULL,
|
||||
created_at TIMESTAMP WITH TIME ZONE,
|
||||
deleted_at TIMESTAMP WITH TIME ZONE NULL);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Vulnerability_FixedIn_Feature
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS Vulnerability_FixedIn_Feature (
|
||||
id SERIAL PRIMARY KEY,
|
||||
vulnerability_id INT NOT NULL REFERENCES Vulnerability ON DELETE CASCADE,
|
||||
feature_id INT NOT NULL REFERENCES Feature,
|
||||
version VARCHAR(128) NOT NULL,
|
||||
|
||||
UNIQUE (vulnerability_id, feature_id));
|
||||
|
||||
CREATE INDEX ON Vulnerability_FixedIn_Feature (feature_id, vulnerability_id);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Vulnerability_Affects_FeatureVersion
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS Vulnerability_Affects_FeatureVersion (
|
||||
id SERIAL PRIMARY KEY,
|
||||
vulnerability_id INT NOT NULL REFERENCES Vulnerability ON DELETE CASCADE,
|
||||
featureversion_id INT NOT NULL REFERENCES FeatureVersion,
|
||||
fixedin_id INT NOT NULL REFERENCES Vulnerability_FixedIn_Feature ON DELETE CASCADE,
|
||||
|
||||
UNIQUE (vulnerability_id, featureversion_id));
|
||||
|
||||
CREATE INDEX ON Vulnerability_Affects_FeatureVersion (fixedin_id);
|
||||
CREATE INDEX ON Vulnerability_Affects_FeatureVersion (featureversion_id, vulnerability_id);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table KeyValue
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS KeyValue (
|
||||
id SERIAL PRIMARY KEY,
|
||||
key VARCHAR(128) NOT NULL UNIQUE,
|
||||
value TEXT);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Lock
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS Lock (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(64) NOT NULL UNIQUE,
|
||||
owner VARCHAR(64) NOT NULL,
|
||||
until TIMESTAMP WITH TIME ZONE);
|
||||
|
||||
CREATE INDEX ON Lock (owner);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table VulnerabilityNotification
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS Vulnerability_Notification (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(64) NOT NULL UNIQUE,
|
||||
created_at TIMESTAMP WITH TIME ZONE,
|
||||
notified_at TIMESTAMP WITH TIME ZONE NULL,
|
||||
deleted_at TIMESTAMP WITH TIME ZONE NULL,
|
||||
old_vulnerability_id INT NULL REFERENCES Vulnerability ON DELETE CASCADE,
|
||||
new_vulnerability_id INT NULL REFERENCES Vulnerability ON DELETE CASCADE);
|
||||
|
||||
CREATE INDEX ON Vulnerability_Notification (notified_at);
|
||||
|
||||
-- +goose Down
|
||||
|
||||
DROP TABLE IF EXISTS Namespace,
|
||||
Layer,
|
||||
Feature,
|
||||
FeatureVersion,
|
||||
Layer_diff_FeatureVersion,
|
||||
Vulnerability,
|
||||
Vulnerability_FixedIn_Feature,
|
||||
Vulnerability_Affects_FeatureVersion,
|
||||
Vulnerability_Notification,
|
||||
KeyValue,
|
||||
Lock
|
||||
CASCADE;
|
75
database/pgsql/namespace.go
Normal file
75
database/pgsql/namespace.go
Normal file
@ -0,0 +1,75 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
)
|
||||
|
||||
func (pgSQL *pgSQL) insertNamespace(namespace database.Namespace) (int, error) {
|
||||
if namespace.Name == "" {
|
||||
return 0, cerrors.NewBadRequestError("could not find/insert invalid Namespace")
|
||||
}
|
||||
|
||||
if pgSQL.cache != nil {
|
||||
promCacheQueriesTotal.WithLabelValues("namespace").Inc()
|
||||
if id, found := pgSQL.cache.Get("namespace:" + namespace.Name); found {
|
||||
promCacheHitsTotal.WithLabelValues("namespace").Inc()
|
||||
return id.(int), nil
|
||||
}
|
||||
}
|
||||
|
||||
// We do `defer observeQueryTime` here because we don't want to observe cached namespaces.
|
||||
defer observeQueryTime("insertNamespace", "all", time.Now())
|
||||
|
||||
var id int
|
||||
err := pgSQL.QueryRow(soiNamespace, namespace.Name).Scan(&id)
|
||||
if err != nil {
|
||||
return 0, handleError("soiNamespace", err)
|
||||
}
|
||||
|
||||
if pgSQL.cache != nil {
|
||||
pgSQL.cache.Add("namespace:"+namespace.Name, id)
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) ListNamespaces() (namespaces []database.Namespace, err error) {
|
||||
rows, err := pgSQL.Query(listNamespace)
|
||||
if err != nil {
|
||||
return namespaces, handleError("listNamespace", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var namespace database.Namespace
|
||||
|
||||
err = rows.Scan(&namespace.ID, &namespace.Name)
|
||||
if err != nil {
|
||||
return namespaces, handleError("listNamespace.Scan()", err)
|
||||
}
|
||||
|
||||
namespaces = append(namespaces, namespace)
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return namespaces, handleError("listNamespace.Rows()", err)
|
||||
}
|
||||
|
||||
return namespaces, err
|
||||
}
|
66
database/pgsql/namespace_test.go
Normal file
66
database/pgsql/namespace_test.go
Normal file
@ -0,0 +1,66 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"fmt"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestInsertNamespace(t *testing.T) {
|
||||
datastore, err := OpenForTest("InsertNamespace", false)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
defer datastore.Close()
|
||||
|
||||
// Invalid Namespace.
|
||||
id0, err := datastore.insertNamespace(database.Namespace{})
|
||||
assert.NotNil(t, err)
|
||||
assert.Zero(t, id0)
|
||||
|
||||
// Insert Namespace and ensure we can find it.
|
||||
id1, err := datastore.insertNamespace(database.Namespace{Name: "TestInsertNamespace1"})
|
||||
assert.Nil(t, err)
|
||||
id2, err := datastore.insertNamespace(database.Namespace{Name: "TestInsertNamespace1"})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, id1, id2)
|
||||
}
|
||||
|
||||
func TestListNamespace(t *testing.T) {
|
||||
datastore, err := OpenForTest("ListNamespaces", true)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
defer datastore.Close()
|
||||
|
||||
namespaces, err := datastore.ListNamespaces()
|
||||
assert.Nil(t, err)
|
||||
if assert.Len(t, namespaces, 2) {
|
||||
for _, namespace := range namespaces {
|
||||
switch namespace.Name {
|
||||
case "debian:7", "debian:8":
|
||||
continue
|
||||
default:
|
||||
assert.Error(t, fmt.Errorf("ListNamespaces should not have returned '%s'", namespace.Name))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
214
database/pgsql/notification.go
Normal file
214
database/pgsql/notification.go
Normal file
@ -0,0 +1,214 @@
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/guregu/null/zero"
|
||||
"github.com/pborman/uuid"
|
||||
)
|
||||
|
||||
// do it in tx so we won't insert/update a vuln without notification and vice-versa.
|
||||
// name and created doesn't matter.
|
||||
func createNotification(tx *sql.Tx, oldVulnerabilityID, newVulnerabilityID int) error {
|
||||
defer observeQueryTime("createNotification", "all", time.Now())
|
||||
|
||||
// Insert Notification.
|
||||
oldVulnerabilityNullableID := sql.NullInt64{Int64: int64(oldVulnerabilityID), Valid: oldVulnerabilityID != 0}
|
||||
newVulnerabilityNullableID := sql.NullInt64{Int64: int64(newVulnerabilityID), Valid: newVulnerabilityID != 0}
|
||||
_, err := tx.Exec(insertNotification, uuid.New(), oldVulnerabilityNullableID, newVulnerabilityNullableID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("insertNotification", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get one available notification name (!locked && !deleted && (!notified || notified_but_timed-out)).
|
||||
// Does not fill new/old vuln.
|
||||
func (pgSQL *pgSQL) GetAvailableNotification(renotifyInterval time.Duration) (database.VulnerabilityNotification, error) {
|
||||
defer observeQueryTime("GetAvailableNotification", "all", time.Now())
|
||||
|
||||
before := time.Now().Add(-renotifyInterval)
|
||||
row := pgSQL.QueryRow(searchNotificationAvailable, before)
|
||||
notification, err := pgSQL.scanNotification(row, false)
|
||||
|
||||
return notification, handleError("searchNotificationAvailable", err)
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) GetNotification(name string, limit int, page database.VulnerabilityNotificationPageNumber) (database.VulnerabilityNotification, database.VulnerabilityNotificationPageNumber, error) {
|
||||
defer observeQueryTime("GetNotification", "all", time.Now())
|
||||
|
||||
// Get Notification.
|
||||
notification, err := pgSQL.scanNotification(pgSQL.QueryRow(searchNotification, name), true)
|
||||
if err != nil {
|
||||
return notification, page, handleError("searchNotification", err)
|
||||
}
|
||||
|
||||
// Load vulnerabilities' LayersIntroducingVulnerability.
|
||||
page.OldVulnerability, err = pgSQL.loadLayerIntroducingVulnerability(
|
||||
notification.OldVulnerability,
|
||||
limit,
|
||||
page.OldVulnerability,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return notification, page, err
|
||||
}
|
||||
|
||||
page.NewVulnerability, err = pgSQL.loadLayerIntroducingVulnerability(
|
||||
notification.NewVulnerability,
|
||||
limit,
|
||||
page.NewVulnerability,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return notification, page, err
|
||||
}
|
||||
|
||||
return notification, page, nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) scanNotification(row *sql.Row, hasVulns bool) (database.VulnerabilityNotification, error) {
|
||||
var notification database.VulnerabilityNotification
|
||||
var created zero.Time
|
||||
var notified zero.Time
|
||||
var deleted zero.Time
|
||||
var oldVulnerabilityNullableID sql.NullInt64
|
||||
var newVulnerabilityNullableID sql.NullInt64
|
||||
|
||||
// Scan notification.
|
||||
if hasVulns {
|
||||
err := row.Scan(
|
||||
¬ification.ID,
|
||||
¬ification.Name,
|
||||
&created,
|
||||
¬ified,
|
||||
&deleted,
|
||||
&oldVulnerabilityNullableID,
|
||||
&newVulnerabilityNullableID,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return notification, err
|
||||
}
|
||||
} else {
|
||||
err := row.Scan(¬ification.ID, ¬ification.Name, &created, ¬ified, &deleted)
|
||||
|
||||
if err != nil {
|
||||
return notification, err
|
||||
}
|
||||
}
|
||||
|
||||
notification.Created = created.Time
|
||||
notification.Notified = notified.Time
|
||||
notification.Deleted = deleted.Time
|
||||
|
||||
if hasVulns {
|
||||
if oldVulnerabilityNullableID.Valid {
|
||||
vulnerability, err := pgSQL.findVulnerabilityByIDWithDeleted(int(oldVulnerabilityNullableID.Int64))
|
||||
if err != nil {
|
||||
return notification, err
|
||||
}
|
||||
|
||||
notification.OldVulnerability = &vulnerability
|
||||
}
|
||||
|
||||
if newVulnerabilityNullableID.Valid {
|
||||
vulnerability, err := pgSQL.findVulnerabilityByIDWithDeleted(int(newVulnerabilityNullableID.Int64))
|
||||
if err != nil {
|
||||
return notification, err
|
||||
}
|
||||
|
||||
notification.NewVulnerability = &vulnerability
|
||||
}
|
||||
}
|
||||
|
||||
return notification, nil
|
||||
}
|
||||
|
||||
// Fills Vulnerability.LayersIntroducingVulnerability.
|
||||
// limit -1: won't do anything
|
||||
// limit 0: will just get the startID of the second page
|
||||
func (pgSQL *pgSQL) loadLayerIntroducingVulnerability(vulnerability *database.Vulnerability, limit, startID int) (int, error) {
|
||||
tf := time.Now()
|
||||
|
||||
if vulnerability == nil {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
// A startID equals to -1 means that we reached the end already.
|
||||
if startID == -1 || limit == -1 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
// We do `defer observeQueryTime` here because we don't want to observe invalid calls.
|
||||
defer observeQueryTime("loadLayerIntroducingVulnerability", "all", tf)
|
||||
|
||||
// Query with limit + 1, the last item will be used to know the next starting ID.
|
||||
rows, err := pgSQL.Query(searchNotificationLayerIntroducingVulnerability,
|
||||
vulnerability.ID, startID, limit+1)
|
||||
if err != nil {
|
||||
return 0, handleError("searchVulnerabilityFixedInFeature", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var layers []database.Layer
|
||||
for rows.Next() {
|
||||
var layer database.Layer
|
||||
|
||||
if err := rows.Scan(&layer.ID, &layer.Name); err != nil {
|
||||
return -1, handleError("searchNotificationLayerIntroducingVulnerability.Scan()", err)
|
||||
}
|
||||
|
||||
layers = append(layers, layer)
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return -1, handleError("searchNotificationLayerIntroducingVulnerability.Rows()", err)
|
||||
}
|
||||
|
||||
size := limit
|
||||
if len(layers) < limit {
|
||||
size = len(layers)
|
||||
}
|
||||
vulnerability.LayersIntroducingVulnerability = layers[:size]
|
||||
|
||||
nextID := -1
|
||||
if len(layers) > limit {
|
||||
nextID = layers[limit].ID
|
||||
}
|
||||
|
||||
return nextID, nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) SetNotificationNotified(name string) error {
|
||||
defer observeQueryTime("SetNotificationNotified", "all", time.Now())
|
||||
|
||||
if _, err := pgSQL.Exec(updatedNotificationNotified, name); err != nil {
|
||||
return handleError("updatedNotificationNotified", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) DeleteNotification(name string) error {
|
||||
defer observeQueryTime("DeleteNotification", "all", time.Now())
|
||||
|
||||
result, err := pgSQL.Exec(removeNotification, name)
|
||||
if err != nil {
|
||||
return handleError("removeNotification", err)
|
||||
}
|
||||
|
||||
affected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return handleError("removeNotification.RowsAffected()", err)
|
||||
}
|
||||
|
||||
if affected <= 0 {
|
||||
return cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
209
database/pgsql/notification_test.go
Normal file
209
database/pgsql/notification_test.go
Normal file
@ -0,0 +1,209 @@
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestNotification exercises the full lifecycle of vulnerability notifications
// against a live test database: creation on vulnerability insert, the renotify
// timeout, pagination of introducing layers, update (old/new vulnerability
// pairing) and deletion semantics.
func TestNotification(t *testing.T) {
	datastore, err := OpenForTest("Notification", false)
	if err != nil {
		t.Error(err)
		return
	}
	defer datastore.Close()

	// Try to get a notification when there is none.
	_, err = datastore.GetAvailableNotification(time.Second)
	assert.Equal(t, cerrors.ErrNotFound, err)

	// Create some data.
	f1 := database.Feature{
		Name:      "TestNotificationFeature1",
		Namespace: database.Namespace{Name: "TestNotificationNamespace1"},
	}

	f2 := database.Feature{
		Name:      "TestNotificationFeature2",
		Namespace: database.Namespace{Name: "TestNotificationNamespace1"},
	}

	l1 := database.Layer{
		Name: "TestNotificationLayer1",
		Features: []database.FeatureVersion{
			database.FeatureVersion{
				Feature: f1,
				Version: types.NewVersionUnsafe("0.1"),
			},
		},
	}

	l2 := database.Layer{
		Name: "TestNotificationLayer2",
		Features: []database.FeatureVersion{
			database.FeatureVersion{
				Feature: f1,
				Version: types.NewVersionUnsafe("0.2"),
			},
		},
	}

	l3 := database.Layer{
		Name: "TestNotificationLayer3",
		Features: []database.FeatureVersion{
			database.FeatureVersion{
				Feature: f1,
				Version: types.NewVersionUnsafe("0.3"),
			},
		},
	}

	l4 := database.Layer{
		Name: "TestNotificationLayer4",
		Features: []database.FeatureVersion{
			database.FeatureVersion{
				Feature: f2,
				Version: types.NewVersionUnsafe("0.1"),
			},
		},
	}

	if !assert.Nil(t, datastore.InsertLayer(l1)) ||
		!assert.Nil(t, datastore.InsertLayer(l2)) ||
		!assert.Nil(t, datastore.InsertLayer(l3)) ||
		!assert.Nil(t, datastore.InsertLayer(l4)) {
		return
	}

	// Insert a new vulnerability that is introduced by three layers.
	v1 := database.Vulnerability{
		Name:        "TestNotificationVulnerability1",
		Namespace:   f1.Namespace,
		Description: "TestNotificationDescription1",
		Link:        "TestNotificationLink1",
		Severity:    "Unknown",
		FixedIn: []database.FeatureVersion{
			database.FeatureVersion{
				Feature: f1,
				Version: types.NewVersionUnsafe("1.0"),
			},
		},
	}
	assert.Nil(t, datastore.insertVulnerability(v1, false, true))

	// Get the notification associated to the previously inserted vulnerability.
	notification, err := datastore.GetAvailableNotification(time.Second)

	if assert.Nil(t, err) && assert.NotEmpty(t, notification.Name) {
		// Verify the renotify behaviour.
		if assert.Nil(t, datastore.SetNotificationNotified(notification.Name)) {
			// Just notified: it must not be available again within the timeout.
			_, err := datastore.GetAvailableNotification(time.Second)
			assert.Equal(t, cerrors.ErrNotFound, err)

			// After the renotify interval has elapsed, it becomes available again.
			time.Sleep(50 * time.Millisecond)
			notificationB, err := datastore.GetAvailableNotification(20 * time.Millisecond)
			assert.Nil(t, err)
			assert.Equal(t, notification.Name, notificationB.Name)

			datastore.SetNotificationNotified(notification.Name)
		}

		// Get notification.
		filledNotification, nextPage, err := datastore.GetNotification(notification.Name, 2, database.VulnerabilityNotificationFirstPage)
		if assert.Nil(t, err) {
			// Three introducing layers with a page size of 2: a second page must exist.
			assert.NotEqual(t, database.NoVulnerabilityNotificationPage, nextPage)
			assert.Nil(t, filledNotification.OldVulnerability)

			if assert.NotNil(t, filledNotification.NewVulnerability) {
				assert.Equal(t, v1.Name, filledNotification.NewVulnerability.Name)
				assert.Len(t, filledNotification.NewVulnerability.LayersIntroducingVulnerability, 2)
			}
		}

		// Get second page.
		filledNotification, nextPage, err = datastore.GetNotification(notification.Name, 2, nextPage)
		if assert.Nil(t, err) {
			assert.Equal(t, database.NoVulnerabilityNotificationPage, nextPage)
			assert.Nil(t, filledNotification.OldVulnerability)

			if assert.NotNil(t, filledNotification.NewVulnerability) {
				assert.Equal(t, v1.Name, filledNotification.NewVulnerability.Name)
				assert.Len(t, filledNotification.NewVulnerability.LayersIntroducingVulnerability, 1)
			}
		}

		// Delete notification.
		assert.Nil(t, datastore.DeleteNotification(notification.Name))

		_, err = datastore.GetAvailableNotification(time.Millisecond)
		assert.Equal(t, cerrors.ErrNotFound, err)
	}

	// Update a vulnerability and ensure that the old/new vulnerabilities are correct.
	v1b := v1
	v1b.Severity = types.High
	v1b.FixedIn = []database.FeatureVersion{
		database.FeatureVersion{
			Feature: f1,
			Version: types.MinVersion,
		},
		database.FeatureVersion{
			Feature: f2,
			Version: types.MaxVersion,
		},
	}

	if assert.Nil(t, datastore.insertVulnerability(v1b, false, true)) {
		notification, err = datastore.GetAvailableNotification(time.Second)
		assert.Nil(t, err)
		assert.NotEmpty(t, notification.Name)

		if assert.Nil(t, err) && assert.NotEmpty(t, notification.Name) {
			filledNotification, nextPage, err := datastore.GetNotification(notification.Name, 2, database.VulnerabilityNotificationFirstPage)
			if assert.Nil(t, err) {
				if assert.NotNil(t, filledNotification.OldVulnerability) {
					assert.Equal(t, v1.Name, filledNotification.OldVulnerability.Name)
					assert.Equal(t, v1.Severity, filledNotification.OldVulnerability.Severity)
					assert.Len(t, filledNotification.OldVulnerability.LayersIntroducingVulnerability, 2)
				}

				if assert.NotNil(t, filledNotification.NewVulnerability) {
					assert.Equal(t, v1b.Name, filledNotification.NewVulnerability.Name)
					assert.Equal(t, v1b.Severity, filledNotification.NewVulnerability.Severity)
					assert.Len(t, filledNotification.NewVulnerability.LayersIntroducingVulnerability, 1)
				}

				assert.Equal(t, -1, nextPage.NewVulnerability)
			}

			assert.Nil(t, datastore.DeleteNotification(notification.Name))
		}
	}

	// Delete a vulnerability and verify the notification.
	if assert.Nil(t, datastore.DeleteVulnerability(v1b.Namespace.Name, v1b.Name)) {
		notification, err = datastore.GetAvailableNotification(time.Second)
		assert.Nil(t, err)
		assert.NotEmpty(t, notification.Name)

		if assert.Nil(t, err) && assert.NotEmpty(t, notification.Name) {
			// A deletion notification has no new vulnerability, only an old one.
			filledNotification, _, err := datastore.GetNotification(notification.Name, 2, database.VulnerabilityNotificationFirstPage)
			if assert.Nil(t, err) {
				assert.Nil(t, filledNotification.NewVulnerability)

				if assert.NotNil(t, filledNotification.OldVulnerability) {
					assert.Equal(t, v1b.Name, filledNotification.OldVulnerability.Name)
					assert.Equal(t, v1b.Severity, filledNotification.OldVulnerability.Severity)
					assert.Len(t, filledNotification.OldVulnerability.LayersIntroducingVulnerability, 1)
				}
			}

			assert.Nil(t, datastore.DeleteNotification(notification.Name))
		}
	}
}
|
287
database/pgsql/pgsql.go
Normal file
287
database/pgsql/pgsql.go
Normal file
@ -0,0 +1,287 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package pgsql implements database.Datastore with PostgreSQL.
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"bitbucket.org/liamstask/goose/lib/goose"
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/hashicorp/golang-lru"
|
||||
"github.com/lib/pq"
|
||||
"github.com/pborman/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
	log = capnslog.NewPackageLogger("github.com/coreos/clair", "pgsql")

	// promErrorsTotal counts errors generated by PostgreSQL requests, labeled
	// by the request description passed to handleError.
	promErrorsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "clair_pgsql_errors_total",
		Help: "Number of errors that PostgreSQL requests generated.",
	}, []string{"request"})

	// promCacheHitsTotal counts cache hits, labeled by cached object kind.
	promCacheHitsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "clair_pgsql_cache_hits_total",
		Help: "Number of cache hits that the PostgreSQL backend did.",
	}, []string{"object"})

	// promCacheQueriesTotal counts all cache lookups (hits and misses).
	promCacheQueriesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "clair_pgsql_cache_queries_total",
		Help: "Number of cache queries that the PostgreSQL backend did.",
	}, []string{"object"})

	// promQueryDurationMilliseconds records query latencies, labeled by
	// query and subquery name (see observeQueryTime).
	promQueryDurationMilliseconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "clair_pgsql_query_duration_milliseconds",
		Help: "Time it takes to execute the database query.",
	}, []string{"query", "subquery"})

	// promConcurrentLockVAFV gauges how many transactions are waiting on the
	// exclusive Vulnerability_Affects_FeatureVersion lock.
	promConcurrentLockVAFV = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "clair_pgsql_concurrent_lock_vafv_total",
		Help: "Number of transactions trying to hold the exclusive Vulnerability_Affects_FeatureVersion lock.",
	})
)

// init registers every metric of this package with the default Prometheus registry.
func init() {
	prometheus.MustRegister(promErrorsTotal)
	prometheus.MustRegister(promCacheHitsTotal)
	prometheus.MustRegister(promCacheQueriesTotal)
	prometheus.MustRegister(promQueryDurationMilliseconds)
	prometheus.MustRegister(promConcurrentLockVAFV)
}
|
||||
|
||||
// Queryer is the read-query subset shared by *sql.DB and *sql.Tx, letting
// helpers run queries either inside or outside a transaction.
type Queryer interface {
	Query(query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(query string, args ...interface{}) *sql.Row
}
|
||||
|
||||
// pgSQL implements database.Datastore on top of a PostgreSQL database,
// optionally fronted by an in-memory ARC cache.
type pgSQL struct {
	*sql.DB
	cache *lru.ARCCache // nil when caching is disabled (CacheSize <= 0 in Open)
}
|
||||
|
||||
// Close closes the underlying database handle.
func (pgSQL *pgSQL) Close() {
	pgSQL.DB.Close()
}
|
||||
|
||||
func (pgSQL *pgSQL) Ping() bool {
|
||||
return pgSQL.DB.Ping() == nil
|
||||
}
|
||||
|
||||
// Open creates a Datastore backed by a PostgreSQL database.
|
||||
//
|
||||
// It will run immediately every necessary migration on the database.
|
||||
func Open(config *config.DatabaseConfig) (database.Datastore, error) {
|
||||
// Run migrations.
|
||||
if err := migrate(config.Source); err != nil {
|
||||
log.Error(err)
|
||||
return nil, database.ErrCantOpen
|
||||
}
|
||||
|
||||
// Open database.
|
||||
db, err := sql.Open("postgres", config.Source)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return nil, database.ErrCantOpen
|
||||
}
|
||||
|
||||
// Initialize cache.
|
||||
// TODO(Quentin-M): Benchmark with a simple LRU Cache.
|
||||
var cache *lru.ARCCache
|
||||
if config.CacheSize > 0 {
|
||||
cache, _ = lru.NewARC(config.CacheSize)
|
||||
}
|
||||
|
||||
return &pgSQL{DB: db, cache: cache}, nil
|
||||
}
|
||||
|
||||
// migrate runs all available migrations on a pgSQL database.
|
||||
func migrate(dataSource string) error {
|
||||
log.Info("running database migrations")
|
||||
|
||||
_, filename, _, _ := runtime.Caller(1)
|
||||
migrationDir := path.Join(path.Dir(filename), "/migrations/")
|
||||
conf := &goose.DBConf{
|
||||
MigrationsDir: migrationDir,
|
||||
Driver: goose.DBDriver{
|
||||
Name: "postgres",
|
||||
OpenStr: dataSource,
|
||||
Import: "github.com/lib/pq",
|
||||
Dialect: &goose.PostgresDialect{},
|
||||
},
|
||||
}
|
||||
|
||||
// Determine the most recent revision available from the migrations folder.
|
||||
target, err := goose.GetMostRecentDBVersion(conf.MigrationsDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Run migrations
|
||||
err = goose.RunMigrations(conf, conf.MigrationsDir, target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("database migration ran successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
// createDatabase creates a new database.
|
||||
// The dataSource parameter should not contain a dbname.
|
||||
func createDatabase(dataSource, databaseName string) error {
|
||||
// Open database.
|
||||
db, err := sql.Open("postgres", dataSource)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not open database (CreateDatabase): %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create database.
|
||||
_, err = db.Exec("CREATE DATABASE " + databaseName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create database: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dropDatabase drops an existing database.
|
||||
// The dataSource parameter should not contain a dbname.
|
||||
func dropDatabase(dataSource, databaseName string) error {
|
||||
// Open database.
|
||||
db, err := sql.Open("postgres", dataSource)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not open database (DropDatabase): %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Kill any opened connection.
|
||||
if _, err := db.Exec(`
|
||||
SELECT pg_terminate_backend(pg_stat_activity.pid)
|
||||
FROM pg_stat_activity
|
||||
WHERE pg_stat_activity.datname = $1
|
||||
AND pid <> pg_backend_pid()`, databaseName); err != nil {
|
||||
return fmt.Errorf("could not drop database: %v", err)
|
||||
}
|
||||
|
||||
// Drop database.
|
||||
if _, err = db.Exec("DROP DATABASE " + databaseName); err != nil {
|
||||
return fmt.Errorf("could not drop database: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pgSQLTest wraps pgSQL for testing purposes.
// Its Close() method drops the database.
type pgSQLTest struct {
	*pgSQL
	// dataSourceDefaultDatabase is the connection string to the default
	// "postgres" database, used to drop the test database on Close.
	dataSourceDefaultDatabase string
	// dbName is the unique, "test_"-prefixed name of the test database.
	dbName string
}
|
||||
|
||||
// OpenForTest creates a test Datastore backed by a new PostgreSQL database.
|
||||
// It creates a new unique and prefixed ("test_") database.
|
||||
// Using Close() will drop the database.
|
||||
func OpenForTest(name string, withTestData bool) (*pgSQLTest, error) {
|
||||
// Define the PostgreSQL connection strings.
|
||||
dataSource := "host=127.0.0.1 sslmode=disable user=postgres dbname="
|
||||
if dataSourceEnv := os.Getenv("CLAIR_TEST_PGSQL"); dataSourceEnv != "" {
|
||||
dataSource = dataSourceEnv + " dbname="
|
||||
}
|
||||
dbName := "test_" + strings.ToLower(name) + "_" + strings.Replace(uuid.New(), "-", "_", -1)
|
||||
dataSourceDefaultDatabase := dataSource + "postgres"
|
||||
dataSourceTestDatabase := dataSource + dbName
|
||||
|
||||
// Create database.
|
||||
if err := createDatabase(dataSourceDefaultDatabase, dbName); err != nil {
|
||||
log.Error(err)
|
||||
return nil, database.ErrCantOpen
|
||||
}
|
||||
|
||||
// Open database.
|
||||
db, err := Open(&config.DatabaseConfig{Source: dataSourceTestDatabase, CacheSize: 0})
|
||||
if err != nil {
|
||||
dropDatabase(dataSourceDefaultDatabase, dbName)
|
||||
log.Error(err)
|
||||
return nil, database.ErrCantOpen
|
||||
}
|
||||
|
||||
// Load test data if specified.
|
||||
if withTestData {
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
d, _ := ioutil.ReadFile(path.Join(path.Dir(filename)) + "/testdata/data.sql")
|
||||
_, err = db.(*pgSQL).Exec(string(d))
|
||||
if err != nil {
|
||||
dropDatabase(dataSourceDefaultDatabase, dbName)
|
||||
log.Error(err)
|
||||
return nil, database.ErrCantOpen
|
||||
}
|
||||
}
|
||||
|
||||
return &pgSQLTest{
|
||||
pgSQL: db.(*pgSQL),
|
||||
dataSourceDefaultDatabase: dataSourceDefaultDatabase,
|
||||
dbName: dbName}, nil
|
||||
}
|
||||
|
||||
// Close closes the connection to the test database and then drops it.
func (pgSQL *pgSQLTest) Close() {
	pgSQL.DB.Close()
	dropDatabase(pgSQL.dataSourceDefaultDatabase, pgSQL.dbName)
}
|
||||
|
||||
// handleError logs an error with an extra description and masks the error if it's an SQL one.
|
||||
// This ensures we never return plain SQL errors and leak anything.
|
||||
func handleError(desc string, err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
log.Errorf("%s: %v", desc, err)
|
||||
promErrorsTotal.WithLabelValues(desc).Inc()
|
||||
|
||||
if _, o := err.(*pq.Error); o || err == sql.ErrTxDone || strings.HasPrefix(err.Error(), "sql:") {
|
||||
return database.ErrBackendException
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// isErrUniqueViolation determines is the given error is a unique contraint violation.
|
||||
func isErrUniqueViolation(err error) bool {
|
||||
pqErr, ok := err.(*pq.Error)
|
||||
return ok && pqErr.Code == "23505"
|
||||
}
|
||||
|
||||
// observeQueryTime records the elapsed time since start into the query
// duration histogram, labeled by query and subquery name.
func observeQueryTime(query, subquery string, start time.Time) {
	utils.PrometheusObserveTimeMilliseconds(promQueryDurationMilliseconds.WithLabelValues(query, subquery), start)
}
|
231
database/pgsql/queries.go
Normal file
231
database/pgsql/queries.go
Normal file
@ -0,0 +1,231 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import "strconv"
|
||||
|
||||
// SQL statements used by the pgsql backend, grouped by the source file that
// executes them. All statements use positional placeholders ($1, $2, ...).
const (
	lockVulnerabilityAffects = `LOCK Vulnerability_Affects_FeatureVersion IN SHARE ROW EXCLUSIVE MODE`
	disableHashJoin          = `SET LOCAL enable_hashjoin = off`
	disableMergeJoin         = `SET LOCAL enable_mergejoin = off`

	// keyvalue.go
	updateKeyValue = `UPDATE KeyValue SET value = $1 WHERE key = $2`
	insertKeyValue = `INSERT INTO KeyValue(key, value) VALUES($1, $2)`
	searchKeyValue = `SELECT value FROM KeyValue WHERE key = $1`

	// namespace.go
	// "soi" = select-or-insert: inserts the row only when absent and returns
	// the id in either case.
	soiNamespace = `
		WITH new_namespace AS (
			INSERT INTO Namespace(name)
			SELECT CAST($1 AS VARCHAR)
			WHERE NOT EXISTS (SELECT name FROM Namespace WHERE name = $1)
			RETURNING id
		)
		SELECT id FROM Namespace WHERE name = $1
		UNION
		SELECT id FROM new_namespace`

	listNamespace = `SELECT id, name FROM Namespace`

	// feature.go
	soiFeature = `
		WITH new_feature AS (
			INSERT INTO Feature(name, namespace_id)
			SELECT CAST($1 AS VARCHAR), CAST($2 AS INTEGER)
			WHERE NOT EXISTS (SELECT id FROM Feature WHERE name = $1 AND namespace_id = $2)
			RETURNING id
		)
		SELECT id FROM Feature WHERE name = $1 AND namespace_id = $2
		UNION
		SELECT id FROM new_feature`

	// Returns a 'exi'/'new' tag so callers can tell whether the row existed.
	soiFeatureVersion = `
		WITH new_featureversion AS (
			INSERT INTO FeatureVersion(feature_id, version)
			SELECT CAST($1 AS INTEGER), CAST($2 AS VARCHAR)
			WHERE NOT EXISTS (SELECT id FROM FeatureVersion WHERE feature_id = $1 AND version = $2)
			RETURNING id
		)
		SELECT 'exi', id FROM FeatureVersion WHERE feature_id = $1 AND version = $2
		UNION
		SELECT 'new', id FROM new_featureversion`

	searchVulnerabilityFixedInFeature = `
		SELECT id, vulnerability_id, version FROM Vulnerability_FixedIn_Feature
		WHERE feature_id = $1`

	insertVulnerabilityAffectsFeatureVersion = `
		INSERT INTO Vulnerability_Affects_FeatureVersion(vulnerability_id,
		featureversion_id, fixedin_id) VALUES($1, $2, $3)`

	// layer.go
	searchLayer = `
		SELECT l.id, l.name, l.engineversion, p.id, p.name, n.id, n.name
		FROM Layer l
		LEFT JOIN Layer p ON l.parent_id = p.id
		LEFT JOIN Namespace n ON l.namespace_id = n.id
		WHERE l.name = $1;`

	// Walks the layer's ancestry (recursive CTE on parent_id) and returns the
	// feature-version diffs ordered from the root ancestor down.
	searchLayerFeatureVersion = `
		WITH RECURSIVE layer_tree(id, name, parent_id, depth, path, cycle) AS(
			SELECT l.id, l.name, l.parent_id, 1, ARRAY[l.id], false
			FROM Layer l
			WHERE l.id = $1
		UNION ALL
			SELECT l.id, l.name, l.parent_id, lt.depth + 1, path || l.id, l.id = ANY(path)
			FROM Layer l, layer_tree lt
			WHERE l.id = lt.parent_id
		)
		SELECT ldf.featureversion_id, ldf.modification, fn.id, fn.name, f.id, f.name, fv.id, fv.version, ltree.id, ltree.name
		FROM Layer_diff_FeatureVersion ldf
		JOIN (
			SELECT row_number() over (ORDER BY depth DESC), id, name FROM layer_tree
		) AS ltree (ordering, id, name) ON ldf.layer_id = ltree.id, FeatureVersion fv, Feature f, Namespace fn
		WHERE ldf.featureversion_id = fv.id AND fv.feature_id = f.id AND f.namespace_id = fn.id
		ORDER BY ltree.ordering`

	searchFeatureVersionVulnerability = `
		SELECT vafv.featureversion_id, v.id, v.name, v.description, v.link, v.severity, v.metadata,
			vn.name, vfif.version
		FROM Vulnerability_Affects_FeatureVersion vafv, Vulnerability v,
			Namespace vn, Vulnerability_FixedIn_Feature vfif
		WHERE vafv.featureversion_id = ANY($1::integer[])
			AND vfif.vulnerability_id = v.id
			AND vafv.fixedin_id = vfif.id
			AND v.namespace_id = vn.id
			AND v.deleted_at IS NULL`

	insertLayer = `
		INSERT INTO Layer(name, engineversion, parent_id, namespace_id, created_at)
		VALUES($1, $2, $3, $4, CURRENT_TIMESTAMP)
		RETURNING id`

	updateLayer = `UPDATE LAYER SET engineversion = $2, namespace_id = $3 WHERE id = $1`

	removeLayerDiffFeatureVersion = `
		DELETE FROM Layer_diff_FeatureVersion
		WHERE layer_id = $1`

	insertLayerDiffFeatureVersion = `
		INSERT INTO Layer_diff_FeatureVersion(layer_id, featureversion_id, modification)
			SELECT $1, fv.id, $2
			FROM FeatureVersion fv
			WHERE fv.id = ANY($3::integer[])`

	removeLayer = `DELETE FROM Layer WHERE name = $1`

	// lock.go
	insertLock        = `INSERT INTO Lock(name, owner, until) VALUES($1, $2, $3)`
	searchLock        = `SELECT owner, until FROM Lock WHERE name = $1`
	updateLock        = `UPDATE Lock SET until = $3 WHERE name = $1 AND owner = $2`
	removeLock        = `DELETE FROM Lock WHERE name = $1 AND owner = $2`
	removeLockExpired = `DELETE FROM LOCK WHERE until < CURRENT_TIMESTAMP`

	// vulnerability.go
	searchVulnerabilityBase = `
	  SELECT v.id, v.name, n.id, n.name, v.description, v.link, v.severity, v.metadata
	  FROM Vulnerability v JOIN Namespace n ON v.namespace_id = n.id`
	searchVulnerabilityForUpdate          = ` FOR UPDATE OF v`
	searchVulnerabilityByNamespaceAndName = ` WHERE n.name = $1 AND v.name = $2 AND v.deleted_at IS NULL`
	searchVulnerabilityByID               = ` WHERE v.id = $1`

	searchVulnerabilityFixedIn = `
		SELECT vfif.version, f.id, f.Name
		FROM Vulnerability_FixedIn_Feature vfif JOIN Feature f ON vfif.feature_id = f.id
		WHERE vfif.vulnerability_id = $1`

	insertVulnerability = `
		INSERT INTO Vulnerability(namespace_id, name, description, link, severity, metadata, created_at)
		VALUES($1, $2, $3, $4, $5, $6, CURRENT_TIMESTAMP)
		RETURNING id`

	insertVulnerabilityFixedInFeature = `
		INSERT INTO Vulnerability_FixedIn_Feature(vulnerability_id, feature_id, version)
		VALUES($1, $2, $3)
		RETURNING id`

	searchFeatureVersionByFeature = `SELECT id, version FROM FeatureVersion WHERE feature_id = $1`

	// Soft delete: rows are only stamped with deleted_at, never removed.
	removeVulnerability = `
		UPDATE Vulnerability
		SET deleted_at = CURRENT_TIMESTAMP
		WHERE namespace_id = (SELECT id FROM Namespace WHERE name = $1)
			AND name = $2
			AND deleted_at IS NULL
		RETURNING id`

	// notification.go
	insertNotification = `
		INSERT INTO Vulnerability_Notification(name, created_at, old_vulnerability_id, new_vulnerability_id)
		VALUES($1, CURRENT_TIMESTAMP, $2, $3)`

	updatedNotificationNotified = `
		UPDATE Vulnerability_Notification
		SET notified_at = CURRENT_TIMESTAMP
		WHERE name = $1`

	// Soft delete, mirroring removeVulnerability.
	removeNotification = `
		UPDATE Vulnerability_Notification
		SET deleted_at = CURRENT_TIMESTAMP
		WHERE name = $1`

	// Picks one random notification that is due (never notified, or notified
	// before $1) and not currently locked by a notifier.
	searchNotificationAvailable = `
		SELECT id, name, created_at, notified_at, deleted_at
		FROM Vulnerability_Notification
		WHERE (notified_at IS NULL OR notified_at < $1)
			AND deleted_at IS NULL
			AND name NOT IN (SELECT name FROM Lock)
		ORDER BY Random()
		LIMIT 1`

	searchNotification = `
		SELECT id, name, created_at, notified_at, deleted_at, old_vulnerability_id, new_vulnerability_id
		FROM Vulnerability_Notification
		WHERE name = $1`

	// Pages by layer id: $2 is the first id of the page, $3 the page size (+1
	// to discover the next page's start; see loadLayerIntroducingVulnerability).
	searchNotificationLayerIntroducingVulnerability = `
		SELECT l.ID, l.name
		FROM Vulnerability v, Vulnerability_Affects_FeatureVersion vafv, FeatureVersion fv, Layer_diff_FeatureVersion ldfv, Layer l
		WHERE v.id = $1
			AND v.id = vafv.vulnerability_id
			AND vafv.featureversion_id = fv.id
			AND fv.id = ldfv.featureversion_id
			AND ldfv.modification = 'add'
			AND ldfv.layer_id = l.id
			AND l.id >= $2
		ORDER BY l.ID
		LIMIT $3`

	// complex_test.go
	searchComplexTestFeatureVersionAffects = `
		SELECT v.name
		FROM FeatureVersion fv
			LEFT JOIN Vulnerability_Affects_FeatureVersion vaf ON fv.id = vaf.featureversion_id
			JOIN Vulnerability v ON vaf.vulnerability_id = v.id
		WHERE featureversion_id = $1`
)
|
||||
|
||||
// buildInputArray constructs a PostgreSQL input array from the specified integers.
// Useful to use the `= ANY($1::integer[])` syntax that let us use a IN clause while using
// a single placeholder.
//
// An empty or nil slice yields the empty array "{}"; the previous
// implementation indexed ints[len(ints)-1] unconditionally and panicked on
// empty input.
func buildInputArray(ints []int) string {
	if len(ints) == 0 {
		return "{}"
	}

	str := "{"
	for i, n := range ints {
		if i > 0 {
			str += ","
		}
		str += strconv.Itoa(n)
	}
	return str + "}"
}
|
65
database/pgsql/testdata/data.sql
vendored
Normal file
65
database/pgsql/testdata/data.sql
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
-- Copyright 2015 clair authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.

-- Test fixture loaded by OpenForTest(withTestData=true): two namespaces, a
-- small layer tree, and one vulnerability, with explicit ids so the setval
-- calls at the bottom can realign the sequences afterwards.

INSERT INTO namespace (id, name) VALUES
	(1, 'debian:7'),
	(2, 'debian:8');

INSERT INTO feature (id, namespace_id, name) VALUES
	(1, 1, 'wechat'),
	(2, 1, 'openssl'),
	(4, 1, 'libssl'),
	(3, 2, 'openssl');

INSERT INTO featureversion (id, feature_id, version) VALUES
	(1, 1, '0.5'),
	(2, 2, '1.0'),
	(3, 2, '2.0'),
	(4, 3, '1.0');

INSERT INTO layer (id, name, engineversion, parent_id, namespace_id) VALUES
	(1, 'layer-0', 1, NULL, NULL),
	(2, 'layer-1', 1, 1, 1),
	(3, 'layer-2', 1, 2, 1),
	(4, 'layer-3a', 1, 3, 1),
	(5, 'layer-3b', 1, 3, 2);

INSERT INTO layer_diff_featureversion (id, layer_id, featureversion_id, modification) VALUES
	(1, 2, 1, 'add'),
	(2, 2, 2, 'add'),
	(3, 3, 2, 'del'), -- layer-2: Update Debian:7 OpenSSL 1.0 -> 2.0
	(4, 3, 3, 'add'), -- ^
	(5, 5, 3, 'del'), -- layer-3b: Delete Debian:7 OpenSSL 2.0
	(6, 5, 4, 'add'); -- layer-3b: Add Debian:8 OpenSSL 1.0

INSERT INTO vulnerability (id, namespace_id, name, description, link, severity) VALUES
	(1, 1, 'CVE-OPENSSL-1-DEB7', 'A vulnerability affecting OpenSSL < 2.0 on Debian 7.0', 'http://google.com/#q=CVE-OPENSSL-1-DEB7', 'High'),
	(2, 1, 'CVE-NOPE', 'A vulnerability affecting nothing', '', 'Unknown');

INSERT INTO vulnerability_fixedin_feature (id, vulnerability_id, feature_id, version) VALUES
	(1, 1, 2, '2.0'),
	(2, 1, 4, '1.9-abc');

INSERT INTO vulnerability_affects_featureversion (id, vulnerability_id, featureversion_id, fixedin_id) VALUES
	(1, 1, 2, 1); -- CVE-OPENSSL-1-DEB7 affects Debian:7 OpenSSL 1.0

SELECT pg_catalog.setval(pg_get_serial_sequence('namespace', 'id'), (SELECT MAX(id) FROM namespace)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('feature', 'id'), (SELECT MAX(id) FROM feature)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('featureversion', 'id'), (SELECT MAX(id) FROM featureversion)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('layer', 'id'), (SELECT MAX(id) FROM layer)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('layer_diff_featureversion', 'id'), (SELECT MAX(id) FROM layer_diff_featureversion)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability', 'id'), (SELECT MAX(id) FROM vulnerability)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability_fixedin_feature', 'id'), (SELECT MAX(id) FROM vulnerability_fixedin_feature)+1);
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability_affects_featureversion', 'id'), (SELECT MAX(id) FROM vulnerability_affects_featureversion)+1);
-- NOTE(review): the next line duplicates the 'vulnerability' setval above; it
-- is harmless but looks redundant -- candidate for removal.
SELECT pg_catalog.setval(pg_get_serial_sequence('vulnerability', 'id'), (SELECT MAX(id) FROM vulnerability)+1);
514
database/pgsql/vulnerability.go
Normal file
514
database/pgsql/vulnerability.go
Normal file
@ -0,0 +1,514 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/guregu/null/zero"
|
||||
)
|
||||
|
||||
// FindVulnerability returns the vulnerability matching the given namespace and
// name, including its FixedIn FeatureVersions, or cerrors.ErrNotFound if none
// exists. It does not lock the row (forUpdate=false).
func (pgSQL *pgSQL) FindVulnerability(namespaceName, name string) (database.Vulnerability, error) {
	return findVulnerability(pgSQL, namespaceName, name, false)
}
|
||||
|
||||
// findVulnerability fetches a single vulnerability by namespace and name using
// the given Queryer (either the pool or an open transaction). When forUpdate is
// true, the searchVulnerabilityForUpdate suffix is appended to the query —
// presumably a FOR UPDATE clause so callers in a transaction hold the row lock.
func findVulnerability(queryer Queryer, namespaceName, name string, forUpdate bool) (database.Vulnerability, error) {
	defer observeQueryTime("findVulnerability", "all", time.Now())

	// The query name mirrors the concatenated query fragments so metrics and
	// error messages identify the exact SQL that ran.
	queryName := "searchVulnerabilityBase+searchVulnerabilityByNamespaceAndName"
	query := searchVulnerabilityBase + searchVulnerabilityByNamespaceAndName
	if forUpdate {
		queryName = queryName + "+searchVulnerabilityForUpdate"
		query = query + searchVulnerabilityForUpdate
	}

	return scanVulnerability(queryer, queryName, queryer.QueryRow(query, namespaceName, name))
}
|
||||
|
||||
// findVulnerabilityByIDWithDeleted fetches a vulnerability by its primary key.
// Per its name, the query does not filter out rows marked as deleted — TODO
// confirm against searchVulnerabilityByID's SQL.
func (pgSQL *pgSQL) findVulnerabilityByIDWithDeleted(id int) (database.Vulnerability, error) {
	defer observeQueryTime("findVulnerabilityByIDWithDeleted", "all", time.Now())

	queryName := "searchVulnerabilityBase+searchVulnerabilityByID"
	query := searchVulnerabilityBase + searchVulnerabilityByID

	return scanVulnerability(pgSQL, queryName, pgSQL.QueryRow(query, id))
}
|
||||
|
||||
// scanVulnerability reads one vulnerability row from vulnerabilityRow, then
// runs a second query to load its FixedIn FeatureVersion list. queryName is
// used only to label errors and metrics.
func scanVulnerability(queryer Queryer, queryName string, vulnerabilityRow *sql.Row) (database.Vulnerability, error) {
	var vulnerability database.Vulnerability

	// Column order must match searchVulnerabilityBase's SELECT list.
	err := vulnerabilityRow.Scan(
		&vulnerability.ID,
		&vulnerability.Name,
		&vulnerability.Namespace.ID,
		&vulnerability.Namespace.Name,
		&vulnerability.Description,
		&vulnerability.Link,
		&vulnerability.Severity,
		&vulnerability.Metadata,
	)

	if err != nil {
		return vulnerability, handleError(queryName+".Scan()", err)
	}

	// A zero ID means the query matched no real vulnerability; surface it as
	// not-found rather than returning an empty struct.
	if vulnerability.ID == 0 {
		return vulnerability, cerrors.ErrNotFound
	}

	// Query the FixedIn FeatureVersion now.
	rows, err := queryer.Query(searchVulnerabilityFixedIn, vulnerability.ID)
	if err != nil {
		return vulnerability, handleError("searchVulnerabilityFixedIn.Scan()", err)
	}
	defer rows.Close()

	for rows.Next() {
		// zero.* types tolerate NULL columns (a vulnerability may have no fixes).
		var featureVersionID zero.Int
		var featureVersionVersion zero.String
		var featureVersionFeatureName zero.String

		err := rows.Scan(
			&featureVersionVersion,
			&featureVersionID,
			&featureVersionFeatureName,
		)

		if err != nil {
			return vulnerability, handleError("searchVulnerabilityFixedIn.Scan()", err)
		}

		// Skip NULL rows: they represent a vulnerability with no FixedIn entry.
		if !featureVersionID.IsZero() {
			// Note that the ID we fill in featureVersion is actually a Feature ID, and not
			// a FeatureVersion ID.
			featureVersion := database.FeatureVersion{
				Model: database.Model{ID: int(featureVersionID.Int64)},
				Feature: database.Feature{
					Model:     database.Model{ID: int(featureVersionID.Int64)},
					Namespace: vulnerability.Namespace,
					Name:      featureVersionFeatureName.String,
				},
				Version: types.NewVersionUnsafe(featureVersionVersion.String),
			}
			vulnerability.FixedIn = append(vulnerability.FixedIn, featureVersion)
		}
	}

	// Check for iteration errors that rows.Next() swallowed.
	if err := rows.Err(); err != nil {
		return vulnerability, handleError("searchVulnerabilityFixedIn.Rows()", err)
	}

	return vulnerability, nil
}
|
||||
|
||||
// FixedIn.Namespace are not necessary, they are overwritten by the vuln.
|
||||
// By setting the fixed version to minVersion, we can say that the vuln does'nt affect anymore.
|
||||
func (pgSQL *pgSQL) InsertVulnerabilities(vulnerabilities []database.Vulnerability, generateNotifications bool) error {
|
||||
for _, vulnerability := range vulnerabilities {
|
||||
err := pgSQL.insertVulnerability(vulnerability, false, generateNotifications)
|
||||
if err != nil {
|
||||
fmt.Printf("%#v\n", vulnerability)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) insertVulnerability(vulnerability database.Vulnerability, onlyFixedIn, generateNotification bool) error {
|
||||
tf := time.Now()
|
||||
|
||||
// Verify parameters
|
||||
if vulnerability.Name == "" || vulnerability.Namespace.Name == "" {
|
||||
return cerrors.NewBadRequestError("insertVulnerability needs at least the Name and the Namespace")
|
||||
}
|
||||
if !onlyFixedIn && !vulnerability.Severity.IsValid() {
|
||||
msg := fmt.Sprintf("could not insert a vulnerability that has an invalid Severity: %s", vulnerability.Severity)
|
||||
log.Warning(msg)
|
||||
return cerrors.NewBadRequestError(msg)
|
||||
}
|
||||
for i := 0; i < len(vulnerability.FixedIn); i++ {
|
||||
fifv := &vulnerability.FixedIn[i]
|
||||
|
||||
if fifv.Feature.Namespace.Name == "" {
|
||||
// As there is no Namespace on that FixedIn FeatureVersion, set it to the Vulnerability's
|
||||
// Namespace.
|
||||
fifv.Feature.Namespace.Name = vulnerability.Namespace.Name
|
||||
} else if fifv.Feature.Namespace.Name != vulnerability.Namespace.Name {
|
||||
msg := "could not insert an invalid vulnerability that contains FixedIn FeatureVersion that are not in the same namespace as the Vulnerability"
|
||||
log.Warning(msg)
|
||||
return cerrors.NewBadRequestError(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// We do `defer observeQueryTime` here because we don't want to observe invalid vulnerabilities.
|
||||
defer observeQueryTime("insertVulnerability", "all", tf)
|
||||
|
||||
// Begin transaction.
|
||||
tx, err := pgSQL.Begin()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("insertVulnerability.Begin()", err)
|
||||
}
|
||||
|
||||
// Find existing vulnerability and its Vulnerability_FixedIn_Features (for update).
|
||||
existingVulnerability, err := findVulnerability(tx, vulnerability.Namespace.Name, vulnerability.Name, true)
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
if onlyFixedIn {
|
||||
// Because this call tries to update FixedIn FeatureVersion, import all other data from the
|
||||
// existing one.
|
||||
if existingVulnerability.ID == 0 {
|
||||
return cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
fixedIn := vulnerability.FixedIn
|
||||
vulnerability = existingVulnerability
|
||||
vulnerability.FixedIn = fixedIn
|
||||
}
|
||||
|
||||
if existingVulnerability.ID != 0 {
|
||||
updateMetadata := vulnerability.Description != existingVulnerability.Description ||
|
||||
vulnerability.Link != existingVulnerability.Link ||
|
||||
vulnerability.Severity != existingVulnerability.Severity ||
|
||||
!reflect.DeepEqual(castMetadata(vulnerability.Metadata), existingVulnerability.Metadata)
|
||||
|
||||
// Construct the entire list of FixedIn FeatureVersion, by using the
|
||||
// the FixedIn list of the old vulnerability.
|
||||
//
|
||||
// TODO(Quentin-M): We could use !updateFixedIn to just copy FixedIn/Affects rows from the
|
||||
// existing vulnerability in order to make metadata updates much faster.
|
||||
var updateFixedIn bool
|
||||
vulnerability.FixedIn, updateFixedIn = applyFixedInDiff(existingVulnerability.FixedIn, vulnerability.FixedIn)
|
||||
|
||||
if !updateMetadata && !updateFixedIn {
|
||||
tx.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mark the old vulnerability as non latest.
|
||||
_, err = tx.Exec(removeVulnerability, vulnerability.Namespace.Name, vulnerability.Name)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("removeVulnerability", err)
|
||||
}
|
||||
} else {
|
||||
// The vulnerability is new, we don't want to have any types.MinVersion as they are only used
|
||||
// for diffing existing vulnerabilities.
|
||||
var fixedIn []database.FeatureVersion
|
||||
for _, fv := range vulnerability.FixedIn {
|
||||
if fv.Version != types.MinVersion {
|
||||
fixedIn = append(fixedIn, fv)
|
||||
}
|
||||
}
|
||||
vulnerability.FixedIn = fixedIn
|
||||
}
|
||||
|
||||
// Find or insert Vulnerability's Namespace.
|
||||
namespaceID, err := pgSQL.insertNamespace(vulnerability.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Insert vulnerability.
|
||||
err = tx.QueryRow(
|
||||
insertVulnerability,
|
||||
namespaceID,
|
||||
vulnerability.Name,
|
||||
vulnerability.Description,
|
||||
vulnerability.Link,
|
||||
&vulnerability.Severity,
|
||||
&vulnerability.Metadata,
|
||||
).Scan(&vulnerability.ID)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("insertVulnerability", err)
|
||||
}
|
||||
|
||||
// Update Vulnerability_FixedIn_Feature and Vulnerability_Affects_FeatureVersion now.
|
||||
err = pgSQL.insertVulnerabilityFixedInFeatureVersions(tx, vulnerability.ID, vulnerability.FixedIn)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a notification.
|
||||
if generateNotification {
|
||||
err = createNotification(tx, existingVulnerability.ID, vulnerability.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Commit transaction.
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("insertVulnerability.Commit()", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// castMetadata marshals the given database.MetadataMap and unmarshals it again to make sure that
|
||||
// everything has the interface{} type.
|
||||
// It is required when comparing crafted MetadataMap against MetadataMap that we get from the
|
||||
// database.
|
||||
func castMetadata(m database.MetadataMap) database.MetadataMap {
|
||||
c := make(database.MetadataMap)
|
||||
j, _ := json.Marshal(m)
|
||||
json.Unmarshal(j, &c)
|
||||
return c
|
||||
}
|
||||
|
||||
// applyFixedInDiff applies a FeatureVersion diff on a FeatureVersion list and returns the result.
// Entries are keyed by Feature name:
//   - a name present only in diff is added, unless its Version is types.MinVersion;
//   - a name present in both is deleted when the diff Version is types.MinVersion,
//     or replaced when the versions differ.
// The boolean reports whether the result differs from currentList. Note that
// the result is rebuilt from a map, so its element order is unspecified.
func applyFixedInDiff(currentList, diff []database.FeatureVersion) ([]database.FeatureVersion, bool) {
	currentMap, currentNames := createFeatureVersionNameMap(currentList)
	diffMap, diffNames := createFeatureVersionNameMap(diff)

	// addedNames: in diff but not current; inBothNames: in both lists.
	addedNames := utils.CompareStringLists(diffNames, currentNames)
	inBothNames := utils.CompareStringListsInBoth(diffNames, currentNames)

	different := false

	for _, name := range addedNames {
		if diffMap[name].Version == types.MinVersion {
			// MinVersion only makes sense when a Feature is already fixed in some version,
			// in which case we would be in the "inBothNames".
			continue
		}

		currentMap[name] = diffMap[name]
		different = true
	}

	for _, name := range inBothNames {
		fv := diffMap[name]

		if fv.Version == types.MinVersion {
			// MinVersion means that the Feature doesn't affect the Vulnerability anymore.
			delete(currentMap, name)
			different = true
		} else if fv.Version != currentMap[name].Version {
			// The version got updated.
			currentMap[name] = diffMap[name]
			different = true
		}
	}

	// Convert currentMap to a slice and return it.
	var newList []database.FeatureVersion
	for _, fv := range currentMap {
		newList = append(newList, fv)
	}

	return newList, different
}
|
||||
|
||||
func createFeatureVersionNameMap(features []database.FeatureVersion) (map[string]database.FeatureVersion, []string) {
|
||||
m := make(map[string]database.FeatureVersion, 0)
|
||||
s := make([]string, 0, len(features))
|
||||
|
||||
for i := 0; i < len(features); i++ {
|
||||
featureVersion := features[i]
|
||||
m[featureVersion.Feature.Name] = featureVersion
|
||||
s = append(s, featureVersion.Feature.Name)
|
||||
}
|
||||
|
||||
return m, s
|
||||
}
|
||||
|
||||
// insertVulnerabilityFixedInFeatureVersions populates Vulnerability_FixedIn_Feature for the given
// vulnerability with the specified database.FeatureVersion list and uses
// linkVulnerabilityToFeatureVersions to propagate the changes on Vulnerability_FixedIn_Feature to
// Vulnerability_Affects_FeatureVersion.
// The caller owns tx and is expected to roll back on a returned error.
func (pgSQL *pgSQL) insertVulnerabilityFixedInFeatureVersions(tx *sql.Tx, vulnerabilityID int, fixedIn []database.FeatureVersion) error {
	defer observeQueryTime("insertVulnerabilityFixedInFeatureVersions", "all", time.Now())

	// Insert or find the Features.
	// TODO(Quentin-M): Batch me.
	var err error
	var features []*database.Feature
	for i := 0; i < len(fixedIn); i++ {
		// Collect pointers so the resolved IDs are written back into fixedIn.
		features = append(features, &fixedIn[i].Feature)
	}
	for _, feature := range features {
		if feature.ID == 0 {
			if feature.ID, err = pgSQL.insertFeature(*feature); err != nil {
				return err
			}
		}
	}

	// Lock Vulnerability_Affects_FeatureVersion exclusively.
	// We want to prevent InsertFeatureVersion to modify it.
	promConcurrentLockVAFV.Inc()
	defer promConcurrentLockVAFV.Dec()
	t := time.Now()
	_, err = tx.Exec(lockVulnerabilityAffects)
	observeQueryTime("insertVulnerability", "lock", t)

	if err != nil {
		// NOTE(review): this Rollback is redundant — the caller also rolls back
		// on error — but a second Rollback on a finished tx is harmless.
		tx.Rollback()
		return handleError("insertVulnerability.lockVulnerabilityAffects", err)
	}

	for _, fv := range fixedIn {
		var fixedInID int

		// Insert Vulnerability_FixedIn_Feature.
		err = tx.QueryRow(
			insertVulnerabilityFixedInFeature,
			vulnerabilityID, fv.Feature.ID,
			&fv.Version,
		).Scan(&fixedInID)

		if err != nil {
			return handleError("insertVulnerabilityFixedInFeature", err)
		}

		// Insert Vulnerability_Affects_FeatureVersion.
		err = linkVulnerabilityToFeatureVersions(tx, fixedInID, vulnerabilityID, fv.Feature.ID, fv.Version)
		if err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// linkVulnerabilityToFeatureVersions inserts one
// Vulnerability_Affects_FeatureVersion row per FeatureVersion of the given
// Feature whose version compares strictly lower than fixedInVersion.
func linkVulnerabilityToFeatureVersions(tx *sql.Tx, fixedInID, vulnerabilityID, featureID int, fixedInVersion types.Version) error {
	// Find every FeatureVersions of the Feature that the vulnerability affects.
	// TODO(Quentin-M): LIMIT
	rows, err := tx.Query(searchFeatureVersionByFeature, featureID)
	if err != nil {
		return handleError("searchFeatureVersionByFeature", err)
	}
	defer rows.Close()

	var affecteds []database.FeatureVersion
	for rows.Next() {
		var affected database.FeatureVersion

		err := rows.Scan(&affected.ID, &affected.Version)
		if err != nil {
			return handleError("searchFeatureVersionByFeature.Scan()", err)
		}

		if affected.Version.Compare(fixedInVersion) < 0 {
			// The version of the FeatureVersion is lower than the fixed version of this vulnerability,
			// thus, this FeatureVersion is affected by it.
			affecteds = append(affecteds, affected)
		}
	}
	if err = rows.Err(); err != nil {
		return handleError("searchFeatureVersionByFeature.Rows()", err)
	}
	// Close the result set before issuing the inserts below; the deferred
	// Close above then becomes a harmless no-op.
	rows.Close()

	// Insert into Vulnerability_Affects_FeatureVersion.
	for _, affected := range affecteds {
		// TODO(Quentin-M): Batch me.
		_, err := tx.Exec(insertVulnerabilityAffectsFeatureVersion, vulnerabilityID,
			affected.ID, fixedInID)
		if err != nil {
			return handleError("insertVulnerabilityAffectsFeatureVersion", err)
		}
	}

	return nil
}
|
||||
|
||||
// InsertVulnerabilityFixes adds or updates the given FixedIn FeatureVersions
// on an existing vulnerability, leaving its other fields untouched
// (onlyFixedIn=true) and generating a notification for the change.
func (pgSQL *pgSQL) InsertVulnerabilityFixes(vulnerabilityNamespace, vulnerabilityName string, fixes []database.FeatureVersion) error {
	defer observeQueryTime("InsertVulnerabilityFixes", "all", time.Now())

	// Only Name/Namespace/FixedIn matter here; everything else is imported
	// from the stored vulnerability by insertVulnerability.
	v := database.Vulnerability{
		Name: vulnerabilityName,
		Namespace: database.Namespace{
			Name: vulnerabilityNamespace,
		},
		FixedIn: fixes,
	}

	return pgSQL.insertVulnerability(v, true, true)
}
|
||||
|
||||
// DeleteVulnerabilityFix removes a single fix from a vulnerability by
// re-inserting it with Version set to types.MinVersion, which applyFixedInDiff
// interprets as "this Feature no longer affects the vulnerability".
func (pgSQL *pgSQL) DeleteVulnerabilityFix(vulnerabilityNamespace, vulnerabilityName, featureName string) error {
	defer observeQueryTime("DeleteVulnerabilityFix", "all", time.Now())

	v := database.Vulnerability{
		Name: vulnerabilityName,
		Namespace: database.Namespace{
			Name: vulnerabilityNamespace,
		},
		FixedIn: []database.FeatureVersion{
			database.FeatureVersion{
				Feature: database.Feature{
					Name: featureName,
					Namespace: database.Namespace{
						Name: vulnerabilityNamespace,
					},
				},
				// MinVersion is the "remove this fix" sentinel.
				Version: types.MinVersion,
			},
		},
	}

	return pgSQL.insertVulnerability(v, true, true)
}
|
||||
|
||||
func (pgSQL *pgSQL) DeleteVulnerability(namespaceName, name string) error {
|
||||
defer observeQueryTime("DeleteVulnerability", "all", time.Now())
|
||||
|
||||
// Begin transaction.
|
||||
tx, err := pgSQL.Begin()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("DeleteVulnerability.Begin()", err)
|
||||
}
|
||||
|
||||
var vulnerabilityID int
|
||||
err = tx.QueryRow(removeVulnerability, namespaceName, name).Scan(&vulnerabilityID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("removeVulnerability", err)
|
||||
}
|
||||
|
||||
// Create a notification.
|
||||
err = createNotification(tx, vulnerabilityID, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit transaction.
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("DeleteVulnerability.Commit()", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
276
database/pgsql/vulnerability_test.go
Normal file
276
database/pgsql/vulnerability_test.go
Normal file
@ -0,0 +1,276 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestFindVulnerability exercises FindVulnerability against the loaded SQL
// fixture: a miss, a fully populated vulnerability, and one whose link,
// severity and FixedIn list are defaulted.
func TestFindVulnerability(t *testing.T) {
	// true: load the fixture data.
	datastore, err := OpenForTest("FindVulnerability", true)
	if err != nil {
		t.Error(err)
		return
	}
	defer datastore.Close()

	// Find a vulnerability that does not exist.
	_, err = datastore.FindVulnerability("", "")
	assert.Equal(t, cerrors.ErrNotFound, err)

	// Find a normal vulnerability.
	v1 := database.Vulnerability{
		Name:        "CVE-OPENSSL-1-DEB7",
		Description: "A vulnerability affecting OpenSSL < 2.0 on Debian 7.0",
		Link:        "http://google.com/#q=CVE-OPENSSL-1-DEB7",
		Severity:    types.High,
		Namespace:   database.Namespace{Name: "debian:7"},
		FixedIn: []database.FeatureVersion{
			database.FeatureVersion{
				Feature: database.Feature{Name: "openssl"},
				Version: types.NewVersionUnsafe("2.0"),
			},
			database.FeatureVersion{
				Feature: database.Feature{Name: "libssl"},
				Version: types.NewVersionUnsafe("1.9-abc"),
			},
		},
	}

	v1f, err := datastore.FindVulnerability("debian:7", "CVE-OPENSSL-1-DEB7")
	if assert.Nil(t, err) {
		equalsVuln(t, &v1, &v1f)
	}

	// Find a vulnerability that has no link, no severity and no FixedIn.
	v2 := database.Vulnerability{
		Name:        "CVE-NOPE",
		Description: "A vulnerability affecting nothing",
		Namespace:   database.Namespace{Name: "debian:7"},
		Severity:    types.Unknown,
	}

	v2f, err := datastore.FindVulnerability("debian:7", "CVE-NOPE")
	if assert.Nil(t, err) {
		equalsVuln(t, &v2, &v2f)
	}
}
|
||||
|
||||
func TestDeleteVulnerability(t *testing.T) {
|
||||
datastore, err := OpenForTest("InsertVulnerability", true)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
defer datastore.Close()
|
||||
|
||||
// Delete non-existing Vulnerability.
|
||||
err = datastore.DeleteVulnerability("TestDeleteVulnerabilityNamespace1", "CVE-OPENSSL-1-DEB7")
|
||||
assert.Equal(t, cerrors.ErrNotFound, err)
|
||||
err = datastore.DeleteVulnerability("debian:7", "TestDeleteVulnerabilityVulnerability1")
|
||||
assert.Equal(t, cerrors.ErrNotFound, err)
|
||||
|
||||
// Delete Vulnerability.
|
||||
err = datastore.DeleteVulnerability("debian:7", "CVE-OPENSSL-1-DEB7")
|
||||
if assert.Nil(t, err) {
|
||||
_, err := datastore.FindVulnerability("debian:7", "CVE-OPENSSL-1-DEB7")
|
||||
assert.Equal(t, cerrors.ErrNotFound, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestInsertVulnerability covers validation failures, a fresh insert, and an
// update that adds, replaces and cancels FixedIn FeatureVersions.
func TestInsertVulnerability(t *testing.T) {
	// false: start from an empty database, no fixture.
	datastore, err := OpenForTest("InsertVulnerability", false)
	if err != nil {
		t.Error(err)
		return
	}
	defer datastore.Close()

	// Create some data.
	n1 := database.Namespace{Name: "TestInsertVulnerabilityNamespace1"}
	n2 := database.Namespace{Name: "TestInsertVulnerabilityNamespace2"}

	// f1/f2 share a name but differ in namespace; f3/f4 and f7/f8 share a name
	// so updates can replace/cancel earlier fixes.
	f1 := database.FeatureVersion{
		Feature: database.Feature{
			Name:      "TestInsertVulnerabilityFeatureVersion1",
			Namespace: n1,
		},
		Version: types.NewVersionUnsafe("1.0"),
	}
	f2 := database.FeatureVersion{
		Feature: database.Feature{
			Name:      "TestInsertVulnerabilityFeatureVersion1",
			Namespace: n2,
		},
		Version: types.NewVersionUnsafe("1.0"),
	}
	f3 := database.FeatureVersion{
		Feature: database.Feature{
			Name: "TestInsertVulnerabilityFeatureVersion2",
		},
		Version: types.MaxVersion,
	}
	f4 := database.FeatureVersion{
		Feature: database.Feature{
			Name: "TestInsertVulnerabilityFeatureVersion2",
		},
		Version: types.NewVersionUnsafe("1.4"),
	}
	f5 := database.FeatureVersion{
		Feature: database.Feature{
			Name: "TestInsertVulnerabilityFeatureVersion3",
		},
		Version: types.NewVersionUnsafe("1.5"),
	}
	f6 := database.FeatureVersion{
		Feature: database.Feature{
			Name: "TestInsertVulnerabilityFeatureVersion4",
		},
		Version: types.NewVersionUnsafe("0.1"),
	}
	f7 := database.FeatureVersion{
		Feature: database.Feature{
			Name: "TestInsertVulnerabilityFeatureVersion5",
		},
		Version: types.MaxVersion,
	}
	// f8 is f7 with MinVersion: the "remove this fix" sentinel.
	f8 := database.FeatureVersion{
		Feature: database.Feature{
			Name: "TestInsertVulnerabilityFeatureVersion5",
		},
		Version: types.MinVersion,
	}

	// Insert invalid vulnerabilities: empty name, empty namespace, missing
	// severity, invalid severity, and a FixedIn in a foreign namespace.
	for _, vulnerability := range []database.Vulnerability{
		database.Vulnerability{
			Name:      "",
			Namespace: n1,
			FixedIn:   []database.FeatureVersion{f1},
			Severity:  types.Unknown,
		},
		database.Vulnerability{
			Name:      "TestInsertVulnerability0",
			Namespace: database.Namespace{},
			FixedIn:   []database.FeatureVersion{f1},
			Severity:  types.Unknown,
		},
		database.Vulnerability{
			Name:      "TestInsertVulnerability0-",
			Namespace: database.Namespace{},
			FixedIn:   []database.FeatureVersion{f1},
		},
		database.Vulnerability{
			Name:      "TestInsertVulnerability0",
			Namespace: n1,
			FixedIn:   []database.FeatureVersion{f1},
			Severity:  types.Priority(""),
		},
		database.Vulnerability{
			Name:      "TestInsertVulnerability0",
			Namespace: n1,
			FixedIn:   []database.FeatureVersion{f2},
			Severity:  types.Unknown,
		},
	} {
		err := datastore.InsertVulnerabilities([]database.Vulnerability{vulnerability}, true)
		assert.Error(t, err)
	}

	// Insert a simple vulnerability and find it.
	v1meta := make(map[string]interface{})
	v1meta["TestInsertVulnerabilityMetadata1"] = "TestInsertVulnerabilityMetadataValue1"
	v1meta["TestInsertVulnerabilityMetadata2"] = struct {
		Test string
	}{
		Test: "TestInsertVulnerabilityMetadataValue1",
	}

	v1 := database.Vulnerability{
		Name:        "TestInsertVulnerability1",
		Namespace:   n1,
		FixedIn:     []database.FeatureVersion{f1, f3, f6, f7},
		Severity:    types.Low,
		Description: "TestInsertVulnerabilityDescription1",
		Link:        "TestInsertVulnerabilityLink1",
		Metadata:    v1meta,
	}
	err = datastore.InsertVulnerabilities([]database.Vulnerability{v1}, true)
	if assert.Nil(t, err) {
		v1f, err := datastore.FindVulnerability(n1.Name, v1.Name)
		if assert.Nil(t, err) {
			equalsVuln(t, &v1, &v1f)
		}
	}

	// Update vulnerability.
	v1.Description = "TestInsertVulnerabilityLink2"
	v1.Link = "TestInsertVulnerabilityLink2"
	v1.Severity = types.High
	// Update f3 in f4, add fixed in f5, add fixed in f6 which already exists, removes fixed in f7 by
	// adding f8 which is f7 but with MinVersion.
	v1.FixedIn = []database.FeatureVersion{f4, f5, f6, f8}

	err = datastore.InsertVulnerabilities([]database.Vulnerability{v1}, true)
	if assert.Nil(t, err) {
		v1f, err := datastore.FindVulnerability(n1.Name, v1.Name)
		if assert.Nil(t, err) {
			// We already had f1 before the update.
			// Add it to the struct for comparison.
			v1.FixedIn = append(v1.FixedIn, f1)

			// Removes f8 from the struct for comparison as it was just here to cancel f7.
			for i := 0; i < len(v1.FixedIn); i++ {
				if v1.FixedIn[i].Feature.Name == f8.Feature.Name {
					v1.FixedIn = append(v1.FixedIn[:i], v1.FixedIn[i+1:]...)
				}
			}

			equalsVuln(t, &v1, &v1f)
		}
	}
}
|
||||
|
||||
// equalsVuln asserts that actual matches expected: scalar fields, Metadata
// (expected is normalized through castMetadata first), and the FixedIn set
// compared by Feature name, ignoring order.
func equalsVuln(t *testing.T, expected, actual *database.Vulnerability) {
	assert.Equal(t, expected.Name, actual.Name)
	assert.Equal(t, expected.Namespace.Name, actual.Namespace.Name)
	assert.Equal(t, expected.Description, actual.Description)
	assert.Equal(t, expected.Link, actual.Link)
	assert.Equal(t, expected.Severity, actual.Severity)
	assert.True(t, reflect.DeepEqual(castMetadata(expected.Metadata), actual.Metadata), "Got metadata %#v, expected %#v", actual.Metadata, castMetadata(expected.Metadata))

	if assert.Len(t, actual.FixedIn, len(expected.FixedIn)) {
		for _, actualFeatureVersion := range actual.FixedIn {
			found := false
			for _, expectedFeatureVersion := range expected.FixedIn {
				if expectedFeatureVersion.Feature.Name == actualFeatureVersion.Feature.Name {
					found = true

					// FixedIn features inherit the vulnerability's namespace.
					assert.Equal(t, expected.Namespace.Name, actualFeatureVersion.Feature.Namespace.Name)
					assert.Equal(t, expectedFeatureVersion.Version, actualFeatureVersion.Version)
				}
			}
			if !found {
				t.Errorf("unexpected package %s in %s", actualFeatureVersion.Feature.Name, expected.Name)
			}
		}
	}
}
|
@ -1,51 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import cerrors "github.com/coreos/clair/utils/errors"
|
||||
|
||||
// FindAllLayersIntroducingVulnerability finds and returns the list of layers
// that introduce the given vulnerability (by its ID), selecting the specified fields.
// (Legacy Cayley-graph implementation.)
func FindAllLayersIntroducingVulnerability(vulnerabilityID string, selectedFields []string) ([]*Layer, error) {
	// Find vulnerability.
	vulnerability, err := FindOneVulnerability(vulnerabilityID, []string{FieldVulnerabilityFixedIn})
	if err != nil {
		return []*Layer{}, err
	}
	if vulnerability == nil {
		return []*Layer{}, cerrors.ErrNotFound
	}

	// Find FixedIn packages.
	fixedInPackages, err := FindAllPackagesByNodes(vulnerability.FixedInNodes, []string{FieldPackagePreviousVersion})
	if err != nil {
		return []*Layer{}, err
	}

	// Find all FixedIn packages's ancestors packages (which are therefore vulnerable
	// to the vulnerability): every version preceding a fixing version is affected.
	var vulnerablePackagesNodes []string
	for _, pkg := range fixedInPackages {
		previousVersions, err := pkg.PreviousVersions([]string{})
		if err != nil {
			return []*Layer{}, err
		}
		for _, version := range previousVersions {
			vulnerablePackagesNodes = append(vulnerablePackagesNodes, version.Node)
		}
	}

	// Return all the layers that add these packages.
	return FindAllLayersByAddedPackageNodes(vulnerablePackagesNodes, selectedFields)
}
|
@ -1,377 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/google/cayley/graph/path"
|
||||
)
|
||||
|
||||
const (
|
||||
FieldVulnerabilityID = "id"
|
||||
FieldVulnerabilityLink = "link"
|
||||
FieldVulnerabilityPriority = "priority"
|
||||
FieldVulnerabilityDescription = "description"
|
||||
FieldVulnerabilityFixedIn = "fixedIn"
|
||||
// FieldVulnerabilityCausedByPackage only makes sense with FindAllVulnerabilitiesByFixedIn.
|
||||
FieldVulnerabilityCausedByPackage = "causedByPackage"
|
||||
|
||||
// This field is not selectable and is for internal use only.
|
||||
fieldVulnerabilityIsValue = "vulnerability"
|
||||
)
|
||||
|
||||
var FieldVulnerabilityAll = []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn}
|
||||
|
||||
// Vulnerability represents a vulnerability that is fixed in some Packages
|
||||
type Vulnerability struct {
|
||||
Node string `json:"-"`
|
||||
ID string
|
||||
Link string
|
||||
Priority types.Priority
|
||||
Description string `json:",omitempty"`
|
||||
FixedInNodes []string `json:"-"`
|
||||
|
||||
CausedByPackage string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// GetNode returns an unique identifier for the graph node
|
||||
// Requires the key field: ID
|
||||
func (v *Vulnerability) GetNode() string {
|
||||
return fieldVulnerabilityIsValue + ":" + utils.Hash(v.ID)
|
||||
}
|
||||
|
||||
// ToAbstractVulnerability converts a Vulnerability into an
|
||||
// AbstractVulnerability.
|
||||
func (v *Vulnerability) ToAbstractVulnerability() (*AbstractVulnerability, error) {
|
||||
// Find FixedIn packages.
|
||||
fixedInPackages, err := FindAllPackagesByNodes(v.FixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &AbstractVulnerability{
|
||||
ID: v.ID,
|
||||
Link: v.Link,
|
||||
Priority: v.Priority,
|
||||
Description: v.Description,
|
||||
AffectedPackages: PackagesToAbstractPackages(fixedInPackages),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AbstractVulnerability represents a Vulnerability as it is defined in the database
|
||||
// package but exposes directly a list of AbstractPackage instead of
|
||||
// nodes to packages.
|
||||
type AbstractVulnerability struct {
|
||||
ID string
|
||||
Link string
|
||||
Priority types.Priority
|
||||
Description string
|
||||
AffectedPackages []*AbstractPackage
|
||||
}
|
||||
|
||||
// ToVulnerability converts an abstractVulnerability into
|
||||
// a Vulnerability
|
||||
func (av *AbstractVulnerability) ToVulnerability(fixedInNodes []string) *Vulnerability {
|
||||
return &Vulnerability{
|
||||
ID: av.ID,
|
||||
Link: av.Link,
|
||||
Priority: av.Priority,
|
||||
Description: av.Description,
|
||||
FixedInNodes: fixedInNodes,
|
||||
}
|
||||
}
|
||||
|
||||
// InsertVulnerabilities inserts or updates several vulnerabilities in the database in one transaction
|
||||
// During an update, if the vulnerability was previously fixed by a version in a branch and a new package of that branch is specified, the previous one is deleted
|
||||
// Otherwise, it simply adds the defined packages, there is currently no way to delete affected packages.
|
||||
//
|
||||
// ID, Link, Priority and FixedInNodes fields have to be specified. Description is optionnal.
|
||||
func InsertVulnerabilities(vulnerabilities []*Vulnerability) ([]Notification, error) {
|
||||
if len(vulnerabilities) == 0 {
|
||||
return []Notification{}, nil
|
||||
}
|
||||
|
||||
// Create required data structure
|
||||
var err error
|
||||
t := cayley.NewTransaction()
|
||||
cachedVulnerabilities := make(map[string]*Vulnerability)
|
||||
|
||||
var notifications []Notification
|
||||
newVulnerabilityNotifications := make(map[string]*NewVulnerabilityNotification)
|
||||
vulnerabilityPriorityIncreasedNotifications := make(map[string]*VulnerabilityPriorityIncreasedNotification)
|
||||
vulnerabilityPackageChangedNotifications := make(map[string]*VulnerabilityPackageChangedNotification)
|
||||
|
||||
// Iterate over all the vulnerabilities we need to insert/update
|
||||
for _, vulnerability := range vulnerabilities {
|
||||
// Check if the vulnerability already exists
|
||||
existingVulnerability, _ := cachedVulnerabilities[vulnerability.ID]
|
||||
if existingVulnerability == nil {
|
||||
existingVulnerability, err = FindOneVulnerability(vulnerability.ID, FieldVulnerabilityAll)
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
return []Notification{}, err
|
||||
}
|
||||
if existingVulnerability != nil {
|
||||
cachedVulnerabilities[vulnerability.ID] = existingVulnerability
|
||||
}
|
||||
}
|
||||
|
||||
// Insert/Update vulnerability
|
||||
if existingVulnerability == nil {
|
||||
// The vulnerability does not exist, create it
|
||||
|
||||
// Verify parameters
|
||||
if vulnerability.ID == "" || vulnerability.Link == "" || vulnerability.Priority == "" {
|
||||
log.Warningf("could not insert an incomplete vulnerability [ID: %s, Link: %s, Priority: %s]", vulnerability.ID, vulnerability.Link, vulnerability.Priority)
|
||||
return []Notification{}, cerrors.NewBadRequestError("Could not insert an incomplete vulnerability")
|
||||
}
|
||||
if !vulnerability.Priority.IsValid() {
|
||||
log.Warningf("could not insert a vulnerability which has an invalid priority [ID: %s, Link: %s, Priority: %s]. Valid priorities are: %v.", vulnerability.ID, vulnerability.Link, vulnerability.Priority, types.Priorities)
|
||||
return []Notification{}, cerrors.NewBadRequestError("Could not insert a vulnerability which has an invalid priority")
|
||||
}
|
||||
if len(vulnerability.FixedInNodes) == 0 {
|
||||
log.Warningf("could not insert a vulnerability which doesn't affect any package [ID: %s].", vulnerability.ID)
|
||||
return []Notification{}, cerrors.NewBadRequestError("could not insert a vulnerability which doesn't affect any package")
|
||||
}
|
||||
|
||||
// Insert it
|
||||
vulnerability.Node = vulnerability.GetNode()
|
||||
|
||||
t.AddQuad(cayley.Triple(vulnerability.Node, fieldIs, fieldVulnerabilityIsValue))
|
||||
t.AddQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityID, vulnerability.ID))
|
||||
t.AddQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityLink, vulnerability.Link))
|
||||
t.AddQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityPriority, string(vulnerability.Priority)))
|
||||
t.AddQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityDescription, vulnerability.Description))
|
||||
for _, p := range vulnerability.FixedInNodes {
|
||||
t.AddQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityFixedIn, p))
|
||||
}
|
||||
|
||||
// Add a notification
|
||||
notification := &NewVulnerabilityNotification{VulnerabilityID: vulnerability.ID}
|
||||
notifications = append(notifications, notification)
|
||||
newVulnerabilityNotifications[vulnerability.ID] = notification
|
||||
|
||||
cachedVulnerabilities[vulnerability.ID] = vulnerability
|
||||
} else {
|
||||
// The vulnerability already exists, update it
|
||||
if vulnerability.Link != "" && existingVulnerability.Link != vulnerability.Link {
|
||||
t.RemoveQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityLink, existingVulnerability.Link))
|
||||
t.AddQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityLink, vulnerability.Link))
|
||||
existingVulnerability.Link = vulnerability.Link
|
||||
}
|
||||
|
||||
if vulnerability.Priority != "" && vulnerability.Priority != types.Unknown && existingVulnerability.Priority != vulnerability.Priority {
|
||||
if !vulnerability.Priority.IsValid() {
|
||||
log.Warningf("could not update a vulnerability which has an invalid priority [ID: %s, Link: %s, Priority: %s]. Valid priorities are: %v.", vulnerability.ID, vulnerability.Link, vulnerability.Priority, types.Priorities)
|
||||
return []Notification{}, cerrors.NewBadRequestError("Could not update a vulnerability which has an invalid priority")
|
||||
}
|
||||
|
||||
// Add a notification about the priority change if the new priority is higher and the vulnerability is not new
|
||||
if vulnerability.Priority.Compare(existingVulnerability.Priority) > 0 {
|
||||
if _, newVulnerabilityNotificationExists := newVulnerabilityNotifications[vulnerability.ID]; !newVulnerabilityNotificationExists {
|
||||
// Any priorityChangeNotification already ?
|
||||
if existingPriorityNotification, _ := vulnerabilityPriorityIncreasedNotifications[vulnerability.ID]; existingPriorityNotification != nil {
|
||||
// There is a priority change notification, replace it but keep the old priority field
|
||||
existingPriorityNotification.NewPriority = vulnerability.Priority
|
||||
} else {
|
||||
// No previous notification, just add a new one
|
||||
notification := &VulnerabilityPriorityIncreasedNotification{OldPriority: existingVulnerability.Priority, NewPriority: vulnerability.Priority, VulnerabilityID: existingVulnerability.ID}
|
||||
notifications = append(notifications, notification)
|
||||
vulnerabilityPriorityIncreasedNotifications[vulnerability.ID] = notification
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.RemoveQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityPriority, string(existingVulnerability.Priority)))
|
||||
t.AddQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityPriority, string(vulnerability.Priority)))
|
||||
existingVulnerability.Priority = vulnerability.Priority
|
||||
}
|
||||
|
||||
if vulnerability.Description != "" && existingVulnerability.Description != vulnerability.Description {
|
||||
t.RemoveQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityDescription, existingVulnerability.Description))
|
||||
t.AddQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityDescription, vulnerability.Description))
|
||||
existingVulnerability.Description = vulnerability.Description
|
||||
}
|
||||
|
||||
newFixedInNodes := utils.CompareStringLists(vulnerability.FixedInNodes, existingVulnerability.FixedInNodes)
|
||||
if len(newFixedInNodes) > 0 {
|
||||
var removedNodes []string
|
||||
var addedNodes []string
|
||||
|
||||
existingVulnerabilityFixedInPackages, err := FindAllPackagesByNodes(existingVulnerability.FixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion})
|
||||
if err != nil {
|
||||
return []Notification{}, err
|
||||
}
|
||||
newFixedInPackages, err := FindAllPackagesByNodes(newFixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion})
|
||||
if err != nil {
|
||||
return []Notification{}, err
|
||||
}
|
||||
|
||||
for _, p := range newFixedInPackages {
|
||||
for _, ep := range existingVulnerabilityFixedInPackages {
|
||||
if p.Branch() == ep.Branch() {
|
||||
// A link to this package branch already exist and is not the same version, we will delete it
|
||||
t.RemoveQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityFixedIn, ep.Node))
|
||||
|
||||
var index int
|
||||
for i, n := range existingVulnerability.FixedInNodes {
|
||||
if n == ep.Node {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
existingVulnerability.FixedInNodes = append(existingVulnerability.FixedInNodes[index:], existingVulnerability.FixedInNodes[index+1:]...)
|
||||
removedNodes = append(removedNodes, ep.Node)
|
||||
}
|
||||
}
|
||||
|
||||
t.AddQuad(cayley.Triple(existingVulnerability.Node, FieldVulnerabilityFixedIn, p.Node))
|
||||
existingVulnerability.FixedInNodes = append(existingVulnerability.FixedInNodes, p.Node)
|
||||
addedNodes = append(addedNodes, p.Node)
|
||||
}
|
||||
|
||||
// Add notification about the FixedIn modification if the vulnerability is not new
|
||||
if _, newVulnerabilityNotificationExists := newVulnerabilityNotifications[vulnerability.ID]; !newVulnerabilityNotificationExists {
|
||||
// Any VulnerabilityPackageChangedNotification already ?
|
||||
if existingPackageNotification, _ := vulnerabilityPackageChangedNotifications[vulnerability.ID]; existingPackageNotification != nil {
|
||||
// There is a priority change notification, add the packages modifications to it
|
||||
existingPackageNotification.AddedFixedInNodes = append(existingPackageNotification.AddedFixedInNodes, addedNodes...)
|
||||
existingPackageNotification.RemovedFixedInNodes = append(existingPackageNotification.RemovedFixedInNodes, removedNodes...)
|
||||
} else {
|
||||
// No previous notification, just add a new one
|
||||
notification := &VulnerabilityPackageChangedNotification{VulnerabilityID: vulnerability.ID, AddedFixedInNodes: addedNodes, RemovedFixedInNodes: removedNodes}
|
||||
notifications = append(notifications, notification)
|
||||
vulnerabilityPackageChangedNotifications[vulnerability.ID] = notification
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply transaction
|
||||
if err = store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (InsertVulnerabilities): %s", err)
|
||||
return []Notification{}, ErrTransaction
|
||||
}
|
||||
|
||||
return notifications, nil
|
||||
}
|
||||
|
||||
// DeleteVulnerability deletes the vulnerability having the given ID
|
||||
func DeleteVulnerability(id string) error {
|
||||
vulnerability, err := FindOneVulnerability(id, FieldVulnerabilityAll)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t := cayley.NewTransaction()
|
||||
t.RemoveQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityID, vulnerability.ID))
|
||||
t.RemoveQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityLink, vulnerability.Link))
|
||||
t.RemoveQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityPriority, string(vulnerability.Priority)))
|
||||
t.RemoveQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityDescription, vulnerability.Description))
|
||||
for _, p := range vulnerability.FixedInNodes {
|
||||
t.RemoveQuad(cayley.Triple(vulnerability.Node, FieldVulnerabilityFixedIn, p))
|
||||
}
|
||||
|
||||
if err := store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (DeleteVulnerability): %s", err)
|
||||
return ErrTransaction
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FindOneVulnerability finds and returns a single vulnerability having the given ID selecting the specified fields
|
||||
func FindOneVulnerability(id string, selectedFields []string) (*Vulnerability, error) {
|
||||
t := &Vulnerability{ID: id}
|
||||
v, err := toVulnerabilities(cayley.StartPath(store, t.GetNode()).Has(fieldIs, fieldVulnerabilityIsValue), selectedFields)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(v) == 1 {
|
||||
return v[0], nil
|
||||
}
|
||||
if len(v) > 1 {
|
||||
log.Errorf("found multiple vulnerabilities with identical ID [ID: %s]", id)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
return nil, cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
// FindAllVulnerabilitiesByFixedIn finds and returns all vulnerabilities that are fixed in the given packages (speficied by their nodes), selecting the specified fields
|
||||
func FindAllVulnerabilitiesByFixedIn(nodes []string, selectedFields []string) ([]*Vulnerability, error) {
|
||||
if len(nodes) == 0 {
|
||||
log.Warning("Could not FindAllVulnerabilitiesByFixedIn with an empty nodes array.")
|
||||
return []*Vulnerability{}, nil
|
||||
}
|
||||
|
||||
// Construct path, potentially saving FieldVulnerabilityCausedByPackage
|
||||
path := cayley.StartPath(store, nodes...)
|
||||
if utils.Contains(FieldVulnerabilityCausedByPackage, selectedFields) {
|
||||
path = path.Save(FieldPackageName, FieldVulnerabilityCausedByPackage)
|
||||
}
|
||||
path = path.In(FieldVulnerabilityFixedIn)
|
||||
|
||||
return toVulnerabilities(path, selectedFields)
|
||||
}
|
||||
|
||||
// toVulnerabilities converts a path leading to one or multiple vulnerabilities to Vulnerability structs, selecting the specified fields
|
||||
func toVulnerabilities(path *path.Path, selectedFields []string) ([]*Vulnerability, error) {
|
||||
var vulnerabilities []*Vulnerability
|
||||
|
||||
saveFields(path, selectedFields, []string{FieldVulnerabilityFixedIn, FieldVulnerabilityCausedByPackage})
|
||||
it, _ := path.BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
vulnerability := Vulnerability{Node: store.NameOf(it.Result())}
|
||||
for _, selectedField := range selectedFields {
|
||||
switch selectedField {
|
||||
case FieldVulnerabilityID:
|
||||
vulnerability.ID = store.NameOf(tags[FieldVulnerabilityID])
|
||||
case FieldVulnerabilityLink:
|
||||
vulnerability.Link = store.NameOf(tags[FieldVulnerabilityLink])
|
||||
case FieldVulnerabilityPriority:
|
||||
vulnerability.Priority = types.Priority(store.NameOf(tags[FieldVulnerabilityPriority]))
|
||||
case FieldVulnerabilityDescription:
|
||||
vulnerability.Description = store.NameOf(tags[FieldVulnerabilityDescription])
|
||||
case FieldVulnerabilityFixedIn:
|
||||
var err error
|
||||
vulnerability.FixedInNodes, err = toValues(cayley.StartPath(store, vulnerability.Node).Out(FieldVulnerabilityFixedIn))
|
||||
if err != nil {
|
||||
log.Errorf("could not get fixedIn on vulnerability %s: %s.", vulnerability.Node, err.Error())
|
||||
return []*Vulnerability{}, err
|
||||
}
|
||||
case FieldVulnerabilityCausedByPackage:
|
||||
vulnerability.CausedByPackage = store.NameOf(tags[FieldVulnerabilityCausedByPackage])
|
||||
default:
|
||||
panic("unknown selectedField")
|
||||
}
|
||||
}
|
||||
vulnerabilities = append(vulnerabilities, &vulnerability)
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in toVulnerabilities: %s", it.Err())
|
||||
return []*Vulnerability{}, ErrBackendException
|
||||
}
|
||||
|
||||
return vulnerabilities, nil
|
||||
}
|
@ -1,238 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestVulnerability(t *testing.T) {
|
||||
Open(&config.DatabaseConfig{Type: "memstore"})
|
||||
defer Close()
|
||||
|
||||
// Insert invalid vulnerabilities
|
||||
for _, vulnerability := range []Vulnerability{
|
||||
Vulnerability{ID: "", Link: "link1", Priority: types.Medium, FixedInNodes: []string{"pkg1"}},
|
||||
Vulnerability{ID: "test1", Link: "", Priority: types.Medium, FixedInNodes: []string{"pkg1"}},
|
||||
Vulnerability{ID: "test1", Link: "link1", Priority: "InvalidPriority", FixedInNodes: []string{"pkg1"}},
|
||||
Vulnerability{ID: "test1", Link: "link1", Priority: types.Medium, FixedInNodes: []string{}},
|
||||
} {
|
||||
_, err := InsertVulnerabilities([]*Vulnerability{&vulnerability})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// Some data
|
||||
vuln1 := &Vulnerability{ID: "test1", Link: "link1", Priority: types.Medium, Description: "testDescription1", FixedInNodes: []string{"pkg1"}}
|
||||
vuln2 := &Vulnerability{ID: "test2", Link: "link2", Priority: types.High, Description: "testDescription2", FixedInNodes: []string{"pkg1", "pkg2"}}
|
||||
vuln3 := &Vulnerability{ID: "test3", Link: "link3", Priority: types.High, FixedInNodes: []string{"pkg3"}} // Empty description
|
||||
|
||||
// Insert some vulnerabilities
|
||||
_, err := InsertVulnerabilities([]*Vulnerability{vuln1, vuln2, vuln3})
|
||||
if assert.Nil(t, err) {
|
||||
// Find one of the vulnerabilities we just inserted and verify its content
|
||||
v1, err := FindOneVulnerability(vuln1.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v1) {
|
||||
assert.Equal(t, vuln1.ID, v1.ID)
|
||||
assert.Equal(t, vuln1.Link, v1.Link)
|
||||
assert.Equal(t, vuln1.Priority, v1.Priority)
|
||||
assert.Equal(t, vuln1.Description, v1.Description)
|
||||
if assert.Len(t, v1.FixedInNodes, 1) {
|
||||
assert.Equal(t, vuln1.FixedInNodes[0], v1.FixedInNodes[0])
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that vulnerabilities with empty descriptions work as well
|
||||
v3, err := FindOneVulnerability(vuln3.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v3) {
|
||||
assert.Equal(t, vuln3.Description, v3.Description)
|
||||
}
|
||||
|
||||
// Find vulnerabilities by fixed packages
|
||||
vulnsFixedInPkg1AndPkg3, err := FindAllVulnerabilitiesByFixedIn([]string{"pkg2", "pkg3"}, FieldVulnerabilityAll)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, vulnsFixedInPkg1AndPkg3, 2)
|
||||
|
||||
// Delete vulnerability
|
||||
if assert.Nil(t, DeleteVulnerability(vuln1.ID)) {
|
||||
v1, err := FindOneVulnerability(vuln1.ID, FieldVulnerabilityAll)
|
||||
assert.Equal(t, cerrors.ErrNotFound, err)
|
||||
assert.Nil(t, v1)
|
||||
}
|
||||
}
|
||||
|
||||
// Update a vulnerability and verify its new content
|
||||
pkg1 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
|
||||
InsertPackages([]*Package{pkg1})
|
||||
vuln5 := &Vulnerability{ID: "test5", Link: "link5", Priority: types.Medium, Description: "testDescription5", FixedInNodes: []string{pkg1.Node}}
|
||||
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{vuln5})
|
||||
if assert.Nil(t, err) {
|
||||
// Partial updates
|
||||
// # Just a field update
|
||||
vuln5b := &Vulnerability{ID: "test5", Priority: types.High}
|
||||
_, err := InsertVulnerabilities([]*Vulnerability{vuln5b})
|
||||
if assert.Nil(t, err) {
|
||||
v5b, err := FindOneVulnerability(vuln5b.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v5b) {
|
||||
assert.Equal(t, vuln5b.ID, v5b.ID)
|
||||
assert.Equal(t, vuln5b.Priority, v5b.Priority)
|
||||
|
||||
if assert.Len(t, v5b.FixedInNodes, 1) {
|
||||
assert.Contains(t, v5b.FixedInNodes, pkg1.Node)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// # Just a field update, twice in the same transaction
|
||||
vuln5b1 := &Vulnerability{ID: "test5", Link: "http://foo.bar"}
|
||||
vuln5b2 := &Vulnerability{ID: "test5", Link: "http://bar.foo"}
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{vuln5b1, vuln5b2})
|
||||
if assert.Nil(t, err) {
|
||||
v5b2, err := FindOneVulnerability(vuln5b2.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v5b2) {
|
||||
assert.Equal(t, vuln5b2.Link, v5b2.Link)
|
||||
}
|
||||
}
|
||||
|
||||
// # All fields except fixedIn update
|
||||
vuln5c := &Vulnerability{ID: "test5", Link: "link5c", Priority: types.Critical, Description: "testDescription5c"}
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{vuln5c})
|
||||
if assert.Nil(t, err) {
|
||||
v5c, err := FindOneVulnerability(vuln5c.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v5c) {
|
||||
assert.Equal(t, vuln5c.ID, v5c.ID)
|
||||
assert.Equal(t, vuln5c.Link, v5c.Link)
|
||||
assert.Equal(t, vuln5c.Priority, v5c.Priority)
|
||||
assert.Equal(t, vuln5c.Description, v5c.Description)
|
||||
|
||||
if assert.Len(t, v5c.FixedInNodes, 1) {
|
||||
assert.Contains(t, v5c.FixedInNodes, pkg1.Node)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Complete update
|
||||
pkg2 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.1")}
|
||||
pkg3 := &Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}
|
||||
InsertPackages([]*Package{pkg2, pkg3})
|
||||
vuln5d := &Vulnerability{ID: "test5", Link: "link5d", Priority: types.Low, Description: "testDescription5d", FixedInNodes: []string{pkg2.Node, pkg3.Node}}
|
||||
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{vuln5d})
|
||||
if assert.Nil(t, err) {
|
||||
v5d, err := FindOneVulnerability(vuln5d.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v5d) {
|
||||
assert.Equal(t, vuln5d.ID, v5d.ID)
|
||||
assert.Equal(t, vuln5d.Link, v5d.Link)
|
||||
assert.Equal(t, vuln5d.Priority, v5d.Priority)
|
||||
assert.Equal(t, vuln5d.Description, v5d.Description)
|
||||
|
||||
// Here, we ensure that a vulnerability can only be fixed by one package of a given branch at a given time
|
||||
// And that we can add new fixed packages as well
|
||||
if assert.Len(t, v5d.FixedInNodes, 2) {
|
||||
assert.NotContains(t, v5d.FixedInNodes, pkg1.Node)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create and update a vulnerability's packages (and from the same branch) in the same batch
|
||||
pkg1 = &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
|
||||
pkg1b := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.1")}
|
||||
InsertPackages([]*Package{pkg1, pkg1b})
|
||||
|
||||
// # Two updates of the same vulnerability in the same batch with packages of the same branch
|
||||
pkg0 := &Package{OS: "testOS", Name: "testpkg0", Version: types.NewVersionUnsafe("1.0")}
|
||||
InsertPackages([]*Package{pkg0})
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{&Vulnerability{ID: "test7", Link: "link7", Priority: types.Medium, Description: "testDescription7", FixedInNodes: []string{pkg0.Node}}})
|
||||
if assert.Nil(t, err) {
|
||||
vuln7b := &Vulnerability{ID: "test7", FixedInNodes: []string{pkg1.Node}}
|
||||
vuln7c := &Vulnerability{ID: "test7", FixedInNodes: []string{pkg1b.Node}}
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{vuln7b, vuln7c})
|
||||
if assert.Nil(t, err) {
|
||||
v7, err := FindOneVulnerability("test7", FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.Len(t, v7.FixedInNodes, 2) {
|
||||
assert.Contains(t, v7.FixedInNodes, pkg0.Node)
|
||||
assert.NotContains(t, v7.FixedInNodes, pkg1.Node)
|
||||
assert.Contains(t, v7.FixedInNodes, pkg1b.Node)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertVulnerabilityNotifications(t *testing.T) {
|
||||
Open(&config.DatabaseConfig{Type: "memstore"})
|
||||
defer Close()
|
||||
|
||||
pkg1 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
|
||||
pkg1b := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.2")}
|
||||
pkg2 := &Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}
|
||||
InsertPackages([]*Package{pkg1, pkg1b, pkg2})
|
||||
|
||||
// NewVulnerabilityNotification
|
||||
vuln1 := &Vulnerability{ID: "test1", Link: "link1", Priority: types.Medium, Description: "testDescription1", FixedInNodes: []string{pkg1.Node}}
|
||||
vuln2 := &Vulnerability{ID: "test2", Link: "link2", Priority: types.High, Description: "testDescription2", FixedInNodes: []string{pkg1.Node, pkg2.Node}}
|
||||
vuln1b := &Vulnerability{ID: "test1", Priority: types.High, FixedInNodes: []string{"pkg3"}}
|
||||
notifications, err := InsertVulnerabilities([]*Vulnerability{vuln1, vuln2, vuln1b})
|
||||
if assert.Nil(t, err) {
|
||||
// We should only have two NewVulnerabilityNotification notifications: one for test1 and one for test2
|
||||
// We should not have a VulnerabilityPriorityIncreasedNotification or a VulnerabilityPackageChangedNotification
|
||||
// for test1 because it is in the same batch
|
||||
if assert.Len(t, notifications, 2) {
|
||||
for _, n := range notifications {
|
||||
_, ok := n.(*NewVulnerabilityNotification)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// VulnerabilityPriorityIncreasedNotification
|
||||
vuln1c := &Vulnerability{ID: "test1", Priority: types.Critical}
|
||||
notifications, err = InsertVulnerabilities([]*Vulnerability{vuln1c})
|
||||
if assert.Nil(t, err) {
|
||||
if assert.Len(t, notifications, 1) {
|
||||
if nn, ok := notifications[0].(*VulnerabilityPriorityIncreasedNotification); assert.True(t, ok) {
|
||||
assert.Equal(t, vuln1b.Priority, nn.OldPriority)
|
||||
assert.Equal(t, vuln1c.Priority, nn.NewPriority)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
notifications, err = InsertVulnerabilities([]*Vulnerability{&Vulnerability{ID: "test1", Priority: types.Low}})
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, notifications, 0)
|
||||
|
||||
// VulnerabilityPackageChangedNotification
|
||||
vuln1e := &Vulnerability{ID: "test1", FixedInNodes: []string{pkg1b.Node}}
|
||||
vuln1f := &Vulnerability{ID: "test1", FixedInNodes: []string{pkg2.Node}}
|
||||
notifications, err = InsertVulnerabilities([]*Vulnerability{vuln1e, vuln1f})
|
||||
if assert.Nil(t, err) {
|
||||
if assert.Len(t, notifications, 1) {
|
||||
if nn, ok := notifications[0].(*VulnerabilityPackageChangedNotification); assert.True(t, ok) {
|
||||
// Here, we say that pkg1b fixes the vulnerability, but as pkg1b is in
|
||||
// the same branch as pkg1, pkg1 should be removed and pkg1b added
|
||||
// We also add pkg2 as fixed
|
||||
assert.Contains(t, nn.AddedFixedInNodes, pkg1b.Node)
|
||||
assert.Contains(t, nn.RemovedFixedInNodes, pkg1.Node)
|
||||
|
||||
assert.Contains(t, nn.AddedFixedInNodes, pkg2.Node)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
801
docs/API.md
801
docs/API.md
@ -1,801 +0,0 @@
|
||||
# General
|
||||
|
||||
## Fetch API Version
|
||||
|
||||
It returns the versions of the API and the layer processing engine.
|
||||
|
||||
GET /v1/versions
|
||||
|
||||
* The versions are integers.
|
||||
* The API version number is raised each time there is an structural change.
|
||||
* The Engine version is increased when the a new layer analysis could find new
|
||||
relevant data.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s 127.0.0.1:6060/v1/versions | python -m json.tool
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"APIVersion": "1",
|
||||
"EngineVersion": "1"
|
||||
}
|
||||
```
|
||||
|
||||
## Fetch Health status
|
||||
|
||||
GET /v1/health
|
||||
|
||||
Returns 200 if essential services are healthy (ie. database) and 503 otherwise.
|
||||
|
||||
This call is also available on the API port + 1, without any security, allowing
|
||||
external monitoring systems to easily access it.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s 127.0.0.1:6060/v1/health | python -m json.tool
|
||||
```
|
||||
|
||||
```
|
||||
curl -s 127.0.0.1:6061/ | python -m json.tool
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"database":{
|
||||
"IsHealthy":true
|
||||
},
|
||||
"notifier":{
|
||||
"IsHealthy":true,
|
||||
"Details":{
|
||||
"QueueSize":0
|
||||
}
|
||||
},
|
||||
"updater":{
|
||||
"IsHealthy":true,
|
||||
"Details":{
|
||||
"HealthIdentifier":"cf65a8f6-425c-4a9c-87fe-f59ddf75fc87",
|
||||
"HealthLockOwner":"1e7fce65-ee67-4ca5-b2e9-61e9f5e0d3ed",
|
||||
"LatestSuccessfulUpdate":"2015-09-30T14:47:47Z",
|
||||
"ConsecutiveLocalFailures":0
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 503 Service unavailable
|
||||
{
|
||||
"database":{
|
||||
"IsHealthy":false
|
||||
},
|
||||
"notifier":{
|
||||
"IsHealthy":true,
|
||||
"Details":{
|
||||
"QueueSize":0
|
||||
}
|
||||
},
|
||||
"updater":{
|
||||
"IsHealthy":true,
|
||||
"Details":{
|
||||
"HealthIdentifier":"cf65a8f6-425c-4a9c-87fe-f59ddf75fc87",
|
||||
"HealthLockOwner":"1e7fce65-ee67-4ca5-b2e9-61e9f5e0d3ed",
|
||||
"LatestSuccessfulUpdate":"2015-09-30T14:47:47Z",
|
||||
"ConsecutiveLocalFailures":0
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
# Layers
|
||||
|
||||
## Insert a new Layer
|
||||
|
||||
It processes and inserts a new Layer in the database.
|
||||
|
||||
POST /v1/layers
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|Path|String|Absolute path or HTTP link pointing to the Layer's tar file|
|
||||
|ParentID|String|(Optional) Unique ID of the Layer's parent|
|
||||
|ImageFormat|String|Image format of the Layer ('Docker' or 'ACI')|
|
||||
|
||||
If the Layer has not parent, the ParentID field should be omitted or empty.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s -H "Content-Type: application/json" -X POST -d \
|
||||
'{
|
||||
"ID": "39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8",
|
||||
"Path": "https://layers_storage/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8.tar",
|
||||
"ParentID": "df2a0347c9d081fa05ecb83669dcae5830c67b0676a6d6358218e55d8a45969c",
|
||||
"ImageFormat": "Docker"
|
||||
}' \
|
||||
127.0.0.1:6060/v1/layers
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
If the layer has been successfully processed, the version of the engine which processed it is returned.
|
||||
|
||||
```
|
||||
HTTP/1.1 201 Created
|
||||
{
|
||||
"Version": "1"
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 400 Bad Request
|
||||
{
|
||||
"Message": "Layer 39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8's parent (df2a0347c9d081fa05ecb83669dcae5830c67b0676a6d6358218e55d8a45969c) is unknown."
|
||||
}
|
||||
```
|
||||
|
||||
It could also return a `415 Unsupported Media Type` response with a `Message` if the request content is not valid JSON.
|
||||
|
||||
## Delete a Layer
|
||||
|
||||
It deletes a layer from the database and any child layers that are dependent on the specified layer.
|
||||
|
||||
DELETE /v1/layers/{ID}
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s -X DELETE 127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 204 No Content
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
//////////
|
||||
|
||||
## Get a Layer's operating system
|
||||
|
||||
It returns the operating system of a given Layer.
|
||||
|
||||
GET /v1/layers/{ID}/os
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s 127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/os | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"OS": "debian:8",
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get a Layer's parent
|
||||
|
||||
It returns the parent's ID of a given Layer.
|
||||
It returns an empty ID string when the layer has no parent.
|
||||
|
||||
GET /v1/layers/{ID}/parent
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s 127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/parent | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"ID": "df2a0347c9d081fa05ecb83669dcae5830c67b0676a6d6358218e55d8a45969c",
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get a Layer's package list
|
||||
|
||||
It returns the package list of a given Layer.
|
||||
|
||||
GET /v1/layers/{ID}/packages
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s 127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/packages | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"Packages": [
|
||||
{
|
||||
"Name": "gcc-4.9",
|
||||
"OS": "debian:8",
|
||||
"Version": "4.9.2-10"
|
||||
},
|
||||
[...]
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get a Layer's package diff
|
||||
|
||||
It returns the lists of packages a given Layer installs and removes.
|
||||
|
||||
GET /v1/layers/{ID}/packages/diff
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s 127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/packages/diff | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"InstalledPackages": [
|
||||
{
|
||||
"Name": "gcc-4.9",
|
||||
"OS": "debian:8",
|
||||
"Version": "4.9.2-10"
|
||||
},
|
||||
[...]
|
||||
],
|
||||
"RemovedPackages": null
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get a Layer's vulnerabilities
|
||||
|
||||
It returns the lists of vulnerabilities which affect a given Layer.
|
||||
|
||||
GET /v1/layers/{ID}/vulnerabilities(?minimumPriority=Low)
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|minimumPriority|Priority|(Optional) The minimum priority of the returned vulnerabilities. Defaults to High|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s "127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/vulnerabilities?minimumPriority=Negligible" | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"Vulnerabilities": [
|
||||
{
|
||||
"ID": "CVE-2014-2583",
|
||||
"Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-2583",
|
||||
"Priority": "Low",
|
||||
"Description": "Multiple directory traversal vulnerabilities in pam_timestamp.c in the pam_timestamp module for Linux-PAM (aka pam) 1.1.8 allow local users to create aribitrary files or possibly bypass authentication via a .. (dot dot) in the (1) PAM_RUSER value to the get_ruser function or (2) PAM_TTY value to the check_tty funtion, which is used by the format_timestamp_name function.",
|
||||
"CausedByPackage": "pam"
|
||||
},
|
||||
[...]
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get vulnerabilities that a layer introduces and removes
|
||||
|
||||
It returns the lists of vulnerabilities which are introduced and removed by the given Layer.
|
||||
|
||||
GET /v1/layers/{ID}/vulnerabilities/diff(?minimumPriority=Low)
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|minimumPriority|Priority|(Optional) The minimum priority of the returned vulnerabilities|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s "127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/vulnerabilities?minimumPriority=Negligible" | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"Adds": [
|
||||
{
|
||||
"ID": "CVE-2014-2583",
|
||||
"Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-2583",
|
||||
"Priority": "Low",
|
||||
"Description": "Multiple directory traversal vulnerabilities in pam_timestamp.c in the pam_timestamp module for Linux-PAM (aka pam) 1.1.8 allow local users to create aribitrary files or possibly bypass authentication via a .. (dot dot) in the (1) PAM_RUSER value to the get_ruser function or (2) PAM_TTY value to the check_tty funtion, which is used by the format_timestamp_name function.",
|
||||
"CausedByPackage": "pam"
|
||||
},
|
||||
[...]
|
||||
],
|
||||
"Removes": null
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get a Layers' vulnerabilities (Batch)
|
||||
|
||||
It returns the lists of vulnerabilities which affect the given Layers.
|
||||
|
||||
POST /v1/batch/layers/vulnerabilities(?minimumPriority=Low)
|
||||
|
||||
Counterintuitively, this request is actually a POST to be able to pass a lot of parameters.
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|LayersIDs|Array of strings|Unique IDs of Layers|
|
||||
|minimumPriority|Priority|(Optional) The minimum priority of the returned vulnerabilities. Defaults to High|
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s -H "Content-Type: application/json" -X POST -d \
|
||||
'{
|
||||
"LayersIDs": [
|
||||
"a005304e4e74c1541988d3d1abb170e338c1d45daee7151f8e82f8460634d329",
|
||||
"f1b10cd842498c23d206ee0cbeaa9de8d2ae09ff3c7af2723a9e337a6965d639"
|
||||
]
|
||||
}' \
|
||||
127.0.0.1:6060/v1/batch/layers/vulnerabilities
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"a005304e4e74c1541988d3d1abb170e338c1d45daee7151f8e82f8460634d329": {
|
||||
"Vulnerabilities": [
|
||||
{
|
||||
"ID": "CVE-2014-2583",
|
||||
"Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-2583",
|
||||
"Priority": "Low",
|
||||
"Description": "Multiple directory traversal vulnerabilities in pam_timestamp.c in the pam_timestamp module for Linux-PAM (aka pam) 1.1.8 allow local users to create aribitrary files or possibly bypass authentication via a .. (dot dot) in the (1) PAM_RUSER value to the get_ruser function or (2) PAM_TTY value to the check_tty funtion, which is used by the format_timestamp_name function.",
|
||||
"CausedByPackage": "pam"
|
||||
},
|
||||
[...]
|
||||
]
|
||||
},
|
||||
[...]
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
# Vulnerabilities
|
||||
|
||||
## Get a vulnerability's information
|
||||
|
||||
It returns all known information about a Vulnerability and its fixes.
|
||||
|
||||
GET /v1/vulnerabilities/{ID}
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Vulnerability|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s 127.0.0.1:6060/v1/vulnerabilities/CVE-2015-0235 | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"ID": "CVE-2015-0235",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2015-0235",
|
||||
"Priority": "High",
|
||||
"Description": "Heap-based buffer overflow in the __nss_hostname_digits_dots function in glibc 2.2, and other 2.x versions before 2.18, allows context-dependent attackers to execute arbitrary code via vectors related to the (1) gethostbyname or (2) gethostbyname2 function, aka \"GHOST.\"",
|
||||
"AffectedPackages": [
|
||||
{
|
||||
"Name": "eglibc",
|
||||
"OS": "debian:7",
|
||||
"AllVersions": false,
|
||||
"BeforeVersion": "2.13-38+deb7u7"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:8",
|
||||
"AllVersions": false,
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:9",
|
||||
"AllVersions": false,
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:unstable",
|
||||
"AllVersions": false,
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "eglibc",
|
||||
"OS": "debian:6",
|
||||
"AllVersions": true,
|
||||
"BeforeVersion": "",
|
||||
}
|
||||
  ]
|
||||
}
|
||||
```
|
||||
|
||||
The `AffectedPackages` array represents the list of affected packages and provides the first known versions in which the Vulnerability has been fixed - every previous version may be vulnerable. If `AllVersions` is equal to `true`, no fix exists, thus, all versions may be vulnerable.
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message":"the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Insert a new Vulnerability
|
||||
|
||||
It manually inserts a new Vulnerability.
|
||||
|
||||
POST /v1/vulnerabilities
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Vulnerability|
|
||||
|Link|String|Link to the Vulnerability tracker|
|
||||
|Priority|Priority|Priority of the Vulnerability|
|
||||
|AffectedPackages|Array of Package|Affected packages (Name, OS) and fixed version (or all versions)|
|
||||
|
||||
If no fix exists for a package, `AllVersions` should be set to `true`.
|
||||
|
||||
Valid Priorities are based on [Ubuntu CVE Tracker/README](http://bazaar.launchpad.net/~ubuntu-security/ubuntu-cve-tracker/master/view/head:/README)
|
||||
|
||||
* **Unknown** is either a security problem that has not been assigned to a priority yet or a priority that our system did not recognize
|
||||
* **Negligible** is technically a security problem, but is only theoretical in nature, requires a very special situation, has almost no install base, or does no real damage. These tend not to get backport from upstreams, and will likely not be included in security updates unless there is an easy fix and some other issue causes an update.
|
||||
* **Low** is a security problem, but is hard to exploit due to environment, requires a user-assisted attack, a small install base, or does very little damage. These tend to be included in security updates only when higher priority issues require an update, or if many low priority issues have built up.
|
||||
* **Medium** is a real security problem, and is exploitable for many people. Includes network daemon denial of service attacks, cross-site scripting, and gaining user privileges. Updates should be made soon for this priority of issue.
|
||||
* **High** is a real problem, exploitable for many people in a default installation. Includes serious remote denial of services, local root privilege escalations, or data loss.
|
||||
* **Critical** is a world-burning problem, exploitable for nearly all people in a default installation of Ubuntu. Includes remote root privilege escalations, or massive data loss.
|
||||
* **Defcon1** is a **Critical** problem which has been manually highlighted by the team. It requires an immediate attention.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s -H "Content-Type: application/json" -X POST -d \
|
||||
'{
|
||||
"ID": "CVE-2015-0235",
|
||||
"Link": "https:security-tracker.debian.org/tracker/CVE-2015-0235",
|
||||
"Priority": "High",
|
||||
"Description": "Heap-based buffer overflow in the __nss_hostname_digits_dots function in glibc 2.2, and other 2.x versions before 2.18, allows context-dependent attackers to execute arbitrary code via vectors related to the (1) gethostbyname or (2) gethostbyname2 function, aka \"GHOST.\"",
|
||||
"AffectedPackages": [
|
||||
{
|
||||
"Name": "eglibc",
|
||||
"OS": "debian:7",
|
||||
"BeforeVersion": "2.13-38+deb7u7"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:8",
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:9",
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:unstable",
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "eglibc",
|
||||
"OS": "debian:6",
|
||||
"AllVersions": true,
|
||||
"BeforeVersion": ""
|
||||
}
|
||||
]
|
||||
}' \
|
||||
127.0.0.1:6060/v1/vulnerabilities
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
HTTP/1.1 201 Created
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 400 Bad Request
|
||||
{
|
||||
"Message":"Could not insert a vulnerability which has an invalid priority"
|
||||
}
|
||||
```
|
||||
|
||||
It could also return a `415 Unsupported Media Type` response with a `Message` if the request content is not valid JSON.
|
||||
|
||||
## Update a Vulnerability
|
||||
|
||||
It updates an existing Vulnerability.
|
||||
|
||||
PUT /v1/vulnerabilities/{ID}
|
||||
|
||||
The Link, Priority and Description fields can be updated. FixedIn packages are added to the vulnerability. However, as a vulnerability can be fixed by only one package on a given branch (OS, Name): old FixedIn packages, which belong to the same branch as a new added one, will be removed.
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|Link|String|Link to the Vulnerability tracker|
|
||||
|Priority|Priority|Priority of the Vulnerability|
|
||||
|FixedIn|Array of Package|Affected packages (Name, OS) and fixed version (or all versions)|
|
||||
|
||||
If no fix exists for a package, `AllVersions` should be set to `true`.
|
||||
|
||||
### Example
|
||||
|
||||
curl -s -H "Content-Type: application/json" -X PUT -d '{"Priority": "Critical" }' 127.0.0.1:6060/v1/vulnerabilities/CVE-2015-0235
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 204 No content
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message":"the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
It could also return a `415 Unsupported Media Type` response with a `Message` if the request content is not valid JSON.
|
||||
|
||||
## Delete a Vulnerability
|
||||
|
||||
It deletes an existing Vulnerability.
|
||||
|
||||
    DELETE /v1/vulnerabilities/{ID}
|
||||
|
||||
Be aware that it does not prevent fetchers from re-creating it. Therefore it is only useful to remove manually inserted vulnerabilities.
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Vulnerability|
|
||||
|
||||
### Example
|
||||
|
||||
    curl -s -X DELETE 127.0.0.1:6060/v1/vulnerabilities/CVE-2015-0235
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 204 No content
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message":"the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get layers introducing a vulnerability
|
||||
|
||||
It gets all the layers (their IDs) that introduce the given vulnerability.
|
||||
|
||||
GET /v1/vulnerabilities/:id/introducing-layers
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Vulnerability|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s -X GET 127.0.0.1:6060/v1/vulnerabilities/CVE-2015-0235/introducing-layers
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200
|
||||
{
|
||||
"IntroducingLayers":[
|
||||
"fb9cc58bde0c0a8fe53e6fdd23898e45041783f2d7869d939d7364f5777fde6f"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message":"the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get layers affected by a vulnerability
|
||||
|
||||
It returns whether the specified Layers are vulnerable to the given Vulnerability or not.
|
||||
|
||||
POST /v1/vulnerabilities/{ID}/affected-layers
|
||||
|
||||
Counterintuitively, this request is actually a POST to be able to pass a lot of parameters.
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Vulnerability|
|
||||
|LayersIDs|Array of strings|Unique IDs of Layers|
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s -H "Content-Type: application/json" -X POST -d \
|
||||
'{
|
||||
"LayersIDs": [
|
||||
"a005304e4e74c1541988d3d1abb170e338c1d45daee7151f8e82f8460634d329",
|
||||
"f1b10cd842498c23d206ee0cbeaa9de8d2ae09ff3c7af2723a9e337a6965d639"
|
||||
]
|
||||
}' \
|
||||
127.0.0.1:6060/v1/vulnerabilities/CVE-2015-0235/affected-layers
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"f1b10cd842498c23d206ee0cbeaa9de8d2ae09ff3c7af2723a9e337a6965d639": {
|
||||
"Vulnerable": false
|
||||
},
|
||||
"fb9cc58bde0c0a8fe53e6fdd23898e45041783f2d7869d939d7364f5777fde6f": {
|
||||
"Vulnerable": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
Returned when the Layer or the Vulnerability does not exist.
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
Binary file not shown.
@ -1,70 +0,0 @@
|
||||
# Legend
|
||||
-> outbound edges
|
||||
<- inbound edges
|
||||
|
||||
# Layer
|
||||
|
||||
Key: "layer:" + Hash(id)
|
||||
|
||||
-> is = "layer"
|
||||
-> id
|
||||
-> parent (my ancestor is)
|
||||
|
||||
-> os
|
||||
-> adds*
|
||||
-> removes*
|
||||
-> engineVersion
|
||||
|
||||
<- parent* (is ancestor of)
|
||||
|
||||
# Package
|
||||
|
||||
Key: "package:" + Hash(os + ":" + name + ":" + version)
|
||||
|
||||
-> is = "package"
|
||||
-> os
|
||||
-> name
|
||||
-> version
|
||||
-> nextVersion
|
||||
|
||||
<- nextVersion
|
||||
<- adds*
|
||||
<- removes*
|
||||
<- fixed_in*
|
||||
|
||||
Packages are organized in linked lists : there is one linked list for one os/name couple. Each linked list has a tail and a head with special versions.
|
||||
|
||||
# Vulnerability
|
||||
|
||||
Key: "vulnerability:" + Hash(name)
|
||||
|
||||
-> is = "vulnerability"
|
||||
-> name
|
||||
-> priority
|
||||
-> link
|
||||
-> fixed_in*
|
||||
|
||||
# Notification
|
||||
|
||||
Key: "notification:" + random uuid
|
||||
|
||||
-> is = "notification"
|
||||
-> type
|
||||
-> data
|
||||
-> isSent
|
||||
|
||||
# Flag
|
||||
|
||||
Key: "flag:" + name
|
||||
|
||||
-> value
|
||||
|
||||
# Lock
|
||||
|
||||
Key: name
|
||||
|
||||
-> locked = "locked"
|
||||
-> locked_until (timestamp)
|
||||
-> locked_by
|
||||
|
||||
A lock can be used to lock a specific graph node by using the node Key as the lock name.
|
BIN
docs/Model.png
BIN
docs/Model.png
Binary file not shown.
Before Width: | Height: | Size: 88 KiB |
@ -1,129 +0,0 @@
|
||||
# Notifications
|
||||
|
||||
This tool can send notifications to external services when specific events happen, such as vulnerability updates.
|
||||
|
||||
For now, it only supports transmitting them to a webhook endpoint using HTTP POST requests, but it can be extended quite easily by registering a new Notifier kind.
|
||||
To enable the notification system, you simply have to specify the appropriate configuration. See the [example configuration](../config.example.yaml).
|
||||
|
||||
# Types of notifications
|
||||
|
||||
## A new vulnerability has been released
|
||||
|
||||
A notification of this kind is sent as soon as a new vulnerability is added in the system, via the updater or the API.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
{
|
||||
"Name":"CVE-2016-0001",
|
||||
"Type":"NewVulnerabilityNotification",
|
||||
"Content":{
|
||||
"Vulnerability":{
|
||||
"ID":"CVE-2016-0001",
|
||||
"Link":"https:security-tracker.debian.org/tracker/CVE-2016-0001",
|
||||
"Priority":"Medium",
|
||||
"Description":"A futurist vulnerability",
|
||||
"AffectedPackages":[
|
||||
{
|
||||
"OS":"centos:6",
|
||||
"Name":"bash",
|
||||
"AllVersions":true,
|
||||
"BeforeVersion":""
|
||||
}
|
||||
]
|
||||
},
|
||||
"IntroducingLayersIDs":[
|
||||
"fb9cc58bde0c0a8fe53e6fdd23898e45041783f2d7869d939d7364f5777fde6f"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The `IntroducingLayersIDs` array contains every layer that installs at least one affected package.
|
||||
|
||||
## A vulnerability's priority has increased
|
||||
|
||||
This notification is sent when a vulnerability's priority has increased.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
{
|
||||
"Name":"CVE-2016-0001",
|
||||
"Type":"VulnerabilityPriorityIncreasedNotification",
|
||||
"Content":{
|
||||
"Vulnerability":{
|
||||
"ID":"CVE-2016-0001",
|
||||
"Link":"https:security-tracker.debian.org/tracker/CVE-2016-0001",
|
||||
"Priority":"Critical",
|
||||
"Description":"A futurist vulnerability",
|
||||
"AffectedPackages":[
|
||||
{
|
||||
"OS":"centos:6",
|
||||
"Name":"bash",
|
||||
"AllVersions":true,
|
||||
"BeforeVersion":""
|
||||
}
|
||||
]
|
||||
},
|
||||
"OldPriority":"Medium",
|
||||
"NewPriority":"Critical",
|
||||
"IntroducingLayersIDs":[
|
||||
"fb9cc58bde0c0a8fe53e6fdd23898e45041783f2d7869d939d7364f5777fde6f"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The `IntroducingLayersIDs` array contains every layer that installs at least one affected package.
|
||||
|
||||
## A vulnerability's affected package list changed
|
||||
|
||||
This notification is sent when the affected packages of a vulnerability changes.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
{
|
||||
"Name":"CVE-2016-0001",
|
||||
"Type":"VulnerabilityPackageChangedNotification",
|
||||
"Content":{
|
||||
"Vulnerability":{
|
||||
"ID":"CVE-2016-0001",
|
||||
"Link":"https:security-tracker.debian.org/tracker/CVE-2016-0001",
|
||||
"Priority":"Critical",
|
||||
"Description":"A futurist vulnerability",
|
||||
"AffectedPackages":[
|
||||
{
|
||||
"OS":"centos:6",
|
||||
"Name":"bash",
|
||||
"AllVersions":false,
|
||||
"BeforeVersion":"4.0"
|
||||
}
|
||||
]
|
||||
},
|
||||
"AddedAffectedPackages":[
|
||||
{
|
||||
"OS":"centos:6",
|
||||
"Name":"bash",
|
||||
"AllVersions":false,
|
||||
"BeforeVersion":"4.0"
|
||||
}
|
||||
],
|
||||
"RemovedAffectedPackages":[
|
||||
{
|
||||
"OS":"centos:6",
|
||||
"Name":"bash",
|
||||
"AllVersions":true,
|
||||
"BeforeVersion":""
|
||||
}
|
||||
],
|
||||
"NewIntroducingLayersIDs": [],
|
||||
"FormerIntroducingLayerIDs":[
|
||||
"fb9cc58bde0c0a8fe53e6fdd23898e45041783f2d7869d939d7364f5777fde6f",
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The `NewIntroducingLayersIDs` array contains the layers that install at least one of the newly affected packages, and thus which are now vulnerable because of this change. On the other hand, the `FormerIntroducingLayerIDs` array contains the layers that are not introducing the vulnerability anymore.
|
27
docs/Run.md
27
docs/Run.md
@ -1,27 +0,0 @@
|
||||
# Running Clair
|
||||
|
||||
## Configuration
|
||||
|
||||
Clair makes uses of a configuration file in YAML.
|
||||
|
||||
Copy [`config.example.yaml`](../config.example.yaml) to your choice of location, and update the values as required.
|
||||
The example configuration file is commented and explains every available key.
|
||||
|
||||
## Docker
|
||||
|
||||
The easiest way to run Clair is with Docker.
|
||||
|
||||
```
|
||||
$ docker pull quay.io/coreos/clair:latest
|
||||
$ docker run -p 6060:6060 -p 6061:6061 -v <DIR_WITH_CONFIG>:/config:ro quay.io/coreos/clair:latest --config=/config/<CONFIG_FILENAME>.yaml
|
||||
```
|
||||
|
||||
## Initial update & API
|
||||
|
||||
Right after Clair starts, it will update its vulnerability database.
|
||||
The initial update can take quite a long time depending on the database backend in use.
|
||||
Clair will announce the update completion.
|
||||
|
||||
As soon as Clair has started, you can start querying the API to interact with it.
|
||||
Read the [API Documentation](API.md) to learn more.
|
||||
The [`contrib`](../contrib) folder contains some tools that may help you to get started.
|
@ -1,55 +0,0 @@
|
||||
# Security
|
||||
|
||||
# Enabling HTTPS on the API
|
||||
|
||||
HTTPS provides clients the ability to verify the server identity and provide transport security.
|
||||
|
||||
For this you need your CA certificate (ca.crt) and signed key pair (server.crt, server.key) ready.
|
||||
To enable it, provide the signed key pair files in the configuration under `api/keyfile` and `api/certfile` keys.
|
||||
|
||||
To test it, you want to use curl like this:
|
||||
|
||||
curl --cacert ca.crt -L https://127.0.0.1:6060/v1/versions
|
||||
|
||||
You should be able to see the handshake succeed. Because we use self-signed certificates with our own certificate authorities you need to provide the CA to curl using the --cacert option. Another possibility would be to add your CA certificate to the trusted certificates on your system (usually in /etc/ssl/certs).
|
||||
|
||||
**OSX 10.9+ Users**: curl 7.30.0 on OSX 10.9+ doesn't understand certificates passed in on the command line. Instead you must import the dummy ca.crt directly into the keychain or add the -k flag to curl to ignore errors. If you want to test without the -k flag run open ca.crt and follow the prompts. Please remove this certificate after you are done testing!
|
||||
|
||||
# Enabling Client Certificate Auth on the API
|
||||
|
||||
We can also use client certificates to prevent unauthorized access to the API.
|
||||
|
||||
The clients will provide their certificates to the server and the server will check whether the cert is signed by the supplied CA and decide whether to serve the request.
|
||||
|
||||
You need the same files mentioned in the HTTPS section, as well as a key pair for the client (client.crt, client.key) signed by the same certificate authority. To enable it, use the same configuration as above for HTTPS and the additional `api/cafile` key parameter with the CA certificate path.
|
||||
|
||||
The test command from the HTTPS section should be rejected, instead we need to provide the client key pair:
|
||||
|
||||
curl --cacert ca.crt --cert client.crt --key client.key -L https://127.0.0.1:6060/v1/versions
|
||||
|
||||
**OSX 10.10+ Users**: A bundle in P12 (PKCS#12) format must be used. To convert your key pair, the following command should be used, in which the password is mandatory. Then, `--cert client.p12` along with `--password pass` replace `--cert client.crt --key client.key`. You may also import the P12 certificate into your Keychain and specify its name as it appears in the Keychain instead of the path to the file.
|
||||
|
||||
openssl pkcs12 -export -in client.crt -inkey client1.key -out certs/client.p12 -password pass:pass
|
||||
|
||||
# Generating self-signed certificates
|
||||
[etcd-ca](https://github.com/coreos/etcd-ca) is a great tool when it comes to easily generate certificates. Below is an example to generate a new CA, server and client key pairs, inspired by their example.
|
||||
|
||||
```
|
||||
git clone https://github.com/coreos/etcd-ca
|
||||
cd etcd-ca
|
||||
./build
|
||||
|
||||
# Create CA
|
||||
./bin/etcd-ca init
|
||||
./bin/etcd-ca export | tar xvf -
|
||||
|
||||
# Create certificate for server
|
||||
./bin/etcd-ca new-cert --passphrase $passphrase --ip $server1ip --domain $server1hostname server1
|
||||
./bin/etcd-ca sign --passphrase $passphrase server1
|
||||
./bin/etcd-ca export --insecure --passphrase $passphrase server1 | tar xvf -
|
||||
|
||||
# Create certificate for client
|
||||
./bin/etcd-ca new-cert --passphrase $passphrase client1
|
||||
./bin/etcd-ca sign --passphrase $passphrase client1
|
||||
./bin/etcd-ca export --insecure --passphrase $passphrase client1 | tar xvf -
|
||||
```
|
1098
grafana.json
Normal file
1098
grafana.json
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,80 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package health defines a standard healthcheck response format and exposes
|
||||
// a function that summarizes registered healthchecks.
|
||||
package health
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Status defines a way to know the health status of a service.
type Status struct {
	// IsEssential determines if the service is essential to the app, which
	// can't run in case of a failure.
	IsEssential bool
	// IsHealthy defines whether the service is working or not.
	IsHealthy bool
	// Details gives information specific to the service.
	Details interface{}
}

// A Healthchecker function is a method returning the Status of the tested service.
type Healthchecker func() Status

var (
	// healthcheckersLock guards healthcheckers against concurrent
	// registration and iteration.
	healthcheckersLock sync.Mutex
	healthcheckers     = make(map[string]Healthchecker)
)

// RegisterHealthchecker registers a Healthchecker function which will be part
// of Healthchecks.
//
// It panics when the name is empty, when the checker is nil, or when a
// checker is already registered under the same name.
func RegisterHealthchecker(name string, f Healthchecker) {
	if name == "" {
		panic("Could not register a Healthchecker with an empty name")
	}
	if f == nil {
		panic("Could not register a nil Healthchecker")
	}

	healthcheckersLock.Lock()
	defer healthcheckersLock.Unlock()

	if _, alreadyExists := healthcheckers[name]; alreadyExists {
		panic(fmt.Sprintf("Healthchecker '%s' is already registered", name))
	}
	healthcheckers[name] = f
}

// Healthcheck calls every registered Healthchecker and summarizes their output.
//
// The returned boolean is true only when every essential service reports
// itself healthy; non-essential failures do not affect it. The map associates
// each service name with its health status and optional details.
func Healthcheck() (bool, map[string]interface{}) {
	// Snapshot the registry under the lock: RegisterHealthchecker mutates
	// the map concurrently and an unguarded read+write on a Go map is a data
	// race. Checkers are invoked outside the lock so a slow (or re-entrant)
	// checker cannot block registration.
	healthcheckersLock.Lock()
	checkers := make(map[string]Healthchecker, len(healthcheckers))
	for serviceName, serviceChecker := range healthcheckers {
		checkers[serviceName] = serviceChecker
	}
	healthcheckersLock.Unlock()

	globalHealth := true

	statuses := make(map[string]interface{})
	for serviceName, serviceChecker := range checkers {
		status := serviceChecker()

		// A failing non-essential service does not bring the global health down.
		globalHealth = globalHealth && (!status.IsEssential || status.IsHealthy)
		statuses[serviceName] = struct {
			IsHealthy bool
			Details   interface{} `json:",omitempty"`
		}{
			IsHealthy: status.IsHealthy,
			Details:   status.Details,
		}
	}

	return globalHealth, statuses
}
|
BIN
img/simple_diagram.png
Normal file
BIN
img/simple_diagram.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 12 KiB |
26
notifier/README.md
Normal file
26
notifier/README.md
Normal file
@ -0,0 +1,26 @@
|
||||
# Notifications
|
||||
|
||||
Notifications are a way for Clair to inform an endpoint that changes to tracked vulnerabilities have occurred.
|
||||
Notifications should contain only the name of a notification.
|
||||
Because notification data can require pagination, it is expected that the receiving endpoint calls the Clair API for reading notifications and marking them as read after being notified.
|
||||
|
||||
## Webhook
|
||||
|
||||
Webhook is an out-of-the-box notifier that sends the following JSON object via an HTTP POST:
|
||||
|
||||
```json
|
||||
{
|
||||
"Notification": {
|
||||
"Name": "6e4ad270-4957-4242-b5ad-dad851379573"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Custom Notifiers
|
||||
|
||||
Clair can also be compiled with custom notifiers by importing them in `main.go`.
|
||||
Custom notifiers are any Go package that implements the `Notifier` interface and registers themselves with the `notifier` package.
|
||||
Notifiers are registered in [init()] similar to drivers for Go's standard [database/sql] package.
|
||||
|
||||
[init()]: https://golang.org/doc/effective_go.html#init
|
||||
[database/sql]: https://godoc.org/database/sql
|
@ -12,8 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package notifier fetches notifications from the database and sends them
|
||||
// to the specified remote handler.
|
||||
// Package notifier fetches notifications from the database and informs the specified remote handler
|
||||
// about their existences, inviting the third party to actively query the API about it.
|
||||
package notifier
|
||||
|
||||
import (
|
||||
@ -22,15 +22,14 @@ import (
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/coreos/pkg/timeutil"
|
||||
"github.com/pborman/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/health"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "notifier")
|
||||
|
||||
const (
|
||||
checkInterval = 5 * time.Minute
|
||||
refreshLockDuration = time.Minute * 2
|
||||
@ -38,23 +37,34 @@ const (
|
||||
maxBackOff = 15 * time.Minute
|
||||
)
|
||||
|
||||
// TODO(Quentin-M): Allow registering custom notification handlers.
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "notifier")
|
||||
|
||||
// A Notification represents the structure of the notifications that are sent by a Notifier.
|
||||
type Notification struct {
|
||||
Name, Type string
|
||||
Content interface{}
|
||||
}
|
||||
notifiers = make(map[string]Notifier)
|
||||
|
||||
var notifiers = make(map[string]Notifier)
|
||||
promNotifierLatencyMilliseconds = prometheus.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "clair_notifier_latency_milliseconds",
|
||||
Help: "Time it takes to send a notification after it's been created.",
|
||||
})
|
||||
|
||||
promNotifierBackendErrorsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "clair_notifier_backend_errors_total",
|
||||
Help: "Number of errors that notifier backends generated.",
|
||||
}, []string{"backend"})
|
||||
)
|
||||
|
||||
// Notifier represents anything that can transmit notifications.
|
||||
type Notifier interface {
|
||||
// Configure attempts to initialize the notifier with the provided configuration.
|
||||
// It returns whether the notifier is enabled or not.
|
||||
Configure(*config.NotifierConfig) (bool, error)
|
||||
// Send transmits the specified notification.
|
||||
Send(notification *Notification) error
|
||||
// Send informs the existence of the specified notification.
|
||||
Send(notification database.VulnerabilityNotification) error
|
||||
}
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(promNotifierLatencyMilliseconds)
|
||||
prometheus.MustRegister(promNotifierBackendErrorsTotal)
|
||||
}
|
||||
|
||||
// RegisterNotifier makes a Fetcher available by the provided name.
|
||||
@ -77,7 +87,7 @@ func RegisterNotifier(name string, n Notifier) {
|
||||
}
|
||||
|
||||
// Run starts the Notifier service.
|
||||
func Run(config *config.NotifierConfig, stopper *utils.Stopper) {
|
||||
func Run(config *config.NotifierConfig, datastore database.Datastore, stopper *utils.Stopper) {
|
||||
defer stopper.End()
|
||||
|
||||
// Configure registered notifiers.
|
||||
@ -101,14 +111,10 @@ func Run(config *config.NotifierConfig, stopper *utils.Stopper) {
|
||||
whoAmI := uuid.New()
|
||||
log.Infof("notifier service started. lock identifier: %s\n", whoAmI)
|
||||
|
||||
// Register healthchecker.
|
||||
health.RegisterHealthchecker("notifier", Healthcheck)
|
||||
|
||||
for running := true; running; {
|
||||
// Find task.
|
||||
// TODO(Quentin-M): Combine node and notification.
|
||||
node, notification := findTask(whoAmI, stopper)
|
||||
if node == "" && notification == nil {
|
||||
notification := findTask(datastore, config.RenotifyInterval, whoAmI, stopper)
|
||||
if notification == nil {
|
||||
// Interrupted while finding a task, Clair is stopping.
|
||||
break
|
||||
}
|
||||
@ -116,14 +122,15 @@ func Run(config *config.NotifierConfig, stopper *utils.Stopper) {
|
||||
// Handle task.
|
||||
done := make(chan bool, 1)
|
||||
go func() {
|
||||
success, interrupted := handleTask(notification, stopper, config.Attempts)
|
||||
success, interrupted := handleTask(*notification, stopper, config.Attempts)
|
||||
if success {
|
||||
database.MarkNotificationAsSent(node)
|
||||
utils.PrometheusObserveTimeMilliseconds(promNotifierLatencyMilliseconds, notification.Created)
|
||||
datastore.SetNotificationNotified(notification.Name)
|
||||
}
|
||||
if interrupted {
|
||||
running = false
|
||||
}
|
||||
database.Unlock(node, whoAmI)
|
||||
datastore.Unlock(notification.Name, whoAmI)
|
||||
done <- true
|
||||
}()
|
||||
|
||||
@ -134,7 +141,7 @@ func Run(config *config.NotifierConfig, stopper *utils.Stopper) {
|
||||
case <-done:
|
||||
break outer
|
||||
case <-time.After(refreshLockDuration):
|
||||
database.Lock(node, lockDuration, whoAmI)
|
||||
datastore.Lock(notification.Name, whoAmI, lockDuration, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -142,46 +149,33 @@ func Run(config *config.NotifierConfig, stopper *utils.Stopper) {
|
||||
log.Info("notifier service stopped")
|
||||
}
|
||||
|
||||
func findTask(whoAmI string, stopper *utils.Stopper) (string, database.Notification) {
|
||||
func findTask(datastore database.Datastore, renotifyInterval time.Duration, whoAmI string, stopper *utils.Stopper) *database.VulnerabilityNotification {
|
||||
for {
|
||||
// Find a notification to send.
|
||||
node, notification, err := database.FindOneNotificationToSend(database.GetDefaultNotificationWrapper())
|
||||
notification, err := datastore.GetAvailableNotification(renotifyInterval)
|
||||
if err != nil {
|
||||
// There is no notification or an error occured.
|
||||
if err != cerrors.ErrNotFound {
|
||||
log.Warningf("could not get notification to send: %s", err)
|
||||
}
|
||||
|
||||
// No notification or error: wait.
|
||||
if notification == nil || err != nil {
|
||||
// Wait.
|
||||
if !stopper.Sleep(checkInterval) {
|
||||
return "", nil
|
||||
return nil
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Lock the notification.
|
||||
if hasLock, _ := database.Lock(node, lockDuration, whoAmI); hasLock {
|
||||
log.Infof("found and locked a notification: %s", notification.GetName())
|
||||
return node, notification
|
||||
if hasLock, _ := datastore.Lock(notification.Name, whoAmI, lockDuration, false); hasLock {
|
||||
log.Infof("found and locked a notification: %s", notification.Name)
|
||||
return ¬ification
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func handleTask(notification database.Notification, st *utils.Stopper, maxAttempts int) (bool, bool) {
|
||||
// Get notification content.
|
||||
// TODO(Quentin-M): Split big notifications.
|
||||
notificationContent, err := notification.GetContent()
|
||||
if err != nil {
|
||||
log.Warningf("could not get content of notification '%s': %s", notification.GetName(), err)
|
||||
return false, false
|
||||
}
|
||||
|
||||
// Create notification.
|
||||
payload := &Notification{
|
||||
Name: notification.GetName(),
|
||||
Type: notification.GetType(),
|
||||
Content: notificationContent,
|
||||
}
|
||||
|
||||
func handleTask(notification database.VulnerabilityNotification, st *utils.Stopper, maxAttempts int) (bool, bool) {
|
||||
// Send notification.
|
||||
for notifierName, notifier := range notifiers {
|
||||
var attempts int
|
||||
@ -189,37 +183,33 @@ func handleTask(notification database.Notification, st *utils.Stopper, maxAttemp
|
||||
for {
|
||||
// Max attempts exceeded.
|
||||
if attempts >= maxAttempts {
|
||||
log.Infof("giving up on sending notification '%s' to notifier '%s': max attempts exceeded (%d)\n", notification.GetName(), notifierName, maxAttempts)
|
||||
log.Infof("giving up on sending notification '%s' via notifier '%s': max attempts exceeded (%d)\n", notification.Name, notifierName, maxAttempts)
|
||||
return false, false
|
||||
}
|
||||
|
||||
// Backoff.
|
||||
if backOff > 0 {
|
||||
log.Infof("waiting %v before retrying to send notification '%s' to notifier '%s' (Attempt %d / %d)\n", backOff, notification.GetName(), notifierName, attempts+1, maxAttempts)
|
||||
log.Infof("waiting %v before retrying to send notification '%s' via notifier '%s' (Attempt %d / %d)\n", backOff, notification.Name, notifierName, attempts+1, maxAttempts)
|
||||
if !st.Sleep(backOff) {
|
||||
return false, true
|
||||
}
|
||||
}
|
||||
|
||||
// Send using the current notifier.
|
||||
if err := notifier.Send(payload); err == nil {
|
||||
// Send has been successful. Go to the next one.
|
||||
break
|
||||
}
|
||||
|
||||
if err := notifier.Send(notification); err != nil {
|
||||
// Send failed; increase attempts/backoff and retry.
|
||||
log.Errorf("could not send notification '%s' to notifier '%s': %s", notification.GetName(), notifierName, err)
|
||||
promNotifierBackendErrorsTotal.WithLabelValues(notifierName).Inc()
|
||||
log.Errorf("could not send notification '%s' via notifier '%s': %v", notification.Name, notifierName, err)
|
||||
backOff = timeutil.ExpBackoff(backOff, maxBackOff)
|
||||
attempts++
|
||||
continue
|
||||
}
|
||||
|
||||
// Send has been successful. Go to the next notifier.
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("successfully sent notification '%s'\n", notification.GetName())
|
||||
log.Infof("successfully sent notification '%s'\n", notification.Name)
|
||||
return true, false
|
||||
}
|
||||
|
||||
// Healthcheck returns the health of the notifier service.
|
||||
func Healthcheck() health.Status {
|
||||
queueSize, err := database.CountNotificationsToSend()
|
||||
return health.Status{IsEssential: false, IsHealthy: err == nil, Details: struct{ QueueSize int }{QueueSize: queueSize}}
|
||||
}
|
||||
|
@ -25,13 +25,17 @@ import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/notifier"
|
||||
)
|
||||
|
||||
const timeout = 5 * time.Second
|
||||
|
||||
// A WebhookNotifier dispatches notifications to a webhook endpoint.
|
||||
type WebhookNotifier struct {
|
||||
endpoint string
|
||||
@ -88,13 +92,20 @@ func (h *WebhookNotifier) Configure(config *config.NotifierConfig) (bool, error)
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
Timeout: timeout,
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (h *WebhookNotifier) Send(notification *notifier.Notification) error {
|
||||
type notificationEnvelope struct {
|
||||
Notification struct {
|
||||
Name string
|
||||
}
|
||||
}
|
||||
|
||||
func (h *WebhookNotifier) Send(notification database.VulnerabilityNotification) error {
|
||||
// Marshal notification.
|
||||
jsonNotification, err := json.Marshal(notification)
|
||||
jsonNotification, err := json.Marshal(notificationEnvelope{struct{ Name string }{notification.Name}})
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not marshal: %s", err)
|
||||
}
|
||||
@ -103,7 +114,7 @@ func (h *WebhookNotifier) Send(notification *notifier.Notification) error {
|
||||
resp, err := h.client.Post(h.endpoint, "application/json", bytes.NewBuffer(jsonNotification))
|
||||
if err != nil || resp == nil || (resp.StatusCode != 200 && resp.StatusCode != 201) {
|
||||
if resp != nil {
|
||||
return fmt.Errorf("(%d) %s", resp.StatusCode, err)
|
||||
return fmt.Errorf("got status %d, expected 200/201", resp.StatusCode)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -20,7 +20,12 @@ var fetchers = make(map[string]Fetcher)
|
||||
|
||||
// Fetcher represents anything that can fetch vulnerabilities.
|
||||
type Fetcher interface {
|
||||
FetchUpdate() (FetcherResponse, error)
|
||||
// FetchUpdate gets vulnerability updates.
|
||||
FetchUpdate(database.Datastore) (FetcherResponse, error)
|
||||
|
||||
// Clean deletes any allocated resources.
|
||||
// It is invoked when Clair stops.
|
||||
Clean()
|
||||
}
|
||||
|
||||
// FetcherResponse represents the sum of results of an update.
|
||||
@ -28,8 +33,7 @@ type FetcherResponse struct {
|
||||
FlagName string
|
||||
FlagValue string
|
||||
Notes []string
|
||||
Vulnerabilities []*database.Vulnerability
|
||||
Packages []*database.Package
|
||||
Vulnerabilities []database.Vulnerability
|
||||
}
|
||||
|
||||
// RegisterFetcher makes a Fetcher available by the provided name.
|
||||
|
@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
package debian
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
@ -27,14 +27,17 @@ import (
|
||||
"github.com/coreos/clair/updater"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
const (
|
||||
url = "https://security-tracker.debian.org/tracker/data/json"
|
||||
cveURLPrefix = "https://security-tracker.debian.org/tracker"
|
||||
debianUpdaterFlag = "debianUpdater"
|
||||
updaterFlag = "debianUpdater"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater/fetchers/debian")
|
||||
|
||||
type jsonData map[string]map[string]jsonVuln
|
||||
|
||||
type jsonVuln struct {
|
||||
@ -57,7 +60,7 @@ func init() {
|
||||
}
|
||||
|
||||
// FetchUpdate fetches vulnerability updates from the Debian Security Tracker.
|
||||
func (fetcher *DebianFetcher) FetchUpdate() (resp updater.FetcherResponse, err error) {
|
||||
func (fetcher *DebianFetcher) FetchUpdate(datastore database.Datastore) (resp updater.FetcherResponse, err error) {
|
||||
log.Info("fetching Debian vulnerabilities")
|
||||
|
||||
// Download JSON.
|
||||
@ -68,7 +71,7 @@ func (fetcher *DebianFetcher) FetchUpdate() (resp updater.FetcherResponse, err e
|
||||
}
|
||||
|
||||
// Get the SHA-1 of the latest update's JSON data
|
||||
latestHash, err := database.GetFlagValue(debianUpdaterFlag)
|
||||
latestHash, err := datastore.GetKeyValue(updaterFlag)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
@ -88,7 +91,7 @@ func buildResponse(jsonReader io.Reader, latestKnownHash string) (resp updater.F
|
||||
// Defer the addition of flag information to the response.
|
||||
defer func() {
|
||||
if err == nil {
|
||||
resp.FlagName = debianUpdaterFlag
|
||||
resp.FlagName = updaterFlag
|
||||
resp.FlagValue = hash
|
||||
}
|
||||
}()
|
||||
@ -103,7 +106,7 @@ func buildResponse(jsonReader io.Reader, latestKnownHash string) (resp updater.F
|
||||
err = json.NewDecoder(teedJSONReader).Decode(&data)
|
||||
if err != nil {
|
||||
log.Errorf("could not unmarshal Debian's JSON: %s", err)
|
||||
return resp, ErrCouldNotParse
|
||||
return resp, cerrors.ErrCouldNotParse
|
||||
}
|
||||
|
||||
// Calculate the hash and skip updating if the hash has been seen before.
|
||||
@ -115,7 +118,7 @@ func buildResponse(jsonReader io.Reader, latestKnownHash string) (resp updater.F
|
||||
|
||||
// Extract vulnerability data from Debian's JSON schema.
|
||||
var unknownReleases map[string]struct{}
|
||||
resp.Vulnerabilities, resp.Packages, unknownReleases = parseDebianJSON(&data)
|
||||
resp.Vulnerabilities, unknownReleases = parseDebianJSON(&data)
|
||||
|
||||
// Log unknown releases
|
||||
for k := range unknownReleases {
|
||||
@ -127,7 +130,7 @@ func buildResponse(jsonReader io.Reader, latestKnownHash string) (resp updater.F
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func parseDebianJSON(data *jsonData) (vulnerabilities []*database.Vulnerability, packages []*database.Package, unknownReleases map[string]struct{}) {
|
||||
func parseDebianJSON(data *jsonData) (vulnerabilities []database.Vulnerability, unknownReleases map[string]struct{}) {
|
||||
mvulnerabilities := make(map[string]*database.Vulnerability)
|
||||
unknownReleases = make(map[string]struct{})
|
||||
|
||||
@ -140,8 +143,8 @@ func parseDebianJSON(data *jsonData) (vulnerabilities []*database.Vulnerability,
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip if the release is not affected.
|
||||
if releaseNode.FixedVersion == "0" || releaseNode.Status == "undetermined" {
|
||||
// Skip if the status is not determined or the vulnerability is a temporary one.
|
||||
if !strings.HasPrefix(vulnName, "CVE-") || releaseNode.Status == "undetermined" {
|
||||
continue
|
||||
}
|
||||
|
||||
@ -149,9 +152,9 @@ func parseDebianJSON(data *jsonData) (vulnerabilities []*database.Vulnerability,
|
||||
vulnerability, vulnerabilityAlreadyExists := mvulnerabilities[vulnName]
|
||||
if !vulnerabilityAlreadyExists {
|
||||
vulnerability = &database.Vulnerability{
|
||||
ID: vulnName,
|
||||
Name: vulnName,
|
||||
Link: strings.Join([]string{cveURLPrefix, "/", vulnName}, ""),
|
||||
Priority: types.Unknown,
|
||||
Severity: types.Unknown,
|
||||
Description: vulnNode.Description,
|
||||
}
|
||||
}
|
||||
@ -159,15 +162,18 @@ func parseDebianJSON(data *jsonData) (vulnerabilities []*database.Vulnerability,
|
||||
// Set the priority of the vulnerability.
|
||||
// In the JSON, a vulnerability has one urgency per package it affects.
|
||||
// The highest urgency should be the one set.
|
||||
urgency := urgencyToPriority(releaseNode.Urgency)
|
||||
if urgency.Compare(vulnerability.Priority) > 0 {
|
||||
vulnerability.Priority = urgency
|
||||
urgency := urgencyToSeverity(releaseNode.Urgency)
|
||||
if urgency.Compare(vulnerability.Severity) > 0 {
|
||||
vulnerability.Severity = urgency
|
||||
}
|
||||
|
||||
// Determine the version of the package the vulnerability affects.
|
||||
var version types.Version
|
||||
var err error
|
||||
if releaseNode.Status == "open" {
|
||||
if releaseNode.FixedVersion == "0" {
|
||||
// This means that the package is not affected by this vulnerability.
|
||||
version = types.MinVersion
|
||||
} else if releaseNode.Status == "open" {
|
||||
// Open means that the package is currently vulnerable in the latest
|
||||
// version of this Debian release.
|
||||
version = types.MaxVersion
|
||||
@ -181,14 +187,17 @@ func parseDebianJSON(data *jsonData) (vulnerabilities []*database.Vulnerability,
|
||||
}
|
||||
}
|
||||
|
||||
// Create and add the package.
|
||||
pkg := &database.Package{
|
||||
OS: "debian:" + database.DebianReleasesMapping[releaseName],
|
||||
// Create and add the feature version.
|
||||
pkg := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Name: pkgName,
|
||||
Namespace: database.Namespace{
|
||||
Name: "debian:" + database.DebianReleasesMapping[releaseName],
|
||||
},
|
||||
},
|
||||
Version: version,
|
||||
}
|
||||
vulnerability.FixedInNodes = append(vulnerability.FixedInNodes, pkg.GetNode())
|
||||
packages = append(packages, pkg)
|
||||
vulnerability.FixedIn = append(vulnerability.FixedIn, pkg)
|
||||
|
||||
// Store the vulnerability.
|
||||
mvulnerabilities[vulnName] = vulnerability
|
||||
@ -198,13 +207,13 @@ func parseDebianJSON(data *jsonData) (vulnerabilities []*database.Vulnerability,
|
||||
|
||||
// Convert the vulnerabilities map to a slice
|
||||
for _, v := range mvulnerabilities {
|
||||
vulnerabilities = append(vulnerabilities, v)
|
||||
vulnerabilities = append(vulnerabilities, *v)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func urgencyToPriority(urgency string) types.Priority {
|
||||
func urgencyToSeverity(urgency string) types.Priority {
|
||||
switch urgency {
|
||||
case "not yet assigned":
|
||||
return types.Unknown
|
||||
@ -240,3 +249,6 @@ func urgencyToPriority(urgency string) types.Priority {
|
||||
return types.Unknown
|
||||
}
|
||||
}
|
||||
|
||||
// Clean deletes any allocated resources.
|
||||
func (fetcher *DebianFetcher) Clean() {}
|
117
updater/fetchers/debian/debian_test.go
Normal file
117
updater/fetchers/debian/debian_test.go
Normal file
@ -0,0 +1,117 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package debian
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDebianParser(t *testing.T) {
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
|
||||
// Test parsing testdata/fetcher_debian_test.json
|
||||
testFile, _ := os.Open(path.Join(path.Dir(filename)) + "/testdata/fetcher_debian_test.json")
|
||||
response, err := buildResponse(testFile, "")
|
||||
if assert.Nil(t, err) && assert.Len(t, response.Vulnerabilities, 3) {
|
||||
for _, vulnerability := range response.Vulnerabilities {
|
||||
if vulnerability.Name == "CVE-2015-1323" {
|
||||
assert.Equal(t, "https://security-tracker.debian.org/tracker/CVE-2015-1323", vulnerability.Link)
|
||||
assert.Equal(t, types.Low, vulnerability.Severity)
|
||||
assert.Equal(t, "This vulnerability is not very dangerous.", vulnerability.Description)
|
||||
|
||||
expectedFeatureVersions := []database.FeatureVersion{
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "debian:8"},
|
||||
Name: "aptdaemon",
|
||||
},
|
||||
Version: types.MaxVersion,
|
||||
},
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "debian:unstable"},
|
||||
|
||||
Name: "aptdaemon",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("1.1.1+bzr982-1"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectedFeatureVersion := range expectedFeatureVersions {
|
||||
assert.Contains(t, vulnerability.FixedIn, expectedFeatureVersion)
|
||||
}
|
||||
} else if vulnerability.Name == "CVE-2003-0779" {
|
||||
assert.Equal(t, "https://security-tracker.debian.org/tracker/CVE-2003-0779", vulnerability.Link)
|
||||
assert.Equal(t, types.High, vulnerability.Severity)
|
||||
assert.Equal(t, "But this one is very dangerous.", vulnerability.Description)
|
||||
|
||||
expectedFeatureVersions := []database.FeatureVersion{
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "debian:8"},
|
||||
Name: "aptdaemon",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.7.0"),
|
||||
},
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "debian:unstable"},
|
||||
Name: "aptdaemon",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.7.0"),
|
||||
},
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "debian:8"},
|
||||
Name: "asterisk",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.5.56"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectedFeatureVersion := range expectedFeatureVersions {
|
||||
assert.Contains(t, vulnerability.FixedIn, expectedFeatureVersion)
|
||||
}
|
||||
} else if vulnerability.Name == "CVE-2013-2685" {
|
||||
assert.Equal(t, "https://security-tracker.debian.org/tracker/CVE-2013-2685", vulnerability.Link)
|
||||
assert.Equal(t, types.Negligible, vulnerability.Severity)
|
||||
assert.Equal(t, "Un-affected packages.", vulnerability.Description)
|
||||
|
||||
expectedFeatureVersions := []database.FeatureVersion{
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "debian:8"},
|
||||
Name: "asterisk",
|
||||
},
|
||||
Version: types.MinVersion,
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectedFeatureVersion := range expectedFeatureVersions {
|
||||
assert.Contains(t, vulnerability.FixedIn, expectedFeatureVersion)
|
||||
}
|
||||
} else {
|
||||
assert.Fail(t, "Wrong vulnerability name: ", vulnerability.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -54,7 +54,8 @@
|
||||
},
|
||||
"asterisk": {
|
||||
"CVE-2013-2685": {
|
||||
"description": "Un-affected packages",
|
||||
"_comment": "This CVE has a non-affected (anymore?) package.",
|
||||
"description": "Un-affected packages.",
|
||||
"releases": {
|
||||
"jessie": {
|
||||
"fixed_version": "0",
|
||||
@ -63,21 +64,6 @@
|
||||
},
|
||||
"status": "resolved",
|
||||
"urgency": "unimportant"
|
||||
},
|
||||
"wheezy": {
|
||||
"repositories": {
|
||||
"sid": "1:13.1.0~dfsg-1.1"
|
||||
},
|
||||
"status": "undetermined",
|
||||
"urgency": "unimportant"
|
||||
},
|
||||
"sid": {
|
||||
"fixed_version": "0",
|
||||
"repositories": {
|
||||
"sid": "1:13.1.0~dfsg-1.1"
|
||||
},
|
||||
"status": "resolved",
|
||||
"urgency": "unimportant"
|
||||
}
|
||||
}
|
||||
},
|
@ -1,90 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDebianParser(t *testing.T) {
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
|
||||
// Test parsing testdata/fetcher_debian_test.json
|
||||
testFile, _ := os.Open(path.Join(path.Dir(filename)) + "/testdata/fetcher_debian_test.json")
|
||||
response, err := buildResponse(testFile, "")
|
||||
if assert.Nil(t, err) && assert.Len(t, response.Vulnerabilities, 2) {
|
||||
for _, vulnerability := range response.Vulnerabilities {
|
||||
if vulnerability.ID == "CVE-2015-1323" {
|
||||
assert.Equal(t, "https://security-tracker.debian.org/tracker/CVE-2015-1323", vulnerability.Link)
|
||||
assert.Equal(t, types.Low, vulnerability.Priority)
|
||||
assert.Equal(t, "This vulnerability is not very dangerous.", vulnerability.Description)
|
||||
|
||||
expectedPackages := []*database.Package{
|
||||
&database.Package{
|
||||
OS: "debian:8",
|
||||
Name: "aptdaemon",
|
||||
Version: types.MaxVersion,
|
||||
},
|
||||
&database.Package{
|
||||
OS: "debian:unstable",
|
||||
Name: "aptdaemon",
|
||||
Version: types.NewVersionUnsafe("1.1.1+bzr982-1"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectedPackage := range expectedPackages {
|
||||
assert.Contains(t, response.Packages, expectedPackage)
|
||||
assert.Contains(t, vulnerability.FixedInNodes, expectedPackage.GetNode())
|
||||
}
|
||||
} else if vulnerability.ID == "CVE-2003-0779" {
|
||||
assert.Equal(t, "https://security-tracker.debian.org/tracker/CVE-2003-0779", vulnerability.Link)
|
||||
assert.Equal(t, types.High, vulnerability.Priority)
|
||||
assert.Equal(t, "But this one is very dangerous.", vulnerability.Description)
|
||||
|
||||
expectedPackages := []*database.Package{
|
||||
&database.Package{
|
||||
OS: "debian:8",
|
||||
Name: "aptdaemon",
|
||||
Version: types.NewVersionUnsafe("0.7.0"),
|
||||
},
|
||||
&database.Package{
|
||||
OS: "debian:unstable",
|
||||
Name: "aptdaemon",
|
||||
Version: types.NewVersionUnsafe("0.7.0"),
|
||||
},
|
||||
&database.Package{
|
||||
OS: "debian:8",
|
||||
Name: "asterisk",
|
||||
Version: types.NewVersionUnsafe("0.5.56"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectedPackage := range expectedPackages {
|
||||
assert.Contains(t, response.Packages, expectedPackage)
|
||||
assert.Contains(t, vulnerability.FixedInNodes, expectedPackage.GetNode())
|
||||
}
|
||||
} else {
|
||||
assert.Fail(t, "Wrong vulnerability name: ", vulnerability.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,32 +0,0 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package fetchers implements vulnerability fetchers for several sources.
|
||||
package fetchers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater/fetchers")
|
||||
|
||||
// ErrCouldNotParse is returned when a fetcher fails to parse the update data.
|
||||
ErrCouldNotParse = errors.New("updater/fetchers: could not parse")
|
||||
|
||||
// ErrFilesystem is returned when a fetcher fails to interact with the local filesystem.
|
||||
ErrFilesystem = errors.New("updater/fetchers: something went wrong when interacting with the fs")
|
||||
)
|
@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
package rhel
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@ -27,6 +27,7 @@ import (
|
||||
"github.com/coreos/clair/updater"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -36,7 +37,7 @@ const (
|
||||
|
||||
ovalURI = "https://www.redhat.com/security/data/oval/"
|
||||
rhsaFilePrefix = "com.redhat.rhsa-"
|
||||
rhelUpdaterFlag = "rhelUpdater"
|
||||
updaterFlag = "rhelUpdater"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -48,6 +49,8 @@ var (
|
||||
}
|
||||
|
||||
rhsaRegexp = regexp.MustCompile(`com.redhat.rhsa-(\d+).xml`)
|
||||
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater/fetchers/rhel")
|
||||
)
|
||||
|
||||
type oval struct {
|
||||
@ -85,11 +88,11 @@ func init() {
|
||||
}
|
||||
|
||||
// FetchUpdate gets vulnerability updates from the Red Hat OVAL definitions.
|
||||
func (f *RHELFetcher) FetchUpdate() (resp updater.FetcherResponse, err error) {
|
||||
func (f *RHELFetcher) FetchUpdate(datastore database.Datastore) (resp updater.FetcherResponse, err error) {
|
||||
log.Info("fetching Red Hat vulnerabilities")
|
||||
|
||||
// Get the first RHSA we have to manage.
|
||||
flagValue, err := database.GetFlagValue(rhelUpdaterFlag)
|
||||
flagValue, err := datastore.GetKeyValue(updaterFlag)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
@ -128,19 +131,20 @@ func (f *RHELFetcher) FetchUpdate() (resp updater.FetcherResponse, err error) {
|
||||
}
|
||||
|
||||
// Parse the XML.
|
||||
vs, pkgs, err := parseRHSA(r.Body)
|
||||
vs, err := parseRHSA(r.Body)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Collect vulnerabilities.
|
||||
resp.Vulnerabilities = append(resp.Vulnerabilities, vs...)
|
||||
resp.Packages = append(resp.Packages, pkgs...)
|
||||
for _, v := range vs {
|
||||
resp.Vulnerabilities = append(resp.Vulnerabilities, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the flag if we found anything.
|
||||
if len(rhsaList) > 0 {
|
||||
resp.FlagName = rhelUpdaterFlag
|
||||
resp.FlagName = updaterFlag
|
||||
resp.FlagValue = strconv.Itoa(rhsaList[len(rhsaList)-1])
|
||||
} else {
|
||||
log.Debug("no Red Hat update.")
|
||||
@ -149,32 +153,31 @@ func (f *RHELFetcher) FetchUpdate() (resp updater.FetcherResponse, err error) {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func parseRHSA(ovalReader io.Reader) (vulnerabilities []*database.Vulnerability, packages []*database.Package, err error) {
|
||||
func parseRHSA(ovalReader io.Reader) (vulnerabilities []database.Vulnerability, err error) {
|
||||
// Decode the XML.
|
||||
var ov oval
|
||||
err = xml.NewDecoder(ovalReader).Decode(&ov)
|
||||
if err != nil {
|
||||
log.Errorf("could not decode RHEL's XML: %s.", err)
|
||||
err = ErrCouldNotParse
|
||||
log.Errorf("could not decode RHEL's XML: %s", err)
|
||||
err = cerrors.ErrCouldNotParse
|
||||
return
|
||||
}
|
||||
|
||||
// Iterate over the definitions and collect any vulnerabilities that affect
|
||||
// at least one package.
|
||||
for _, definition := range ov.Definitions {
|
||||
pkgs := toPackages(definition.Criteria)
|
||||
pkgs := toFeatureVersions(definition.Criteria)
|
||||
if len(pkgs) > 0 {
|
||||
vulnerability := &database.Vulnerability{
|
||||
ID: name(definition),
|
||||
vulnerability := database.Vulnerability{
|
||||
Name: name(definition),
|
||||
Link: link(definition),
|
||||
Priority: priority(definition),
|
||||
Severity: priority(definition),
|
||||
Description: description(definition),
|
||||
}
|
||||
for _, p := range pkgs {
|
||||
vulnerability.FixedInNodes = append(vulnerability.FixedInNodes, p.GetNode())
|
||||
vulnerability.FixedIn = append(vulnerability.FixedIn, p)
|
||||
}
|
||||
vulnerabilities = append(vulnerabilities, vulnerability)
|
||||
packages = append(packages, pkgs...)
|
||||
}
|
||||
}
|
||||
|
||||
@ -256,15 +259,15 @@ func getPossibilities(node criteria) [][]criterion {
|
||||
return possibilities
|
||||
}
|
||||
|
||||
func toPackages(criteria criteria) []*database.Package {
|
||||
func toFeatureVersions(criteria criteria) []database.FeatureVersion {
|
||||
// There are duplicates in Red Hat .xml files.
|
||||
// This map is for deduplication.
|
||||
packagesParameters := make(map[string]*database.Package)
|
||||
featureVersionParameters := make(map[string]database.FeatureVersion)
|
||||
|
||||
possibilities := getPossibilities(criteria)
|
||||
for _, criterions := range possibilities {
|
||||
var (
|
||||
pkg database.Package
|
||||
featureVersion database.FeatureVersion
|
||||
osVersion int
|
||||
err error
|
||||
)
|
||||
@ -279,8 +282,8 @@ func toPackages(criteria criteria) []*database.Package {
|
||||
}
|
||||
} else if strings.Contains(c.Comment, " is earlier than ") {
|
||||
const prefixLen = len(" is earlier than ")
|
||||
pkg.Name = strings.TrimSpace(c.Comment[:strings.Index(c.Comment, " is earlier than ")])
|
||||
pkg.Version, err = types.NewVersion(c.Comment[strings.Index(c.Comment, " is earlier than ")+prefixLen:])
|
||||
featureVersion.Feature.Name = strings.TrimSpace(c.Comment[:strings.Index(c.Comment, " is earlier than ")])
|
||||
featureVersion.Version, err = types.NewVersion(c.Comment[strings.Index(c.Comment, " is earlier than ")+prefixLen:])
|
||||
if err != nil {
|
||||
log.Warningf("could not parse package version '%s': %s. skipping", c.Comment[strings.Index(c.Comment, " is earlier than ")+prefixLen:], err.Error())
|
||||
}
|
||||
@ -288,25 +291,25 @@ func toPackages(criteria criteria) []*database.Package {
|
||||
}
|
||||
|
||||
if osVersion > firstConsideredRHEL {
|
||||
pkg.OS = "centos" + ":" + strconv.Itoa(osVersion)
|
||||
featureVersion.Feature.Namespace.Name = "centos" + ":" + strconv.Itoa(osVersion)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
|
||||
if pkg.OS != "" && pkg.Name != "" && pkg.Version.String() != "" {
|
||||
packagesParameters[pkg.Key()] = &pkg
|
||||
if featureVersion.Feature.Namespace.Name != "" && featureVersion.Feature.Name != "" && featureVersion.Version.String() != "" {
|
||||
featureVersionParameters[featureVersion.Feature.Namespace.Name+":"+featureVersion.Feature.Name] = featureVersion
|
||||
} else {
|
||||
log.Warningf("could not determine a valid package from criterions: %v", criterions)
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the map to slice.
|
||||
var packagesParametersArray []*database.Package
|
||||
for _, p := range packagesParameters {
|
||||
packagesParametersArray = append(packagesParametersArray, p)
|
||||
var featureVersionParametersArray []database.FeatureVersion
|
||||
for _, fv := range featureVersionParameters {
|
||||
featureVersionParametersArray = append(featureVersionParametersArray, fv)
|
||||
}
|
||||
|
||||
return packagesParametersArray
|
||||
return featureVersionParametersArray
|
||||
}
|
||||
|
||||
func description(def definition) (desc string) {
|
||||
@ -351,3 +354,6 @@ func priority(def definition) types.Priority {
|
||||
return types.Unknown
|
||||
}
|
||||
}
|
||||
|
||||
// Clean deletes any allocated resources.
|
||||
func (f *RHELFetcher) Clean() {}
|
@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
package rhel
|
||||
|
||||
import (
|
||||
"os"
|
||||
@ -31,62 +31,70 @@ func TestRHELParser(t *testing.T) {
|
||||
|
||||
// Test parsing testdata/fetcher_rhel_test.1.xml
|
||||
testFile, _ := os.Open(path + "/testdata/fetcher_rhel_test.1.xml")
|
||||
vulnerabilities, packages, err := parseRHSA(testFile)
|
||||
vulnerabilities, err := parseRHSA(testFile)
|
||||
if assert.Nil(t, err) && assert.Len(t, vulnerabilities, 1) {
|
||||
assert.Equal(t, "RHSA-2015:1193", vulnerabilities[0].ID)
|
||||
assert.Equal(t, "RHSA-2015:1193", vulnerabilities[0].Name)
|
||||
assert.Equal(t, "https://rhn.redhat.com/errata/RHSA-2015-1193.html", vulnerabilities[0].Link)
|
||||
assert.Equal(t, types.Medium, vulnerabilities[0].Priority)
|
||||
assert.Equal(t, types.Medium, vulnerabilities[0].Severity)
|
||||
assert.Equal(t, `Xerces-C is a validating XML parser written in a portable subset of C++. A flaw was found in the way the Xerces-C XML parser processed certain XML documents. A remote attacker could provide specially crafted XML input that, when parsed by an application using Xerces-C, would cause that application to crash.`, vulnerabilities[0].Description)
|
||||
|
||||
expectedPackages := []*database.Package{
|
||||
&database.Package{
|
||||
OS: "centos:7",
|
||||
expectedFeatureVersions := []database.FeatureVersion{
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "centos:7"},
|
||||
Name: "xerces-c",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("3.1.1-7.el7_1"),
|
||||
},
|
||||
&database.Package{
|
||||
OS: "centos:7",
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "centos:7"},
|
||||
Name: "xerces-c-devel",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("3.1.1-7.el7_1"),
|
||||
},
|
||||
&database.Package{
|
||||
OS: "centos:7",
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "centos:7"},
|
||||
Name: "xerces-c-doc",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("3.1.1-7.el7_1"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectedPackage := range expectedPackages {
|
||||
assert.Contains(t, packages, expectedPackage)
|
||||
assert.Contains(t, vulnerabilities[0].FixedInNodes, expectedPackage.GetNode())
|
||||
for _, expectedFeatureVersion := range expectedFeatureVersions {
|
||||
assert.Contains(t, vulnerabilities[0].FixedIn, expectedFeatureVersion)
|
||||
}
|
||||
}
|
||||
|
||||
// Test parsing testdata/fetcher_rhel_test.2.xml
|
||||
testFile, _ = os.Open(path + "/testdata/fetcher_rhel_test.2.xml")
|
||||
vulnerabilities, packages, err = parseRHSA(testFile)
|
||||
vulnerabilities, err = parseRHSA(testFile)
|
||||
if assert.Nil(t, err) && assert.Len(t, vulnerabilities, 1) {
|
||||
assert.Equal(t, "RHSA-2015:1207", vulnerabilities[0].ID)
|
||||
assert.Equal(t, "RHSA-2015:1207", vulnerabilities[0].Name)
|
||||
assert.Equal(t, "https://rhn.redhat.com/errata/RHSA-2015-1207.html", vulnerabilities[0].Link)
|
||||
assert.Equal(t, types.Critical, vulnerabilities[0].Priority)
|
||||
assert.Equal(t, types.Critical, vulnerabilities[0].Severity)
|
||||
assert.Equal(t, `Mozilla Firefox is an open source web browser. XULRunner provides the XUL Runtime environment for Mozilla Firefox. Several flaws were found in the processing of malformed web content. A web page containing malicious content could cause Firefox to crash or, potentially, execute arbitrary code with the privileges of the user running Firefox.`, vulnerabilities[0].Description)
|
||||
|
||||
expectedPackages := []*database.Package{
|
||||
&database.Package{
|
||||
OS: "centos:6",
|
||||
expectedFeatureVersions := []database.FeatureVersion{
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "centos:6"},
|
||||
Name: "firefox",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("38.1.0-1.el6_6"),
|
||||
},
|
||||
&database.Package{
|
||||
OS: "centos:7",
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "centos:7"},
|
||||
Name: "firefox",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("38.1.0-1.el7_1"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectedPackage := range expectedPackages {
|
||||
assert.Contains(t, packages, expectedPackage)
|
||||
assert.Contains(t, vulnerabilities[0].FixedInNodes, expectedPackage.GetNode())
|
||||
for _, expectedFeatureVersion := range expectedFeatureVersions {
|
||||
assert.Contains(t, vulnerabilities[0].FixedIn, expectedFeatureVersion)
|
||||
}
|
||||
}
|
||||
}
|
@ -12,11 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
package ubuntu
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@ -30,17 +31,17 @@ import (
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
const (
|
||||
ubuntuTrackerURI = "https://launchpad.net/ubuntu-cve-tracker"
|
||||
ubuntuTracker = "lp:ubuntu-cve-tracker"
|
||||
ubuntuUpdaterFlag = "ubuntuUpdater"
|
||||
trackerURI = "https://launchpad.net/ubuntu-cve-tracker"
|
||||
trackerRepository = "lp:ubuntu-cve-tracker"
|
||||
updaterFlag = "ubuntuUpdater"
|
||||
cveURL = "http://people.ubuntu.com/~ubuntu-security/cve/%s"
|
||||
)
|
||||
|
||||
var (
|
||||
repositoryLocalPath string
|
||||
|
||||
ubuntuIgnoredReleases = map[string]struct{}{
|
||||
"upstream": struct{}{},
|
||||
"devel": struct{}{},
|
||||
@ -59,6 +60,7 @@ var (
|
||||
"oneiric": struct{}{},
|
||||
"saucy": struct{}{},
|
||||
|
||||
"vivid/ubuntu-core": struct{}{},
|
||||
"vivid/stable-phone-overlay": struct{}{},
|
||||
|
||||
// Syntax error
|
||||
@ -69,22 +71,29 @@ var (
|
||||
|
||||
affectsCaptureRegexp = regexp.MustCompile(`(?P<release>.*)_(?P<package>.*): (?P<status>[^\s]*)( \(+(?P<note>[^()]*)\)+)?`)
|
||||
affectsCaptureRegexpNames = affectsCaptureRegexp.SubexpNames()
|
||||
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater/fetchers/ubuntu")
|
||||
|
||||
// ErrFilesystem is returned when a fetcher fails to interact with the local filesystem.
|
||||
ErrFilesystem = errors.New("updater/fetchers: something went wrong when interacting with the fs")
|
||||
)
|
||||
|
||||
// UbuntuFetcher implements updater.Fetcher and get vulnerability updates from
|
||||
// UbuntuFetcher implements updater.Fetcher and gets vulnerability updates from
|
||||
// the Ubuntu CVE Tracker.
|
||||
type UbuntuFetcher struct{}
|
||||
type UbuntuFetcher struct {
|
||||
repositoryLocalPath string
|
||||
}
|
||||
|
||||
func init() {
|
||||
updater.RegisterFetcher("Ubuntu", &UbuntuFetcher{})
|
||||
}
|
||||
|
||||
// FetchUpdate gets vulnerability updates from the Ubuntu CVE Tracker.
|
||||
func (fetcher *UbuntuFetcher) FetchUpdate() (resp updater.FetcherResponse, err error) {
|
||||
func (fetcher *UbuntuFetcher) FetchUpdate(datastore database.Datastore) (resp updater.FetcherResponse, err error) {
|
||||
log.Info("fetching Ubuntu vulnerabilities")
|
||||
|
||||
// Check to see if the repository does not already exist.
|
||||
if _, pathExists := os.Stat(repositoryLocalPath); repositoryLocalPath == "" || os.IsNotExist(pathExists) {
|
||||
if _, pathExists := os.Stat(fetcher.repositoryLocalPath); fetcher.repositoryLocalPath == "" || os.IsNotExist(pathExists) {
|
||||
// Create a temporary folder and download the repository.
|
||||
p, err := ioutil.TempDir(os.TempDir(), "ubuntu-cve-tracker")
|
||||
if err != nil {
|
||||
@ -92,63 +101,62 @@ func (fetcher *UbuntuFetcher) FetchUpdate() (resp updater.FetcherResponse, err e
|
||||
}
|
||||
|
||||
// bzr wants an empty target directory.
|
||||
repositoryLocalPath = p + "/repository"
|
||||
fetcher.repositoryLocalPath = p + "/repository"
|
||||
|
||||
// Create the new repository.
|
||||
err = createRepository(repositoryLocalPath)
|
||||
err = createRepository(fetcher.repositoryLocalPath)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
} else {
|
||||
// Update the repository that's already on disk.
|
||||
err = updateRepository(repositoryLocalPath)
|
||||
err = updateRepository(fetcher.repositoryLocalPath)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
}
|
||||
|
||||
// Get revision number.
|
||||
revisionNumber, err := getRevisionNumber(repositoryLocalPath)
|
||||
revisionNumber, err := getRevisionNumber(fetcher.repositoryLocalPath)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Get the latest revision number we successfully applied in the database.
|
||||
dbRevisionNumber, err := database.GetFlagValue("ubuntuUpdater")
|
||||
dbRevisionNumber, err := datastore.GetKeyValue("ubuntuUpdater")
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Get the list of vulnerabilities that we have to update.
|
||||
modifiedCVE, err := collectModifiedVulnerabilities(revisionNumber, dbRevisionNumber, repositoryLocalPath)
|
||||
modifiedCVE, err := collectModifiedVulnerabilities(revisionNumber, dbRevisionNumber, fetcher.repositoryLocalPath)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Parse and add the vulnerabilities.
|
||||
notes := make(map[string]struct{})
|
||||
for cvePath := range modifiedCVE {
|
||||
file, err := os.Open(repositoryLocalPath + "/" + cvePath)
|
||||
// Open the CVE file.
|
||||
file, err := os.Open(fetcher.repositoryLocalPath + "/" + cvePath)
|
||||
if err != nil {
|
||||
// This can happen when a file is modified and then moved in another
|
||||
// commit.
|
||||
continue
|
||||
}
|
||||
|
||||
v, pkgs, unknownReleases, err := parseUbuntuCVE(file)
|
||||
// Parse the vulnerability.
|
||||
v, unknownReleases, err := parseUbuntuCVE(file)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
if len(v.FixedInNodes) > 0 {
|
||||
// Add the vulnerability to the response.
|
||||
resp.Vulnerabilities = append(resp.Vulnerabilities, v)
|
||||
resp.Packages = append(resp.Packages, pkgs...)
|
||||
}
|
||||
|
||||
// Log any unknown releases.
|
||||
// Store any unknown releases as notes.
|
||||
for k := range unknownReleases {
|
||||
note := fmt.Sprintf("Ubuntu %s is not mapped to any version number (eg. trusty->14.04). Please update me.", k)
|
||||
resp.Notes = append(resp.Notes, note)
|
||||
log.Warning(note)
|
||||
notes[note] = struct{}{}
|
||||
|
||||
// If we encountered unknown Ubuntu release, we don't want the revision
|
||||
// number to be considered as managed.
|
||||
@ -164,9 +172,12 @@ func (fetcher *UbuntuFetcher) FetchUpdate() (resp updater.FetcherResponse, err e
|
||||
file.Close()
|
||||
}
|
||||
|
||||
// Add flag information
|
||||
resp.FlagName = ubuntuUpdaterFlag
|
||||
// Add flag and notes.
|
||||
resp.FlagName = updaterFlag
|
||||
resp.FlagValue = strconv.Itoa(revisionNumber)
|
||||
for note := range notes {
|
||||
resp.Notes = append(resp.Notes, note)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
@ -238,7 +249,7 @@ func collectModifiedVulnerabilities(revision int, dbRevision, repositoryLocalPat
|
||||
|
||||
func createRepository(pathToRepo string) error {
|
||||
// Branch repository
|
||||
out, err := utils.Exec("/tmp/", "bzr", "branch", ubuntuTracker, pathToRepo)
|
||||
out, err := utils.Exec("/tmp/", "bzr", "branch", trackerRepository, pathToRepo)
|
||||
if err != nil {
|
||||
log.Errorf("could not branch Ubuntu repository: %s. output: %s", err, out)
|
||||
return cerrors.ErrCouldNotDownload
|
||||
@ -270,8 +281,7 @@ func getRevisionNumber(pathToRepo string) (int, error) {
|
||||
return revno, nil
|
||||
}
|
||||
|
||||
func parseUbuntuCVE(fileContent io.Reader) (vulnerability *database.Vulnerability, packages []*database.Package, unknownReleases map[string]struct{}, err error) {
|
||||
vulnerability = &database.Vulnerability{}
|
||||
func parseUbuntuCVE(fileContent io.Reader) (vulnerability database.Vulnerability, unknownReleases map[string]struct{}, err error) {
|
||||
unknownReleases = make(map[string]struct{})
|
||||
readingDescription := false
|
||||
scanner := bufio.NewScanner(fileContent)
|
||||
@ -286,13 +296,8 @@ func parseUbuntuCVE(fileContent io.Reader) (vulnerability *database.Vulnerabilit
|
||||
|
||||
// Parse the name.
|
||||
if strings.HasPrefix(line, "Candidate:") {
|
||||
vulnerability.ID = strings.TrimSpace(strings.TrimPrefix(line, "Candidate:"))
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse the link.
|
||||
if vulnerability.Link == "" && strings.HasPrefix(line, "http") {
|
||||
vulnerability.Link = strings.TrimSpace(line)
|
||||
vulnerability.Name = strings.TrimSpace(strings.TrimPrefix(line, "Candidate:"))
|
||||
vulnerability.Link = fmt.Sprintf(cveURL, vulnerability.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
@ -305,7 +310,7 @@ func parseUbuntuCVE(fileContent io.Reader) (vulnerability *database.Vulnerabilit
|
||||
priority = priority[:strings.Index(priority, " ")]
|
||||
}
|
||||
|
||||
vulnerability.Priority = ubuntuPriorityToPriority(priority)
|
||||
vulnerability.Severity = ubuntuPriorityToSeverity(priority)
|
||||
continue
|
||||
}
|
||||
|
||||
@ -339,9 +344,9 @@ func parseUbuntuCVE(fileContent io.Reader) (vulnerability *database.Vulnerabilit
|
||||
continue
|
||||
}
|
||||
|
||||
// Only consider the package if its status is needed, active, deferred
|
||||
// or released. Ignore DNE, needs-triage, not-affected, ignored, pending.
|
||||
if md["status"] == "needed" || md["status"] == "active" || md["status"] == "deferred" || md["status"] == "released" {
|
||||
// Only consider the package if its status is needed, active, deferred, not-affected or
|
||||
// released. Ignore DNE (package does not exist), needs-triage, ignored, pending.
|
||||
if md["status"] == "needed" || md["status"] == "active" || md["status"] == "deferred" || md["status"] == "released" || md["status"] == "not-affected" {
|
||||
if _, isReleaseIgnored := ubuntuIgnoredReleases[md["release"]]; isReleaseIgnored {
|
||||
continue
|
||||
}
|
||||
@ -359,6 +364,8 @@ func parseUbuntuCVE(fileContent io.Reader) (vulnerability *database.Vulnerabilit
|
||||
log.Warningf("could not parse package version '%s': %s. skipping", md["note"], err)
|
||||
}
|
||||
}
|
||||
} else if md["status"] == "not-affected" {
|
||||
version = types.MinVersion
|
||||
} else {
|
||||
version = types.MaxVersion
|
||||
}
|
||||
@ -367,13 +374,14 @@ func parseUbuntuCVE(fileContent io.Reader) (vulnerability *database.Vulnerabilit
|
||||
}
|
||||
|
||||
// Create and add the new package.
|
||||
pkg := &database.Package{
|
||||
OS: "ubuntu:" + database.UbuntuReleasesMapping[md["release"]],
|
||||
featureVersion := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "ubuntu:" + database.UbuntuReleasesMapping[md["release"]]},
|
||||
Name: md["package"],
|
||||
},
|
||||
Version: version,
|
||||
}
|
||||
packages = append(packages, pkg)
|
||||
vulnerability.FixedInNodes = append(vulnerability.FixedInNodes, pkg.GetNode())
|
||||
vulnerability.FixedIn = append(vulnerability.FixedIn, featureVersion)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -383,18 +391,18 @@ func parseUbuntuCVE(fileContent io.Reader) (vulnerability *database.Vulnerabilit
|
||||
|
||||
// If no link has been provided (CVE-2006-NNN0 for instance), add the link to the tracker
|
||||
if vulnerability.Link == "" {
|
||||
vulnerability.Link = ubuntuTrackerURI
|
||||
vulnerability.Link = trackerURI
|
||||
}
|
||||
|
||||
// If no priority has been provided (CVE-2007-0667 for instance), set the priority to Unknown
|
||||
if vulnerability.Priority == "" {
|
||||
vulnerability.Priority = types.Unknown
|
||||
if vulnerability.Severity == "" {
|
||||
vulnerability.Severity = types.Unknown
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func ubuntuPriorityToPriority(priority string) types.Priority {
|
||||
func ubuntuPriorityToSeverity(priority string) types.Priority {
|
||||
switch priority {
|
||||
case "untriaged":
|
||||
return types.Unknown
|
||||
@ -413,3 +421,10 @@ func ubuntuPriorityToPriority(priority string) types.Priority {
|
||||
log.Warning("Could not determine a vulnerability priority from: %s", priority)
|
||||
return types.Unknown
|
||||
}
|
||||
|
||||
// Clean deletes any allocated resources.
|
||||
func (fetcher *UbuntuFetcher) Clean() {
|
||||
if fetcher.repositoryLocalPath != "" {
|
||||
os.RemoveAll(fetcher.repositoryLocalPath)
|
||||
}
|
||||
}
|
@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
package ubuntu
|
||||
|
||||
import (
|
||||
"os"
|
||||
@ -32,37 +32,42 @@ func TestUbuntuParser(t *testing.T) {
|
||||
// Test parsing testdata/fetcher_
|
||||
testData, _ := os.Open(path + "/testdata/fetcher_ubuntu_test.txt")
|
||||
defer testData.Close()
|
||||
vulnerability, packages, unknownReleases, err := parseUbuntuCVE(testData)
|
||||
vulnerability, unknownReleases, err := parseUbuntuCVE(testData)
|
||||
if assert.Nil(t, err) {
|
||||
assert.Equal(t, "CVE-2015-4471", vulnerability.ID)
|
||||
assert.Equal(t, types.Medium, vulnerability.Priority)
|
||||
assert.Equal(t, "CVE-2015-4471", vulnerability.Name)
|
||||
assert.Equal(t, types.Medium, vulnerability.Severity)
|
||||
assert.Equal(t, "Off-by-one error in the lzxd_decompress function in lzxd.c in libmspack before 0.5 allows remote attackers to cause a denial of service (buffer under-read and application crash) via a crafted CAB archive.", vulnerability.Description)
|
||||
|
||||
// Unknown release (line 28)
|
||||
_, hasUnkownRelease := unknownReleases["unknown"]
|
||||
assert.True(t, hasUnkownRelease)
|
||||
|
||||
expectedPackages := []*database.Package{
|
||||
&database.Package{
|
||||
OS: "ubuntu:14.04",
|
||||
expectedFeatureVersions := []database.FeatureVersion{
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "ubuntu:14.04"},
|
||||
Name: "libmspack",
|
||||
},
|
||||
Version: types.MaxVersion,
|
||||
},
|
||||
&database.Package{
|
||||
OS: "ubuntu:15.04",
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "ubuntu:15.04"},
|
||||
Name: "libmspack",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.4-3"),
|
||||
},
|
||||
&database.Package{
|
||||
OS: "ubuntu:15.10",
|
||||
database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Namespace: database.Namespace{Name: "ubuntu:15.10"},
|
||||
Name: "libmspack-anotherpkg",
|
||||
},
|
||||
Version: types.NewVersionUnsafe("0.1"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, expectedPackage := range expectedPackages {
|
||||
assert.Contains(t, packages, expectedPackage)
|
||||
assert.Contains(t, vulnerability.FixedInNodes, expectedPackage.GetNode())
|
||||
for _, expectedFeatureVersion := range expectedFeatureVersions {
|
||||
assert.Contains(t, vulnerability.FixedIn, expectedFeatureVersion)
|
||||
}
|
||||
}
|
||||
}
|
64
updater/metadata_fetchers.go
Normal file
64
updater/metadata_fetchers.go
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package updater
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
)
|
||||
|
||||
var metadataFetchers = make(map[string]MetadataFetcher)
|
||||
|
||||
type VulnerabilityWithLock struct {
|
||||
*database.Vulnerability
|
||||
Lock sync.Mutex
|
||||
}
|
||||
|
||||
// MetadataFetcher
|
||||
type MetadataFetcher interface {
|
||||
// Load runs right before the Updater calls AddMetadata for each vulnerabilities.
|
||||
Load(database.Datastore) error
|
||||
|
||||
// AddMetadata adds metadata to the given database.Vulnerability.
|
||||
// It is expected that the fetcher uses .Lock.Lock() when manipulating the Metadata map.
|
||||
AddMetadata(*VulnerabilityWithLock) error
|
||||
|
||||
// Unload runs right after the Updater finished calling AddMetadata for every vulnerabilities.
|
||||
Unload()
|
||||
|
||||
// Clean deletes any allocated resources.
|
||||
// It is invoked when Clair stops.
|
||||
Clean()
|
||||
}
|
||||
|
||||
// RegisterFetcher makes a Fetcher available by the provided name.
|
||||
// If Register is called twice with the same name or if driver is nil,
|
||||
// it panics.
|
||||
func RegisterMetadataFetcher(name string, f MetadataFetcher) {
|
||||
if name == "" {
|
||||
panic("updater: could not register a MetadataFetcher with an empty name")
|
||||
}
|
||||
|
||||
if f == nil {
|
||||
panic("updater: could not register a nil MetadataFetcher")
|
||||
}
|
||||
|
||||
if _, dup := fetchers[name]; dup {
|
||||
panic("updater: RegisterMetadataFetcher called twice for " + name)
|
||||
}
|
||||
|
||||
metadataFetchers[name] = f
|
||||
}
|
19
updater/metadata_fetchers/nvd/nested_read_closer.go
Normal file
19
updater/metadata_fetchers/nvd/nested_read_closer.go
Normal file
@ -0,0 +1,19 @@
|
||||
package nvd
|
||||
|
||||
import "io"
|
||||
|
||||
// NestedReadCloser wraps an io.Reader and implements io.ReadCloser by closing every embed
|
||||
// io.ReadCloser.
|
||||
// It allows chaining io.ReadCloser together and still keep the ability to close them all in a
|
||||
// simple manner.
|
||||
type NestedReadCloser struct {
|
||||
io.Reader
|
||||
NestedReadClosers []io.ReadCloser
|
||||
}
|
||||
|
||||
// Close closes the gzip.Reader and the underlying io.ReadCloser.
|
||||
func (nrc *NestedReadCloser) Close() {
|
||||
for _, nestedReadCloser := range nrc.NestedReadClosers {
|
||||
nestedReadCloser.Close()
|
||||
}
|
||||
}
|
228
updater/metadata_fetchers/nvd/nvd.go
Normal file
228
updater/metadata_fetchers/nvd/nvd.go
Normal file
@ -0,0 +1,228 @@
|
||||
package nvd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/updater"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
const (
|
||||
dataFeedURL string = "http://static.nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-%s.xml.gz"
|
||||
dataFeedMetaURL string = "http://static.nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-%s.meta"
|
||||
|
||||
metadataKey string = "NVD"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater/fetchers/metadata_fetchers")
|
||||
)
|
||||
|
||||
type NVDMetadataFetcher struct {
|
||||
localPath string
|
||||
dataFeedHashes map[string]string
|
||||
lock sync.Mutex
|
||||
|
||||
metadata map[string]NVDMetadata
|
||||
}
|
||||
|
||||
type NVDMetadata struct {
|
||||
CVSSv2 NVDmetadataCVSSv2
|
||||
}
|
||||
|
||||
type NVDmetadataCVSSv2 struct {
|
||||
Vectors string
|
||||
Score float64
|
||||
}
|
||||
|
||||
func init() {
|
||||
updater.RegisterMetadataFetcher("NVD", &NVDMetadataFetcher{})
|
||||
}
|
||||
|
||||
func (fetcher *NVDMetadataFetcher) Load(datastore database.Datastore) error {
|
||||
fetcher.lock.Lock()
|
||||
defer fetcher.lock.Unlock()
|
||||
|
||||
var err error
|
||||
fetcher.metadata = make(map[string]NVDMetadata)
|
||||
|
||||
// Init if necessary.
|
||||
if fetcher.localPath == "" {
|
||||
// Create a temporary folder to store the NVD data and create hashes struct.
|
||||
if fetcher.localPath, err = ioutil.TempDir(os.TempDir(), "nvd-data"); err != nil {
|
||||
return cerrors.ErrFilesystem
|
||||
}
|
||||
|
||||
fetcher.dataFeedHashes = make(map[string]string)
|
||||
}
|
||||
|
||||
// Get data feeds.
|
||||
dataFeedReaders, dataFeedHashes, err := getDataFeeds(fetcher.dataFeedHashes, fetcher.localPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fetcher.dataFeedHashes = dataFeedHashes
|
||||
|
||||
// Parse data feeds.
|
||||
for dataFeedName, dataFeedReader := range dataFeedReaders {
|
||||
var nvd nvd
|
||||
if err = xml.NewDecoder(dataFeedReader).Decode(&nvd); err != nil {
|
||||
log.Errorf("could not decode NVD data feed '%s': %s", dataFeedName, err)
|
||||
return cerrors.ErrCouldNotParse
|
||||
}
|
||||
|
||||
// For each entry of this data feed:
|
||||
for _, nvdEntry := range nvd.Entries {
|
||||
// Create metadata entry.
|
||||
if metadata := nvdEntry.Metadata(); metadata != nil {
|
||||
fetcher.metadata[nvdEntry.Name] = *metadata
|
||||
}
|
||||
}
|
||||
|
||||
dataFeedReader.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fetcher *NVDMetadataFetcher) AddMetadata(vulnerability *updater.VulnerabilityWithLock) error {
|
||||
fetcher.lock.Lock()
|
||||
defer fetcher.lock.Unlock()
|
||||
|
||||
if nvdMetadata, ok := fetcher.metadata[vulnerability.Name]; ok {
|
||||
vulnerability.Lock.Lock()
|
||||
defer vulnerability.Lock.Unlock()
|
||||
|
||||
// Create Metadata map if necessary.
|
||||
if vulnerability.Metadata == nil {
|
||||
vulnerability.Metadata = make(map[string]interface{})
|
||||
}
|
||||
|
||||
vulnerability.Metadata[metadataKey] = nvdMetadata
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unload releases the in-memory metadata map. The on-disk feed cache and the
// known feed hashes are kept so a subsequent Load can reuse them.
func (fetcher *NVDMetadataFetcher) Unload() {
	fetcher.lock.Lock()
	defer fetcher.lock.Unlock()

	fetcher.metadata = nil
}
|
||||
|
||||
// Clean removes the temporary directory holding the cached NVD data feeds,
// if one was created by Load.
func (fetcher *NVDMetadataFetcher) Clean() {
	fetcher.lock.Lock()
	defer fetcher.lock.Unlock()

	if fetcher.localPath != "" {
		os.RemoveAll(fetcher.localPath)
	}
}
|
||||
|
||||
func getDataFeeds(dataFeedHashes map[string]string, localPath string) (map[string]NestedReadCloser, map[string]string, error) {
|
||||
var dataFeedNames []string
|
||||
for y := 2002; y <= time.Now().Year(); y++ {
|
||||
dataFeedNames = append(dataFeedNames, strconv.Itoa(y))
|
||||
}
|
||||
|
||||
// Get hashes for these feeds.
|
||||
for _, dataFeedName := range dataFeedNames {
|
||||
hash, err := getHashFromMetaURL(fmt.Sprintf(dataFeedMetaURL, dataFeedName))
|
||||
if err != nil {
|
||||
log.Warningf("could get get NVD data feed hash '%s': %s", dataFeedName, err)
|
||||
|
||||
// It's not a big deal, no need interrupt, we're just going to download it again then.
|
||||
continue
|
||||
}
|
||||
|
||||
dataFeedHashes[dataFeedName] = hash
|
||||
}
|
||||
|
||||
// Create io.Reader for every data feed.
|
||||
dataFeedReaders := make(map[string]NestedReadCloser)
|
||||
for _, dataFeedName := range dataFeedNames {
|
||||
fileName := localPath + dataFeedName + ".xml"
|
||||
|
||||
if h, ok := dataFeedHashes[dataFeedName]; ok && h == dataFeedHashes[dataFeedName] {
|
||||
// The hash is known, the disk should contains the feed. Try to read from it.
|
||||
if localPath != "" {
|
||||
if f, err := os.Open(fileName); err == nil {
|
||||
dataFeedReaders[dataFeedName] = NestedReadCloser{
|
||||
Reader: f,
|
||||
NestedReadClosers: []io.ReadCloser{f},
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Download data feed.
|
||||
r, err := http.Get(fmt.Sprintf(dataFeedURL, dataFeedName))
|
||||
if err != nil {
|
||||
log.Errorf("could not download NVD data feed file '%s': %s", dataFeedName, err)
|
||||
return dataFeedReaders, dataFeedHashes, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
// Un-gzip it.
|
||||
gr, err := gzip.NewReader(r.Body)
|
||||
if err != nil {
|
||||
log.Errorf("could not read NVD data feed file '%s': %s", dataFeedName, err)
|
||||
return dataFeedReaders, dataFeedHashes, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
// Store it to a file at the same time if possible.
|
||||
if f, err := os.Create(fileName); err == nil {
|
||||
nrc := NestedReadCloser{
|
||||
Reader: io.TeeReader(gr, f),
|
||||
NestedReadClosers: []io.ReadCloser{r.Body, gr, f},
|
||||
}
|
||||
dataFeedReaders[dataFeedName] = nrc
|
||||
} else {
|
||||
nrc := NestedReadCloser{
|
||||
Reader: gr,
|
||||
NestedReadClosers: []io.ReadCloser{gr, r.Body},
|
||||
}
|
||||
dataFeedReaders[dataFeedName] = nrc
|
||||
|
||||
log.Warningf("could not store NVD data feed to filesystem: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dataFeedReaders, dataFeedHashes, nil
|
||||
}
|
||||
|
||||
// getHashFromMetaURL fetches an NVD .meta file and returns the SHA-256 hash
// it advertises for the corresponding data feed.
func getHashFromMetaURL(metaURL string) (string, error) {
	r, err := http.Get(metaURL)
	if err != nil {
		return "", err
	}
	defer r.Body.Close()

	// Without this check, a non-2xx response (e.g. a 404 error page) would be
	// parsed as if it were a .meta file.
	if r.StatusCode < 200 || r.StatusCode >= 300 {
		return "", fmt.Errorf("unexpected status code %d for .meta file", r.StatusCode)
	}

	scanner := bufio.NewScanner(r.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "sha256:") {
			return strings.TrimPrefix(line, "sha256:"), nil
		}
	}
	if err := scanner.Err(); err != nil {
		return "", err
	}

	return "", errors.New("invalid .meta file format")
}
|
82
updater/metadata_fetchers/nvd/xml.go
Normal file
82
updater/metadata_fetchers/nvd/xml.go
Normal file
@ -0,0 +1,82 @@
|
||||
package nvd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type nvd struct {
|
||||
Entries []nvdEntry `xml:"entry"`
|
||||
}
|
||||
|
||||
type nvdEntry struct {
|
||||
Name string `xml:"http://scap.nist.gov/schema/vulnerability/0.4 cve-id"`
|
||||
CVSS nvdCVSS `xml:"http://scap.nist.gov/schema/vulnerability/0.4 cvss"`
|
||||
}
|
||||
|
||||
type nvdCVSS struct {
|
||||
BaseMetrics nvdCVSSBaseMetrics `xml:"http://scap.nist.gov/schema/cvss-v2/0.2 base_metrics"`
|
||||
}
|
||||
|
||||
type nvdCVSSBaseMetrics struct {
|
||||
Score float64 `xml:"score"`
|
||||
AccessVector string `xml:"access-vector"`
|
||||
AccessComplexity string `xml:"access-complexity"`
|
||||
Authentication string `xml:"authentication"`
|
||||
ConfImpact string `xml:"confidentiality-impact"`
|
||||
IntegImpact string `xml:"integrity-impact"`
|
||||
AvailImpact string `xml:"avaibility-impact"`
|
||||
}
|
||||
|
||||
// vectorValuesToLetters maps the enumerated values used by the NVD XML feeds
// to the single-letter abbreviations used in CVSSv2 vector strings.
var vectorValuesToLetters = map[string]string{
	"NETWORK":            "N",
	"ADJACENT_NETWORK":   "A",
	"LOCAL":              "L",
	"HIGH":               "H",
	"MEDIUM":             "M",
	"LOW":                "L",
	"NONE":               "N",
	"SINGLE_INSTANCE":    "S",
	"MULTIPLE_INSTANCES": "M",
	"PARTIAL":            "P",
	"COMPLETE":           "C",
}
|
||||
|
||||
func (n nvdEntry) Metadata() *NVDMetadata {
|
||||
metadata := &NVDMetadata{
|
||||
CVSSv2: NVDmetadataCVSSv2{
|
||||
Vectors: n.CVSS.BaseMetrics.String(),
|
||||
Score: n.CVSS.BaseMetrics.Score,
|
||||
},
|
||||
}
|
||||
|
||||
if metadata.CVSSv2.Vectors == "" {
|
||||
return nil
|
||||
}
|
||||
return metadata
|
||||
}
|
||||
|
||||
// String renders the base metrics as a CVSSv2 vector string, e.g.
// "AV:N/AC:L/Au:N/C:P/I:P/A:P". Components whose value is empty or unknown
// are omitted (addVec skips them); an entry with no components yields "".
func (n nvdCVSSBaseMetrics) String() string {
	var str string
	addVec(&str, "AV", n.AccessVector)
	addVec(&str, "AC", n.AccessComplexity)
	addVec(&str, "Au", n.Authentication)
	addVec(&str, "C", n.ConfImpact)
	addVec(&str, "I", n.IntegImpact)
	addVec(&str, "A", n.AvailImpact)
	str = strings.TrimSuffix(str, "/")
	return str
}
|
||||
|
||||
func addVec(str *string, vec, val string) {
|
||||
if val != "" {
|
||||
if let, ok := vectorValuesToLetters[val]; ok {
|
||||
*str = fmt.Sprintf("%s%s:%s/", *str, vec, let)
|
||||
} else {
|
||||
log.Warningf("unknown value '%v' for CVSSv2 vector '%s'", val, vec)
|
||||
}
|
||||
}
|
||||
}
|
@ -17,31 +17,55 @@
|
||||
package updater
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/health"
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/pborman/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const (
|
||||
flagName = "updater"
|
||||
flagName = "updater/last"
|
||||
notesFlagName = "updater/notes"
|
||||
refreshLockDuration = time.Minute * 8
|
||||
|
||||
lockName = "updater"
|
||||
lockDuration = refreshLockDuration + time.Minute*2
|
||||
refreshLockDuration = time.Minute * 8
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater")
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater")
|
||||
|
||||
promUpdaterErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "clair_updater_errors_total",
|
||||
Help: "Numbers of errors that the updater generated.",
|
||||
})
|
||||
|
||||
promUpdaterDurationSeconds = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "clair_updater_duration_seconds",
|
||||
Help: "Time it takes to update the vulnerability database.",
|
||||
})
|
||||
|
||||
promUpdaterNotesTotal = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "clair_updater_notes_total",
|
||||
Help: "Number of notes that the vulnerability fetchers generated.",
|
||||
})
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(promUpdaterErrorsTotal)
|
||||
prometheus.MustRegister(promUpdaterDurationSeconds)
|
||||
prometheus.MustRegister(promUpdaterNotesTotal)
|
||||
}
|
||||
|
||||
// Run updates the vulnerability database at regular intervals.
|
||||
func Run(config *config.UpdaterConfig, st *utils.Stopper) {
|
||||
func Run(config *config.UpdaterConfig, datastore database.Datastore, st *utils.Stopper) {
|
||||
defer st.End()
|
||||
|
||||
// Do not run the updater if there is no config or if the interval is 0.
|
||||
@ -50,34 +74,33 @@ func Run(config *config.UpdaterConfig, st *utils.Stopper) {
|
||||
return
|
||||
}
|
||||
|
||||
// Register healthchecker.
|
||||
health.RegisterHealthchecker("updater", Healthcheck)
|
||||
|
||||
whoAmI := uuid.New()
|
||||
log.Infof("updater service started. lock identifier: %s", whoAmI)
|
||||
|
||||
for {
|
||||
// Set the next update time to (last update time + interval) or now if there
|
||||
// is no last update time stored in database (first update) or if an error
|
||||
// occurs.
|
||||
var nextUpdate time.Time
|
||||
var stop bool
|
||||
if lastUpdate := getLastUpdate(); !lastUpdate.IsZero() {
|
||||
|
||||
// Determine if this is the first update and define the next update time.
|
||||
// The next update time is (last update time + interval) or now if this is the first update.
|
||||
nextUpdate := time.Now().UTC()
|
||||
lastUpdate, firstUpdate, err := getLastUpdate(datastore)
|
||||
if err != nil {
|
||||
log.Errorf("an error occured while getting the last update time")
|
||||
nextUpdate = nextUpdate.Add(config.Interval)
|
||||
} else if firstUpdate == false {
|
||||
nextUpdate = lastUpdate.Add(config.Interval)
|
||||
} else {
|
||||
nextUpdate = time.Now().UTC()
|
||||
}
|
||||
|
||||
// If the next update timer is in the past, then try to update.
|
||||
if nextUpdate.Before(time.Now().UTC()) {
|
||||
// Attempt to get a lock on the the update.
|
||||
log.Debug("attempting to obtain update lock")
|
||||
hasLock, hasLockUntil := database.Lock(flagName, lockDuration, whoAmI)
|
||||
hasLock, hasLockUntil := datastore.Lock(lockName, whoAmI, lockDuration, false)
|
||||
if hasLock {
|
||||
// Launch update in a new go routine.
|
||||
doneC := make(chan bool, 1)
|
||||
go func() {
|
||||
Update()
|
||||
Update(datastore, firstUpdate)
|
||||
doneC <- true
|
||||
}()
|
||||
|
||||
@ -87,21 +110,21 @@ func Run(config *config.UpdaterConfig, st *utils.Stopper) {
|
||||
done = true
|
||||
case <-time.After(refreshLockDuration):
|
||||
// Refresh the lock until the update is done.
|
||||
database.Lock(flagName, lockDuration, whoAmI)
|
||||
datastore.Lock(lockName, whoAmI, lockDuration, true)
|
||||
case <-st.Chan():
|
||||
stop = true
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock the update.
|
||||
database.Unlock(flagName, whoAmI)
|
||||
datastore.Unlock(lockName, whoAmI)
|
||||
|
||||
if stop {
|
||||
break
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
lockOwner, lockExpiration, err := database.LockInfo(flagName)
|
||||
lockOwner, lockExpiration, err := datastore.FindLock(lockName)
|
||||
if err != nil {
|
||||
log.Debug("update lock is already taken")
|
||||
nextUpdate = hasLockUntil
|
||||
@ -123,75 +146,75 @@ func Run(config *config.UpdaterConfig, st *utils.Stopper) {
|
||||
}
|
||||
}
|
||||
|
||||
// Clean resources.
|
||||
for _, metadataFetcher := range metadataFetchers {
|
||||
metadataFetcher.Clean()
|
||||
}
|
||||
for _, fetcher := range fetchers {
|
||||
fetcher.Clean()
|
||||
}
|
||||
|
||||
log.Info("updater service stopped")
|
||||
}
|
||||
|
||||
// Update fetches all the vulnerabilities from the registered fetchers, upserts
|
||||
// them into the database and then sends notifications.
|
||||
func Update() {
|
||||
func Update(datastore database.Datastore, firstUpdate bool) {
|
||||
defer setUpdaterDuration(time.Now())
|
||||
|
||||
log.Info("updating vulnerabilities")
|
||||
|
||||
// Fetch updates.
|
||||
status, responses := fetch()
|
||||
|
||||
// Merge responses.
|
||||
vulnerabilities, packages, flags, notes, err := mergeAndVerify(responses)
|
||||
if err != nil {
|
||||
log.Errorf("an error occured when merging update responses: %s", err)
|
||||
return
|
||||
}
|
||||
responses = nil
|
||||
|
||||
// TODO(Quentin-M): Complete informations using NVD
|
||||
|
||||
// Insert packages.
|
||||
log.Tracef("beginning insertion of %d packages for update", len(packages))
|
||||
err = database.InsertPackages(packages)
|
||||
if err != nil {
|
||||
log.Errorf("an error occured when inserting packages for update: %s", err)
|
||||
return
|
||||
}
|
||||
packages = nil
|
||||
status, vulnerabilities, flags, notes := fetch(datastore)
|
||||
|
||||
// Insert vulnerabilities.
|
||||
log.Tracef("beginning insertion of %d vulnerabilities for update", len(vulnerabilities))
|
||||
notifications, err := database.InsertVulnerabilities(vulnerabilities)
|
||||
log.Tracef("inserting %d vulnerabilities for update", len(vulnerabilities))
|
||||
err := datastore.InsertVulnerabilities(vulnerabilities, !firstUpdate)
|
||||
if err != nil {
|
||||
promUpdaterErrorsTotal.Inc()
|
||||
log.Errorf("an error occured when inserting vulnerabilities for update: %s", err)
|
||||
return
|
||||
}
|
||||
vulnerabilities = nil
|
||||
|
||||
// Insert notifications into the database.
|
||||
err = database.InsertNotifications(notifications, database.GetDefaultNotificationWrapper())
|
||||
if err != nil {
|
||||
log.Errorf("an error occured when inserting notifications for update: %s", err)
|
||||
return
|
||||
}
|
||||
notifications = nil
|
||||
|
||||
// Update flags and notes.
|
||||
// Update flags.
|
||||
for flagName, flagValue := range flags {
|
||||
database.UpdateFlag(flagName, flagValue)
|
||||
datastore.InsertKeyValue(flagName, flagValue)
|
||||
}
|
||||
database.UpdateFlag(notesFlagName, notes)
|
||||
|
||||
// Log notes.
|
||||
for _, note := range notes {
|
||||
log.Warningf("fetcher note: %s", note)
|
||||
}
|
||||
promUpdaterNotesTotal.Set(float64(len(notes)))
|
||||
|
||||
// Update last successful update if every fetchers worked properly.
|
||||
if status {
|
||||
database.UpdateFlag(flagName, strconv.FormatInt(time.Now().UTC().Unix(), 10))
|
||||
datastore.InsertKeyValue(flagName, strconv.FormatInt(time.Now().UTC().Unix(), 10))
|
||||
}
|
||||
|
||||
log.Info("update finished")
|
||||
}
|
||||
|
||||
func setUpdaterDuration(start time.Time) {
|
||||
promUpdaterDurationSeconds.Set(time.Since(start).Seconds())
|
||||
}
|
||||
|
||||
// fetch get data from the registered fetchers, in parallel.
|
||||
func fetch() (status bool, responses []*FetcherResponse) {
|
||||
func fetch(datastore database.Datastore) (bool, []database.Vulnerability, map[string]string, []string) {
|
||||
var vulnerabilities []database.Vulnerability
|
||||
var notes []string
|
||||
status := true
|
||||
flags := make(map[string]string)
|
||||
|
||||
// Fetch updates in parallel.
|
||||
status = true
|
||||
log.Info("fetching vulnerability updates")
|
||||
var responseC = make(chan *FetcherResponse, 0)
|
||||
for n, f := range fetchers {
|
||||
go func(name string, fetcher Fetcher) {
|
||||
response, err := fetcher.FetchUpdate()
|
||||
response, err := fetcher.FetchUpdate(datastore)
|
||||
if err != nil {
|
||||
promUpdaterErrorsTotal.Inc()
|
||||
log.Errorf("an error occured when fetching update '%s': %s.", name, err)
|
||||
status = false
|
||||
responseC <- nil
|
||||
@ -206,119 +229,115 @@ func fetch() (status bool, responses []*FetcherResponse) {
|
||||
for i := 0; i < len(fetchers); i++ {
|
||||
resp := <-responseC
|
||||
if resp != nil {
|
||||
responses = append(responses, resp)
|
||||
vulnerabilities = append(vulnerabilities, doVulnerabilitiesNamespacing(resp.Vulnerabilities)...)
|
||||
notes = append(notes, resp.Notes...)
|
||||
if resp.FlagName != "" && resp.FlagValue != "" {
|
||||
flags[resp.FlagName] = resp.FlagValue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
close(responseC)
|
||||
return
|
||||
return status, addMetadata(datastore, vulnerabilities), flags, notes
|
||||
}
|
||||
|
||||
// merge put all the responses together (vulnerabilities, packages, flags, notes), ensure the
|
||||
// uniqueness of vulnerabilities and packages and verify that every vulnerability's fixedInNodes
|
||||
// have their corresponding package definition.
|
||||
func mergeAndVerify(responses []*FetcherResponse) (svulnerabilities []*database.Vulnerability, spackages []*database.Package, flags map[string]string, snotes string, err error) {
|
||||
vulnerabilities := make(map[string]*database.Vulnerability)
|
||||
packages := make(map[string]*database.Package)
|
||||
flags = make(map[string]string)
|
||||
var notes []string
|
||||
// Add metadata to the specified vulnerabilities using the registered MetadataFetchers, in parallel.
|
||||
func addMetadata(datastore database.Datastore, vulnerabilities []database.Vulnerability) []database.Vulnerability {
|
||||
if len(metadataFetchers) == 0 {
|
||||
return vulnerabilities
|
||||
}
|
||||
|
||||
// Merge responses.
|
||||
for _, response := range responses {
|
||||
// Notes
|
||||
notes = append(notes, response.Notes...)
|
||||
// Flags
|
||||
if response.FlagName != "" && response.FlagValue != "" {
|
||||
flags[response.FlagName] = response.FlagValue
|
||||
log.Info("adding metadata to vulnerabilities")
|
||||
|
||||
// Wrap vulnerabilities in VulnerabilityWithLock.
|
||||
// It ensures that only one metadata fetcher at a time can modify the Metadata map.
|
||||
vulnerabilitiesWithLocks := make([]*VulnerabilityWithLock, 0, len(vulnerabilities))
|
||||
for i := 0; i < len(vulnerabilities); i++ {
|
||||
vulnerabilitiesWithLocks = append(vulnerabilitiesWithLocks, &VulnerabilityWithLock{
|
||||
Vulnerability: &vulnerabilities[i],
|
||||
})
|
||||
}
|
||||
// Packages
|
||||
for _, p := range response.Packages {
|
||||
node := p.GetNode()
|
||||
if _, ok := packages[node]; !ok {
|
||||
packages[node] = p
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(metadataFetchers))
|
||||
|
||||
for n, f := range metadataFetchers {
|
||||
go func(name string, metadataFetcher MetadataFetcher) {
|
||||
defer wg.Done()
|
||||
|
||||
// Load the metadata fetcher.
|
||||
if err := metadataFetcher.Load(datastore); err != nil {
|
||||
promUpdaterErrorsTotal.Inc()
|
||||
log.Errorf("an error occured when loading metadata fetcher '%s': %s.", name, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Add metadata to each vulnerability.
|
||||
for _, vulnerability := range vulnerabilitiesWithLocks {
|
||||
metadataFetcher.AddMetadata(vulnerability)
|
||||
}
|
||||
// Vulnerabilities
|
||||
for _, v := range response.Vulnerabilities {
|
||||
if vulnerability, ok := vulnerabilities[v.ID]; !ok {
|
||||
vulnerabilities[v.ID] = v
|
||||
|
||||
metadataFetcher.Unload()
|
||||
}(n, f)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return vulnerabilities
|
||||
}
|
||||
|
||||
func getLastUpdate(datastore database.Datastore) (time.Time, bool, error) {
|
||||
lastUpdateTSS, err := datastore.GetKeyValue(flagName)
|
||||
if err != nil {
|
||||
return time.Time{}, false, err
|
||||
}
|
||||
|
||||
if lastUpdateTSS == "" {
|
||||
// This is the first update.
|
||||
return time.Time{}, true, nil
|
||||
}
|
||||
|
||||
lastUpdateTS, err := strconv.ParseInt(lastUpdateTSS, 10, 64)
|
||||
if err != nil {
|
||||
return time.Time{}, false, err
|
||||
}
|
||||
|
||||
return time.Unix(lastUpdateTS, 0).UTC(), false, nil
|
||||
}
|
||||
|
||||
// doVulnerabilitiesNamespacing takes Vulnerabilities that don't have a Namespace and split them
|
||||
// into multiple vulnerabilities that have a Namespace and only contains the FixedIn
|
||||
// FeatureVersions corresponding to their Namespace.
|
||||
//
|
||||
// It helps simplifying the fetchers that share the same metadata about a Vulnerability regardless
|
||||
// of their actual namespace (ie. same vulnerability information for every version of a distro).
|
||||
func doVulnerabilitiesNamespacing(vulnerabilities []database.Vulnerability) []database.Vulnerability {
|
||||
vulnerabilitiesMap := make(map[string]*database.Vulnerability)
|
||||
|
||||
for _, v := range vulnerabilities {
|
||||
featureVersions := v.FixedIn
|
||||
v.FixedIn = []database.FeatureVersion{}
|
||||
|
||||
for _, fv := range featureVersions {
|
||||
index := fv.Feature.Namespace.Name + ":" + v.Name
|
||||
|
||||
if vulnerability, ok := vulnerabilitiesMap[index]; !ok {
|
||||
newVulnerability := v
|
||||
newVulnerability.Namespace.Name = fv.Feature.Namespace.Name
|
||||
newVulnerability.FixedIn = []database.FeatureVersion{fv}
|
||||
|
||||
vulnerabilitiesMap[index] = &newVulnerability
|
||||
} else {
|
||||
mergeVulnerability(vulnerability, v)
|
||||
vulnerability.FixedIn = append(vulnerability.FixedIn, fv)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that the packages used in the vulnerabilities are specified.
|
||||
for _, v := range vulnerabilities {
|
||||
for _, node := range v.FixedInNodes {
|
||||
if _, ok := packages[node]; !ok {
|
||||
err = fmt.Errorf("vulnerability %s is fixed by an unspecified package", v.ID)
|
||||
return
|
||||
}
|
||||
}
|
||||
// Convert map into a slice.
|
||||
var response []database.Vulnerability
|
||||
for _, vulnerability := range vulnerabilitiesMap {
|
||||
response = append(response, *vulnerability)
|
||||
}
|
||||
|
||||
// Convert data and return
|
||||
for _, v := range vulnerabilities {
|
||||
svulnerabilities = append(svulnerabilities, v)
|
||||
}
|
||||
for _, p := range packages {
|
||||
spackages = append(spackages, p)
|
||||
}
|
||||
|
||||
bnotes, _ := json.Marshal(notes)
|
||||
snotes = string(bnotes)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// mergeVulnerability updates the target vulnerability structure using the specified one.
|
||||
func mergeVulnerability(target, source *database.Vulnerability) {
|
||||
if source.Link != "" {
|
||||
target.Link = source.Link
|
||||
}
|
||||
if source.Description != "" {
|
||||
target.Description = source.Description
|
||||
}
|
||||
if source.Priority.Compare(target.Priority) > 0 {
|
||||
target.Priority = source.Priority
|
||||
}
|
||||
for _, node := range source.FixedInNodes {
|
||||
if !utils.Contains(node, target.FixedInNodes) {
|
||||
target.FixedInNodes = append(target.FixedInNodes, node)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Healthcheck returns the health of the updater service.
|
||||
func Healthcheck() health.Status {
|
||||
notes := getNotes()
|
||||
|
||||
return health.Status{
|
||||
IsEssential: false,
|
||||
IsHealthy: len(notes) == 0,
|
||||
Details: struct {
|
||||
LatestSuccessfulUpdate time.Time
|
||||
Notes []string `json:",omitempty"`
|
||||
}{
|
||||
LatestSuccessfulUpdate: getLastUpdate(),
|
||||
Notes: notes,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getLastUpdate() time.Time {
|
||||
if lastUpdateTSS, err := database.GetFlagValue(flagName); err == nil && lastUpdateTSS != "" {
|
||||
if lastUpdateTS, err := strconv.ParseInt(lastUpdateTSS, 10, 64); err == nil {
|
||||
return time.Unix(lastUpdateTS, 0).UTC()
|
||||
}
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
func getNotes() (notes []string) {
|
||||
if jsonNotes, err := database.GetFlagValue(notesFlagName); err == nil && jsonNotes != "" {
|
||||
json.Unmarshal([]byte(jsonNotes), notes)
|
||||
}
|
||||
return
|
||||
return response
|
||||
}
|
||||
|
57
updater/updater_test.go
Normal file
57
updater/updater_test.go
Normal file
@ -0,0 +1,57 @@
|
||||
package updater
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestDoVulnerabilitiesNamespacing checks that a vulnerability whose FixedIn
// list spans several namespaces is split into one vulnerability per
// namespace, each keeping only the FeatureVersions of its own namespace.
func TestDoVulnerabilitiesNamespacing(t *testing.T) {
	// One feature in Namespace1, two features in Namespace2.
	fv1 := database.FeatureVersion{
		Feature: database.Feature{
			Namespace: database.Namespace{Name: "Namespace1"},
			Name:      "Feature1",
		},
		Version: types.NewVersionUnsafe("0.1"),
	}

	fv2 := database.FeatureVersion{
		Feature: database.Feature{
			Namespace: database.Namespace{Name: "Namespace2"},
			Name:      "Feature1",
		},
		Version: types.NewVersionUnsafe("0.2"),
	}

	fv3 := database.FeatureVersion{
		Feature: database.Feature{
			Namespace: database.Namespace{Name: "Namespace2"},
			Name:      "Feature2",
		},
		Version: types.NewVersionUnsafe("0.3"),
	}

	vulnerability := database.Vulnerability{
		Name:    "DoVulnerabilityNamespacing",
		FixedIn: []database.FeatureVersion{fv1, fv2, fv3},
	}

	// The single input vulnerability should come back split by namespace:
	// Namespace1 keeps fv1 only, Namespace2 keeps fv2 and fv3.
	vulnerabilities := doVulnerabilitiesNamespacing([]database.Vulnerability{vulnerability})
	for _, vulnerability := range vulnerabilities {
		switch vulnerability.Namespace.Name {
		case fv1.Feature.Namespace.Name:
			assert.Len(t, vulnerability.FixedIn, 1)
			assert.Contains(t, vulnerability.FixedIn, fv1)
		case fv2.Feature.Namespace.Name:
			assert.Len(t, vulnerability.FixedIn, 2)
			assert.Contains(t, vulnerability.FixedIn, fv2)
			assert.Contains(t, vulnerability.FixedIn, fv3)
		default:
			t.Errorf("Should not have a Vulnerability with '%s' as its Namespace.", vulnerability.Namespace.Name)
			fmt.Printf("%#v\n", vulnerability)
		}
	}
}
|
@ -20,10 +20,15 @@ import "errors"
|
||||
var (
|
||||
// ErrFilesystem occurs when a filesystem interaction fails.
|
||||
ErrFilesystem = errors.New("something went wrong when interacting with the fs")
|
||||
|
||||
// ErrCouldNotDownload occurs when a download fails.
|
||||
ErrCouldNotDownload = errors.New("could not download requested ressource")
|
||||
|
||||
// ErrNotFound occurs when a resource could not be found.
|
||||
ErrNotFound = errors.New("the resource cannot be found")
|
||||
|
||||
// ErrCouldNotParse is returned when a fetcher fails to parse the update data.
|
||||
ErrCouldNotParse = errors.New("updater/fetchers: could not parse")
|
||||
)
|
||||
|
||||
// ErrBadRequest occurs when a method has been passed an inappropriate argument.
|
||||
|
@ -26,7 +26,7 @@ import (
|
||||
"github.com/coreos/clair/worker"
|
||||
)
|
||||
|
||||
// MaxPostSize is the maximum number of bytes that ParseHTTPBody reads from an http.Request.Body.
|
||||
// MaxBodySize is the maximum number of bytes that ParseHTTPBody reads from an http.Request.Body.
|
||||
const MaxBodySize int64 = 1048576
|
||||
|
||||
// WriteHTTP writes a JSON-encoded object to a http.ResponseWriter, as well as
|
||||
@ -54,7 +54,7 @@ func WriteHTTPError(w http.ResponseWriter, httpStatus int, err error) {
|
||||
switch err {
|
||||
case cerrors.ErrNotFound:
|
||||
httpStatus = http.StatusNotFound
|
||||
case database.ErrTransaction, database.ErrBackendException:
|
||||
case database.ErrBackendException:
|
||||
httpStatus = http.StatusServiceUnavailable
|
||||
case worker.ErrParentUnknown, worker.ErrUnsupported, utils.ErrCouldNotExtract, utils.ErrExtractedFileTooBig:
|
||||
httpStatus = http.StatusBadRequest
|
||||
|
13
utils/prometheus.go
Normal file
13
utils/prometheus.go
Normal file
@ -0,0 +1,13 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// PrometheusObserveTimeMilliseconds observes the elapsed time since start, in milliseconds,
|
||||
// on the specified Prometheus Histogram.
|
||||
func PrometheusObserveTimeMilliseconds(h prometheus.Histogram, start time.Time) {
|
||||
h.Observe(float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond))
|
||||
}
|
@ -14,29 +14,17 @@
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"regexp"
|
||||
)
|
||||
import "regexp"
|
||||
|
||||
var urlParametersRegexp = regexp.MustCompile(`(\?|\&)([^=]+)\=([^ &]+)`)
|
||||
|
||||
// Hash returns an unique hash of the given string
|
||||
func Hash(str string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(str))
|
||||
bs := h.Sum(nil)
|
||||
return hex.EncodeToString(bs)
|
||||
}
|
||||
|
||||
// CleanURL removes all parameters from an URL
|
||||
// CleanURL removes all parameters from an URL.
|
||||
func CleanURL(str string) string {
|
||||
return urlParametersRegexp.ReplaceAllString(str, "")
|
||||
}
|
||||
|
||||
// Contains looks for a string into an array of strings and returns whether
|
||||
// the string exists
|
||||
// the string exists.
|
||||
func Contains(needle string, haystack []string) bool {
|
||||
for _, h := range haystack {
|
||||
if h == needle {
|
||||
@ -46,22 +34,41 @@ func Contains(needle string, haystack []string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// CompareStringLists returns the strings which are present in X but not in Y
|
||||
// CompareStringLists returns the strings that are present in X but not in Y.
|
||||
func CompareStringLists(X, Y []string) []string {
|
||||
m := make(map[string]int)
|
||||
m := make(map[string]bool)
|
||||
|
||||
for _, y := range Y {
|
||||
m[y] = 1
|
||||
m[y] = true
|
||||
}
|
||||
|
||||
diff := []string{}
|
||||
for _, x := range X {
|
||||
if m[x] > 0 {
|
||||
if m[x] {
|
||||
continue
|
||||
}
|
||||
|
||||
diff = append(diff, x)
|
||||
m[x] = 1
|
||||
m[x] = true
|
||||
}
|
||||
|
||||
return diff
|
||||
}
|
||||
|
||||
// CompareStringListsInBoth returns the strings that appear in both X and Y.
// Each common string is reported once, in the order of its first occurrence
// in X.
func CompareStringListsInBoth(X, Y []string) []string {
	inY := make(map[string]struct{}, len(Y))
	for _, y := range Y {
		inY[y] = struct{}{}
	}

	common := []string{}
	for _, x := range X {
		if _, ok := inY[x]; !ok {
			continue
		}
		common = append(common, x)
		// Remove the matched entry so duplicates in X are reported once.
		delete(inY, x)
	}

	return common
}
@ -15,6 +15,12 @@
|
||||
// Package types defines useful types that are used in database models.
|
||||
package types
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Priority defines a vulnerability priority
|
||||
type Priority string
|
||||
|
||||
@ -86,3 +92,19 @@ func (p Priority) Compare(p2 Priority) int {
|
||||
|
||||
return i1 - i2
|
||||
}
|
||||
|
||||
func (p *Priority) Scan(value interface{}) error {
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return errors.New("could not scan a Priority from a non-string input")
|
||||
}
|
||||
*p = Priority(string(val))
|
||||
if !p.IsValid() {
|
||||
return fmt.Errorf("could not scan an invalid Priority (%v)", p)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Priority) Value() (driver.Value, error) {
|
||||
return string(*p), nil
|
||||
}
|
||||
|
@ -15,6 +15,7 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strconv"
|
||||
@ -178,11 +179,24 @@ func (v Version) MarshalJSON() ([]byte, error) {
|
||||
func (v *Version) UnmarshalJSON(b []byte) (err error) {
|
||||
var str string
|
||||
json.Unmarshal(b, &str)
|
||||
vp, err := NewVersion(str)
|
||||
vp := NewVersionUnsafe(str)
|
||||
*v = vp
|
||||
return
|
||||
}
|
||||
|
||||
func (v *Version) Scan(value interface{}) (err error) {
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return errors.New("could not scan a Version from a non-string input")
|
||||
}
|
||||
*v, err = NewVersion(string(val))
|
||||
return
|
||||
}
|
||||
|
||||
func (v *Version) Value() (driver.Value, error) {
|
||||
return v.String(), nil
|
||||
}
|
||||
|
||||
func verrevcmp(t1, t2 string) int {
|
||||
t1, rt1 := nextRune(t1)
|
||||
t2, rt2 := nextRune(t2)
|
||||
|
@ -29,7 +29,16 @@ const fileToDownload = "http://www.google.com/robots.txt"
|
||||
|
||||
// TestDiff tests the diff.go source file
|
||||
func TestDiff(t *testing.T) {
|
||||
assert.NotContains(t, CompareStringLists([]string{"a", "b", "a"}, []string{"a", "c"}), "a")
|
||||
cmp := CompareStringLists([]string{"a", "b", "b", "a"}, []string{"a", "c"})
|
||||
assert.Len(t, cmp, 1)
|
||||
assert.NotContains(t, cmp, "a")
|
||||
assert.Contains(t, cmp, "b")
|
||||
|
||||
cmp = CompareStringListsInBoth([]string{"a", "a", "b", "c"}, []string{"a", "c", "c"})
|
||||
assert.Len(t, cmp, 2)
|
||||
assert.NotContains(t, cmp, "b")
|
||||
assert.Contains(t, cmp, "a")
|
||||
assert.Contains(t, cmp, "c")
|
||||
}
|
||||
|
||||
// TestExec tests the exec.go source file
|
||||
@ -47,9 +56,6 @@ func TestExec(t *testing.T) {
|
||||
|
||||
// TestString tests the string.go file
|
||||
func TestString(t *testing.T) {
|
||||
assert.Equal(t, Hash("abc123"), Hash("abc123"))
|
||||
assert.NotEqual(t, Hash("abc123."), Hash("abc123"))
|
||||
|
||||
assert.False(t, Contains("", []string{}))
|
||||
assert.True(t, Contains("a", []string{"a", "b"}))
|
||||
assert.False(t, Contains("c", []string{"a", "b"}))
|
||||
|
139
vendor/bitbucket.org/liamstask/goose/lib/goose/dbconf.go
generated
vendored
Normal file
139
vendor/bitbucket.org/liamstask/goose/lib/goose/dbconf.go
generated
vendored
Normal file
@ -0,0 +1,139 @@
|
||||
package goose
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/kylelemons/go-gypsy/yaml"
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
// DBDriver encapsulates the info needed to work with
|
||||
// a specific database driver
|
||||
type DBDriver struct {
|
||||
Name string
|
||||
OpenStr string
|
||||
Import string
|
||||
Dialect SqlDialect
|
||||
}
|
||||
|
||||
type DBConf struct {
|
||||
MigrationsDir string
|
||||
Env string
|
||||
Driver DBDriver
|
||||
PgSchema string
|
||||
}
|
||||
|
||||
// extract configuration details from the given file
|
||||
func NewDBConf(p, env string, pgschema string) (*DBConf, error) {
|
||||
|
||||
cfgFile := filepath.Join(p, "dbconf.yml")
|
||||
|
||||
f, err := yaml.ReadFile(cfgFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
drv, err := f.Get(fmt.Sprintf("%s.driver", env))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
drv = os.ExpandEnv(drv)
|
||||
|
||||
open, err := f.Get(fmt.Sprintf("%s.open", env))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
open = os.ExpandEnv(open)
|
||||
|
||||
// Automatically parse postgres urls
|
||||
if drv == "postgres" {
|
||||
|
||||
// Assumption: If we can parse the URL, we should
|
||||
if parsedURL, err := pq.ParseURL(open); err == nil && parsedURL != "" {
|
||||
open = parsedURL
|
||||
}
|
||||
}
|
||||
|
||||
d := newDBDriver(drv, open)
|
||||
|
||||
// allow the configuration to override the Import for this driver
|
||||
if imprt, err := f.Get(fmt.Sprintf("%s.import", env)); err == nil {
|
||||
d.Import = imprt
|
||||
}
|
||||
|
||||
// allow the configuration to override the Dialect for this driver
|
||||
if dialect, err := f.Get(fmt.Sprintf("%s.dialect", env)); err == nil {
|
||||
d.Dialect = dialectByName(dialect)
|
||||
}
|
||||
|
||||
if !d.IsValid() {
|
||||
return nil, errors.New(fmt.Sprintf("Invalid DBConf: %v", d))
|
||||
}
|
||||
|
||||
return &DBConf{
|
||||
MigrationsDir: filepath.Join(p, "migrations"),
|
||||
Env: env,
|
||||
Driver: d,
|
||||
PgSchema: pgschema,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Create a new DBDriver and populate driver specific
|
||||
// fields for drivers that we know about.
|
||||
// Further customization may be done in NewDBConf
|
||||
func newDBDriver(name, open string) DBDriver {
|
||||
|
||||
d := DBDriver{
|
||||
Name: name,
|
||||
OpenStr: open,
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "postgres":
|
||||
d.Import = "github.com/lib/pq"
|
||||
d.Dialect = &PostgresDialect{}
|
||||
|
||||
case "mymysql":
|
||||
d.Import = "github.com/ziutek/mymysql/godrv"
|
||||
d.Dialect = &MySqlDialect{}
|
||||
|
||||
case "mysql":
|
||||
d.Import = "github.com/go-sql-driver/mysql"
|
||||
d.Dialect = &MySqlDialect{}
|
||||
|
||||
case "sqlite3":
|
||||
d.Import = "github.com/mattn/go-sqlite3"
|
||||
d.Dialect = &Sqlite3Dialect{}
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// ensure we have enough info about this driver
|
||||
func (drv *DBDriver) IsValid() bool {
|
||||
return len(drv.Import) > 0 && drv.Dialect != nil
|
||||
}
|
||||
|
||||
// OpenDBFromDBConf wraps database/sql.DB.Open() and configures
|
||||
// the newly opened DB based on the given DBConf.
|
||||
//
|
||||
// Callers must Close() the returned DB.
|
||||
func OpenDBFromDBConf(conf *DBConf) (*sql.DB, error) {
|
||||
db, err := sql.Open(conf.Driver.Name, conf.Driver.OpenStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if a postgres schema has been specified, apply it
|
||||
if conf.Driver.Name == "postgres" && conf.PgSchema != "" {
|
||||
if _, err := db.Exec("SET search_path TO " + conf.PgSchema); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
123
vendor/bitbucket.org/liamstask/goose/lib/goose/dialect.go
generated
vendored
Normal file
123
vendor/bitbucket.org/liamstask/goose/lib/goose/dialect.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
package goose
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// SqlDialect abstracts the details of specific SQL dialects
|
||||
// for goose's few SQL specific statements
|
||||
type SqlDialect interface {
|
||||
createVersionTableSql() string // sql string to create the goose_db_version table
|
||||
insertVersionSql() string // sql string to insert the initial version table row
|
||||
dbVersionQuery(db *sql.DB) (*sql.Rows, error)
|
||||
}
|
||||
|
||||
// drivers that we don't know about can ask for a dialect by name
|
||||
func dialectByName(d string) SqlDialect {
|
||||
switch d {
|
||||
case "postgres":
|
||||
return &PostgresDialect{}
|
||||
case "mysql":
|
||||
return &MySqlDialect{}
|
||||
case "sqlite3":
|
||||
return &Sqlite3Dialect{}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
////////////////////////////
|
||||
// Postgres
|
||||
////////////////////////////
|
||||
|
||||
type PostgresDialect struct{}
|
||||
|
||||
func (pg PostgresDialect) createVersionTableSql() string {
|
||||
return `CREATE TABLE goose_db_version (
|
||||
id serial NOT NULL,
|
||||
version_id bigint NOT NULL,
|
||||
is_applied boolean NOT NULL,
|
||||
tstamp timestamp NULL default now(),
|
||||
PRIMARY KEY(id)
|
||||
);`
|
||||
}
|
||||
|
||||
func (pg PostgresDialect) insertVersionSql() string {
|
||||
return "INSERT INTO goose_db_version (version_id, is_applied) VALUES ($1, $2);"
|
||||
}
|
||||
|
||||
func (pg PostgresDialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) {
|
||||
rows, err := db.Query("SELECT version_id, is_applied from goose_db_version ORDER BY id DESC")
|
||||
|
||||
// XXX: check for postgres specific error indicating the table doesn't exist.
|
||||
// for now, assume any error is because the table doesn't exist,
|
||||
// in which case we'll try to create it.
|
||||
if err != nil {
|
||||
return nil, ErrTableDoesNotExist
|
||||
}
|
||||
|
||||
return rows, err
|
||||
}
|
||||
|
||||
////////////////////////////
|
||||
// MySQL
|
||||
////////////////////////////
|
||||
|
||||
type MySqlDialect struct{}
|
||||
|
||||
func (m MySqlDialect) createVersionTableSql() string {
|
||||
return `CREATE TABLE goose_db_version (
|
||||
id serial NOT NULL,
|
||||
version_id bigint NOT NULL,
|
||||
is_applied boolean NOT NULL,
|
||||
tstamp timestamp NULL default now(),
|
||||
PRIMARY KEY(id)
|
||||
);`
|
||||
}
|
||||
|
||||
func (m MySqlDialect) insertVersionSql() string {
|
||||
return "INSERT INTO goose_db_version (version_id, is_applied) VALUES (?, ?);"
|
||||
}
|
||||
|
||||
func (m MySqlDialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) {
|
||||
rows, err := db.Query("SELECT version_id, is_applied from goose_db_version ORDER BY id DESC")
|
||||
|
||||
// XXX: check for mysql specific error indicating the table doesn't exist.
|
||||
// for now, assume any error is because the table doesn't exist,
|
||||
// in which case we'll try to create it.
|
||||
if err != nil {
|
||||
return nil, ErrTableDoesNotExist
|
||||
}
|
||||
|
||||
return rows, err
|
||||
}
|
||||
|
||||
////////////////////////////
|
||||
// sqlite3
|
||||
////////////////////////////
|
||||
|
||||
type Sqlite3Dialect struct{}
|
||||
|
||||
func (m Sqlite3Dialect) createVersionTableSql() string {
|
||||
return `CREATE TABLE goose_db_version (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
version_id INTEGER NOT NULL,
|
||||
is_applied INTEGER NOT NULL,
|
||||
tstamp TIMESTAMP DEFAULT (datetime('now'))
|
||||
);`
|
||||
}
|
||||
|
||||
func (m Sqlite3Dialect) insertVersionSql() string {
|
||||
return "INSERT INTO goose_db_version (version_id, is_applied) VALUES (?, ?);"
|
||||
}
|
||||
|
||||
func (m Sqlite3Dialect) dbVersionQuery(db *sql.DB) (*sql.Rows, error) {
|
||||
rows, err := db.Query("SELECT version_id, is_applied from goose_db_version ORDER BY id DESC")
|
||||
|
||||
switch err.(type) {
|
||||
case sqlite3.Error:
|
||||
return nil, ErrTableDoesNotExist
|
||||
}
|
||||
return rows, err
|
||||
}
|
413
vendor/bitbucket.org/liamstask/goose/lib/goose/migrate.go
generated
vendored
Normal file
413
vendor/bitbucket.org/liamstask/goose/lib/goose/migrate.go
generated
vendored
Normal file
@ -0,0 +1,413 @@
|
||||
package goose
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
_ "github.com/go-sql-driver/mysql"
|
||||
_ "github.com/lib/pq"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
_ "github.com/ziutek/mymysql/godrv"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrTableDoesNotExist = errors.New("table does not exist")
|
||||
ErrNoPreviousVersion = errors.New("no previous version found")
|
||||
)
|
||||
|
||||
type MigrationRecord struct {
|
||||
VersionId int64
|
||||
TStamp time.Time
|
||||
IsApplied bool // was this a result of up() or down()
|
||||
}
|
||||
|
||||
type Migration struct {
|
||||
Version int64
|
||||
Next int64 // next version, or -1 if none
|
||||
Previous int64 // previous version, -1 if none
|
||||
Source string // path to .go or .sql script
|
||||
}
|
||||
|
||||
type migrationSorter []*Migration
|
||||
|
||||
// helpers so we can use pkg sort
|
||||
func (ms migrationSorter) Len() int { return len(ms) }
|
||||
func (ms migrationSorter) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
|
||||
func (ms migrationSorter) Less(i, j int) bool { return ms[i].Version < ms[j].Version }
|
||||
|
||||
func newMigration(v int64, src string) *Migration {
|
||||
return &Migration{v, -1, -1, src}
|
||||
}
|
||||
|
||||
func RunMigrations(conf *DBConf, migrationsDir string, target int64) (err error) {
|
||||
|
||||
db, err := OpenDBFromDBConf(conf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
return RunMigrationsOnDb(conf, migrationsDir, target, db)
|
||||
}
|
||||
|
||||
// Runs migration on a specific database instance.
|
||||
func RunMigrationsOnDb(conf *DBConf, migrationsDir string, target int64, db *sql.DB) (err error) {
|
||||
current, err := EnsureDBVersion(conf, db)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
migrations, err := CollectMigrations(migrationsDir, current, target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(migrations) == 0 {
|
||||
fmt.Printf("goose: no migrations to run. current version: %d\n", current)
|
||||
return nil
|
||||
}
|
||||
|
||||
ms := migrationSorter(migrations)
|
||||
direction := current < target
|
||||
ms.Sort(direction)
|
||||
|
||||
fmt.Printf("goose: migrating db environment '%v', current version: %d, target: %d\n",
|
||||
conf.Env, current, target)
|
||||
|
||||
for _, m := range ms {
|
||||
|
||||
switch filepath.Ext(m.Source) {
|
||||
case ".go":
|
||||
err = runGoMigration(conf, m.Source, m.Version, direction)
|
||||
case ".sql":
|
||||
err = runSQLMigration(conf, db, m.Source, m.Version, direction)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return errors.New(fmt.Sprintf("FAIL %v, quitting migration", err))
|
||||
}
|
||||
|
||||
fmt.Println("OK ", filepath.Base(m.Source))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// collect all the valid looking migration scripts in the
|
||||
// migrations folder, and key them by version
|
||||
func CollectMigrations(dirpath string, current, target int64) (m []*Migration, err error) {
|
||||
|
||||
// extract the numeric component of each migration,
|
||||
// filter out any uninteresting files,
|
||||
// and ensure we only have one file per migration version.
|
||||
filepath.Walk(dirpath, func(name string, info os.FileInfo, err error) error {
|
||||
|
||||
if v, e := NumericComponent(name); e == nil {
|
||||
|
||||
for _, g := range m {
|
||||
if v == g.Version {
|
||||
log.Fatalf("more than one file specifies the migration for version %d (%s and %s)",
|
||||
v, g.Source, filepath.Join(dirpath, name))
|
||||
}
|
||||
}
|
||||
|
||||
if versionFilter(v, current, target) {
|
||||
m = append(m, newMigration(v, name))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func versionFilter(v, current, target int64) bool {
|
||||
|
||||
if target > current {
|
||||
return v > current && v <= target
|
||||
}
|
||||
|
||||
if target < current {
|
||||
return v <= current && v > target
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (ms migrationSorter) Sort(direction bool) {
|
||||
|
||||
// sort ascending or descending by version
|
||||
if direction {
|
||||
sort.Sort(ms)
|
||||
} else {
|
||||
sort.Sort(sort.Reverse(ms))
|
||||
}
|
||||
|
||||
// now that we're sorted in the appropriate direction,
|
||||
// populate next and previous for each migration
|
||||
for i, m := range ms {
|
||||
prev := int64(-1)
|
||||
if i > 0 {
|
||||
prev = ms[i-1].Version
|
||||
ms[i-1].Next = m.Version
|
||||
}
|
||||
ms[i].Previous = prev
|
||||
}
|
||||
}
|
||||
|
||||
// look for migration scripts with names in the form:
|
||||
// XXX_descriptivename.ext
|
||||
// where XXX specifies the version number
|
||||
// and ext specifies the type of migration
|
||||
func NumericComponent(name string) (int64, error) {
|
||||
|
||||
base := filepath.Base(name)
|
||||
|
||||
if ext := filepath.Ext(base); ext != ".go" && ext != ".sql" {
|
||||
return 0, errors.New("not a recognized migration file type")
|
||||
}
|
||||
|
||||
idx := strings.Index(base, "_")
|
||||
if idx < 0 {
|
||||
return 0, errors.New("no separator found")
|
||||
}
|
||||
|
||||
n, e := strconv.ParseInt(base[:idx], 10, 64)
|
||||
if e == nil && n <= 0 {
|
||||
return 0, errors.New("migration IDs must be greater than zero")
|
||||
}
|
||||
|
||||
return n, e
|
||||
}
|
||||
|
||||
// retrieve the current version for this DB.
|
||||
// Create and initialize the DB version table if it doesn't exist.
|
||||
func EnsureDBVersion(conf *DBConf, db *sql.DB) (int64, error) {
|
||||
|
||||
rows, err := conf.Driver.Dialect.dbVersionQuery(db)
|
||||
if err != nil {
|
||||
if err == ErrTableDoesNotExist {
|
||||
return 0, createVersionTable(conf, db)
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
// The most recent record for each migration specifies
|
||||
// whether it has been applied or rolled back.
|
||||
// The first version we find that has been applied is the current version.
|
||||
|
||||
toSkip := make([]int64, 0)
|
||||
|
||||
for rows.Next() {
|
||||
var row MigrationRecord
|
||||
if err = rows.Scan(&row.VersionId, &row.IsApplied); err != nil {
|
||||
log.Fatal("error scanning rows:", err)
|
||||
}
|
||||
|
||||
// have we already marked this version to be skipped?
|
||||
skip := false
|
||||
for _, v := range toSkip {
|
||||
if v == row.VersionId {
|
||||
skip = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if skip {
|
||||
continue
|
||||
}
|
||||
|
||||
// if version has been applied we're done
|
||||
if row.IsApplied {
|
||||
return row.VersionId, nil
|
||||
}
|
||||
|
||||
// latest version of migration has not been applied.
|
||||
toSkip = append(toSkip, row.VersionId)
|
||||
}
|
||||
|
||||
panic("failure in EnsureDBVersion()")
|
||||
}
|
||||
|
||||
// Create the goose_db_version table
|
||||
// and insert the initial 0 value into it
|
||||
func createVersionTable(conf *DBConf, db *sql.DB) error {
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d := conf.Driver.Dialect
|
||||
|
||||
if _, err := txn.Exec(d.createVersionTableSql()); err != nil {
|
||||
txn.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
version := 0
|
||||
applied := true
|
||||
if _, err := txn.Exec(d.insertVersionSql(), version, applied); err != nil {
|
||||
txn.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
return txn.Commit()
|
||||
}
|
||||
|
||||
// wrapper for EnsureDBVersion for callers that don't already have
|
||||
// their own DB instance
|
||||
func GetDBVersion(conf *DBConf) (version int64, err error) {
|
||||
|
||||
db, err := OpenDBFromDBConf(conf)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
version, err = EnsureDBVersion(conf, db)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return version, nil
|
||||
}
|
||||
|
||||
func GetPreviousDBVersion(dirpath string, version int64) (previous int64, err error) {
|
||||
|
||||
previous = -1
|
||||
sawGivenVersion := false
|
||||
|
||||
filepath.Walk(dirpath, func(name string, info os.FileInfo, walkerr error) error {
|
||||
|
||||
if !info.IsDir() {
|
||||
if v, e := NumericComponent(name); e == nil {
|
||||
if v > previous && v < version {
|
||||
previous = v
|
||||
}
|
||||
if v == version {
|
||||
sawGivenVersion = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if previous == -1 {
|
||||
if sawGivenVersion {
|
||||
// the given version is (likely) valid but we didn't find
|
||||
// anything before it.
|
||||
// 'previous' must reflect that no migrations have been applied.
|
||||
previous = 0
|
||||
} else {
|
||||
err = ErrNoPreviousVersion
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// helper to identify the most recent possible version
|
||||
// within a folder of migration scripts
|
||||
func GetMostRecentDBVersion(dirpath string) (version int64, err error) {
|
||||
|
||||
version = -1
|
||||
|
||||
filepath.Walk(dirpath, func(name string, info os.FileInfo, walkerr error) error {
|
||||
if walkerr != nil {
|
||||
return walkerr
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
if v, e := NumericComponent(name); e == nil {
|
||||
if v > version {
|
||||
version = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if version == -1 {
|
||||
err = errors.New("no valid version found")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func CreateMigration(name, migrationType, dir string, t time.Time) (path string, err error) {
|
||||
|
||||
if migrationType != "go" && migrationType != "sql" {
|
||||
return "", errors.New("migration type must be 'go' or 'sql'")
|
||||
}
|
||||
|
||||
timestamp := t.Format("20060102150405")
|
||||
filename := fmt.Sprintf("%v_%v.%v", timestamp, name, migrationType)
|
||||
|
||||
fpath := filepath.Join(dir, filename)
|
||||
|
||||
var tmpl *template.Template
|
||||
if migrationType == "sql" {
|
||||
tmpl = sqlMigrationTemplate
|
||||
} else {
|
||||
tmpl = goMigrationTemplate
|
||||
}
|
||||
|
||||
path, err = writeTemplateToFile(fpath, tmpl, timestamp)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Update the version table for the given migration,
|
||||
// and finalize the transaction.
|
||||
func FinalizeMigration(conf *DBConf, txn *sql.Tx, direction bool, v int64) error {
|
||||
|
||||
// XXX: drop goose_db_version table on some minimum version number?
|
||||
stmt := conf.Driver.Dialect.insertVersionSql()
|
||||
if _, err := txn.Exec(stmt, v, direction); err != nil {
|
||||
txn.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
return txn.Commit()
|
||||
}
|
||||
|
||||
var goMigrationTemplate = template.Must(template.New("goose.go-migration").Parse(`
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
)
|
||||
|
||||
// Up is executed when this migration is applied
|
||||
func Up_{{ . }}(txn *sql.Tx) {
|
||||
|
||||
}
|
||||
|
||||
// Down is executed when this migration is rolled back
|
||||
func Down_{{ . }}(txn *sql.Tx) {
|
||||
|
||||
}
|
||||
`))
|
||||
|
||||
var sqlMigrationTemplate = template.Must(template.New("goose.sql-migration").Parse(`
|
||||
-- +goose Up
|
||||
-- SQL in section 'Up' is executed when this migration is applied
|
||||
|
||||
|
||||
-- +goose Down
|
||||
-- SQL section 'Down' is executed when this migration is rolled back
|
||||
|
||||
`))
|
137
vendor/bitbucket.org/liamstask/goose/lib/goose/migration_go.go
generated
vendored
Normal file
137
vendor/bitbucket.org/liamstask/goose/lib/goose/migration_go.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
|
||||
package goose
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
type templateData struct {
|
||||
Version int64
|
||||
Import string
|
||||
Conf string // gob encoded DBConf
|
||||
Direction bool
|
||||
Func string
|
||||
InsertStmt string
|
||||
}
|
||||
|
||||
func init() {
|
||||
gob.Register(PostgresDialect{})
|
||||
gob.Register(MySqlDialect{})
|
||||
gob.Register(Sqlite3Dialect{})
|
||||
}
|
||||
|
||||
//
|
||||
// Run a .go migration.
|
||||
//
|
||||
// In order to do this, we copy a modified version of the
|
||||
// original .go migration, and execute it via `go run` along
|
||||
// with a main() of our own creation.
|
||||
//
|
||||
func runGoMigration(conf *DBConf, path string, version int64, direction bool) error {
|
||||
|
||||
// everything gets written to a temp dir, and zapped afterwards
|
||||
d, e := ioutil.TempDir("", "goose")
|
||||
if e != nil {
|
||||
log.Fatal(e)
|
||||
}
|
||||
defer os.RemoveAll(d)
|
||||
|
||||
directionStr := "Down"
|
||||
if direction {
|
||||
directionStr = "Up"
|
||||
}
|
||||
|
||||
var bb bytes.Buffer
|
||||
if err := gob.NewEncoder(&bb).Encode(conf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// XXX: there must be a better way of making this byte array
|
||||
// available to the generated code...
|
||||
// but for now, print an array literal of the gob bytes
|
||||
var sb bytes.Buffer
|
||||
sb.WriteString("[]byte{ ")
|
||||
for _, b := range bb.Bytes() {
|
||||
sb.WriteString(fmt.Sprintf("0x%02x, ", b))
|
||||
}
|
||||
sb.WriteString("}")
|
||||
|
||||
td := &templateData{
|
||||
Version: version,
|
||||
Import: conf.Driver.Import,
|
||||
Conf: sb.String(),
|
||||
Direction: direction,
|
||||
Func: fmt.Sprintf("%v_%v", directionStr, version),
|
||||
InsertStmt: conf.Driver.Dialect.insertVersionSql(),
|
||||
}
|
||||
main, e := writeTemplateToFile(filepath.Join(d, "goose_main.go"), goMigrationDriverTemplate, td)
|
||||
if e != nil {
|
||||
log.Fatal(e)
|
||||
}
|
||||
|
||||
outpath := filepath.Join(d, filepath.Base(path))
|
||||
if _, e = copyFile(outpath, path); e != nil {
|
||||
log.Fatal(e)
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", "run", main, outpath)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if e = cmd.Run(); e != nil {
|
||||
log.Fatal("`go run` failed: ", e)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//
|
||||
// template for the main entry point to a go-based migration.
|
||||
// this gets linked against the substituted versions of the user-supplied
|
||||
// scripts in order to execute a migration via `go run`
|
||||
//
|
||||
var goMigrationDriverTemplate = template.Must(template.New("goose.go-driver").Parse(`
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
|
||||
_ "{{.Import}}"
|
||||
"bitbucket.org/liamstask/goose/lib/goose"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
var conf goose.DBConf
|
||||
buf := bytes.NewBuffer({{ .Conf }})
|
||||
if err := gob.NewDecoder(buf).Decode(&conf); err != nil {
|
||||
log.Fatal("gob.Decode - ", err)
|
||||
}
|
||||
|
||||
db, err := goose.OpenDBFromDBConf(&conf)
|
||||
if err != nil {
|
||||
log.Fatal("failed to open DB:", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
log.Fatal("db.Begin:", err)
|
||||
}
|
||||
|
||||
{{ .Func }}(txn)
|
||||
|
||||
err = goose.FinalizeMigration(&conf, txn, {{ .Direction }}, {{ .Version }})
|
||||
if err != nil {
|
||||
log.Fatal("Commit() failed:", err)
|
||||
}
|
||||
}
|
||||
`))
|
168
vendor/bitbucket.org/liamstask/goose/lib/goose/migration_sql.go
generated
vendored
Normal file
168
vendor/bitbucket.org/liamstask/goose/lib/goose/migration_sql.go
generated
vendored
Normal file
@ -0,0 +1,168 @@
|
||||
package goose
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const sqlCmdPrefix = "-- +goose "
|
||||
|
||||
// Checks the line to see if the line has a statement-ending semicolon
|
||||
// or if the line contains a double-dash comment.
|
||||
func endsWithSemicolon(line string) bool {
|
||||
|
||||
prev := ""
|
||||
scanner := bufio.NewScanner(strings.NewReader(line))
|
||||
scanner.Split(bufio.ScanWords)
|
||||
|
||||
for scanner.Scan() {
|
||||
word := scanner.Text()
|
||||
if strings.HasPrefix(word, "--") {
|
||||
break
|
||||
}
|
||||
prev = word
|
||||
}
|
||||
|
||||
return strings.HasSuffix(prev, ";")
|
||||
}
|
||||
|
||||
// Split the given sql script into individual statements.
|
||||
//
|
||||
// The base case is to simply split on semicolons, as these
|
||||
// naturally terminate a statement.
|
||||
//
|
||||
// However, more complex cases like pl/pgsql can have semicolons
|
||||
// within a statement. For these cases, we provide the explicit annotations
|
||||
// 'StatementBegin' and 'StatementEnd' to allow the script to
|
||||
// tell us to ignore semicolons.
|
||||
func splitSQLStatements(r io.Reader, direction bool) (stmts []string) {
|
||||
|
||||
var buf bytes.Buffer
|
||||
scanner := bufio.NewScanner(r)
|
||||
|
||||
// track the count of each section
|
||||
// so we can diagnose scripts with no annotations
|
||||
upSections := 0
|
||||
downSections := 0
|
||||
|
||||
statementEnded := false
|
||||
ignoreSemicolons := false
|
||||
directionIsActive := false
|
||||
|
||||
for scanner.Scan() {
|
||||
|
||||
line := scanner.Text()
|
||||
|
||||
// handle any goose-specific commands
|
||||
if strings.HasPrefix(line, sqlCmdPrefix) {
|
||||
cmd := strings.TrimSpace(line[len(sqlCmdPrefix):])
|
||||
switch cmd {
|
||||
case "Up":
|
||||
directionIsActive = (direction == true)
|
||||
upSections++
|
||||
break
|
||||
|
||||
case "Down":
|
||||
directionIsActive = (direction == false)
|
||||
downSections++
|
||||
break
|
||||
|
||||
case "StatementBegin":
|
||||
if directionIsActive {
|
||||
ignoreSemicolons = true
|
||||
}
|
||||
break
|
||||
|
||||
case "StatementEnd":
|
||||
if directionIsActive {
|
||||
statementEnded = (ignoreSemicolons == true)
|
||||
ignoreSemicolons = false
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !directionIsActive {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := buf.WriteString(line + "\n"); err != nil {
|
||||
log.Fatalf("io err: %v", err)
|
||||
}
|
||||
|
||||
// Wrap up the two supported cases: 1) basic with semicolon; 2) psql statement
|
||||
// Lines that end with semicolon that are in a statement block
|
||||
// do not conclude statement.
|
||||
if (!ignoreSemicolons && endsWithSemicolon(line)) || statementEnded {
|
||||
statementEnded = false
|
||||
stmts = append(stmts, buf.String())
|
||||
buf.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
log.Fatalf("scanning migration: %v", err)
|
||||
}
|
||||
|
||||
// diagnose likely migration script errors
|
||||
if ignoreSemicolons {
|
||||
log.Println("WARNING: saw '-- +goose StatementBegin' with no matching '-- +goose StatementEnd'")
|
||||
}
|
||||
|
||||
if bufferRemaining := strings.TrimSpace(buf.String()); len(bufferRemaining) > 0 {
|
||||
log.Printf("WARNING: Unexpected unfinished SQL query: %s. Missing a semicolon?\n", bufferRemaining)
|
||||
}
|
||||
|
||||
if upSections == 0 && downSections == 0 {
|
||||
log.Fatalf(`ERROR: no Up/Down annotations found, so no statements were executed.
|
||||
See https://bitbucket.org/liamstask/goose/overview for details.`)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Run a migration specified in raw SQL.
|
||||
//
|
||||
// Sections of the script can be annotated with a special comment,
|
||||
// starting with "-- +goose" to specify whether the section should
|
||||
// be applied during an Up or Down migration
|
||||
//
|
||||
// All statements following an Up or Down directive are grouped together
|
||||
// until another direction directive is found.
|
||||
func runSQLMigration(conf *DBConf, db *sql.DB, scriptFile string, v int64, direction bool) error {
|
||||
|
||||
txn, err := db.Begin()
|
||||
if err != nil {
|
||||
log.Fatal("db.Begin:", err)
|
||||
}
|
||||
|
||||
f, err := os.Open(scriptFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// find each statement, checking annotations for up/down direction
|
||||
// and execute each of them in the current transaction.
|
||||
// Commits the transaction if successfully applied each statement and
|
||||
// records the version into the version table or returns an error and
|
||||
// rolls back the transaction.
|
||||
for _, query := range splitSQLStatements(f, direction) {
|
||||
if _, err = txn.Exec(query); err != nil {
|
||||
txn.Rollback()
|
||||
log.Fatalf("FAIL %s (%v), quitting migration.", filepath.Base(scriptFile), err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = FinalizeMigration(conf, txn, direction, v); err != nil {
|
||||
log.Fatalf("error finalizing migration %s, quitting. (%v)", filepath.Base(scriptFile), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user