Vendor clair so that builds work properly.
commit 67084070cf
parent 464514b2ea
@@ -26,12 +26,12 @@ import (
	"time"

	"bitbucket.org/liamstask/goose/lib/goose"
	"github.com/cloudflare/clair/vendor/github.com/hashicorp/golang-lru"
	"github.com/coreos/clair/config"
	"github.com/coreos/clair/database"
	"github.com/coreos/clair/utils"
	cerrors "github.com/coreos/clair/utils/errors"
	"github.com/coreos/pkg/capnslog"
	"github.com/hashicorp/golang-lru"
	"github.com/lib/pq"
	"github.com/pborman/uuid"
	"github.com/prometheus/client_golang/prometheus"
vendor/github.com/coreos/clair/.dockerignore: 7 lines (generated, vendored, normal file)
@@ -0,0 +1,7 @@
.*
*.md
DCO
LICENSE
NOTICE
docs
cloudconfig
vendor/github.com/coreos/clair/.travis.yml: 29 lines (generated, vendored, normal file)
@@ -0,0 +1,29 @@
language: go

go:
  - 1.6
  - 1.5
  - tip

sudo: false

before_install:
  - export GO15VENDOREXPERIMENT=1

install:
  - echo 'nop'

script:
  - go test -v $(go list ./... | grep -v /vendor/)

services:
  - postgresql

addons:
  apt:
    packages:
    - rpm
  postgresql: "9.4"

notifications:
  email: false
vendor/github.com/coreos/clair/CONTRIBUTING.md: 71 lines (generated, vendored, executable file)
@@ -0,0 +1,71 @@
|
||||
# How to Contribute
|
||||
|
||||
CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
|
||||
GitHub pull requests. This document outlines some of the conventions on
|
||||
development workflow, commit message formatting, contact points and other
|
||||
resources to make it easier to get your contribution accepted.
|
||||
|
||||
# Certificate of Origin
|
||||
|
||||
By contributing to this project you agree to the Developer Certificate of
|
||||
Origin (DCO). This document was created by the Linux Kernel community and is a
|
||||
simple statement that you, as a contributor, have the legal right to make the
|
||||
contribution. See the [DCO](DCO) file for details.
|
||||
|
||||
# Email and Chat
|
||||
|
||||
The project currently uses the general CoreOS email list and IRC channel:
|
||||
- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
|
||||
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
|
||||
|
||||
Please avoid emailing maintainers found in the MAINTAINERS file directly. They
|
||||
are very busy and read the mailing lists.
|
||||
|
||||
## Getting Started
|
||||
|
||||
- Fork the repository on GitHub
|
||||
- Read the [README](README.md) for build and test instructions
|
||||
- Play with the project, submit bugs, submit patches!
|
||||
|
||||
## Contribution Flow
|
||||
|
||||
This is a rough outline of what a contributor's workflow looks like:
|
||||
|
||||
- Create a topic branch from where you want to base your work (usually master).
|
||||
- Make commits of logical units.
|
||||
- Make sure your commit messages are in the proper format (see below).
|
||||
- Push your changes to a topic branch in your fork of the repository.
|
||||
- Make sure the tests pass, and add any new tests as appropriate.
|
||||
- Submit a pull request to the original repository.
|
||||
|
||||
Thanks for your contributions!
|
||||
|
||||
### Format of the Commit Message
|
||||
|
||||
We follow a rough convention for commit messages that is designed to answer two
|
||||
questions: what changed and why. The subject line should feature the what and
|
||||
the body of the commit should describe the why.
|
||||
|
||||
```
|
||||
scripts: add the test-cluster command
|
||||
|
||||
this uses tmux to setup a test cluster that you can easily kill and
|
||||
start for debugging.
|
||||
|
||||
Fixes #38
|
||||
```
|
||||
|
||||
The format can be described more formally as follows:
|
||||
|
||||
```
|
||||
<subsystem>: <what changed>
|
||||
<BLANK LINE>
|
||||
<why this change was made>
|
||||
<BLANK LINE>
|
||||
<footer>
|
||||
```
|
||||
|
||||
The first line is the subject and should be no longer than 70 characters, the
|
||||
second line is always blank, and other lines should be wrapped at 80 characters.
|
||||
This allows the message to be easier to read on GitHub as well as in various
|
||||
git tools.
|
vendor/github.com/coreos/clair/DCO: 36 lines (generated, vendored, executable file)
@@ -0,0 +1,36 @@
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
vendor/github.com/coreos/clair/Dockerfile: 34 lines (generated, vendored, normal file)
@@ -0,0 +1,34 @@
# Copyright 2015 clair authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM golang:1.6

MAINTAINER Quentin Machu <quentin.machu@coreos.com>

RUN apt-get update && \
    apt-get install -y bzr rpm xz-utils && \
    apt-get autoremove -y && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # 18MAR2016

VOLUME /config

EXPOSE 6060 6061

ADD . /go/src/github.com/coreos/clair/
WORKDIR /go/src/github.com/coreos/clair/

RUN go install -v github.com/coreos/clair/cmd/clair

ENTRYPOINT ["clair"]
vendor/github.com/coreos/clair/Godeps/Godeps.json: 152 lines (generated, vendored, normal file)
@@ -0,0 +1,152 @@
|
||||
{
|
||||
"ImportPath": "github.com/coreos/clair",
|
||||
"GoVersion": "go1.5",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "bitbucket.org/liamstask/goose/lib/goose",
|
||||
"Rev": "8488cc47d90c8a502b1c41a462a6d9cc8ee0a895"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/beorn7/perks/quantile",
|
||||
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/codegangsta/negroni",
|
||||
"Comment": "v0.1-70-gc7477ad",
|
||||
"Rev": "c7477ad8e330bef55bf1ebe300cf8aa67c492d1b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-systemd/journal",
|
||||
"Comment": "v4-34-g4f14f6d",
|
||||
"Rev": "4f14f6deef2da87e4aa59e6c1c1f3e02ba44c5e1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/pkg/capnslog",
|
||||
"Rev": "2c77715c4df99b5420ffcae14ead08f52104065d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/pkg/timeutil",
|
||||
"Rev": "2c77715c4df99b5420ffcae14ead08f52104065d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fernet/fernet-go",
|
||||
"Rev": "1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-sql-driver/mysql",
|
||||
"Comment": "v1.2-125-gd512f20",
|
||||
"Rev": "d512f204a577a4ab037a1816604c48c9c13210be"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "5fc2294e655b78ed8a02082d37808d46c17d7e64"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/guregu/null/zero",
|
||||
"Comment": "v3-3-g79c5bd3",
|
||||
"Rev": "79c5bd36b615db4c06132321189f579c8a5fca98"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/golang-lru",
|
||||
"Rev": "5c7531c003d8bf158b0fe5063649a2f41a822146"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/julienschmidt/httprouter",
|
||||
"Comment": "v1.1-14-g21439ef",
|
||||
"Rev": "21439ef4d70ba4f3e2a5ed9249e7b03af4019b40"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/kylelemons/go-gypsy/yaml",
|
||||
"Comment": "go.weekly.2011-11-02-19-g42fc2c7",
|
||||
"Rev": "42fc2c7ee9b8bd0ff636cd2d7a8c0a49491044c5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/lib/pq",
|
||||
"Comment": "go1.0-cutoff-63-g11fc39a",
|
||||
"Rev": "11fc39a580a008f1f39bb3d11d984fb34ed778d9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattn/go-sqlite3",
|
||||
"Comment": "v1.1.0-30-g5510da3",
|
||||
"Rev": "5510da399572b4962c020184bb291120c0a412e2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
||||
"Rev": "d0c3fe89de86839aecf2e0579c40ba3bb336a453"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pborman/uuid",
|
||||
"Rev": "dee7705ef7b324f27ceb85a121c61f2c2e8ce988"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pmezard/go-difflib/difflib",
|
||||
"Rev": "e8554b8641db39598be7f6342874b958f12ae1d4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_golang/prometheus",
|
||||
"Comment": "0.7.0-68-g67994f1",
|
||||
"Rev": "67994f177195311c3ea3d4407ed0175e34a4256f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_model/go",
|
||||
"Comment": "model-0.0.2-12-gfa8ad6f",
|
||||
"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/expfmt",
|
||||
"Rev": "dba5e39d4516169e840def50e507ef5f21b985f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
|
||||
"Rev": "dba5e39d4516169e840def50e507ef5f21b985f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/model",
|
||||
"Rev": "dba5e39d4516169e840def50e507ef5f21b985f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/procfs",
|
||||
"Rev": "406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/testify/assert",
|
||||
"Comment": "v1.0-91-g5b9da39",
|
||||
"Rev": "5b9da39b66e8e994455c2525c4421c8cc00a7f93"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tylerb/graceful",
|
||||
"Comment": "v1.2.3",
|
||||
"Rev": "48afeb21e2fcbcff0f30bd5ad6b97747b0fae38e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ziutek/mymysql/godrv",
|
||||
"Comment": "v1.5.4-13-g75ce5fb",
|
||||
"Rev": "75ce5fbba34b1912a3641adbd58cf317d7315821"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ziutek/mymysql/mysql",
|
||||
"Comment": "v1.5.4-13-g75ce5fb",
|
||||
"Rev": "75ce5fbba34b1912a3641adbd58cf317d7315821"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ziutek/mymysql/native",
|
||||
"Comment": "v1.5.4-13-g75ce5fb",
|
||||
"Rev": "75ce5fbba34b1912a3641adbd58cf317d7315821"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/netutil",
|
||||
"Rev": "1d7a0b2100da090d8b02afcfb42f97e2c77e71a4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/yaml.v2",
|
||||
"Rev": "f7716cbe52baa25d2e9b0d0da546fcf909fc16b4"
|
||||
}
|
||||
]
|
||||
}
|
vendor/github.com/coreos/clair/Godeps/Readme: 5 lines (generated, vendored, normal file)
@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
vendor/github.com/coreos/clair/LICENSE: 202 lines (generated, vendored, executable file)
@@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
vendor/github.com/coreos/clair/NOTICE: 5 lines (generated, vendored, executable file)
@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2015 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
vendor/github.com/coreos/clair/README.md: 197 lines (generated, vendored, normal file)
@@ -0,0 +1,197 @@
|
||||
# Clair
|
||||
|
||||
[![Build Status](https://api.travis-ci.org/coreos/clair.svg?branch=master "Build Status")](https://travis-ci.org/coreos/clair)
|
||||
[![Docker Repository on Quay](https://quay.io/repository/coreos/clair/status "Docker Repository on Quay")](https://quay.io/repository/coreos/clair)
|
||||
[![Go Report Card](https://goreportcard.com/badge/coreos/clair "Go Report Card")](https://goreportcard.com/report/coreos/clair)
|
||||
[![GoDoc](https://godoc.org/github.com/coreos/clair?status.svg "GoDoc")](https://godoc.org/github.com/coreos/clair)
|
||||
[![IRC Channel](https://img.shields.io/badge/freenode-%23clair-blue.svg "IRC Channel")](http://webchat.freenode.net/?channels=clair)
|
||||
|
||||
**Note**: The `master` branch may be in an *unstable or even broken state* during development.
|
||||
Please use [releases] instead of the `master` branch in order to get stable binaries.
|
||||
|
||||
![Clair Logo](img/Clair_horizontal_color.png)
|
||||
|
||||
Clair is an open source project for the static analysis of vulnerabilities in [appc] and [docker] containers.
|
||||
|
||||
Vulnerability data is continuously imported from a known set of sources and correlated with the indexed contents of container images in order to produce lists of vulnerabilities that threaten a container.
|
||||
When vulnerability data changes upstream, the previous state and new state of the vulnerability along with the images they affect can be sent via webhook to a configured endpoint.
|
||||
All major components can be [customized programmatically] at compile-time without forking the project.
|
||||
|
||||
Our goal is to enable a more transparent view of the security of container-based infrastructure.
|
||||
Thus, the project was named `Clair` after the French term which translates to *clear*, *bright*, *transparent*.
|
||||
|
||||
[appc]: https://github.com/appc/spec
|
||||
[docker]: https://github.com/docker/docker/blob/master/image/spec/v1.md
|
||||
[customized programmatically]: #customization
|
||||
[releases]: https://github.com/coreos/clair/releases
|
||||
|
||||
## Common Use Cases
|
||||
|
||||
### Manual Auditing
|
||||
|
||||
You're building an application and want to depend on a third-party container image that you found by searching the internet.
|
||||
To make sure that you do not knowingly introduce a new vulnerability into your production service, you decide to scan the container for vulnerabilities.
|
||||
You `docker pull` the container to your development machine and start an instance of Clair.
|
||||
Once it finishes updating, you use the [local image analysis tool] to analyze the container.
|
||||
You realize this container is vulnerable to many critical CVEs, so you decide to use another one.
|
||||
|
||||
[local image analysis tool]: https://github.com/coreos/clair/tree/master/contrib/analyze-local-images
|
||||
|
||||
### Container Registry Integration
|
||||
|
||||
Your company has a continuous-integration pipeline and you want to stop deployments if they introduce a dangerous vulnerability.
|
||||
A developer merges some code into the master branch of your codebase.
|
||||
The first step of your continuous-integration pipeline automates the testing and building of your container and pushes a new container to your container registry.
|
||||
Your container registry notifies Clair which causes the download and indexing of the images for the new container.
|
||||
Clair detects some vulnerabilities and sends a webhook to your continuous deployment tool to prevent this vulnerable build from seeing the light of day.
|
||||
|
||||
## Hello Heartbleed
|
||||
|
||||
During the first run, Clair will bootstrap its database with vulnerability data from its data sources.
|
||||
It can take several minutes before the database has been fully populated.
|
||||
|
||||
**NOTE:** These setups are not meant for production workloads, but as a quick way to get started.
|
||||
|
||||
### Kubernetes
|
||||
|
||||
An easy way to run Clair is with Kubernetes 1.2+.
|
||||
If you are using the [CoreOS Kubernetes single-node instructions][single-node] for Vagrant, you will be able to access Clair's API at http://172.17.4.99:30060/ after following these instructions.
|
||||
|
||||
```
|
||||
git clone https://github.com/coreos/clair
|
||||
cd clair/contrib/k8s
|
||||
kubectl create secret generic clairsecret --from-file=./config.yaml
|
||||
kubectl create -f clair-kubernetes.yaml
|
||||
```
|
||||
|
||||
[single-node]: https://coreos.com/kubernetes/docs/latest/kubernetes-on-vagrant-single.html
|
||||
|
||||
### Docker Compose
|
||||
|
||||
Another easy way to get an instance of Clair running is to use Docker Compose to run everything locally.
|
||||
This runs a PostgreSQL database insecurely and locally in a container.
|
||||
This method should only be used for testing.
|
||||
|
||||
```sh
|
||||
$ curl -L https://raw.githubusercontent.com/coreos/clair/master/docker-compose.yml -o $HOME/docker-compose.yml
|
||||
$ mkdir $HOME/clair_config
|
||||
$ curl -L https://raw.githubusercontent.com/coreos/clair/master/config.example.yaml -o $HOME/clair_config/config.yaml
|
||||
$ $EDITOR $HOME/clair_config/config.yaml # Edit database source to be postgresql://postgres:password@postgres:5432?sslmode=disable
|
||||
$ docker-compose -f $HOME/docker-compose.yml up -d
|
||||
```
|
||||
|
||||
Docker Compose may start Clair before Postgres, which will raise an error.
|
||||
If this error is raised, manually execute `docker start clair_clair`.
|
||||
|
||||
|
||||
### Docker
|
||||
|
||||
This method assumes you already have a [PostgreSQL 9.4+] database running.
|
||||
This is the recommended method for production deployments.
|
||||
|
||||
[PostgreSQL 9.4+]: http://postgresql.org
|
||||
|
||||
```sh
|
||||
$ mkdir $HOME/clair_config
|
||||
$ curl -L https://raw.githubusercontent.com/coreos/clair/master/config.example.yaml -o $HOME/clair_config/config.yaml
|
||||
$ $EDITOR $HOME/clair_config/config.yaml # Add the URI for your postgres database
|
||||
$ docker run -d -p 6060-6061:6060-6061 -v $HOME/clair_config:/config quay.io/coreos/clair -config=/config/config.yaml
|
||||
```
|
||||
|
||||
### Source
|
||||
|
||||
To build Clair, you need the latest stable version of [Go] and a working [Go environment].
|
||||
In addition, Clair requires that [bzr], [rpm], and [xz] be available on the system [$PATH].
|
||||
|
||||
[Go]: https://github.com/golang/go/releases
|
||||
[Go environment]: https://golang.org/doc/code.html
|
||||
[bzr]: http://bazaar.canonical.com/en
|
||||
[rpm]: http://www.rpm.org
|
||||
[xz]: http://tukaani.org/xz
|
||||
[$PATH]: https://en.wikipedia.org/wiki/PATH_(variable)
|
||||
|
||||
```sh
|
||||
$ go get github.com/coreos/clair
|
||||
$ go install github.com/coreos/clair/cmd/clair
|
||||
$ $EDITOR config.yaml # Add the URI for your postgres database
|
||||
$ ./$GOBIN/clair -config=config.yaml
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
Documentation can be found in a `README.md` file located in the directory of the component.
|
||||
|
||||
- [Notifier](https://github.com/coreos/clair/blob/master/notifier/README.md)
|
||||
- [v1 API](https://github.com/coreos/clair/blob/master/api/v1/README.md)
|
||||
|
||||
### Architecture at a Glance
|
||||
|
||||
![Simple Clair Diagram](img/simple_diagram.png)
|
||||
|
||||
### Terminology
|
||||
|
||||
- *Image* - a tarball of the contents of a container
|
||||
- *Layer* - an *appc* or *Docker* image that may or may not be dependent on another image
|
||||
- *Detector* - a Go package that identifies the content, *namespaces* and *features* from a *layer*
|
||||
- *Namespace* - a context around *features* and *vulnerabilities* (e.g. an operating system)
|
||||
- *Feature* - anything that when present could be an indication of a *vulnerability* (e.g. the presence of a file or an installed software package)
|
||||
- *Fetcher* - a Go package that tracks an upstream vulnerability database and imports its contents into Clair
|
||||
|
||||
### Vulnerability Analysis
|
||||
|
||||
There are two major ways to perform analysis of programs: [Static Analysis] and [Dynamic Analysis].
|
||||
Clair has been designed to perform *static analysis*; containers never need to be executed.
|
||||
Rather, the filesystem of the container image is inspected and *features* are indexed into a database.
|
||||
By indexing the features of an image into the database, images only need to be rescanned when new *detectors* are added.
|
||||
|
||||
[Static Analysis]: https://en.wikipedia.org/wiki/Static_program_analysis
|
||||
[Dynamic Analysis]: https://en.wikipedia.org/wiki/Dynamic_program_analysis
|
||||
|
||||
### Default Data Sources
|
||||
|
||||
| Data Source | Versions | Format |
|
||||
|-------------------------------|--------------------------------------------------------|--------|
|
||||
| [Debian Security Bug Tracker] | 6, 7, 8, unstable | [dpkg] |
|
||||
| [Ubuntu CVE Tracker] | 12.04, 12.10, 13.04, 14.04, 14.10, 15.04, 15.10, 16.04 | [dpkg] |
|
||||
| [Red Hat Security Data] | 5, 6, 7 | [rpm] |
|
||||
|
||||
[Debian Security Bug Tracker]: https://security-tracker.debian.org/tracker
|
||||
[Ubuntu CVE Tracker]: https://launchpad.net/ubuntu-cve-tracker
|
||||
[Red Hat Security Data]: https://www.redhat.com/security/data/metrics
|
||||
[dpkg]: https://en.wikipedia.org/wiki/dpkg
|
||||
[rpm]: http://www.rpm.org
|
||||
|
||||
|
||||
### Customization
|
||||
|
||||
The major components of Clair are all programmatically extensible in the same way Go's standard [database/sql] package is extensible.
|
||||
|
||||
Custom behavior can be accomplished by creating a package that contains a type that implements an interface declared in Clair and registering that interface in [init()]. To expose the new behavior, unqualified imports to the package must be added in your [main.go], which should then start Clair using `Boot(*config.Config)`.
|
||||
|
||||
The following interfaces can have custom implementations registered via [init()] at compile time:
|
||||
|
||||
- `Datastore` - the backing storage
|
||||
- `Notifier` - the means by which endpoints are notified of vulnerability changes
|
||||
- `Fetcher` - the sources of vulnerability data that is automatically imported
|
||||
- `MetadataFetcher` - the sources of vulnerability metadata that is automatically added to known vulnerabilities
|
||||
- `DataDetector` - the means by which contents of an image are detected
|
||||
- `FeatureDetector` - the means by which features are identified from a layer
|
||||
- `NamespaceDetector` - the means by which a namespace is identified from a layer
|
||||
|
||||
[init()]: https://golang.org/doc/effective_go.html#init
|
||||
[database/sql]: https://godoc.org/database/sql
|
||||
[main.go]: https://github.com/coreos/clair/blob/master/cmd/clair/main.go
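To make the registration flow described above concrete, here is a minimal sketch of a custom `main.go`. It is not taken from this repository: the `example.com` import paths and the custom packages behind them are hypothetical, and only the side-effect import pattern and the `Boot(*config.Config)` entry point (assumed to be exported from the root `clair` package, as the documentation above implies) come from this README.

```go
// A sketch of a custom Clair binary; the example.com packages are hypothetical.
package main

import (
	"github.com/coreos/clair"
	"github.com/coreos/clair/config"

	// Blank imports are used purely for their init() side effects, which
	// register the custom implementations with Clair at compile time.
	_ "example.com/myorg/clairext/mydatastore" // hypothetical custom Datastore
	_ "example.com/myorg/clairext/myfetcher"   // hypothetical custom Fetcher
)

func main() {
	// Configuration loading is elided; the documentation above only
	// specifies that Clair is started with Boot(*config.Config).
	cfg := &config.Config{}
	clair.Boot(cfg)
}
```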
|
||||
|
||||
## Related Links
|
||||
|
||||
### Talks & Slides
|
||||
|
||||
- _Clair: A Container Image Security Analyzer_ - [Event](https://www.meetup.com/Microservices-NYC/events/230023492/) [Video](https://www.youtube.com/watch?v=ynwKi2yhIX4) [Slides](https://docs.google.com/presentation/d/1ly9wQKQIlI7rlb0JNU1_P-rPDHU4xdRCCM3rxOdjcgc)
|
||||
- _Clair: A Container Image Security Analyzer_ - [Event](https://www.meetup.com/Container-Orchestration-NYC/events/229779466/) [Video](https://www.youtube.com/watch?v=wTfCOUDNV_M) [Slides](https://docs.google.com/presentation/d/1ly9wQKQIlI7rlb0JNU1_P-rPDHU4xdRCCM3rxOdjcgc)
|
||||
|
||||
### Projects Integrating with Clair
|
||||
|
||||
- [Quay](https://quay.io): the first container registry to integrate with Clair
|
||||
- [Dockyard](https://github.com/containerops/dockyard): an open source container registry with Clair integration
|
||||
- [Hyperclair](https://github.com/wemanity-belgium/hyperclair): a lightweight command-line tool for working locally with Clair
|
||||
- [Clair w/ SQS](https://github.com/zalando/clair-sqs): a container containing Clair and additional processes that integrate Clair with [Amazon SQS](https://aws.amazon.com/sqs)
|
vendor/github.com/coreos/clair/ROADMAP.md: 16 lines (generated, vendored, normal file)
@@ -0,0 +1,16 @@
# Clair Roadmap

This document defines a high level roadmap for Clair development.

The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project.
The [milestones defined in GitHub](https://github.com/coreos/clair/milestones) represent the most up-to-date and issue-for-issue plans.

The roadmap below outlines new features that will be added to Clair and, while subject to change, defines what future stable releases will look like.

### Clair 2.0 (July)

- Standardize component registration
- Revisit database implementation
- Improve release distribution
- Address client UX
- Expand detection capabilities
vendor/github.com/coreos/clair/api/api.go: 144 lines (generated, vendored, normal file)
@@ -0,0 +1,144 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/tylerb/graceful"
|
||||
|
||||
"github.com/coreos/clair/api/context"
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
const timeoutResponse = `{"Error":{"Message":"Clair failed to respond within the configured timeout window.","Type":"Timeout"}}`
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")
|
||||
|
||||
func Run(config *config.APIConfig, ctx *context.RouteContext, st *utils.Stopper) {
|
||||
defer st.End()
|
||||
|
||||
// Do not run the API service if there is no config.
|
||||
if config == nil {
|
||||
log.Infof("main API service is disabled.")
|
||||
return
|
||||
}
|
||||
log.Infof("starting main API on port %d.", config.Port)
|
||||
|
||||
tlsConfig, err := tlsClientConfig(config.CAFile)
|
||||
if err != nil {
|
||||
log.Fatalf("could not initialize client cert authentication: %s\n", err)
|
||||
}
|
||||
if tlsConfig != nil {
|
||||
log.Info("main API configured with client certificate authentication")
|
||||
}
|
||||
|
||||
srv := &graceful.Server{
|
||||
Timeout: 0, // Already handled by our TimeOut middleware
|
||||
NoSignalHandling: true, // We want to use our own Stopper
|
||||
Server: &http.Server{
|
||||
Addr: ":" + strconv.Itoa(config.Port),
|
||||
TLSConfig: tlsConfig,
|
||||
Handler: http.TimeoutHandler(newAPIHandler(ctx), config.Timeout, timeoutResponse),
|
||||
},
|
||||
}
|
||||
|
||||
listenAndServeWithStopper(srv, st, config.CertFile, config.KeyFile)
|
||||
|
||||
log.Info("main API stopped")
|
||||
}
|
||||
|
||||
func RunHealth(config *config.APIConfig, ctx *context.RouteContext, st *utils.Stopper) {
|
||||
defer st.End()
|
||||
|
||||
// Do not run the API service if there is no config.
|
||||
if config == nil {
|
||||
log.Infof("health API service is disabled.")
|
||||
return
|
||||
}
|
||||
log.Infof("starting health API on port %d.", config.HealthPort)
|
||||
|
||||
srv := &graceful.Server{
|
||||
Timeout: 10 * time.Second, // Interrupt health checks when stopping
|
||||
NoSignalHandling: true, // We want to use our own Stopper
|
||||
Server: &http.Server{
|
||||
Addr: ":" + strconv.Itoa(config.HealthPort),
|
||||
Handler: http.TimeoutHandler(newHealthHandler(ctx), config.Timeout, timeoutResponse),
|
||||
},
|
||||
}
|
||||
|
||||
listenAndServeWithStopper(srv, st, "", "")
|
||||
|
||||
log.Info("health API stopped")
|
||||
}
|
||||
|
||||
// listenAndServeWithStopper wraps graceful.Server's
|
||||
// ListenAndServe/ListenAndServeTLS and adds the ability to interrupt them with
|
||||
// the provided utils.Stopper
|
||||
func listenAndServeWithStopper(srv *graceful.Server, st *utils.Stopper, certFile, keyFile string) {
|
||||
go func() {
|
||||
<-st.Chan()
|
||||
srv.Stop(0)
|
||||
}()
|
||||
|
||||
var err error
|
||||
if certFile != "" && keyFile != "" {
|
||||
log.Info("API: TLS Enabled")
|
||||
err = srv.ListenAndServeTLS(certFile, keyFile)
|
||||
} else {
|
||||
err = srv.ListenAndServe()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// tlsClientConfig initializes a *tls.Config using the given CA. The resulting
|
||||
// *tls.Config is meant to be used to configure an HTTP server to do client
|
||||
// certificate authentication.
|
||||
//
|
||||
// If no CA is given, a nil *tls.Config is returned; no client certificate will
|
||||
// be required and verified. In other words, authentication will be disabled.
|
||||
func tlsClientConfig(caPath string) (*tls.Config, error) {
|
||||
if caPath == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
caCert, err := ioutil.ReadFile(caPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
caCertPool := x509.NewCertPool()
|
||||
caCertPool.AppendCertsFromPEM(caCert)
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
ClientCAs: caCertPool,
|
||||
ClientAuth: tls.RequireAndVerifyClientCert,
|
||||
}
|
||||
|
||||
return tlsConfig, nil
|
||||
}
|
vendor/github.com/coreos/clair/api/context/context.go: 64 lines (generated, vendored, normal file)
@@ -0,0 +1,64 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")
|
||||
|
||||
promResponseDurationMilliseconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Name: "clair_api_response_duration_milliseconds",
|
||||
Help: "The duration of time it takes to receive and write a response to an API request",
|
||||
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
|
||||
}, []string{"route", "code"})
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(promResponseDurationMilliseconds)
|
||||
}
|
||||
|
||||
type Handler func(http.ResponseWriter, *http.Request, httprouter.Params, *RouteContext) (route string, status int)
|
||||
|
||||
func HTTPHandler(handler Handler, ctx *RouteContext) httprouter.Handle {
|
||||
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
start := time.Now()
|
||||
route, status := handler(w, r, p, ctx)
|
||||
statusStr := strconv.Itoa(status)
|
||||
if status == 0 {
|
||||
statusStr = "???"
|
||||
}
|
||||
utils.PrometheusObserveTimeMilliseconds(promResponseDurationMilliseconds.WithLabelValues(route, statusStr), start)
|
||||
|
||||
log.Infof("%s \"%s %s\" %s (%s)", r.RemoteAddr, r.Method, r.RequestURI, statusStr, time.Since(start))
|
||||
}
|
||||
}
|
||||
|
||||
type RouteContext struct {
|
||||
Store database.Datastore
|
||||
Config *config.APIConfig
|
||||
}
|
vendor/github.com/coreos/clair/api/router.go: 76 lines (generated, vendored, normal file)
@@ -0,0 +1,76 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
|
||||
"github.com/coreos/clair/api/context"
|
||||
"github.com/coreos/clair/api/v1"
|
||||
)
|
||||
|
||||
// router is an HTTP router that forwards requests to the appropriate sub-router
|
||||
// depending on the API version specified in the request URI.
|
||||
type router map[string]*httprouter.Router
|
||||
|
||||
// Let's hope we never have more than 99 API versions.
|
||||
const apiVersionLength = len("v99")
|
||||
|
||||
func newAPIHandler(ctx *context.RouteContext) http.Handler {
|
||||
router := make(router)
|
||||
router["/v1"] = v1.NewRouter(ctx)
|
||||
return router
|
||||
}
|
||||
|
||||
func (rtr router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
urlStr := r.URL.String()
|
||||
var version string
|
||||
if len(urlStr) >= apiVersionLength {
|
||||
version = urlStr[:apiVersionLength]
|
||||
}
|
||||
|
||||
if router, _ := rtr[version]; router != nil {
|
||||
// Remove the version number from the request path to let the router do its
|
||||
// job but do not update the RequestURI
|
||||
r.URL.Path = strings.Replace(r.URL.Path, version, "", 1)
|
||||
router.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("%d %s %s %s", http.StatusNotFound, r.Method, r.RequestURI, r.RemoteAddr)
|
||||
http.NotFound(w, r)
|
||||
}
|
||||
|
||||
func newHealthHandler(ctx *context.RouteContext) http.Handler {
|
||||
router := httprouter.New()
|
||||
router.GET("/health", context.HTTPHandler(getHealth, ctx))
|
||||
return router
|
||||
}
|
||||
|
||||
func getHealth(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
header := w.Header()
|
||||
header.Set("Server", "clair")
|
||||
|
||||
status := http.StatusInternalServerError
|
||||
if ctx.Store.Ping() {
|
||||
status = http.StatusOK
|
||||
}
|
||||
|
||||
w.WriteHeader(status)
|
||||
return "health", status
|
||||
}
|
vendor/github.com/coreos/clair/api/v1/README.md: 640 lines (generated, vendored, normal file)
@@ -0,0 +1,640 @@
|
||||
# Clair v1 API
|
||||
|
||||
- [Error Handling](#error-handling)
|
||||
- [Layers](#layers)
|
||||
- [POST](#post-layers)
|
||||
- [GET](#get-layersname)
|
||||
- [DELETE](#delete-layersname)
|
||||
- [Namespaces](#namespaces)
|
||||
- [GET](#get-namespaces)
|
||||
- [Vulnerabilities](#vulnerabilities)
|
||||
- [List](#get-namespacesnsnamevulnerabilities)
|
||||
- [POST](#post-namespacesnamevulnerabilities)
|
||||
- [GET](#get-namespacesnsnamevulnerabilitiesvulnname)
|
||||
- [PUT](#put-namespacesnsnamevulnerabilitiesvulnname)
|
||||
- [DELETE](#delete-namespacesnsnamevulnerabilitiesvulnname)
|
||||
- [Fixes](#fixes)
|
||||
- [GET](#get-namespacesnsnamevulnerabilitiesvulnnamefixes)
|
||||
- [PUT](#put-namespacesnsnamevulnerabilitiesvulnnamefixesfeaturename)
|
||||
- [DELETE](#delete-namespacesnsnamevulnerabilitiesvulnnamefixesfeaturename)
|
||||
- [Notifications](#notifications)
|
||||
- [GET](#get-notificationsname)
|
||||
- [DELETE](#delete-notificationname)
|
||||
|
||||
## Error Handling
|
||||
|
||||
###### Description
|
||||
|
||||
Every route can optionally provide an `Error` property on the response object.
|
||||
The HTTP status code of the response should indicate what type of failure occurred and how the client should react.
|
||||
|
||||
###### Client Retry Behavior
|
||||
|
||||
| Code | Name | Retry Behavior |
|
||||
|------|-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| 400 | Bad Request | The body of the request is invalid. The request must either be changed before being retried or depends on another request being processed before it. |
|
||||
| 404 | Not Found | The requested resource could not be found. The request must be changed before being retried. |
|
||||
| 422 | Unprocessable Entity | The request body is valid, but unsupported. This request should never be retried. |
|
||||
| 500 | Internal Server Error | The server encountered an error while processing the request. This request should be retried without change. |
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 400 Bad Request
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Error": {
|
||||
"Message": "example error message"
|
||||
}
|
||||
}
|
||||
```
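As an illustration of the retry table above (this snippet is editorial and not part of the upstream API documentation), a client could encode the retry decisions roughly as follows:

```go
package main

import (
	"fmt"
	"net/http"
)

// shouldRetry encodes the policy from the table above: only 500 responses
// are retried unchanged; 400 and 404 mean the request must change before
// being retried, and 422 responses should never be retried.
func shouldRetry(status int) bool {
	switch status {
	case http.StatusInternalServerError: // 500
		return true
	case http.StatusBadRequest, http.StatusNotFound, http.StatusUnprocessableEntity: // 400, 404, 422
		return false
	default:
		return false
	}
}

func main() {
	fmt.Println(shouldRetry(http.StatusInternalServerError)) // true
	fmt.Println(shouldRetry(http.StatusUnprocessableEntity)) // false
}
```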
|
||||
|
||||
## Layers
|
||||
|
||||
#### POST /layers
|
||||
|
||||
###### Description
|
||||
|
||||
The POST route for the Layers resource performs the indexing of a Layer from the provided path and displays the provided Layer with an updated `IndexedByVersion` property.
|
||||
This request blocks for the entire duration of the downloading and indexing of the layer.
|
||||
The Authorization field is an optional value whose contents will fill the Authorization HTTP Header when requesting the layer via HTTP.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
POST http://localhost:6060/v1/layers HTTP/1.1
|
||||
|
||||
{
|
||||
"Layer": {
|
||||
"Name": "523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6",
|
||||
"Path": "https://mystorage.com/layers/523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6/layer.tar",
|
||||
"Headers": {
|
||||
"Authorization": "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.EkN-DOsnsuRjRO6BxXemmJDm3HbxrbRzXglbN2S4sOkopdU4IsDxTI8jO19W_A4K8ZPJijNLis4EZsHeY559a4DFOd50_OqgHGuERTqYZyuhtF39yxJPAjUESwxk2J5k_4zM3O-vtd1Ghyo4IbqKKSy6J9mTniYJPenn5-HIirE"
|
||||
},
|
||||
"ParentName": "140f9bdfeb9784cf8730e9dab5dd12fbd704151cf555ac8cae650451794e5ac2",
|
||||
"Format": "Docker"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 201 Created
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Layer": {
|
||||
"Name": "523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6",
|
||||
"Path": "https://mystorage.com/layers/523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6/layer.tar",
|
||||
"Headers": {
|
||||
"Authorization": "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.EkN-DOsnsuRjRO6BxXemmJDm3HbxrbRzXglbN2S4sOkopdU4IsDxTI8jO19W_A4K8ZPJijNLis4EZsHeY559a4DFOd50_OqgHGuERTqYZyuhtF39yxJPAjUESwxk2J5k_4zM3O-vtd1Ghyo4IbqKKSy6J9mTniYJPenn5-HIirE"
|
||||
},
|
||||
"ParentName": "140f9bdfeb9784cf8730e9dab5dd12fbd704151cf555ac8cae650451794e5ac2",
|
||||
"Format": "Docker",
|
||||
"IndexedByVersion": 1
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### GET /layers/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Layers resource displays a Layer and optionally all of its features and vulnerabilities.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|-----------------|------|----------|-------------------------------------------------------------------------------|
|
||||
| features | bool | optional | Displays the list of features indexed in this layer and all of its parents. |
|
||||
| vulnerabilities | bool | optional | Displays the list of vulnerabilities along with the features described above. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```
|
||||
GET http://localhost:6060/v1/layers/17675ec01494d651e1ccf81dc9cf63959ebfeed4f978fddb1666b6ead008ed52?features&vulnerabilities HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Layer": {
|
||||
"Name": "17675ec01494d651e1ccf81dc9cf63959ebfeed4f978fddb1666b6ead008ed52",
|
||||
"NamespaceName": "debian:8",
|
||||
"ParentName": "140f9bdfeb9784cf8730e9dab5dd12fbd704151cf555ac8cae650451794e5ac2",
|
||||
"IndexedByVersion": 1,
|
||||
"Features": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "8.23-4",
|
||||
"Vulnerabilities": [
|
||||
{
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Severity": "Low",
|
||||
"FixedBy": "9.23-5"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### DELETE /layers/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Layers resource removes a Layer and all of its children from the database.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/layers/17675ec01494d651e1ccf81dc9cf63959ebfeed4f978fddb1666b6ead008ed52 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
||||
|
||||
|
||||
## Namespaces
|
||||
|
||||
#### GET /namespaces
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Namespaces resource displays a list of namespaces currently being managed.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/namespaces HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Namespaces": [
|
||||
{ "Name": "debian:8" },
|
||||
{ "Name": "debian:9" }
|
||||
]
|
||||
}
|
||||
```
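A minimal Go sketch for this route, under the same assumptions as the other client sketches in this document (Clair reachable at `http://localhost:6060`, abbreviated error handling):

```go
// List every namespace Clair currently manages. Sketch only.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:6060/v1/namespaces")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// body mirrors the documented NamespaceEnvelope response.
	var body struct {
		Namespaces []struct {
			Name string `json:"Name"`
		} `json:"Namespaces"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}

	for _, ns := range body.Namespaces {
		fmt.Println(ns.Name)
	}
}
```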
|
||||
|
||||
## Vulnerabilities
|
||||
|
||||
#### GET /namespaces/`:nsName`/vulnerabilities
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Vulnerabilities resource displays the vulnerabilities data for a given namespace.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|---------|------|----------|------------------------------------------------------------|
|
||||
| limit   | int  | required | Limits the number of vulnerabilities returned for the given namespace. |
|
||||
| page    | string | optional | Pagination token (the `NextPage` value from a previous response) selecting which page of vulnerabilities to display. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities?limit=2 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerabilities": [
|
||||
{
|
||||
"Name": "CVE-1999-1332",
|
||||
"NamespaceName": "debian:8",
|
||||
"Description": "gzexe in the gzip package on Red Hat Linux 5.0 and earlier allows local users to overwrite files of other users via a symlink attack on a temporary file.",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-1999-1332",
|
||||
"Severity": "Low"
|
||||
},
|
||||
{
|
||||
"Name": "CVE-1999-1572",
|
||||
"NamespaceName": "debian:8",
|
||||
"Description": "cpio on FreeBSD 2.1.0, Debian GNU/Linux 3.0, and possibly other operating systems, uses a 0 umask when creating files using the -O (archive) or -F options, which creates the files with mode 0666 and allows local users to read or overwrite those files.",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-1999-1572",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 2.1,
|
||||
"Vectors": "AV:L/AC:L/Au:N/C:P/I:N"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"NextPage":"gAAAAABW1ABiOlm6KMDKYFE022bEy_IFJdm4ExxTNuJZMN0Eycn0Sut2tOH9bDB4EWGy5s6xwATUHiG-6JXXaU5U32sBs6_DmA=="
|
||||
}
|
||||
```
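Because this route is paginated, a client keeps feeding the returned `NextPage` token back into the `page` query parameter until no token comes back. A minimal Go sketch, assuming Clair is reachable at `http://localhost:6060` and abbreviating error handling:

```go
// Walk every page of vulnerabilities for the debian:8 namespace. Sketch only.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// vulnerabilityPage mirrors the documented paginated response.
type vulnerabilityPage struct {
	Vulnerabilities []struct {
		Name     string `json:"Name"`
		Severity string `json:"Severity"`
	} `json:"Vulnerabilities"`
	NextPage string `json:"NextPage"`
}

func main() {
	base := "http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities?limit=100"
	page := ""

	for {
		endpoint := base
		if page != "" {
			endpoint += "&page=" + url.QueryEscape(page)
		}

		resp, err := http.Get(endpoint)
		if err != nil {
			panic(err)
		}

		var body vulnerabilityPage
		if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
			panic(err)
		}
		resp.Body.Close()

		for _, v := range body.Vulnerabilities {
			fmt.Println(v.Name, v.Severity)
		}

		// An empty NextPage token means the last page has been reached.
		if body.NextPage == "" {
			return
		}
		page = body.NextPage
	}
}
```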
|
||||
|
||||
#### POST /namespaces/`:name`/vulnerabilities
|
||||
|
||||
###### Description
|
||||
|
||||
The POST route for the Vulnerabilities resource creates a new Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
POST http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities HTTP/1.1
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 201 Created
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
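A minimal Go sketch that builds the envelope shown above and POSTs it. The CVE data is purely illustrative and error handling is abbreviated:

```go
// Create a vulnerability by POSTing a VulnerabilityEnvelope-shaped body. Sketch only.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body := map[string]interface{}{
		"Vulnerability": map[string]interface{}{
			"Name":          "CVE-2014-9471",
			"NamespaceName": "debian:8",
			"Link":          "https://security-tracker.debian.org/tracker/CVE-2014-9471",
			"Severity":      "Low",
			"FixedIn": []map[string]string{
				{"Name": "coreutils", "NamespaceName": "debian:8", "Version": "8.23-1"},
			},
		},
	}

	raw, err := json.Marshal(body)
	if err != nil {
		panic(err)
	}

	resp, err := http.Post(
		"http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities",
		"application/json",
		bytes.NewReader(raw),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println(resp.Status) // expect "201 Created" on success
}
```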
|
||||
|
||||
#### GET /namespaces/`:nsName`/vulnerabilities/`:vulnName`
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Vulnerabilities resource displays the current data for a given vulnerability and optionally the features that fix it.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|---------|------|----------|------------------------------------------------------------|
|
||||
| fixedIn | bool | optional | Displays the list of features that fix this vulnerability. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471?fixedIn HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### PUT /namespaces/`:nsName`/vulnerabilities/`:vulnName`
|
||||
|
||||
###### Description
|
||||
|
||||
The PUT route for the Vulnerabilities resource updates a given Vulnerability.
|
||||
The "FixedIn" property of the Vulnerability must be empty or missing.
|
||||
Fixes should be managed by the Fixes resource.
|
||||
If this vulnerability was inserted by a Fetcher, changes may be lost when the Fetcher updates.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
PUT http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471 HTTP/1.1
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
#### DELETE /namespaces/`:nsName`/vulnerabilities/`:vulnName`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Vulnerabilities resource deletes a given Vulnerability.
|
||||
If this vulnerability was inserted by a Fetcher, it may be re-inserted when the Fetcher updates.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
||||
|
||||
## Fixes
|
||||
|
||||
#### GET /namespaces/`:nsName`/vulnerabilities/`:vulnName`/fixes
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Fixes resource displays the list of Features that fix the given Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471/fixes HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Features": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### PUT /namespaces/`:nsName`/vulnerabilities/`:vulnName`/fixes/`:featureName`
|
||||
|
||||
###### Description
|
||||
|
||||
The PUT route for the Fixes resource updates a Feature that is the fix for a given Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
PUT http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471/fixes/coreutils HTTP/1.1
|
||||
|
||||
{
|
||||
"Feature": {
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "4.24-9"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Feature": {
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "4.24-9"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### DELETE /namespaces/`:nsName`/vulnerabilities/`:vulnName`/fixes/`:featureName`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Fixes resource removes a Feature as a fix for the given Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471/fixes/coreutils HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
||||
|
||||
## Notifications
|
||||
|
||||
#### GET /notifications/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Notifications resource displays a notification that a Vulnerability has been updated.
|
||||
This route supports simultaneous pagination of both the `Old` and `New` Vulnerabilities' `LayersIntroducingVulnerability` properties, which can be extremely long.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|-------|--------|----------|---------------------------------------------------------------------------------------------------------------|
|
||||
| page | string | optional | Displays the specific page of the "LayersIntroducingVulnerability" property on New and Old vulnerabilities. |
|
||||
| limit | int    | required | Limits the number of results in the "LayersIntroducingVulnerability" property on New and Old vulnerabilities. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/notifications/ec45ec87-bfc8-4129-a1c3-d2b82622175a?limit=2 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Notification": {
|
||||
"Name": "ec45ec87-bfc8-4129-a1c3-d2b82622175a",
|
||||
"Created": "1456247389",
|
||||
"Notified": "1456246708",
|
||||
"Limit": 2,
|
||||
"Page": "gAAAAABWzJaC2JCH6Apr_R1f2EkjGdibnrKOobTcYXBWl6t0Cw6Q04ENGIymB6XlZ3Zi0bYt2c-2cXe43fvsJ7ECZhZz4P8C8F9efr_SR0HPiejzQTuG0qAzeO8klogFfFjSz2peBvgP",
|
||||
"NextPage": "gAAAAABWzJaCTyr6QXP2aYsCwEZfWIkU2GkNplSMlTOhLJfiR3LorBv8QYgEIgyOvZRmHQEzJKvkI6TP2PkRczBkcD17GE89btaaKMqEX14yHDgyfQvdasW1tj3-5bBRt0esKi9ym5En",
|
||||
"New": {
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-TEST",
|
||||
"NamespaceName": "debian:8",
|
||||
"Description": "New CVE",
|
||||
"Severity": "Low",
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "grep",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "2.25"
|
||||
}
|
||||
]
|
||||
},
|
||||
"LayersIntroducingVulnerability": [
|
||||
"3b59c795b34670618fbcace4dac7a27c5ecec156812c9e2c90d3f4be1916b12d.9673fdf7-b81a-4b3e-acf8-e551ef155449",
|
||||
"523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6"
|
||||
]
|
||||
},
|
||||
"Old": {
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-TEST",
|
||||
"NamespaceName": "debian:8",
|
||||
"Description": "New CVE",
|
||||
"Severity": "Low",
|
||||
"FixedIn": []
|
||||
},
|
||||
"LayersIntroducingVulnerability": [
|
||||
"3b59c795b34670618fbcace4dac7a27c5ecec156812c9e2c90d3f4be1916b12d.9673fdf7-b81a-4b3e-acf8-e551ef155449",
|
||||
"523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
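A minimal Go sketch that drains every page of a notification (printing only the `New` side's affected layers for brevity) and then marks it as read with the DELETE route documented below. It assumes Clair is reachable at `http://localhost:6060` and abbreviates error handling:

```go
// Page through a notification's affected layers, then mark it as read. Sketch only.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// notificationPage mirrors the subset of the documented response we read.
type notificationPage struct {
	Notification struct {
		Name     string `json:"Name"`
		NextPage string `json:"NextPage"`
		New      struct {
			LayersIntroducingVulnerability []string `json:"LayersIntroducingVulnerability"`
		} `json:"New"`
	} `json:"Notification"`
}

func main() {
	name := "ec45ec87-bfc8-4129-a1c3-d2b82622175a"
	base := "http://localhost:6060/v1/notifications/" + name + "?limit=100"
	page := ""

	for {
		endpoint := base
		if page != "" {
			endpoint += "&page=" + url.QueryEscape(page)
		}

		resp, err := http.Get(endpoint)
		if err != nil {
			panic(err)
		}

		var body notificationPage
		if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
			panic(err)
		}
		resp.Body.Close()

		for _, layer := range body.Notification.New.LayersIntroducingVulnerability {
			fmt.Println("affected layer:", layer)
		}

		if body.Notification.NextPage == "" {
			break
		}
		page = body.Notification.NextPage
	}

	// Mark the notification as read so Clair stops re-sending it.
	req, _ := http.NewRequest("DELETE", "http://localhost:6060/v1/notifications/"+name, nil)
	if _, err := http.DefaultClient.Do(req); err != nil {
		panic(err)
	}
}
```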
|
||||
|
||||
#### DELETE /notifications/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Notifications resource marks a Notification as read.
|
||||
If a notification is not marked as read, Clair will continue to notify the provided endpoints.
|
||||
The time at which a Notification was marked as read can be seen in the `Notified` property returned by the GET route for that Notification.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/notifications/ec45ec87-bfc8-4129-a1c3-d2b82622175a HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
319
vendor/github.com/coreos/clair/api/v1/models.go
generated
vendored
Normal file
@ -0,0 +1,319 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/fernet/fernet-go"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "v1")
|
||||
|
||||
type Error struct {
|
||||
Message string `json:"Message,omitempty"`
|
||||
}
|
||||
|
||||
type Layer struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
NamespaceName string `json:"NamespaceName,omitempty"`
|
||||
Path string `json:"Path,omitempty"`
|
||||
Headers map[string]string `json:"Headers,omitempty"`
|
||||
ParentName string `json:"ParentName,omitempty"`
|
||||
Format string `json:"Format,omitempty"`
|
||||
IndexedByVersion int `json:"IndexedByVersion,omitempty"`
|
||||
Features []Feature `json:"Features,omitempty"`
|
||||
}
|
||||
|
||||
func LayerFromDatabaseModel(dbLayer database.Layer, withFeatures, withVulnerabilities bool) Layer {
|
||||
layer := Layer{
|
||||
Name: dbLayer.Name,
|
||||
IndexedByVersion: dbLayer.EngineVersion,
|
||||
}
|
||||
|
||||
if dbLayer.Parent != nil {
|
||||
layer.ParentName = dbLayer.Parent.Name
|
||||
}
|
||||
|
||||
if dbLayer.Namespace != nil {
|
||||
layer.NamespaceName = dbLayer.Namespace.Name
|
||||
}
|
||||
|
||||
if (withFeatures || withVulnerabilities) && dbLayer.Features != nil {
|
||||
for _, dbFeatureVersion := range dbLayer.Features {
|
||||
feature := Feature{
|
||||
Name: dbFeatureVersion.Feature.Name,
|
||||
NamespaceName: dbFeatureVersion.Feature.Namespace.Name,
|
||||
Version: dbFeatureVersion.Version.String(),
|
||||
AddedBy: dbFeatureVersion.AddedBy.Name,
|
||||
}
|
||||
|
||||
for _, dbVuln := range dbFeatureVersion.AffectedBy {
|
||||
vuln := Vulnerability{
|
||||
Name: dbVuln.Name,
|
||||
NamespaceName: dbVuln.Namespace.Name,
|
||||
Description: dbVuln.Description,
|
||||
Link: dbVuln.Link,
|
||||
Severity: string(dbVuln.Severity),
|
||||
Metadata: dbVuln.Metadata,
|
||||
}
|
||||
|
||||
if dbVuln.FixedBy != types.MaxVersion {
|
||||
vuln.FixedBy = dbVuln.FixedBy.String()
|
||||
}
|
||||
feature.Vulnerabilities = append(feature.Vulnerabilities, vuln)
|
||||
}
|
||||
layer.Features = append(layer.Features, feature)
|
||||
}
|
||||
}
|
||||
|
||||
return layer
|
||||
}
|
||||
|
||||
type Namespace struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
}
|
||||
|
||||
type Vulnerability struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
NamespaceName string `json:"NamespaceName,omitempty"`
|
||||
Description string `json:"Description,omitempty"`
|
||||
Link string `json:"Link,omitempty"`
|
||||
Severity string `json:"Severity,omitempty"`
|
||||
Metadata map[string]interface{} `json:"Metadata,omitempty"`
|
||||
FixedBy string `json:"FixedBy,omitempty"`
|
||||
FixedIn []Feature `json:"FixedIn,omitempty"`
|
||||
}
|
||||
|
||||
func (v Vulnerability) DatabaseModel() (database.Vulnerability, error) {
|
||||
severity := types.Priority(v.Severity)
|
||||
if !severity.IsValid() {
|
||||
return database.Vulnerability{}, errors.New("Invalid severity")
|
||||
}
|
||||
|
||||
var dbFeatures []database.FeatureVersion
|
||||
for _, feature := range v.FixedIn {
|
||||
dbFeature, err := feature.DatabaseModel()
|
||||
if err != nil {
|
||||
return database.Vulnerability{}, err
|
||||
}
|
||||
|
||||
dbFeatures = append(dbFeatures, dbFeature)
|
||||
}
|
||||
|
||||
return database.Vulnerability{
|
||||
Name: v.Name,
|
||||
Namespace: database.Namespace{Name: v.NamespaceName},
|
||||
Description: v.Description,
|
||||
Link: v.Link,
|
||||
Severity: severity,
|
||||
Metadata: v.Metadata,
|
||||
FixedIn: dbFeatures,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func VulnerabilityFromDatabaseModel(dbVuln database.Vulnerability, withFixedIn bool) Vulnerability {
|
||||
vuln := Vulnerability{
|
||||
Name: dbVuln.Name,
|
||||
NamespaceName: dbVuln.Namespace.Name,
|
||||
Description: dbVuln.Description,
|
||||
Link: dbVuln.Link,
|
||||
Severity: string(dbVuln.Severity),
|
||||
Metadata: dbVuln.Metadata,
|
||||
}
|
||||
|
||||
if withFixedIn {
|
||||
for _, dbFeatureVersion := range dbVuln.FixedIn {
|
||||
vuln.FixedIn = append(vuln.FixedIn, FeatureFromDatabaseModel(dbFeatureVersion))
|
||||
}
|
||||
}
|
||||
|
||||
return vuln
|
||||
}
|
||||
|
||||
type Feature struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
NamespaceName string `json:"NamespaceName,omitempty"`
|
||||
Version string `json:"Version,omitempty"`
|
||||
Vulnerabilities []Vulnerability `json:"Vulnerabilities,omitempty"`
|
||||
AddedBy string `json:"AddedBy,omitempty"`
|
||||
}
|
||||
|
||||
func FeatureFromDatabaseModel(dbFeatureVersion database.FeatureVersion) Feature {
|
||||
versionStr := dbFeatureVersion.Version.String()
|
||||
if versionStr == types.MaxVersion.String() {
|
||||
versionStr = "None"
|
||||
}
|
||||
|
||||
return Feature{
|
||||
Name: dbFeatureVersion.Feature.Name,
|
||||
NamespaceName: dbFeatureVersion.Feature.Namespace.Name,
|
||||
Version: versionStr,
|
||||
AddedBy: dbFeatureVersion.AddedBy.Name,
|
||||
}
|
||||
}
|
||||
|
||||
func (f Feature) DatabaseModel() (database.FeatureVersion, error) {
|
||||
var version types.Version
|
||||
if f.Version == "None" {
|
||||
version = types.MaxVersion
|
||||
} else {
|
||||
var err error
|
||||
version, err = types.NewVersion(f.Version)
|
||||
if err != nil {
|
||||
return database.FeatureVersion{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Name: f.Name,
|
||||
Namespace: database.Namespace{Name: f.NamespaceName},
|
||||
},
|
||||
Version: version,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type Notification struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
Created string `json:"Created,omitempty"`
|
||||
Notified string `json:"Notified,omitempty"`
|
||||
Deleted string `json:"Deleted,omitempty"`
|
||||
Limit int `json:"Limit,omitempty"`
|
||||
Page string `json:"Page,omitempty"`
|
||||
NextPage string `json:"NextPage,omitempty"`
|
||||
Old *VulnerabilityWithLayers `json:"Old,omitempty"`
|
||||
New *VulnerabilityWithLayers `json:"New,omitempty"`
|
||||
}
|
||||
|
||||
func NotificationFromDatabaseModel(dbNotification database.VulnerabilityNotification, limit int, pageToken string, nextPage database.VulnerabilityNotificationPageNumber, key string) Notification {
|
||||
var oldVuln *VulnerabilityWithLayers
|
||||
if dbNotification.OldVulnerability != nil {
|
||||
v := VulnerabilityWithLayersFromDatabaseModel(*dbNotification.OldVulnerability)
|
||||
oldVuln = &v
|
||||
}
|
||||
|
||||
var newVuln *VulnerabilityWithLayers
|
||||
if dbNotification.NewVulnerability != nil {
|
||||
v := VulnerabilityWithLayersFromDatabaseModel(*dbNotification.NewVulnerability)
|
||||
newVuln = &v
|
||||
}
|
||||
|
||||
var nextPageStr string
|
||||
if nextPage != database.NoVulnerabilityNotificationPage {
|
||||
nextPageBytes, _ := tokenMarshal(nextPage, key)
|
||||
nextPageStr = string(nextPageBytes)
|
||||
}
|
||||
|
||||
var created, notified, deleted string
|
||||
if !dbNotification.Created.IsZero() {
|
||||
created = fmt.Sprintf("%d", dbNotification.Created.Unix())
|
||||
}
|
||||
if !dbNotification.Notified.IsZero() {
|
||||
notified = fmt.Sprintf("%d", dbNotification.Notified.Unix())
|
||||
}
|
||||
if !dbNotification.Deleted.IsZero() {
|
||||
deleted = fmt.Sprintf("%d", dbNotification.Deleted.Unix())
|
||||
}
|
||||
|
||||
// TODO(jzelinskie): implement "changed" key
|
||||
fmt.Println(dbNotification.Deleted.IsZero())
|
||||
return Notification{
|
||||
Name: dbNotification.Name,
|
||||
Created: created,
|
||||
Notified: notified,
|
||||
Deleted: deleted,
|
||||
Limit: limit,
|
||||
Page: pageToken,
|
||||
NextPage: nextPageStr,
|
||||
Old: oldVuln,
|
||||
New: newVuln,
|
||||
}
|
||||
}
|
||||
|
||||
type VulnerabilityWithLayers struct {
|
||||
Vulnerability *Vulnerability `json:"Vulnerability,omitempty"`
|
||||
LayersIntroducingVulnerability []string `json:"LayersIntroducingVulnerability,omitempty"`
|
||||
}
|
||||
|
||||
func VulnerabilityWithLayersFromDatabaseModel(dbVuln database.Vulnerability) VulnerabilityWithLayers {
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, true)
|
||||
|
||||
var layers []string
|
||||
for _, layer := range dbVuln.LayersIntroducingVulnerability {
|
||||
layers = append(layers, layer.Name)
|
||||
}
|
||||
|
||||
return VulnerabilityWithLayers{
|
||||
Vulnerability: &vuln,
|
||||
LayersIntroducingVulnerability: layers,
|
||||
}
|
||||
}
|
||||
|
||||
type LayerEnvelope struct {
|
||||
Layer *Layer `json:"Layer,omitempty"`
|
||||
Error *Error `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
type NamespaceEnvelope struct {
|
||||
Namespaces *[]Namespace `json:"Namespaces,omitempty"`
|
||||
Error *Error `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
type VulnerabilityEnvelope struct {
|
||||
Vulnerability *Vulnerability `json:"Vulnerability,omitempty"`
|
||||
Vulnerabilities *[]Vulnerability `json:"Vulnerabilities,omitempty"`
|
||||
NextPage string `json:"NextPage,omitempty"`
|
||||
Error *Error `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
type NotificationEnvelope struct {
|
||||
Notification *Notification `json:"Notification,omitempty"`
|
||||
Error *Error `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
type FeatureEnvelope struct {
|
||||
Feature *Feature `json:"Feature,omitempty"`
|
||||
Features *[]Feature `json:"Features,omitempty"`
|
||||
Error *Error `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
func tokenUnmarshal(token string, key string, v interface{}) error {
|
||||
k, _ := fernet.DecodeKey(key)
|
||||
msg := fernet.VerifyAndDecrypt([]byte(token), time.Hour, []*fernet.Key{k})
|
||||
if msg == nil {
|
||||
return errors.New("invalid or expired pagination token")
|
||||
}
|
||||
|
||||
return json.NewDecoder(bytes.NewBuffer(msg)).Decode(&v)
|
||||
}
|
||||
|
||||
func tokenMarshal(v interface{}, key string) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
err := json.NewEncoder(&buf).Encode(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k, _ := fernet.DecodeKey(key)
|
||||
return fernet.EncryptAndSign(buf.Bytes(), k)
|
||||
}
|
56
vendor/github.com/coreos/clair/api/v1/router.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package v1 implements the first version of the Clair API.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/julienschmidt/httprouter"
|
||||
|
||||
"github.com/coreos/clair/api/context"
|
||||
)
|
||||
|
||||
// NewRouter creates an HTTP router for version 1 of the Clair API.
|
||||
func NewRouter(ctx *context.RouteContext) *httprouter.Router {
|
||||
router := httprouter.New()
|
||||
|
||||
// Layers
|
||||
router.POST("/layers", context.HTTPHandler(postLayer, ctx))
|
||||
router.GET("/layers/:layerName", context.HTTPHandler(getLayer, ctx))
|
||||
router.DELETE("/layers/:layerName", context.HTTPHandler(deleteLayer, ctx))
|
||||
|
||||
// Namespaces
|
||||
router.GET("/namespaces", context.HTTPHandler(getNamespaces, ctx))
|
||||
|
||||
// Vulnerabilities
|
||||
router.GET("/namespaces/:namespaceName/vulnerabilities", context.HTTPHandler(getVulnerabilities, ctx))
|
||||
router.POST("/namespaces/:namespaceName/vulnerabilities", context.HTTPHandler(postVulnerability, ctx))
|
||||
router.GET("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName", context.HTTPHandler(getVulnerability, ctx))
|
||||
router.PUT("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName", context.HTTPHandler(putVulnerability, ctx))
|
||||
router.DELETE("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName", context.HTTPHandler(deleteVulnerability, ctx))
|
||||
|
||||
// Fixes
|
||||
router.GET("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName/fixes", context.HTTPHandler(getFixes, ctx))
|
||||
router.PUT("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName/fixes/:fixName", context.HTTPHandler(putFix, ctx))
|
||||
router.DELETE("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName/fixes/:fixName", context.HTTPHandler(deleteFix, ctx))
|
||||
|
||||
// Notifications
|
||||
router.GET("/notifications/:notificationName", context.HTTPHandler(getNotification, ctx))
|
||||
router.DELETE("/notifications/:notificationName", context.HTTPHandler(deleteNotification, ctx))
|
||||
|
||||
// Metrics
|
||||
router.GET("/metrics", context.HTTPHandler(getMetrics, ctx))
|
||||
|
||||
return router
|
||||
}
|
499
vendor/github.com/coreos/clair/api/v1/routes.go
generated
vendored
Normal file
@ -0,0 +1,499 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/coreos/clair/api/context"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/worker"
|
||||
)
|
||||
|
||||
const (
|
||||
// These are the route identifiers for prometheus.
|
||||
postLayerRoute = "v1/postLayer"
|
||||
getLayerRoute = "v1/getLayer"
|
||||
deleteLayerRoute = "v1/deleteLayer"
|
||||
getNamespacesRoute = "v1/getNamespaces"
|
||||
getVulnerabilitiesRoute = "v1/getVulnerabilities"
|
||||
postVulnerabilityRoute = "v1/postVulnerability"
|
||||
getVulnerabilityRoute = "v1/getVulnerability"
|
||||
putVulnerabilityRoute = "v1/putVulnerability"
|
||||
deleteVulnerabilityRoute = "v1/deleteVulnerability"
|
||||
getFixesRoute = "v1/getFixes"
|
||||
putFixRoute = "v1/putFix"
|
||||
deleteFixRoute = "v1/deleteFix"
|
||||
getNotificationRoute = "v1/getNotification"
|
||||
deleteNotificationRoute = "v1/deleteNotification"
|
||||
getMetricsRoute = "v1/getMetrics"
|
||||
|
||||
// maxBodySize restricts client request bodies to 1MiB.
|
||||
maxBodySize int64 = 1048576
|
||||
|
||||
// statusUnprocessableEntity represents the 422 (Unprocessable Entity) status code, which means
|
||||
// the server understands the content type of the request entity
|
||||
// (hence a 415(Unsupported Media Type) status code is inappropriate), and the syntax of the
|
||||
// request entity is correct (thus a 400 (Bad Request) status code is inappropriate) but was
|
||||
// unable to process the contained instructions.
|
||||
statusUnprocessableEntity = 422
|
||||
)
|
||||
|
||||
func decodeJSON(r *http.Request, v interface{}) error {
|
||||
defer r.Body.Close()
|
||||
return json.NewDecoder(io.LimitReader(r.Body, maxBodySize)).Decode(v)
|
||||
}
|
||||
|
||||
func writeResponse(w http.ResponseWriter, r *http.Request, status int, resp interface{}) {
|
||||
// Headers must be written before the response.
|
||||
header := w.Header()
|
||||
header.Set("Content-Type", "application/json;charset=utf-8")
|
||||
header.Set("Server", "clair")
|
||||
|
||||
// Gzip the response if the client supports it.
|
||||
var writer io.Writer = w
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
gzipWriter := gzip.NewWriter(w)
|
||||
defer gzipWriter.Close()
|
||||
writer = gzipWriter
|
||||
|
||||
header.Set("Content-Encoding", "gzip")
|
||||
}
|
||||
|
||||
// Write the response.
|
||||
w.WriteHeader(status)
|
||||
err := json.NewEncoder(writer).Encode(resp)
|
||||
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *json.MarshalerError, *json.UnsupportedTypeError, *json.UnsupportedValueError:
|
||||
panic("v1: failed to marshal response: " + err.Error())
|
||||
default:
|
||||
log.Warningf("failed to write response: %s", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func postLayer(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
request := LayerEnvelope{}
|
||||
err := decodeJSON(r, &request)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return postLayerRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Layer == nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, LayerEnvelope{Error: &Error{"failed to provide layer"}})
|
||||
return postLayerRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
err = worker.Process(ctx.Store, request.Layer.Format, request.Layer.Name, request.Layer.ParentName, request.Layer.Path, request.Layer.Headers)
|
||||
if err != nil {
|
||||
if err == utils.ErrCouldNotExtract ||
|
||||
err == utils.ErrExtractedFileTooBig ||
|
||||
err == worker.ErrUnsupported {
|
||||
writeResponse(w, r, statusUnprocessableEntity, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return postLayerRoute, statusUnprocessableEntity
|
||||
}
|
||||
|
||||
if _, badreq := err.(*cerrors.ErrBadRequest); badreq {
|
||||
writeResponse(w, r, http.StatusBadRequest, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return postLayerRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusInternalServerError, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return postLayerRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusCreated, LayerEnvelope{Layer: &Layer{
|
||||
Name: request.Layer.Name,
|
||||
ParentName: request.Layer.ParentName,
|
||||
Path: request.Layer.Path,
|
||||
Headers: request.Layer.Headers,
|
||||
Format: request.Layer.Format,
|
||||
IndexedByVersion: worker.Version,
|
||||
}})
|
||||
return postLayerRoute, http.StatusCreated
|
||||
}
|
||||
|
||||
func getLayer(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
_, withFeatures := r.URL.Query()["features"]
|
||||
_, withVulnerabilities := r.URL.Query()["vulnerabilities"]
|
||||
|
||||
dbLayer, err := ctx.Store.FindLayer(p.ByName("layerName"), withFeatures, withVulnerabilities)
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return getLayerRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return getLayerRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
layer := LayerFromDatabaseModel(dbLayer, withFeatures, withVulnerabilities)
|
||||
|
||||
writeResponse(w, r, http.StatusOK, LayerEnvelope{Layer: &layer})
|
||||
return getLayerRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteLayer(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteLayer(p.ByName("layerName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteLayerRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteLayerRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteLayerRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getNamespaces(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
dbNamespaces, err := ctx.Store.ListNamespaces()
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, NamespaceEnvelope{Error: &Error{err.Error()}})
|
||||
return getNamespacesRoute, http.StatusInternalServerError
|
||||
}
|
||||
var namespaces []Namespace
|
||||
for _, dbNamespace := range dbNamespaces {
|
||||
namespaces = append(namespaces, Namespace{Name: dbNamespace.Name})
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusOK, NamespaceEnvelope{Namespaces: &namespaces})
|
||||
return getNamespacesRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
query := r.URL.Query()
|
||||
|
||||
limitStrs, limitExists := query["limit"]
|
||||
if !limitExists {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"must provide limit query parameter"}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
}
|
||||
limit, err := strconv.Atoi(limitStrs[0])
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"invalid limit format: " + err.Error()}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
} else if limit < 0 {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"limit value should not be less than zero"}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
page := 0
|
||||
pageStrs, pageExists := query["page"]
|
||||
if pageExists {
|
||||
err = tokenUnmarshal(pageStrs[0], ctx.Config.PaginationKey, &page)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"invalid page format: " + err.Error()}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
}
|
||||
}
|
||||
|
||||
namespace := p.ByName("namespaceName")
|
||||
if namespace == "" {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"namespace should not be empty"}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
dbVulns, nextPage, err := ctx.Store.ListVulnerabilities(namespace, limit, page)
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return getVulnerabilitiesRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return getVulnerabilitiesRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
var vulns []Vulnerability
|
||||
for _, dbVuln := range dbVulns {
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, false)
|
||||
vulns = append(vulns, vuln)
|
||||
}
|
||||
|
||||
var nextPageStr string
|
||||
if nextPage != -1 {
|
||||
nextPageBytes, err := tokenMarshal(nextPage, ctx.Config.PaginationKey)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"failed to marshal token: " + err.Error()}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
}
|
||||
nextPageStr = string(nextPageBytes)
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusOK, VulnerabilityEnvelope{Vulnerabilities: &vulns, NextPage: nextPageStr})
|
||||
return getVulnerabilitiesRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func postVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
request := VulnerabilityEnvelope{}
|
||||
err := decodeJSON(r, &request)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return postVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Vulnerability == nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"failed to provide vulnerability"}})
|
||||
return postVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
vuln, err := request.Vulnerability.DatabaseModel()
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return postVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
err = ctx.Store.InsertVulnerabilities([]database.Vulnerability{vuln}, true)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *cerrors.ErrBadRequest:
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return postVulnerabilityRoute, http.StatusBadRequest
|
||||
default:
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return postVulnerabilityRoute, http.StatusInternalServerError
|
||||
}
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusCreated, VulnerabilityEnvelope{Vulnerability: request.Vulnerability})
|
||||
return postVulnerabilityRoute, http.StatusCreated
|
||||
}
|
||||
|
||||
func getVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
_, withFixedIn := r.URL.Query()["fixedIn"]
|
||||
|
||||
dbVuln, err := ctx.Store.FindVulnerability(p.ByName("namespaceName"), p.ByName("vulnerabilityName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return getVulnerabilityRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return getVulnerabilityRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, withFixedIn)
|
||||
|
||||
writeResponse(w, r, http.StatusOK, VulnerabilityEnvelope{Vulnerability: &vuln})
|
||||
return getVulnerabilityRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func putVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
request := VulnerabilityEnvelope{}
|
||||
err := decodeJSON(r, &request)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return putVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Vulnerability == nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"failed to provide vulnerability"}})
|
||||
return putVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if len(request.Vulnerability.FixedIn) != 0 {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"Vulnerability.FixedIn must be empty"}})
|
||||
return putVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
vuln, err := request.Vulnerability.DatabaseModel()
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return putVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
vuln.Namespace.Name = p.ByName("namespaceName")
|
||||
vuln.Name = p.ByName("vulnerabilityName")
|
||||
|
||||
err = ctx.Store.InsertVulnerabilities([]database.Vulnerability{vuln}, true)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *cerrors.ErrBadRequest:
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return putVulnerabilityRoute, http.StatusBadRequest
|
||||
default:
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return putVulnerabilityRoute, http.StatusInternalServerError
|
||||
}
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusOK, VulnerabilityEnvelope{Vulnerability: request.Vulnerability})
|
||||
return putVulnerabilityRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteVulnerability(p.ByName("namespaceName"), p.ByName("vulnerabilityName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteVulnerabilityRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteVulnerabilityRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteVulnerabilityRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getFixes(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
dbVuln, err := ctx.Store.FindVulnerability(p.ByName("namespaceName"), p.ByName("vulnerabilityName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return getFixesRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return getFixesRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, true)
|
||||
writeResponse(w, r, http.StatusOK, FeatureEnvelope{Features: &vuln.FixedIn})
|
||||
return getFixesRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func putFix(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
request := FeatureEnvelope{}
|
||||
err := decodeJSON(r, &request)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Feature == nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{"failed to provide feature"}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Feature.Name != p.ByName("fixName") {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{"feature name in URL and JSON do not match"}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
dbFix, err := request.Feature.DatabaseModel()
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
err = ctx.Store.InsertVulnerabilityFixes(p.ByName("namespaceName"), p.ByName("vulnerabilityName"), []database.FeatureVersion{dbFix})
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *cerrors.ErrBadRequest:
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
default:
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusNotFound
|
||||
}
|
||||
writeResponse(w, r, http.StatusInternalServerError, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusInternalServerError
|
||||
}
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusOK, FeatureEnvelope{Feature: request.Feature})
|
||||
return putFixRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteFix(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteVulnerabilityFix(p.ByName("namespaceName"), p.ByName("vulnerabilityName"), p.ByName("fixName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteFixRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteFixRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteFixRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getNotification(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
query := r.URL.Query()
|
||||
|
||||
limitStrs, limitExists := query["limit"]
|
||||
if !limitExists {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"must provide limit query parameter"}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
limit, err := strconv.Atoi(limitStrs[0])
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"invalid limit format: " + err.Error()}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
var pageToken string
|
||||
page := database.VulnerabilityNotificationFirstPage
|
||||
pageStrs, pageExists := query["page"]
|
||||
if pageExists {
|
||||
err := tokenUnmarshal(pageStrs[0], ctx.Config.PaginationKey, &page)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"invalid page format: " + err.Error()}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
pageToken = pageStrs[0]
|
||||
} else {
|
||||
pageTokenBytes, err := tokenMarshal(page, ctx.Config.PaginationKey)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"failed to marshal token: " + err.Error()}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
pageToken = string(pageTokenBytes)
|
||||
}
|
||||
|
||||
dbNotification, nextPage, err := ctx.Store.GetNotification(p.ByName("notificationName"), limit, page)
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return getNotificationRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return getNotificationRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
notification := NotificationFromDatabaseModel(dbNotification, limit, pageToken, nextPage, ctx.Config.PaginationKey)
|
||||
|
||||
writeResponse(w, r, http.StatusOK, NotificationEnvelope{Notification: ¬ification})
|
||||
return getNotificationRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteNotification(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteNotification(p.ByName("notificationName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteNotificationRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteNotificationRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteNotificationRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getMetrics(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
prometheus.Handler().ServeHTTP(w, r)
|
||||
return getMetricsRoute, 0
|
||||
}
|
75
vendor/github.com/coreos/clair/clair.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package clair implements the ability to boot Clair with your own imports
|
||||
// that can dynamically register additional functionality.
|
||||
package clair
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/api"
|
||||
"github.com/coreos/clair/api/context"
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/notifier"
|
||||
"github.com/coreos/clair/updater"
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "main")
|
||||
|
||||
// Boot starts Clair. By exporting this function, anyone can import their own
|
||||
// custom fetchers/updaters into their own package and then call clair.Boot.
|
||||
func Boot(config *config.Config) {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
st := utils.NewStopper()
|
||||
|
||||
// Open database
|
||||
db, err := database.Open(config.Database)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Start notifier
|
||||
st.Begin()
|
||||
go notifier.Run(config.Notifier, db, st)
|
||||
|
||||
// Start API
|
||||
st.Begin()
|
||||
go api.Run(config.API, &context.RouteContext{db, config.API}, st)
|
||||
st.Begin()
|
||||
go api.RunHealth(config.API, &context.RouteContext{db, config.API}, st)
|
||||
|
||||
// Start updater
|
||||
st.Begin()
|
||||
go updater.Run(config.Updater, db, st)
|
||||
|
||||
// Wait for interruption and shutdown gracefully.
|
||||
waitForSignals(syscall.SIGINT, syscall.SIGTERM)
|
||||
log.Info("Received interruption, gracefully stopping ...")
|
||||
st.Stop()
|
||||
}
|
||||
|
||||
func waitForSignals(signals ...os.Signal) {
|
||||
interrupts := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupts, signals...)
|
||||
<-interrupts
|
||||
}
|
98
vendor/github.com/coreos/clair/cmd/clair/main.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
|
||||
"github.com/coreos/clair"
|
||||
"github.com/coreos/clair/config"
|
||||
|
||||
// Register components
|
||||
_ "github.com/coreos/clair/notifier/notifiers"
|
||||
|
||||
_ "github.com/coreos/clair/updater/fetchers/debian"
|
||||
_ "github.com/coreos/clair/updater/fetchers/rhel"
|
||||
_ "github.com/coreos/clair/updater/fetchers/ubuntu"
|
||||
_ "github.com/coreos/clair/updater/metadata_fetchers/nvd"
|
||||
|
||||
_ "github.com/coreos/clair/worker/detectors/data/aci"
|
||||
_ "github.com/coreos/clair/worker/detectors/data/docker"
|
||||
|
||||
_ "github.com/coreos/clair/worker/detectors/feature/dpkg"
|
||||
_ "github.com/coreos/clair/worker/detectors/feature/rpm"
|
||||
|
||||
_ "github.com/coreos/clair/worker/detectors/namespace/aptsources"
|
||||
_ "github.com/coreos/clair/worker/detectors/namespace/lsbrelease"
|
||||
_ "github.com/coreos/clair/worker/detectors/namespace/osrelease"
|
||||
_ "github.com/coreos/clair/worker/detectors/namespace/redhatrelease"
|
||||
|
||||
_ "github.com/coreos/clair/database/pgsql"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair/cmd/clair", "main")
|
||||
|
||||
func main() {
|
||||
// Parse command-line arguments
|
||||
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
|
||||
flagConfigPath := flag.String("config", "/etc/clair/config.yaml", "Load configuration from the specified file.")
|
||||
flagCPUProfilePath := flag.String("cpu-profile", "", "Write a CPU profile to the specified file before exiting.")
|
||||
flagLogLevel := flag.String("log-level", "info", "Define the logging level.")
|
||||
flag.Parse()
|
||||
// Load configuration
|
||||
config, err := config.Load(*flagConfigPath)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to load configuration: %s", err)
|
||||
}
|
||||
|
||||
// Initialize logging system
|
||||
logLevel, err := capnslog.ParseLevel(strings.ToUpper(*flagLogLevel))
|
||||
capnslog.SetGlobalLogLevel(logLevel)
|
||||
capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false))
|
||||
|
||||
// Enable CPU Profiling if specified
|
||||
if *flagCPUProfilePath != "" {
|
||||
defer stopCPUProfiling(startCPUProfiling(*flagCPUProfilePath))
|
||||
}
|
||||
|
||||
clair.Boot(config)
|
||||
}
|
||||
|
||||
func startCPUProfiling(path string) *os.File {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create profile file: %s", err)
|
||||
}
|
||||
|
||||
err = pprof.StartCPUProfile(f)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start CPU profiling: %s", err)
|
||||
}
|
||||
|
||||
log.Info("started CPU profiling")
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func stopCPUProfiling(f *os.File) {
|
||||
pprof.StopCPUProfile()
|
||||
f.Close()
|
||||
log.Info("stopped CPU profiling")
|
||||
}
|
80
vendor/github.com/coreos/clair/config.example.yaml
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
# Copyright 2015 clair authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# The values specified here are the default values that Clair uses if no configuration file is specified or if the keys are not defined.
|
||||
clair:
|
||||
database:
|
||||
# Database driver
|
||||
type: pgsql
|
||||
options:
|
||||
# PostgreSQL Connection string
|
||||
# http://www.postgresql.org/docs/9.4/static/libpq-connect.html
|
||||
source:
|
||||
|
||||
# Number of elements kept in the cache
|
||||
# Values unlikely to change (e.g. namespaces) are cached in order to prevent needless roundtrips to the database.
|
||||
cachesize: 16384
|
||||
|
||||
api:
|
||||
# API server port
|
||||
port: 6060
|
||||
|
||||
# Health server port
|
||||
# This is an unencrypted endpoint useful for load balancers to check the health of the Clair server.
|
||||
healthport: 6061
|
||||
|
||||
# Deadline before an API request will respond with a 503
|
||||
timeout: 900s
|
||||
|
||||
# 32-byte URL-safe base64 key used to encrypt pagination tokens
|
||||
# If one is not provided, it will be generated.
|
||||
# Multiple clair instances in the same cluster need the same value.
|
||||
paginationkey:
|
||||
|
||||
# Optional PKI configuration
|
||||
# If you want to easily generate client certificates and CAs, try the following projects:
|
||||
# https://github.com/coreos/etcd-ca
|
||||
# https://github.com/cloudflare/cfssl
|
||||
servername:
|
||||
cafile:
|
||||
keyfile:
|
||||
certfile:
|
||||
|
||||
updater:
|
||||
# Frequency at which the database will be updated with vulnerabilities from the default data sources
|
||||
# The value 0 disables the updater entirely.
|
||||
interval: 2h
|
||||
|
||||
notifier:
|
||||
# Number of delivery attempts before a notification is marked as failed
|
||||
attempts: 3
|
||||
|
||||
# Duration before a failed notification is retried
|
||||
renotifyinterval: 2h
|
||||
|
||||
http:
|
||||
# Optional endpoint that will receive notifications via POST requests
|
||||
endpoint:
|
||||
|
||||
# Optional PKI configuration
|
||||
# If you want to easily generate client certificates and CAs, try the following projects:
|
||||
# https://github.com/cloudflare/cfssl
|
||||
# https://github.com/coreos/etcd-ca
|
||||
servername:
|
||||
cafile:
|
||||
keyfile:
|
||||
certfile:
|
||||
|
||||
# Optional HTTP Proxy: must be a valid URL (including the scheme).
|
||||
proxy:
|
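The `paginationkey` value in the example above must be a fernet key (a URL-safe base64 encoding of 32 random bytes) shared by every Clair instance in the cluster. A minimal sketch, not part of the vendored sources, of generating such a key with the same fernet-go package the configuration loader uses:

```go
package main

import (
	"fmt"
	"log"

	"github.com/fernet/fernet-go"
)

func main() {
	// Generate 32 random bytes and print them URL-safe base64 encoded,
	// ready to paste into the paginationkey field.
	var key fernet.Key
	if err := key.Generate(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(key.Encode())
}
```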
135
vendor/github.com/coreos/clair/config/config.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/fernet/fernet-go"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// ErrDatasourceNotLoaded is returned when the datasource variable in the configuration file is not loaded properly
|
||||
var ErrDatasourceNotLoaded = errors.New("could not load configuration: no database source specified")
|
||||
|
||||
// RegistrableComponentConfig is a configuration block that can be used to
|
||||
// determine which registrable component should be initialized and pass
|
||||
// custom configuration to it.
|
||||
type RegistrableComponentConfig struct {
|
||||
Type string
|
||||
Options map[string]interface{}
|
||||
}
|
||||
|
||||
// File represents a YAML configuration file that namespaces all Clair
|
||||
// configuration under the top-level "clair" key.
|
||||
type File struct {
|
||||
Clair Config `yaml:"clair"`
|
||||
}
|
||||
|
||||
// Config is the global configuration for an instance of Clair.
|
||||
type Config struct {
|
||||
Database RegistrableComponentConfig
|
||||
Updater *UpdaterConfig
|
||||
Notifier *NotifierConfig
|
||||
API *APIConfig
|
||||
}
|
||||
|
||||
// UpdaterConfig is the configuration for the Updater service.
|
||||
type UpdaterConfig struct {
|
||||
Interval time.Duration
|
||||
}
|
||||
|
||||
// NotifierConfig is the configuration for the Notifier service and its registered notifiers.
|
||||
type NotifierConfig struct {
|
||||
Attempts int
|
||||
RenotifyInterval time.Duration
|
||||
Params map[string]interface{} `yaml:",inline"`
|
||||
}
|
||||
|
||||
// APIConfig is the configuration for the API service.
|
||||
type APIConfig struct {
|
||||
Port int
|
||||
HealthPort int
|
||||
Timeout time.Duration
|
||||
PaginationKey string
|
||||
CertFile, KeyFile, CAFile string
|
||||
}
|
||||
|
||||
// DefaultConfig is a configuration that can be used as a fallback value.
|
||||
func DefaultConfig() Config {
|
||||
return Config{
|
||||
Database: RegistrableComponentConfig{
|
||||
Type: "pgsql",
|
||||
},
|
||||
Updater: &UpdaterConfig{
|
||||
Interval: 1 * time.Hour,
|
||||
},
|
||||
API: &APIConfig{
|
||||
Port: 6060,
|
||||
HealthPort: 6061,
|
||||
Timeout: 900 * time.Second,
|
||||
},
|
||||
Notifier: &NotifierConfig{
|
||||
Attempts: 5,
|
||||
RenotifyInterval: 2 * time.Hour,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Load is a shortcut to open a file, read it, and generate a Config.
|
||||
// It supports relative and absolute paths. Given "", it returns DefaultConfig.
|
||||
func Load(path string) (config *Config, err error) {
|
||||
var cfgFile File
|
||||
cfgFile.Clair = DefaultConfig()
|
||||
if path == "" {
|
||||
return &cfgFile.Clair, nil
|
||||
}
|
||||
|
||||
f, err := os.Open(os.ExpandEnv(path))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
d, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = yaml.Unmarshal(d, &cfgFile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
config = &cfgFile.Clair
|
||||
|
||||
// Generate a pagination key if none is provided.
|
||||
if config.API.PaginationKey == "" {
|
||||
var key fernet.Key
|
||||
if err = key.Generate(); err != nil {
|
||||
return
|
||||
}
|
||||
config.API.PaginationKey = key.Encode()
|
||||
} else {
|
||||
_, err = fernet.DecodeKey(config.API.PaginationKey)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
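A minimal sketch, not part of the vendored sources, illustrating the `Load` fallback above: an empty path skips YAML parsing and returns `DefaultConfig()` unchanged.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/clair/config"
)

func main() {
	// An empty path returns DefaultConfig() as-is
	// (pgsql database, 1h updater interval, API on port 6060).
	cfg, err := config.Load("")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Database.Type, cfg.Updater.Interval, cfg.API.Port)
}
```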
120
vendor/github.com/coreos/clair/contrib/analyze-local-images/Godeps/Godeps.json
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
{
|
||||
"ImportPath": "github.com/coreos/clair/contrib/analyze-local-images",
|
||||
"GoVersion": "go1.5",
|
||||
"Packages": [
|
||||
"github.com/coreos/clair/contrib/analyze-local-images"
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "github.com/beorn7/perks/quantile",
|
||||
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/clair/api/context",
|
||||
"Comment": "v1.0.0-rc1-40-g20b665f",
|
||||
"Rev": "20b665f3927ab6627eda2d9450610d589e35b19f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/clair/api/v1",
|
||||
"Comment": "v1.0.0-rc1-40-g20b665f",
|
||||
"Rev": "20b665f3927ab6627eda2d9450610d589e35b19f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/clair/config",
|
||||
"Comment": "v1.0.0-rc1-40-g20b665f",
|
||||
"Rev": "20b665f3927ab6627eda2d9450610d589e35b19f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/clair/database",
|
||||
"Comment": "v1.0.0-rc1-40-g20b665f",
|
||||
"Rev": "20b665f3927ab6627eda2d9450610d589e35b19f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/clair/utils",
|
||||
"Comment": "v1.0.0-rc1-40-g20b665f",
|
||||
"Rev": "20b665f3927ab6627eda2d9450610d589e35b19f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/clair/worker",
|
||||
"Comment": "v1.0.0-rc1-40-g20b665f",
|
||||
"Rev": "20b665f3927ab6627eda2d9450610d589e35b19f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-systemd/journal",
|
||||
"Comment": "v4-34-g4f14f6d",
|
||||
"Rev": "4f14f6deef2da87e4aa59e6c1c1f3e02ba44c5e1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/pkg/capnslog",
|
||||
"Rev": "2c77715c4df99b5420ffcae14ead08f52104065d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fatih/color",
|
||||
"Comment": "v0.1-17-g533cd7f",
|
||||
"Rev": "533cd7fd8a85905f67a1753afb4deddc85ea174f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fernet/fernet-go",
|
||||
"Rev": "1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "5fc2294e655b78ed8a02082d37808d46c17d7e64"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/julienschmidt/httprouter",
|
||||
"Comment": "v1.1-14-g21439ef",
|
||||
"Rev": "21439ef4d70ba4f3e2a5ed9249e7b03af4019b40"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/kr/text",
|
||||
"Rev": "bb797dc4fb8320488f47bf11de07a733d7233e1f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattn/go-colorable",
|
||||
"Rev": "9cbef7c35391cca05f15f8181dc0b18bc9736dbb"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattn/go-isatty",
|
||||
"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
||||
"Rev": "d0c3fe89de86839aecf2e0579c40ba3bb336a453"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_golang/prometheus",
|
||||
"Comment": "0.7.0-68-g67994f1",
|
||||
"Rev": "67994f177195311c3ea3d4407ed0175e34a4256f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_model/go",
|
||||
"Comment": "model-0.0.2-12-gfa8ad6f",
|
||||
"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/expfmt",
|
||||
"Rev": "dba5e39d4516169e840def50e507ef5f21b985f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
|
||||
"Rev": "dba5e39d4516169e840def50e507ef5f21b985f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/model",
|
||||
"Rev": "dba5e39d4516169e840def50e507ef5f21b985f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/procfs",
|
||||
"Rev": "406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/unix",
|
||||
"Rev": "eb2c74142fd19a79b3f237334c7384d5167b1b46"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/yaml.v2",
|
||||
"Rev": "f7716cbe52baa25d2e9b0d0da546fcf909fc16b4"
|
||||
}
|
||||
]
|
||||
}
|
5
vendor/github.com/coreos/clair/contrib/analyze-local-images/Godeps/Readme
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
This directory tree is generated automatically by godep.
|
||||
|
||||
Please do not edit.
|
||||
|
||||
See https://github.com/tools/godep for more information.
|
31
vendor/github.com/coreos/clair/contrib/analyze-local-images/README.md
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
# Analyze local images
|
||||
|
||||
This is a basic tool that allows you to analyze your local Docker images with Clair.
|
||||
It is intended to let everyone discover Clair and raise awareness around container security.
|
||||
There are absolutely no guarantees and it only uses a minimal subset of Clair's features.
|
||||
|
||||
## Install
|
||||
|
||||
To install the tool, simply run the following command, with a proper Go environment:
|
||||
|
||||
go get -u github.com/coreos/clair/contrib/analyze-local-images
|
||||
|
||||
You also need a working Clair instance. To learn how to run Clair, take a look at the [README](https://github.com/coreos/clair/blob/master/README.md). You should then wait for its initial vulnerability update to complete, which may take some time.
|
||||
|
||||
# Usage
|
||||
|
||||
If you are running Clair locally (i.e. compiled or local Docker),
|
||||
|
||||
```
|
||||
analyze-local-images <Docker Image ID>
|
||||
```
|
||||
|
||||
Or, if you run Clair remotely (i.e. boot2docker),
|
||||
|
||||
```
|
||||
analyze-local-images -endpoint "http://<CLAIR-IP-ADDRESS>:6060" -my-address "<MY-IP-ADDRESS>" <Docker Image ID>
|
||||
```
|
||||
|
||||
Clair needs access to the image files. If you run Clair locally, this tool will store the files in the system's temporary folder and Clair will find them there. This means that if Clair runs in Docker, the host's temporary folder must be mounted into Clair's container. If you run Clair remotely, this tool will run a small HTTP server to let Clair download them. It listens on port 9279 and allows a single host: Clair's IP address, extracted from the `-endpoint` parameter. The `-my-address` parameter defines the IP address of the HTTP server that Clair will use to download the images. With boot2docker, these parameters would be `-endpoint "http://192.168.99.100:6060" -my-address "192.168.99.1"`.
|
||||
|
||||
As it runs an HTTP server and not an HTTP**S** one, be sure **not** to expose sensitive data or container images.
|
453
vendor/github.com/coreos/clair/contrib/analyze-local-images/main.go
generated
vendored
Normal file
@ -0,0 +1,453 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/api/v1"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/fatih/color"
|
||||
"github.com/kr/text"
|
||||
)
|
||||
|
||||
const (
|
||||
postLayerURI = "/v1/layers"
|
||||
getLayerFeaturesURI = "/v1/layers/%s?vulnerabilities"
|
||||
httpPort = 9279
|
||||
)
|
||||
|
||||
var (
|
||||
flagEndpoint = flag.String("endpoint", "http://127.0.0.1:6060", "Address to Clair API")
|
||||
flagMyAddress = flag.String("my-address", "127.0.0.1", "Address from the point of view of Clair")
|
||||
flagMinimumSeverity = flag.String("minimum-severity", "Negligible", "Minimum severity of vulnerabilities to show (Unknown, Negligible, Low, Medium, High, Critical, Defcon1)")
|
||||
flagColorMode = flag.String("color", "auto", "Colorize the output (always, auto, never)")
|
||||
)
|
||||
|
||||
type vulnerabilityInfo struct {
|
||||
vulnerability v1.Vulnerability
|
||||
feature v1.Feature
|
||||
severity types.Priority
|
||||
}
|
||||
|
||||
type By func(v1, v2 vulnerabilityInfo) bool
|
||||
|
||||
func (by By) Sort(vulnerabilities []vulnerabilityInfo) {
|
||||
ps := &sorter{
|
||||
vulnerabilities: vulnerabilities,
|
||||
by: by,
|
||||
}
|
||||
sort.Sort(ps)
|
||||
}
|
||||
|
||||
type sorter struct {
|
||||
vulnerabilities []vulnerabilityInfo
|
||||
by func(v1, v2 vulnerabilityInfo) bool
|
||||
}
|
||||
|
||||
func (s *sorter) Len() int {
|
||||
return len(s.vulnerabilities)
|
||||
}
|
||||
|
||||
func (s *sorter) Swap(i, j int) {
|
||||
s.vulnerabilities[i], s.vulnerabilities[j] = s.vulnerabilities[j], s.vulnerabilities[i]
|
||||
}
|
||||
|
||||
func (s *sorter) Less(i, j int) bool {
|
||||
return s.by(s.vulnerabilities[i], s.vulnerabilities[j])
|
||||
}
|
||||
|
||||
func main() {
|
||||
os.Exit(intMain())
|
||||
}
|
||||
|
||||
func intMain() int {
|
||||
// Parse command-line arguments.
|
||||
flag.Usage = func() {
|
||||
fmt.Fprintf(os.Stderr, "Usage: %s [options] image-id\n\nOptions:\n", os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
flag.Parse()
|
||||
|
||||
if len(flag.Args()) != 1 {
|
||||
flag.Usage()
|
||||
return 1
|
||||
}
|
||||
imageName := flag.Args()[0]
|
||||
|
||||
minSeverity := types.Priority(*flagMinimumSeverity)
|
||||
if !minSeverity.IsValid() {
|
||||
flag.Usage()
|
||||
return 1
|
||||
}
|
||||
|
||||
if *flagColorMode == "never" {
|
||||
color.NoColor = true
|
||||
} else if *flagColorMode == "always" {
|
||||
color.NoColor = false
|
||||
}
|
||||
|
||||
// Create a temporary folder.
|
||||
tmpPath, err := ioutil.TempDir("", "analyze-local-image-")
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create temporary folder: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpPath)
|
||||
|
||||
// Intercept SIGINT / SIGKILL signals.
|
||||
interrupt := make(chan os.Signal, 1) // buffered so signal.Notify cannot drop a signal
|
||||
signal.Notify(interrupt, os.Interrupt, os.Kill)
|
||||
|
||||
// Analyze the image.
|
||||
analyzeCh := make(chan error, 1)
|
||||
go func() {
|
||||
analyzeCh <- AnalyzeLocalImage(imageName, minSeverity, *flagEndpoint, *flagMyAddress, tmpPath)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-interrupt:
|
||||
return 130
|
||||
case err := <-analyzeCh:
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func AnalyzeLocalImage(imageName string, minSeverity types.Priority, endpoint, myAddress, tmpPath string) error {
|
||||
// Save image.
|
||||
log.Printf("Saving %s to local disk (this may take some time)", imageName)
|
||||
err := save(imageName, tmpPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not save image: %s", err)
|
||||
}
|
||||
|
||||
// Retrieve history.
|
||||
log.Println("Retrieving image history")
|
||||
layerIDs, err := historyFromManifest(tmpPath)
|
||||
if err != nil {
|
||||
layerIDs, err = historyFromCommand(imageName)
|
||||
}
|
||||
if err != nil || len(layerIDs) == 0 {
|
||||
return fmt.Errorf("Could not get image's history: %s", err)
|
||||
}
|
||||
|
||||
// Setup a simple HTTP server if Clair is not local.
|
||||
if !strings.Contains(endpoint, "127.0.0.1") && !strings.Contains(endpoint, "localhost") {
|
||||
allowedHost := strings.TrimPrefix(endpoint, "http://")
|
||||
portIndex := strings.Index(allowedHost, ":")
|
||||
if portIndex >= 0 {
|
||||
allowedHost = allowedHost[:portIndex]
|
||||
}
|
||||
|
||||
log.Printf("Setting up HTTP server (allowing: %s)\n", allowedHost)
|
||||
|
||||
ch := make(chan error)
|
||||
go listenHTTP(tmpPath, allowedHost, ch)
|
||||
select {
|
||||
case err := <-ch:
|
||||
return fmt.Errorf("An error occurred when starting the HTTP server: %s", err)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
break
|
||||
}
|
||||
|
||||
tmpPath = "http://" + myAddress + ":" + strconv.Itoa(httpPort)
|
||||
}
|
||||
|
||||
// Analyze layers.
|
||||
log.Printf("Analyzing %d layers... \n", len(layerIDs))
|
||||
for i := 0; i < len(layerIDs); i++ {
|
||||
log.Printf("Analyzing %s\n", layerIDs[i])
|
||||
|
||||
if i > 0 {
|
||||
err = analyzeLayer(endpoint, tmpPath+"/"+layerIDs[i]+"/layer.tar", layerIDs[i], layerIDs[i-1])
|
||||
} else {
|
||||
err = analyzeLayer(endpoint, tmpPath+"/"+layerIDs[i]+"/layer.tar", layerIDs[i], "")
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not analyze layer: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get vulnerabilities.
|
||||
log.Println("Retrieving image's vulnerabilities")
|
||||
layer, err := getLayer(endpoint, layerIDs[len(layerIDs)-1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not get layer information: %s", err)
|
||||
}
|
||||
|
||||
// Print report.
|
||||
fmt.Printf("Clair report for image %s (%s)\n", imageName, time.Now().UTC())
|
||||
|
||||
if len(layer.Features) == 0 {
|
||||
fmt.Printf("%s No features have been detected in the image. This usually means that the image isn't supported by Clair.\n", color.YellowString("NOTE:"))
|
||||
return nil
|
||||
}
|
||||
|
||||
isSafe := true
|
||||
hasVisibleVulnerabilities := false
|
||||
|
||||
var vulnerabilities = make([]vulnerabilityInfo, 0)
|
||||
for _, feature := range layer.Features {
|
||||
if len(feature.Vulnerabilities) > 0 {
|
||||
for _, vulnerability := range feature.Vulnerabilities {
|
||||
severity := types.Priority(vulnerability.Severity)
|
||||
isSafe = false
|
||||
|
||||
if minSeverity.Compare(severity) > 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
hasVisibleVulnerabilities = true
|
||||
vulnerabilities = append(vulnerabilities, vulnerabilityInfo{vulnerability, feature, severity})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort vulnerabilities by severity.
|
||||
priority := func(v1, v2 vulnerabilityInfo) bool {
|
||||
return v1.severity.Compare(v2.severity) >= 0
|
||||
}
|
||||
|
||||
By(priority).Sort(vulnerabilities)
|
||||
|
||||
for _, vulnerabilityInfo := range vulnerabilities {
|
||||
vulnerability := vulnerabilityInfo.vulnerability
|
||||
feature := vulnerabilityInfo.feature
|
||||
severity := vulnerabilityInfo.severity
|
||||
|
||||
fmt.Printf("%s (%s)\n", vulnerability.Name, coloredSeverity(severity))
|
||||
|
||||
if vulnerability.Description != "" {
|
||||
fmt.Printf("%s\n\n", text.Indent(text.Wrap(vulnerability.Description, 80), "\t"))
|
||||
}
|
||||
|
||||
fmt.Printf("\tPackage: %s @ %s\n", feature.Name, feature.Version)
|
||||
|
||||
if vulnerability.FixedBy != "" {
|
||||
fmt.Printf("\tFixed version: %s\n", vulnerability.FixedBy)
|
||||
}
|
||||
|
||||
if vulnerability.Link != "" {
|
||||
fmt.Printf("\tLink: %s\n", vulnerability.Link)
|
||||
}
|
||||
|
||||
fmt.Printf("\tLayer: %s\n", feature.AddedBy)
|
||||
fmt.Println("")
|
||||
}
|
||||
|
||||
if isSafe {
|
||||
fmt.Printf("%s No vulnerabilities were detected in your image\n", color.GreenString("Success!"))
|
||||
} else if !hasVisibleVulnerabilities {
|
||||
fmt.Printf("%s No vulnerabilities matching the minimum severity level were detected in your image\n", color.YellowString("NOTE:"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func save(imageName, path string) error {
|
||||
var stderr bytes.Buffer
|
||||
save := exec.Command("docker", "save", imageName)
|
||||
save.Stderr = &stderr
|
||||
extract := exec.Command("tar", "xf", "-", "-C"+path)
|
||||
extract.Stderr = &stderr
|
||||
pipe, err := extract.StdinPipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
save.Stdout = pipe
|
||||
|
||||
err = extract.Start()
|
||||
if err != nil {
|
||||
return errors.New(stderr.String())
|
||||
}
|
||||
err = save.Run()
|
||||
if err != nil {
|
||||
return errors.New(stderr.String())
|
||||
}
|
||||
err = pipe.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = extract.Wait()
|
||||
if err != nil {
|
||||
return errors.New(stderr.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func historyFromManifest(path string) ([]string, error) {
|
||||
mf, err := os.Open(path + "/manifest.json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer mf.Close()
|
||||
|
||||
// https://github.com/docker/docker/blob/master/image/tarexport/tarexport.go#L17
|
||||
type manifestItem struct {
|
||||
Config string
|
||||
RepoTags []string
|
||||
Layers []string
|
||||
}
|
||||
|
||||
var manifest []manifestItem
|
||||
if err = json.NewDecoder(mf).Decode(&manifest); err != nil {
|
||||
return nil, err
|
||||
} else if len(manifest) != 1 {
|
||||
return nil, errors.New("manifest.json should contain exactly one manifest entry")
|
||||
}
|
||||
var layers []string
|
||||
for _, layer := range manifest[0].Layers {
|
||||
layers = append(layers, strings.TrimSuffix(layer, "/layer.tar"))
|
||||
}
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
func historyFromCommand(imageName string) ([]string, error) {
|
||||
var stderr bytes.Buffer
|
||||
cmd := exec.Command("docker", "history", "-q", "--no-trunc", imageName)
|
||||
cmd.Stderr = &stderr
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
return []string{}, errors.New(stderr.String())
|
||||
}
|
||||
|
||||
var layers []string
|
||||
scanner := bufio.NewScanner(stdout)
|
||||
for scanner.Scan() {
|
||||
layers = append(layers, scanner.Text())
|
||||
}
|
||||
|
||||
for i := len(layers)/2 - 1; i >= 0; i-- {
|
||||
opp := len(layers) - 1 - i
|
||||
layers[i], layers[opp] = layers[opp], layers[i]
|
||||
}
|
||||
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
func listenHTTP(path, allowedHost string, ch chan error) {
|
||||
restrictedFileServer := func(path, allowedHost string) http.Handler {
|
||||
fc := func(w http.ResponseWriter, r *http.Request) {
|
||||
host, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err == nil && strings.EqualFold(host, allowedHost) {
|
||||
http.FileServer(http.Dir(path)).ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(403)
|
||||
}
|
||||
return http.HandlerFunc(fc)
|
||||
}
|
||||
|
||||
ch <- http.ListenAndServe(":"+strconv.Itoa(httpPort), restrictedFileServer(path, allowedHost))
|
||||
}
|
||||
|
||||
func analyzeLayer(endpoint, path, layerName, parentLayerName string) error {
|
||||
payload := v1.LayerEnvelope{
|
||||
Layer: &v1.Layer{
|
||||
Name: layerName,
|
||||
Path: path,
|
||||
ParentName: parentLayerName,
|
||||
Format: "Docker",
|
||||
},
|
||||
}
|
||||
|
||||
jsonPayload, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
request, err := http.NewRequest("POST", endpoint+postLayerURI, bytes.NewBuffer(jsonPayload))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
request.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
response, err := client.Do(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
if response.StatusCode != 201 {
|
||||
body, _ := ioutil.ReadAll(response.Body)
|
||||
return fmt.Errorf("Got response %d with message %s", response.StatusCode, string(body))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getLayer(endpoint, layerID string) (v1.Layer, error) {
|
||||
response, err := http.Get(endpoint + fmt.Sprintf(getLayerFeaturesURI, layerID))
|
||||
if err != nil {
|
||||
return v1.Layer{}, err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
if response.StatusCode != 200 {
|
||||
body, _ := ioutil.ReadAll(response.Body)
|
||||
err := fmt.Errorf("Got response %d with message %s", response.StatusCode, string(body))
|
||||
return v1.Layer{}, err
|
||||
}
|
||||
|
||||
var apiResponse v1.LayerEnvelope
|
||||
if err = json.NewDecoder(response.Body).Decode(&apiResponse); err != nil {
|
||||
return v1.Layer{}, err
|
||||
} else if apiResponse.Error != nil {
|
||||
return v1.Layer{}, errors.New(apiResponse.Error.Message)
|
||||
}
|
||||
|
||||
return *apiResponse.Layer, nil
|
||||
}
|
||||
|
||||
func coloredSeverity(severity types.Priority) string {
|
||||
red := color.New(color.FgRed).SprintFunc()
|
||||
yellow := color.New(color.FgYellow).SprintFunc()
|
||||
white := color.New(color.FgWhite).SprintFunc()
|
||||
|
||||
switch severity {
|
||||
case types.High, types.Critical:
|
||||
return red(severity)
|
||||
case types.Medium:
|
||||
return yellow(severity)
|
||||
default:
|
||||
return white(severity)
|
||||
}
|
||||
}
|
2388
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
Normal file
File diff suppressed because it is too large
292
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
Normal file
@ -0,0 +1,292 @@
|
||||
// Package quantile computes approximate quantiles over an unbounded data
|
||||
// stream within low memory and CPU bounds.
|
||||
//
|
||||
// A small amount of accuracy is traded to achieve the above properties.
|
||||
//
|
||||
// Multiple streams can be merged before calling Query to generate a single set
|
||||
// of results. This is meaningful when the streams represent the same type of
|
||||
// data. See Merge and Samples.
|
||||
//
|
||||
// For more detailed information about the algorithm used, see:
|
||||
//
|
||||
// Effective Computation of Biased Quantiles over Data Streams
|
||||
//
|
||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Sample holds an observed value and meta information for compression. JSON
|
||||
// tags have been added for convenience.
|
||||
type Sample struct {
|
||||
Value float64 `json:",string"`
|
||||
Width float64 `json:",string"`
|
||||
Delta float64 `json:",string"`
|
||||
}
|
||||
|
||||
// Samples represents a slice of samples. It implements sort.Interface.
|
||||
type Samples []Sample
|
||||
|
||||
func (a Samples) Len() int { return len(a) }
|
||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
type invariant func(s *stream, r float64) float64
|
||||
|
||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the lower ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewLowBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * r
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the higher ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewHighBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * (s.n - r)
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
||||
// space and computation time. The targets map maps the desired quantiles to
|
||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
||||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targets map[float64]float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for quantile, epsilon := range targets {
|
||||
if quantile*s.n <= r {
|
||||
f = (2 * epsilon * r) / quantile
|
||||
} else {
|
||||
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
||||
// design. Take care when using across multiple goroutines.
|
||||
type Stream struct {
|
||||
*stream
|
||||
b Samples
|
||||
sorted bool
|
||||
}
|
||||
|
||||
func newStream(ƒ invariant) *Stream {
|
||||
x := &stream{ƒ: ƒ}
|
||||
return &Stream{x, make(Samples, 0, 500), true}
|
||||
}
|
||||
|
||||
// Insert inserts v into the stream.
|
||||
func (s *Stream) Insert(v float64) {
|
||||
s.insert(Sample{Value: v, Width: 1})
|
||||
}
|
||||
|
||||
func (s *Stream) insert(sample Sample) {
|
||||
s.b = append(s.b, sample)
|
||||
s.sorted = false
|
||||
if len(s.b) == cap(s.b) {
|
||||
s.flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Query returns the computed qth percentile value. If s was created with
|
||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
||||
// will return an unspecified result.
|
||||
func (s *Stream) Query(q float64) float64 {
|
||||
if !s.flushed() {
|
||||
// Fast path when there hasn't been enough data for a flush;
|
||||
// this also yields better accuracy for small sets of data.
|
||||
l := len(s.b)
|
||||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
i := int(float64(l) * q)
|
||||
if i > 0 {
|
||||
i -= 1
|
||||
}
|
||||
s.maybeSort()
|
||||
return s.b[i].Value
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.query(q)
|
||||
}
|
||||
|
||||
// Merge merges samples into the underlying stream's samples. This is handy when
|
||||
// merging multiple streams from separate threads, database shards, etc.
|
||||
//
|
||||
// ATTENTION: This method is broken and does not yield correct results. The
|
||||
// underlying algorithm is not capable of merging streams correctly.
|
||||
func (s *Stream) Merge(samples Samples) {
|
||||
sort.Sort(samples)
|
||||
s.stream.merge(samples)
|
||||
}
|
||||
|
||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
||||
func (s *Stream) Reset() {
|
||||
s.stream.reset()
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
// Samples returns stream samples held by s.
|
||||
func (s *Stream) Samples() Samples {
|
||||
if !s.flushed() {
|
||||
return s.b
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.samples()
|
||||
}
|
||||
|
||||
// Count returns the total number of samples observed in the stream
|
||||
// since initialization.
|
||||
func (s *Stream) Count() int {
|
||||
return len(s.b) + s.stream.count()
|
||||
}
|
||||
|
||||
func (s *Stream) flush() {
|
||||
s.maybeSort()
|
||||
s.stream.merge(s.b)
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
func (s *Stream) maybeSort() {
|
||||
if !s.sorted {
|
||||
s.sorted = true
|
||||
sort.Sort(s.b)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stream) flushed() bool {
|
||||
return len(s.stream.l) > 0
|
||||
}
|
||||
|
||||
type stream struct {
|
||||
n float64
|
||||
l []Sample
|
||||
ƒ invariant
|
||||
}
|
||||
|
||||
func (s *stream) reset() {
|
||||
s.l = s.l[:0]
|
||||
s.n = 0
|
||||
}
|
||||
|
||||
func (s *stream) insert(v float64) {
|
||||
s.merge(Samples{{v, 1, 0}})
|
||||
}
|
||||
|
||||
func (s *stream) merge(samples Samples) {
|
||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
||||
// whole summaries. The paper doesn't mention merging summaries at
|
||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
||||
// do merges properly.
|
||||
var r float64
|
||||
i := 0
|
||||
for _, sample := range samples {
|
||||
for ; i < len(s.l); i++ {
|
||||
c := s.l[i]
|
||||
if c.Value > sample.Value {
|
||||
// Insert at position i.
|
||||
s.l = append(s.l, Sample{})
|
||||
copy(s.l[i+1:], s.l[i:])
|
||||
s.l[i] = Sample{
|
||||
sample.Value,
|
||||
sample.Width,
|
||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
||||
// TODO(beorn7): How to calculate delta correctly?
|
||||
}
|
||||
i++
|
||||
goto inserted
|
||||
}
|
||||
r += c.Width
|
||||
}
|
||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||
i++
|
||||
inserted:
|
||||
s.n += sample.Width
|
||||
r += sample.Width
|
||||
}
|
||||
s.compress()
|
||||
}
|
||||
|
||||
func (s *stream) count() int {
|
||||
return int(s.n)
|
||||
}
|
||||
|
||||
func (s *stream) query(q float64) float64 {
|
||||
t := math.Ceil(q * s.n)
|
||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||
p := s.l[0]
|
||||
var r float64
|
||||
for _, c := range s.l[1:] {
|
||||
r += p.Width
|
||||
if r+c.Width+c.Delta > t {
|
||||
return p.Value
|
||||
}
|
||||
p = c
|
||||
}
|
||||
return p.Value
|
||||
}
|
||||
|
||||
func (s *stream) compress() {
|
||||
if len(s.l) < 2 {
|
||||
return
|
||||
}
|
||||
x := s.l[len(s.l)-1]
|
||||
xi := len(s.l) - 1
|
||||
r := s.n - 1 - x.Width
|
||||
|
||||
for i := len(s.l) - 2; i >= 0; i-- {
|
||||
c := s.l[i]
|
||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||
x.Width += c.Width
|
||||
s.l[xi] = x
|
||||
// Remove element at i.
|
||||
copy(s.l[i:], s.l[i+1:])
|
||||
s.l = s.l[:len(s.l)-1]
|
||||
xi -= 1
|
||||
} else {
|
||||
x = c
|
||||
xi = i
|
||||
}
|
||||
r -= c.Width
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stream) samples() Samples {
|
||||
samples := make(Samples, len(s.l))
|
||||
copy(samples, s.l)
|
||||
return samples
|
||||
}
|
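A minimal usage sketch, not part of the vendored sources, for the stream above (the import uses the package's canonical path rather than this vendored copy):

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile with absolute errors of
	// 0.05 and 0.001, as documented for NewTargeted.
	q := quantile.NewTargeted(map[float64]float64{0.5: 0.05, 0.99: 0.001})
	for i := 1; i <= 1000; i++ {
		q.Insert(float64(i))
	}
	fmt.Printf("count=%d p50=%v p99=%v\n", q.Count(), q.Query(0.5), q.Query(0.99))
}
```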
202
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
5
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/NOTICE
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
CoreOS Project
|
||||
Copyright 2015 CoreOS, Inc
|
||||
|
||||
This product includes software developed at CoreOS, Inc.
|
||||
(http://www.coreos.com/).
|
64
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/api/context/context.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")
|
||||
|
||||
promResponseDurationMilliseconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Name: "clair_api_response_duration_milliseconds",
|
||||
Help: "The duration of time it takes to receive and write a response to an API request",
|
||||
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
|
||||
}, []string{"route", "code"})
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(promResponseDurationMilliseconds)
|
||||
}
|
||||
|
||||
type Handler func(http.ResponseWriter, *http.Request, httprouter.Params, *RouteContext) (route string, status int)
|
||||
|
||||
func HTTPHandler(handler Handler, ctx *RouteContext) httprouter.Handle {
|
||||
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
start := time.Now()
|
||||
route, status := handler(w, r, p, ctx)
|
||||
statusStr := strconv.Itoa(status)
|
||||
if status == 0 {
|
||||
statusStr = "???"
|
||||
}
|
||||
utils.PrometheusObserveTimeMilliseconds(promResponseDurationMilliseconds.WithLabelValues(route, statusStr), start)
|
||||
|
||||
log.Infof("%s \"%s %s\" %s (%s)", r.RemoteAddr, r.Method, r.RequestURI, statusStr, time.Since(start))
|
||||
}
|
||||
}
|
||||
|
||||
type RouteContext struct {
|
||||
Store database.Datastore
|
||||
Config *config.APIConfig
|
||||
}
|
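A minimal sketch, not part of the vendored sources, of wiring a handler through `HTTPHandler` above so it gets timed and logged; the `/v1/ping` route and `ping` handler are illustrative only.

```go
package main

import (
	"log"
	"net/http"

	"github.com/coreos/clair/api/context"
	"github.com/julienschmidt/httprouter"
)

// ping is an illustrative handler matching the context.Handler signature.
func ping(w http.ResponseWriter, _ *http.Request, _ httprouter.Params, _ *context.RouteContext) (string, int) {
	w.Write([]byte("pong"))
	return "ping", http.StatusOK
}

func main() {
	// Store and Config would be populated by a real server; left zero here.
	ctx := &context.RouteContext{}
	router := httprouter.New()
	router.GET("/v1/ping", context.HTTPHandler(ping, ctx))
	log.Fatal(http.ListenAndServe(":6060", router))
}
```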
633
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/api/v1/README.md
generated
vendored
Normal file
@ -0,0 +1,633 @@
|
||||
# Clair v1 API
|
||||
|
||||
- [Error Handling](#error-handling)
|
||||
- [Layers](#layers)
|
||||
- [POST](#post-layers)
|
||||
- [GET](#get-layersname)
|
||||
- [DELETE](#delete-layersname)
|
||||
- [Namespaces](#namespaces)
|
||||
- [GET](#get-namespaces)
|
||||
- [Vulnerabilities](#vulnerabilities)
|
||||
- [List](#get-namespacesnsnamevulnerabilities)
|
||||
- [POST](#post-namespacesnamevulnerabilities)
|
||||
- [GET](#get-namespacesnsnamevulnerabilitiesvulnname)
|
||||
- [PUT](#put-namespacesnsnamevulnerabilitiesvulnname)
|
||||
- [DELETE](#delete-namespacesnsnamevulnerabilitiesvulnname)
|
||||
- [Fixes](#fixes)
|
||||
- [GET](#get-namespacesnsnamevulnerabilitiesvulnnamefixes)
|
||||
- [PUT](#put-namespacesnsnamevulnerabilitiesvulnnamefixesfeaturename)
|
||||
- [DELETE](#delete-namespacesnsnamevulnerabilitiesvulnnamefixesfeaturename)
|
||||
- [Notifications](#notifications)
|
||||
- [GET](#get-notificationsname)
|
||||
- [DELETE](#delete-notificationname)
|
||||
|
||||
## Error Handling
|
||||
|
||||
###### Description
|
||||
|
||||
Every route can optionally provide an `Error` property on the response object.
|
||||
The HTTP status code of the response indicates what type of failure occurred and how the client should react.
|
||||
|
||||
###### Client Retry Behavior
|
||||
|
||||
| Code | Name | Retry Behavior |
|
||||
|------|-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| 400  | Bad Request           | The body of the request is invalid. The request either needs to be changed before being retried or depends on another request being processed before it. |
|
||||
| 404 | Not Found | The requested resource could not be found. The request must be changed before being retried. |
|
||||
| 422 | Unprocessable Entity | The request body is valid, but unsupported. This request should never be retried. |
|
||||
| 500 | Internal Server Error | The server encountered an error while processing the request. This request should be retried without change. |
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 400 Bad Request
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Error": {
|
||||
"Message": "example error message"
|
||||
}
|
||||
}
|
||||
```
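
###### Example Client Handling (Go)

In client code, the retry table above reduces to a small decision on the status code. The sketch below is illustrative only; the envelope type simply mirrors the JSON shape shown above and is not imported from Clair.

```go
package clairclient

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// errorEnvelope mirrors the optional "Error" property documented above.
type errorEnvelope struct {
	Error *struct{ Message string }
}

// shouldRetry follows the table: only 500 responses are retried unchanged.
func shouldRetry(status int) bool {
	return status == http.StatusInternalServerError
}

// checkResponse turns a non-2xx Clair response into a Go error.
func checkResponse(resp *http.Response) error {
	if resp.StatusCode < 400 {
		return nil
	}
	var env errorEnvelope
	_ = json.NewDecoder(resp.Body).Decode(&env) // body may be empty; ignore decode errors
	msg := "unknown error"
	if env.Error != nil {
		msg = env.Error.Message
	}
	return fmt.Errorf("clair: status %d (retryable: %t): %s", resp.StatusCode, shouldRetry(resp.StatusCode), msg)
}
```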
|
||||
|
||||
## Layers
|
||||
|
||||
#### POST /layers
|
||||
|
||||
###### Description
|
||||
|
||||
The POST route for the Layers resource performs the indexing of a Layer from the provided path and displays the provided Layer with an updated `IndexedByVersion` property.
|
||||
This request blocks for the entire duration of the downloading and indexing of the layer.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
POST http://localhost:6060/v1/layers HTTP/1.1
|
||||
|
||||
{
|
||||
"Layer": {
|
||||
"Name": "523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6",
|
||||
"Path": "/mnt/layers/523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6/layer.tar",
|
||||
"ParentName": "140f9bdfeb9784cf8730e9dab5dd12fbd704151cf555ac8cae650451794e5ac2",
|
||||
"Format": "Docker"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 201 Created
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Layer": {
|
||||
"Name": "523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6",
|
||||
"Path": "/mnt/layers/523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6/layer.tar",
|
||||
"ParentName": "140f9bdfeb9784cf8730e9dab5dd12fbd704151cf555ac8cae650451794e5ac2",
|
||||
"Format": "Docker",
|
||||
"IndexedByVersion": 1
|
||||
}
|
||||
}
|
||||
```
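
###### Example Client Usage (Go)

The same request can be issued from Go with the standard library alone. The helper below is a hypothetical client sketch; the field names are taken from the example above.

```go
package clairclient

import (
	"bytes"
	"encoding/json"
	"net/http"
)

// PostLayer asks Clair to download and index one layer tarball. The call
// blocks until indexing finishes, mirroring the behaviour described above.
func PostLayer(clairAddr, name, parentName, path string) (*http.Response, error) {
	body, err := json.Marshal(map[string]interface{}{
		"Layer": map[string]string{
			"Name":       name,
			"ParentName": parentName,
			"Path":       path,
			"Format":     "Docker",
		},
	})
	if err != nil {
		return nil, err
	}
	return http.Post(clairAddr+"/v1/layers", "application/json", bytes.NewReader(body))
}
```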
|
||||
|
||||
#### GET /layers/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Layers resource displays a Layer and optionally all of its features and vulnerabilities.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|-----------------|------|----------|-------------------------------------------------------------------------------|
|
||||
| features | bool | optional | Displays the list of features indexed in this layer and all of its parents. |
|
||||
| vulnerabilities | bool | optional | Displays the list of vulnerabilities along with the features described above. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```
|
||||
GET http://localhost:6060/v1/layers/17675ec01494d651e1ccf81dc9cf63959ebfeed4f978fddb1666b6ead008ed52?features&vulnerabilities HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Layer": {
|
||||
"Name": "17675ec01494d651e1ccf81dc9cf63959ebfeed4f978fddb1666b6ead008ed52",
|
||||
"NamespaceName": "debian:8",
|
||||
"ParentName": "140f9bdfeb9784cf8730e9dab5dd12fbd704151cf555ac8cae650451794e5ac2",
|
||||
"IndexedByVersion": 1,
|
||||
"Features": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "8.23-4",
|
||||
"Vulnerabilities": [
|
||||
{
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Severity": "Low",
|
||||
"FixedBy": "9.23-5"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
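
###### Example Client Usage (Go)

A client only needs to model the subset of this response it cares about. The sketch below is hypothetical: it fetches a layer with both query parameters set and prints one line per vulnerability found.

```go
package clairclient

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// layerReport mirrors just the fields of the response above that a scanner
// typically inspects.
type layerReport struct {
	Layer struct {
		Features []struct {
			Name            string
			Version         string
			Vulnerabilities []struct {
				Name     string
				Severity string
				FixedBy  string
			}
		}
	}
}

// PrintVulnerabilities fetches a layer with its features and vulnerabilities
// and prints one line per finding.
func PrintVulnerabilities(clairAddr, layerName string) error {
	resp, err := http.Get(clairAddr + "/v1/layers/" + layerName + "?features&vulnerabilities")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var report layerReport
	if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
		return err
	}
	for _, f := range report.Layer.Features {
		for _, v := range f.Vulnerabilities {
			fmt.Printf("%s %s: %s (%s), fixed by %q\n", f.Name, f.Version, v.Name, v.Severity, v.FixedBy)
		}
	}
	return nil
}
```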
|
||||
|
||||
#### DELETE /layers/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Layers resource removes a Layer and all of its children from the database.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/layers/17675ec01494d651e1ccf81dc9cf63959ebfeed4f978fddb1666b6ead008ed52 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
||||
|
||||
|
||||
## Namespaces
|
||||
|
||||
#### GET /namespaces
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Namespaces resource displays a list of namespaces currently being managed.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/namespaces HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Namespaces": [
|
||||
{ "Name": "debian:8" },
|
||||
{ "Name": "debian:9" }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Vulnerabilities
|
||||
|
||||
#### GET /namespaces/`:nsName`/vulnerabilities
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Vulnerabilities resource displays the vulnerabilities data for a given namespace.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|---------|------|----------|------------------------------------------------------------|
|
||||
| limit   | int    | required | Limits the number of vulnerabilities returned for the given namespace. |
|
||||
| page    | string | optional | Selects the specific page of vulnerabilities for the given namespace, using the opaque token returned in `NextPage`. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities?limit=2 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerabilities": [
|
||||
{
|
||||
"Name": "CVE-1999-1332",
|
||||
"NamespaceName": "debian:8",
|
||||
"Description": "gzexe in the gzip package on Red Hat Linux 5.0 and earlier allows local users to overwrite files of other users via a symlink attack on a temporary file.",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-1999-1332",
|
||||
"Severity": "Low"
|
||||
},
|
||||
{
|
||||
"Name": "CVE-1999-1572",
|
||||
"NamespaceName": "debian:8",
|
||||
"Description": "cpio on FreeBSD 2.1.0, Debian GNU/Linux 3.0, and possibly other operating systems, uses a 0 umask when creating files using the -O (archive) or -F options, which creates the files with mode 0666 and allows local users to read or overwrite those files.",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-1999-1572",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 2.1,
|
||||
"Vectors": "AV:L/AC:L/Au:N/C:P/I:N"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"NextPage":"gAAAAABW1ABiOlm6KMDKYFE022bEy_IFJdm4ExxTNuJZMN0Eycn0Sut2tOH9bDB4EWGy5s6xwATUHiG-6JXXaU5U32sBs6_DmA=="
|
||||
}
|
||||
```
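
###### Example Client Usage (Go)

The `NextPage` value is an opaque, encrypted token and must be passed back verbatim as the `page` query parameter. The pagination loop below is a hypothetical client sketch.

```go
package clairclient

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// vulnerabilityPage mirrors the paginated response documented above.
type vulnerabilityPage struct {
	Vulnerabilities []struct {
		Name     string
		Severity string
	}
	NextPage string
}

// ListAllVulnerabilities walks every page for a namespace, feeding the
// opaque NextPage token back as the page parameter until none is returned.
func ListAllVulnerabilities(clairAddr, namespace string, limit int) ([]string, error) {
	var names []string
	page := ""
	for {
		u := fmt.Sprintf("%s/v1/namespaces/%s/vulnerabilities?limit=%d",
			clairAddr, url.QueryEscape(namespace), limit)
		if page != "" {
			u += "&page=" + url.QueryEscape(page)
		}
		resp, err := http.Get(u)
		if err != nil {
			return nil, err
		}
		var body vulnerabilityPage
		err = json.NewDecoder(resp.Body).Decode(&body)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		for _, v := range body.Vulnerabilities {
			names = append(names, v.Name)
		}
		if body.NextPage == "" {
			return names, nil
		}
		page = body.NextPage
	}
}
```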
|
||||
|
||||
#### POST /namespaces/`:name`/vulnerabilities
|
||||
|
||||
###### Description
|
||||
|
||||
The POST route for the Vulnerabilities resource creates a new Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
POST http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities HTTP/1.1
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 201 Created
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### GET /namespaces/`:nsName`/vulnerabilities/`:vulnName`
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Vulnerabilities resource displays the current data for a given vulnerability and optionally the features that fix it.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|---------|------|----------|------------------------------------------------------------|
|
||||
| fixedIn | bool | optional | Displays the list of features that fix this vulnerability. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471?fixedIn HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
},
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### PUT /namespaces/`:nsName`/vulnerabilities/`:vulnName`
|
||||
|
||||
###### Description
|
||||
|
||||
The PUT route for the Vulnerabilities resource updates a given Vulnerability.
|
||||
The "FixedIn" property of the Vulnerability must be empty or missing.
|
||||
Fixes should be managed by the Fixes resource.
|
||||
If this vulnerability was inserted by a Fetcher, changes may be lost when the Fetcher updates.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
PUT http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-2014-9471",
|
||||
"NamespaceName": "debian:8",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2014-9471",
|
||||
"Description": "The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the \"--date=TZ=\"123\"345\" @1\" string to the touch or date command.",
|
||||
"Severity": "Low",
|
||||
"Metadata": {
|
||||
"NVD": {
|
||||
"CVSSv2": {
|
||||
"Score": 7.5,
|
||||
"Vectors": "AV:N/AC:L/Au:N/C:P/I:P"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
#### DELETE /namespaces/`:nsName`/vulnerabilities/`:vulnName`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Vulnerabilities resource deletes a given Vulnerability.
|
||||
If this vulnerability was inserted by a Fetcher, it may be re-inserted when the Fetcher updates.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
||||
|
||||
## Fixes
|
||||
|
||||
#### GET /namespaces/`:nsName`/vulnerabilities/`:vulnName`/fixes
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Fixes resource displays the list of Features that fix the given Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471/fixes HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Features": [
|
||||
{
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "8.23-1"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### PUT /namespaces/`:nsName`/vulnerabilities/`:vulnName`/fixes/`:featureName`
|
||||
|
||||
###### Description
|
||||
|
||||
The PUT route for the Fixes resource updates a Feature that is the fix for a given Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
PUT http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471/fixes/coreutils HTTP/1.1
|
||||
|
||||
{
|
||||
"Feature": {
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "4.24-9"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Feature": {
|
||||
"Name": "coreutils",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "4.24-9"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### DELETE /namespaces/`:nsName`/vulnerabilities/`:vulnName`/fixes/`:featureName`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Fixes resource removes a Feature as a fix for the given Vulnerability.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/namespaces/debian%3A8/vulnerabilities/CVE-2014-9471/fixes/coreutils
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
|
||||
|
||||
## Notifications
|
||||
|
||||
#### GET /notifications/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The GET route for the Notifications resource displays a notification that a Vulnerability has been updated.
|
||||
This route supports simultaneous pagination for both the `Old` and `New` Vulnerabilities' `LayersIntroducingVulnerability` properties, which can be extremely long.
|
||||
|
||||
###### Query Parameters
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
|-------|--------|----------|---------------------------------------------------------------------------------------------------------------|
|
||||
| page | string | optional | Displays the specific page of the "LayersIntroducingVulnerability" property on New and Old vulnerabilities. |
|
||||
| limit | int | optional | Limits the amount of results in the "LayersIntroducingVulnerability" property on New and Old vulnerabilities. |
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
GET http://localhost:6060/v1/notifications/ec45ec87-bfc8-4129-a1c3-d2b82622175a?limit=2 HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json;charset=utf-8
|
||||
Server: clair
|
||||
|
||||
{
|
||||
"Notification": {
|
||||
"Name": "ec45ec87-bfc8-4129-a1c3-d2b82622175a",
|
||||
"Created": "1456247389",
|
||||
"Notified": "1456246708",
|
||||
"Limit": 2,
|
||||
"Page": "gAAAAABWzJaC2JCH6Apr_R1f2EkjGdibnrKOobTcYXBWl6t0Cw6Q04ENGIymB6XlZ3Zi0bYt2c-2cXe43fvsJ7ECZhZz4P8C8F9efr_SR0HPiejzQTuG0qAzeO8klogFfFjSz2peBvgP",
|
||||
"NextPage": "gAAAAABWzJaCTyr6QXP2aYsCwEZfWIkU2GkNplSMlTOhLJfiR3LorBv8QYgEIgyOvZRmHQEzJKvkI6TP2PkRczBkcD17GE89btaaKMqEX14yHDgyfQvdasW1tj3-5bBRt0esKi9ym5En",
|
||||
"New": {
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-TEST",
|
||||
"NamespaceName": "debian:8",
|
||||
"Description": "New CVE",
|
||||
"Severity": "Low",
|
||||
"FixedIn": [
|
||||
{
|
||||
"Name": "grep",
|
||||
"NamespaceName": "debian:8",
|
||||
"Version": "2.25"
|
||||
}
|
||||
]
|
||||
},
|
||||
"LayersIntroducingVulnerability": [
|
||||
"3b59c795b34670618fbcace4dac7a27c5ecec156812c9e2c90d3f4be1916b12d.9673fdf7-b81a-4b3e-acf8-e551ef155449",
|
||||
"523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6"
|
||||
]
|
||||
},
|
||||
"Old": {
|
||||
"Vulnerability": {
|
||||
"Name": "CVE-TEST",
|
||||
"NamespaceName": "debian:8",
|
||||
"Description": "New CVE",
|
||||
"Severity": "Low",
|
||||
"FixedIn": []
|
||||
},
|
||||
"LayersIntroducingVulnerability": [
|
||||
"3b59c795b34670618fbcace4dac7a27c5ecec156812c9e2c90d3f4be1916b12d.9673fdf7-b81a-4b3e-acf8-e551ef155449",
|
||||
"523ef1d23f222195488575f52a39c729c76a8c5630c9a194139cb246fb212da6"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### DELETE /notifications/`:name`
|
||||
|
||||
###### Description
|
||||
|
||||
The DELETE route for the Notifications resource marks a Notification as read.
|
||||
If a notification is not marked as read, Clair will continue to notify the provided endpoints.
|
||||
The time at which this Notification was marked as read can be seen in the `Notified` property of the response from the GET route for Notifications.
|
||||
|
||||
###### Example Request
|
||||
|
||||
```json
|
||||
DELETE http://localhost:6060/v1/notifications/ec45ec87-bfc8-4129-a1c3-d2b82622175a HTTP/1.1
|
||||
```
|
||||
|
||||
###### Example Response
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Server: clair
|
||||
```
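
###### Example Client Usage (Go)

Putting the two notification routes together, a consumer usually fetches the notification by name, pages through the affected layers, acts on them, and finally issues the DELETE so Clair stops re-sending it. The sketch below is illustrative client code, not part of Clair.

```go
package clairclient

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// notificationPage mirrors the subset of the response above that a consumer
// usually needs.
type notificationPage struct {
	Notification struct {
		NextPage string
		New      *struct {
			LayersIntroducingVulnerability []string
		}
	}
}

// HandleNotification pages through a notification's affected layers and then
// marks it as read so Clair stops re-sending it.
func HandleNotification(clairAddr, name string, limit int) error {
	page := ""
	for {
		u := fmt.Sprintf("%s/v1/notifications/%s?limit=%d", clairAddr, name, limit)
		if page != "" {
			u += "&page=" + url.QueryEscape(page)
		}
		resp, err := http.Get(u)
		if err != nil {
			return err
		}
		var body notificationPage
		err = json.NewDecoder(resp.Body).Decode(&body)
		resp.Body.Close()
		if err != nil {
			return err
		}
		if body.Notification.New != nil {
			for _, layer := range body.Notification.New.LayersIntroducingVulnerability {
				fmt.Println("rescan layer:", layer)
			}
		}
		if body.Notification.NextPage == "" {
			break
		}
		page = body.Notification.NextPage
	}

	// Mark the notification as read so Clair stops re-notifying.
	req, err := http.NewRequest(http.MethodDelete, clairAddr+"/v1/notifications/"+name, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err == nil {
		resp.Body.Close()
	}
	return err
}
```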
|
318
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/api/v1/models.go
generated
vendored
Normal file
318
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/api/v1/models.go
generated
vendored
Normal file
@ -0,0 +1,318 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/fernet/fernet-go"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "v1")
|
||||
|
||||
type Error struct {
|
||||
Message string `json:"Message,omitempty"`
|
||||
}
|
||||
|
||||
type Layer struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
NamespaceName string `json:"NamespaceName,omitempty"`
|
||||
Path string `json:"Path,omitempty"`
|
||||
ParentName string `json:"ParentName,omitempty"`
|
||||
Format string `json:"Format,omitempty"`
|
||||
IndexedByVersion int `json:"IndexedByVersion,omitempty"`
|
||||
Features []Feature `json:"Features,omitempty"`
|
||||
}
|
||||
|
||||
func LayerFromDatabaseModel(dbLayer database.Layer, withFeatures, withVulnerabilities bool) Layer {
|
||||
layer := Layer{
|
||||
Name: dbLayer.Name,
|
||||
IndexedByVersion: dbLayer.EngineVersion,
|
||||
}
|
||||
|
||||
if dbLayer.Parent != nil {
|
||||
layer.ParentName = dbLayer.Parent.Name
|
||||
}
|
||||
|
||||
if dbLayer.Namespace != nil {
|
||||
layer.NamespaceName = dbLayer.Namespace.Name
|
||||
}
|
||||
|
||||
if (withFeatures || withVulnerabilities) && dbLayer.Features != nil {
|
||||
for _, dbFeatureVersion := range dbLayer.Features {
|
||||
feature := Feature{
|
||||
Name: dbFeatureVersion.Feature.Name,
|
||||
NamespaceName: dbFeatureVersion.Feature.Namespace.Name,
|
||||
Version: dbFeatureVersion.Version.String(),
|
||||
AddedBy: dbFeatureVersion.AddedBy.Name,
|
||||
}
|
||||
|
||||
for _, dbVuln := range dbFeatureVersion.AffectedBy {
|
||||
vuln := Vulnerability{
|
||||
Name: dbVuln.Name,
|
||||
NamespaceName: dbVuln.Namespace.Name,
|
||||
Description: dbVuln.Description,
|
||||
Link: dbVuln.Link,
|
||||
Severity: string(dbVuln.Severity),
|
||||
Metadata: dbVuln.Metadata,
|
||||
}
|
||||
|
||||
if dbVuln.FixedBy != types.MaxVersion {
|
||||
vuln.FixedBy = dbVuln.FixedBy.String()
|
||||
}
|
||||
feature.Vulnerabilities = append(feature.Vulnerabilities, vuln)
|
||||
}
|
||||
layer.Features = append(layer.Features, feature)
|
||||
}
|
||||
}
|
||||
|
||||
return layer
|
||||
}
|
||||
|
||||
type Namespace struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
}
|
||||
|
||||
type Vulnerability struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
NamespaceName string `json:"NamespaceName,omitempty"`
|
||||
Description string `json:"Description,omitempty"`
|
||||
Link string `json:"Link,omitempty"`
|
||||
Severity string `json:"Severity,omitempty"`
|
||||
Metadata map[string]interface{} `json:"Metadata,omitempty"`
|
||||
FixedBy string `json:"FixedBy,omitempty"`
|
||||
FixedIn []Feature `json:"FixedIn,omitempty"`
|
||||
}
|
||||
|
||||
func (v Vulnerability) DatabaseModel() (database.Vulnerability, error) {
|
||||
severity := types.Priority(v.Severity)
|
||||
if !severity.IsValid() {
|
||||
return database.Vulnerability{}, errors.New("Invalid severity")
|
||||
}
|
||||
|
||||
var dbFeatures []database.FeatureVersion
|
||||
for _, feature := range v.FixedIn {
|
||||
dbFeature, err := feature.DatabaseModel()
|
||||
if err != nil {
|
||||
return database.Vulnerability{}, err
|
||||
}
|
||||
|
||||
dbFeatures = append(dbFeatures, dbFeature)
|
||||
}
|
||||
|
||||
return database.Vulnerability{
|
||||
Name: v.Name,
|
||||
Namespace: database.Namespace{Name: v.NamespaceName},
|
||||
Description: v.Description,
|
||||
Link: v.Link,
|
||||
Severity: severity,
|
||||
Metadata: v.Metadata,
|
||||
FixedIn: dbFeatures,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func VulnerabilityFromDatabaseModel(dbVuln database.Vulnerability, withFixedIn bool) Vulnerability {
|
||||
vuln := Vulnerability{
|
||||
Name: dbVuln.Name,
|
||||
NamespaceName: dbVuln.Namespace.Name,
|
||||
Description: dbVuln.Description,
|
||||
Link: dbVuln.Link,
|
||||
Severity: string(dbVuln.Severity),
|
||||
Metadata: dbVuln.Metadata,
|
||||
}
|
||||
|
||||
if withFixedIn {
|
||||
for _, dbFeatureVersion := range dbVuln.FixedIn {
|
||||
vuln.FixedIn = append(vuln.FixedIn, FeatureFromDatabaseModel(dbFeatureVersion))
|
||||
}
|
||||
}
|
||||
|
||||
return vuln
|
||||
}
|
||||
|
||||
type Feature struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
NamespaceName string `json:"NamespaceName,omitempty"`
|
||||
Version string `json:"Version,omitempty"`
|
||||
Vulnerabilities []Vulnerability `json:"Vulnerabilities,omitempty"`
|
||||
AddedBy string `json:"AddedBy,omitempty"`
|
||||
}
|
||||
|
||||
func FeatureFromDatabaseModel(dbFeatureVersion database.FeatureVersion) Feature {
|
||||
versionStr := dbFeatureVersion.Version.String()
|
||||
if versionStr == types.MaxVersion.String() {
|
||||
versionStr = "None"
|
||||
}
|
||||
|
||||
return Feature{
|
||||
Name: dbFeatureVersion.Feature.Name,
|
||||
NamespaceName: dbFeatureVersion.Feature.Namespace.Name,
|
||||
Version: versionStr,
|
||||
AddedBy: dbFeatureVersion.AddedBy.Name,
|
||||
}
|
||||
}
|
||||
|
||||
func (f Feature) DatabaseModel() (database.FeatureVersion, error) {
|
||||
var version types.Version
|
||||
if f.Version == "None" {
|
||||
version = types.MaxVersion
|
||||
} else {
|
||||
var err error
|
||||
version, err = types.NewVersion(f.Version)
|
||||
if err != nil {
|
||||
return database.FeatureVersion{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Name: f.Name,
|
||||
Namespace: database.Namespace{Name: f.NamespaceName},
|
||||
},
|
||||
Version: version,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type Notification struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
Created string `json:"Created,omitempty"`
|
||||
Notified string `json:"Notified,omitempty"`
|
||||
Deleted string `json:"Deleted,omitempty"`
|
||||
Limit int `json:"Limit,omitempty"`
|
||||
Page string `json:"Page,omitempty"`
|
||||
NextPage string `json:"NextPage,omitempty"`
|
||||
Old *VulnerabilityWithLayers `json:"Old,omitempty"`
|
||||
New *VulnerabilityWithLayers `json:"New,omitempty"`
|
||||
}
|
||||
|
||||
func NotificationFromDatabaseModel(dbNotification database.VulnerabilityNotification, limit int, pageToken string, nextPage database.VulnerabilityNotificationPageNumber, key string) Notification {
|
||||
var oldVuln *VulnerabilityWithLayers
|
||||
if dbNotification.OldVulnerability != nil {
|
||||
v := VulnerabilityWithLayersFromDatabaseModel(*dbNotification.OldVulnerability)
|
||||
oldVuln = &v
|
||||
}
|
||||
|
||||
var newVuln *VulnerabilityWithLayers
|
||||
if dbNotification.NewVulnerability != nil {
|
||||
v := VulnerabilityWithLayersFromDatabaseModel(*dbNotification.NewVulnerability)
|
||||
newVuln = &v
|
||||
}
|
||||
|
||||
var nextPageStr string
|
||||
if nextPage != database.NoVulnerabilityNotificationPage {
|
||||
nextPageBytes, _ := tokenMarshal(nextPage, key)
|
||||
nextPageStr = string(nextPageBytes)
|
||||
}
|
||||
|
||||
var created, notified, deleted string
|
||||
if !dbNotification.Created.IsZero() {
|
||||
created = fmt.Sprintf("%d", dbNotification.Created.Unix())
|
||||
}
|
||||
if !dbNotification.Notified.IsZero() {
|
||||
notified = fmt.Sprintf("%d", dbNotification.Notified.Unix())
|
||||
}
|
||||
if !dbNotification.Deleted.IsZero() {
|
||||
deleted = fmt.Sprintf("%d", dbNotification.Deleted.Unix())
|
||||
}
|
||||
|
||||
// TODO(jzelinskie): implement "changed" key
|
||||
|
||||
return Notification{
|
||||
Name: dbNotification.Name,
|
||||
Created: created,
|
||||
Notified: notified,
|
||||
Deleted: deleted,
|
||||
Limit: limit,
|
||||
Page: pageToken,
|
||||
NextPage: nextPageStr,
|
||||
Old: oldVuln,
|
||||
New: newVuln,
|
||||
}
|
||||
}
|
||||
|
||||
type VulnerabilityWithLayers struct {
|
||||
Vulnerability *Vulnerability `json:"Vulnerability,omitempty"`
|
||||
LayersIntroducingVulnerability []string `json:"LayersIntroducingVulnerability,omitempty"`
|
||||
}
|
||||
|
||||
func VulnerabilityWithLayersFromDatabaseModel(dbVuln database.Vulnerability) VulnerabilityWithLayers {
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, true)
|
||||
|
||||
var layers []string
|
||||
for _, layer := range dbVuln.LayersIntroducingVulnerability {
|
||||
layers = append(layers, layer.Name)
|
||||
}
|
||||
|
||||
return VulnerabilityWithLayers{
|
||||
Vulnerability: &vuln,
|
||||
LayersIntroducingVulnerability: layers,
|
||||
}
|
||||
}
|
||||
|
||||
type LayerEnvelope struct {
|
||||
Layer *Layer `json:"Layer,omitempty"`
|
||||
Error *Error `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
type NamespaceEnvelope struct {
|
||||
Namespaces *[]Namespace `json:"Namespaces,omitempty"`
|
||||
Error *Error `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
type VulnerabilityEnvelope struct {
|
||||
Vulnerability *Vulnerability `json:"Vulnerability,omitempty"`
|
||||
Vulnerabilities *[]Vulnerability `json:"Vulnerabilities,omitempty"`
|
||||
NextPage string `json:"NextPage,omitempty"`
|
||||
Error *Error `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
type NotificationEnvelope struct {
|
||||
Notification *Notification `json:"Notification,omitempty"`
|
||||
Error *Error `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
type FeatureEnvelope struct {
|
||||
Feature *Feature `json:"Feature,omitempty"`
|
||||
Features *[]Feature `json:"Features,omitempty"`
|
||||
Error *Error `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
func tokenUnmarshal(token string, key string, v interface{}) error {
|
||||
k, _ := fernet.DecodeKey(key)
|
||||
msg := fernet.VerifyAndDecrypt([]byte(token), time.Hour, []*fernet.Key{k})
|
||||
if msg == nil {
|
||||
return errors.New("invalid or expired pagination token")
|
||||
}
|
||||
|
||||
return json.NewDecoder(bytes.NewBuffer(msg)).Decode(&v)
|
||||
}
|
||||
|
||||
func tokenMarshal(v interface{}, key string) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
err := json.NewEncoder(&buf).Encode(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k, _ := fernet.DecodeKey(key)
|
||||
return fernet.EncryptAndSign(buf.Bytes(), k)
|
||||
}
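
The pagination tokens produced by `tokenMarshal` are simply fernet-encrypted JSON. Below is a minimal round-trip sketch using only the fernet calls already seen in this file; the key is a throwaway example value standing in for the API's `PaginationKey`.

```go
package main

import (
	"fmt"
	"time"

	"github.com/fernet/fernet-go"
)

func main() {
	// Example 256-bit fernet key; in Clair this comes from the API
	// configuration's PaginationKey.
	const key = "cw_0x689RpI-jtRR7oE8h_eQsKImvJapLeSbXpwF4e4="

	k, err := fernet.DecodeKey(key)
	if err != nil {
		panic(err)
	}

	// Encrypt a page marker into an opaque token, as tokenMarshal does...
	tok, err := fernet.EncryptAndSign([]byte(`{"Page":42}`), k)
	if err != nil {
		panic(err)
	}

	// ...and verify/decrypt it again, as tokenUnmarshal does.
	msg := fernet.VerifyAndDecrypt(tok, time.Hour, []*fernet.Key{k})
	fmt.Printf("token %q decodes to %s\n", tok, msg)
}
```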
|
56
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/api/v1/router.go
generated
vendored
Normal file
56
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/api/v1/router.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package v1 implements the first version of the Clair API.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/julienschmidt/httprouter"
|
||||
|
||||
"github.com/coreos/clair/api/context"
|
||||
)
|
||||
|
||||
// NewRouter creates an HTTP router for version 1 of the Clair API.
|
||||
func NewRouter(ctx *context.RouteContext) *httprouter.Router {
|
||||
router := httprouter.New()
|
||||
|
||||
// Layers
|
||||
router.POST("/layers", context.HTTPHandler(postLayer, ctx))
|
||||
router.GET("/layers/:layerName", context.HTTPHandler(getLayer, ctx))
|
||||
router.DELETE("/layers/:layerName", context.HTTPHandler(deleteLayer, ctx))
|
||||
|
||||
// Namespaces
|
||||
router.GET("/namespaces", context.HTTPHandler(getNamespaces, ctx))
|
||||
|
||||
// Vulnerabilities
|
||||
router.GET("/namespaces/:namespaceName/vulnerabilities", context.HTTPHandler(getVulnerabilities, ctx))
|
||||
router.POST("/namespaces/:namespaceName/vulnerabilities", context.HTTPHandler(postVulnerability, ctx))
|
||||
router.GET("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName", context.HTTPHandler(getVulnerability, ctx))
|
||||
router.PUT("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName", context.HTTPHandler(putVulnerability, ctx))
|
||||
router.DELETE("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName", context.HTTPHandler(deleteVulnerability, ctx))
|
||||
|
||||
// Fixes
|
||||
router.GET("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName/fixes", context.HTTPHandler(getFixes, ctx))
|
||||
router.PUT("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName/fixes/:fixName", context.HTTPHandler(putFix, ctx))
|
||||
router.DELETE("/namespaces/:namespaceName/vulnerabilities/:vulnerabilityName/fixes/:fixName", context.HTTPHandler(deleteFix, ctx))
|
||||
|
||||
// Notifications
|
||||
router.GET("/notifications/:notificationName", context.HTTPHandler(getNotification, ctx))
|
||||
router.DELETE("/notifications/:notificationName", context.HTTPHandler(deleteNotification, ctx))
|
||||
|
||||
// Metrics
|
||||
router.GET("/metrics", context.HTTPHandler(getMetrics, ctx))
|
||||
|
||||
return router
|
||||
}
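
To actually serve this API, the router is handed a `RouteContext` and mounted on an HTTP server. The wiring below is a simplified, hypothetical sketch; judging from `APIConfig`, the real server additionally handles TLS, timeouts, and a separate health port.

```go
package clairapi

import (
	"fmt"
	"net/http"

	"github.com/coreos/clair/api/context"
	"github.com/coreos/clair/api/v1"
	"github.com/coreos/clair/config"
	"github.com/coreos/clair/database"
)

// ServeV1 mounts the v1 router on the configured port. The datastore is
// whatever database.Datastore implementation the caller has opened.
func ServeV1(store database.Datastore, cfg *config.APIConfig) error {
	ctx := &context.RouteContext{Store: store, Config: cfg}
	return http.ListenAndServe(fmt.Sprintf(":%d", cfg.Port), v1.NewRouter(ctx))
}
```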
|
498
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/api/v1/routes.go
generated
vendored
Normal file
498
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/api/v1/routes.go
generated
vendored
Normal file
@ -0,0 +1,498 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/coreos/clair/api/context"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/worker"
|
||||
)
|
||||
|
||||
const (
|
||||
// These are the route identifiers for prometheus.
|
||||
postLayerRoute = "v1/postLayer"
|
||||
getLayerRoute = "v1/getLayer"
|
||||
deleteLayerRoute = "v1/deleteLayer"
|
||||
getNamespacesRoute = "v1/getNamespaces"
|
||||
getVulnerabilitiesRoute = "v1/getVulnerabilities"
|
||||
postVulnerabilityRoute = "v1/postVulnerability"
|
||||
getVulnerabilityRoute = "v1/getVulnerability"
|
||||
putVulnerabilityRoute = "v1/putVulnerability"
|
||||
deleteVulnerabilityRoute = "v1/deleteVulnerability"
|
||||
getFixesRoute = "v1/getFixes"
|
||||
putFixRoute = "v1/putFix"
|
||||
deleteFixRoute = "v1/deleteFix"
|
||||
getNotificationRoute = "v1/getNotification"
|
||||
deleteNotificationRoute = "v1/deleteNotification"
|
||||
getMetricsRoute = "v1/getMetrics"
|
||||
|
||||
// maxBodySize restricts client request bodies to 1MiB.
|
||||
maxBodySize int64 = 1048576
|
||||
|
||||
// statusUnprocessableEntity represents the 422 (Unprocessable Entity) status code, which means
|
||||
// the server understands the content type of the request entity
|
||||
// (hence a 415 (Unsupported Media Type) status code is inappropriate), and the syntax of the
|
||||
// request entity is correct (thus a 400 (Bad Request) status code is inappropriate) but was
|
||||
// unable to process the contained instructions.
|
||||
statusUnprocessableEntity = 422
|
||||
)
|
||||
|
||||
func decodeJSON(r *http.Request, v interface{}) error {
|
||||
defer r.Body.Close()
|
||||
return json.NewDecoder(io.LimitReader(r.Body, maxBodySize)).Decode(v)
|
||||
}
|
||||
|
||||
func writeResponse(w http.ResponseWriter, r *http.Request, status int, resp interface{}) {
|
||||
// Headers must be written before the response.
|
||||
header := w.Header()
|
||||
header.Set("Content-Type", "application/json;charset=utf-8")
|
||||
header.Set("Server", "clair")
|
||||
|
||||
// Gzip the response if the client supports it.
|
||||
var writer io.Writer = w
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
gzipWriter := gzip.NewWriter(w)
|
||||
defer gzipWriter.Close()
|
||||
writer = gzipWriter
|
||||
|
||||
header.Set("Content-Encoding", "gzip")
|
||||
}
|
||||
|
||||
// Write the response.
|
||||
w.WriteHeader(status)
|
||||
err := json.NewEncoder(writer).Encode(resp)
|
||||
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *json.MarshalerError, *json.UnsupportedTypeError, *json.UnsupportedValueError:
|
||||
panic("v1: failed to marshal response: " + err.Error())
|
||||
default:
|
||||
log.Warningf("failed to write response: %s", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func postLayer(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
request := LayerEnvelope{}
|
||||
err := decodeJSON(r, &request)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return postLayerRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Layer == nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, LayerEnvelope{Error: &Error{"failed to provide layer"}})
|
||||
return postLayerRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
err = worker.Process(ctx.Store, request.Layer.Name, request.Layer.ParentName, request.Layer.Path, request.Layer.Format)
|
||||
if err != nil {
|
||||
if err == utils.ErrCouldNotExtract ||
|
||||
err == utils.ErrExtractedFileTooBig ||
|
||||
err == worker.ErrUnsupported {
|
||||
writeResponse(w, r, statusUnprocessableEntity, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return postLayerRoute, statusUnprocessableEntity
|
||||
}
|
||||
|
||||
if _, badreq := err.(*cerrors.ErrBadRequest); badreq {
|
||||
writeResponse(w, r, http.StatusBadRequest, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return postLayerRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusInternalServerError, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return postLayerRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusCreated, LayerEnvelope{Layer: &Layer{
|
||||
Name: request.Layer.Name,
|
||||
ParentName: request.Layer.ParentName,
|
||||
Path: request.Layer.Path,
|
||||
Format: request.Layer.Format,
|
||||
IndexedByVersion: worker.Version,
|
||||
}})
|
||||
return postLayerRoute, http.StatusCreated
|
||||
}
|
||||
|
||||
func getLayer(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
_, withFeatures := r.URL.Query()["features"]
|
||||
_, withVulnerabilities := r.URL.Query()["vulnerabilities"]
|
||||
|
||||
dbLayer, err := ctx.Store.FindLayer(p.ByName("layerName"), withFeatures, withVulnerabilities)
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return getLayerRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return getLayerRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
layer := LayerFromDatabaseModel(dbLayer, withFeatures, withVulnerabilities)
|
||||
|
||||
writeResponse(w, r, http.StatusOK, LayerEnvelope{Layer: &layer})
|
||||
return getLayerRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteLayer(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteLayer(p.ByName("layerName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteLayerRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, LayerEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteLayerRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteLayerRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getNamespaces(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
dbNamespaces, err := ctx.Store.ListNamespaces()
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, NamespaceEnvelope{Error: &Error{err.Error()}})
|
||||
return getNamespacesRoute, http.StatusInternalServerError
|
||||
}
|
||||
var namespaces []Namespace
|
||||
for _, dbNamespace := range dbNamespaces {
|
||||
namespaces = append(namespaces, Namespace{Name: dbNamespace.Name})
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusOK, NamespaceEnvelope{Namespaces: &namespaces})
|
||||
return getNamespacesRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
query := r.URL.Query()
|
||||
|
||||
limitStrs, limitExists := query["limit"]
|
||||
if !limitExists {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"must provide limit query parameter"}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
}
|
||||
limit, err := strconv.Atoi(limitStrs[0])
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"invalid limit format: " + err.Error()}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
} else if limit < 0 {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"limit value should not be less than zero"}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
page := 0
|
||||
pageStrs, pageExists := query["page"]
|
||||
if pageExists {
|
||||
err = tokenUnmarshal(pageStrs[0], ctx.Config.PaginationKey, &page)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"invalid page format: " + err.Error()}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
}
|
||||
}
|
||||
|
||||
namespace := p.ByName("namespaceName")
|
||||
if namespace == "" {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"namespace should not be empty"}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
dbVulns, nextPage, err := ctx.Store.ListVulnerabilities(namespace, limit, page)
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return getVulnerabilitiesRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return getVulnerabilitiesRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
var vulns []Vulnerability
|
||||
for _, dbVuln := range dbVulns {
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, false)
|
||||
vulns = append(vulns, vuln)
|
||||
}
|
||||
|
||||
var nextPageStr string
|
||||
if nextPage != -1 {
|
||||
nextPageBytes, err := tokenMarshal(nextPage, ctx.Config.PaginationKey)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"failed to marshal token: " + err.Error()}})
|
||||
return getVulnerabilitiesRoute, http.StatusBadRequest
|
||||
}
|
||||
nextPageStr = string(nextPageBytes)
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusOK, VulnerabilityEnvelope{Vulnerabilities: &vulns, NextPage: nextPageStr})
|
||||
return getVulnerabilitiesRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func postVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
request := VulnerabilityEnvelope{}
|
||||
err := decodeJSON(r, &request)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return postVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Vulnerability == nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"failed to provide vulnerability"}})
|
||||
return postVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
vuln, err := request.Vulnerability.DatabaseModel()
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return postVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
err = ctx.Store.InsertVulnerabilities([]database.Vulnerability{vuln}, true)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *cerrors.ErrBadRequest:
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return postVulnerabilityRoute, http.StatusBadRequest
|
||||
default:
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return postVulnerabilityRoute, http.StatusInternalServerError
|
||||
}
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusCreated, VulnerabilityEnvelope{Vulnerability: request.Vulnerability})
|
||||
return postVulnerabilityRoute, http.StatusCreated
|
||||
}
|
||||
|
||||
func getVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
_, withFixedIn := r.URL.Query()["fixedIn"]
|
||||
|
||||
dbVuln, err := ctx.Store.FindVulnerability(p.ByName("namespaceName"), p.ByName("vulnerabilityName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return getVulnerabilityRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return getVulnerabilityRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, withFixedIn)
|
||||
|
||||
writeResponse(w, r, http.StatusOK, VulnerabilityEnvelope{Vulnerability: &vuln})
|
||||
return getVulnerabilityRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func putVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
request := VulnerabilityEnvelope{}
|
||||
err := decodeJSON(r, &request)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return putVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Vulnerability == nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"failed to provide vulnerability"}})
|
||||
return putVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if len(request.Vulnerability.FixedIn) != 0 {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{"Vulnerability.FixedIn must be empty"}})
|
||||
return putVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
vuln, err := request.Vulnerability.DatabaseModel()
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return putVulnerabilityRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
vuln.Namespace.Name = p.ByName("namespaceName")
|
||||
vuln.Name = p.ByName("vulnerabilityName")
|
||||
|
||||
err = ctx.Store.InsertVulnerabilities([]database.Vulnerability{vuln}, true)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *cerrors.ErrBadRequest:
|
||||
writeResponse(w, r, http.StatusBadRequest, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return putVulnerabilityRoute, http.StatusBadRequest
|
||||
default:
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return putVulnerabilityRoute, http.StatusInternalServerError
|
||||
}
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusOK, VulnerabilityEnvelope{Vulnerability: request.Vulnerability})
|
||||
return putVulnerabilityRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteVulnerability(p.ByName("namespaceName"), p.ByName("vulnerabilityName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteVulnerabilityRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, VulnerabilityEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteVulnerabilityRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteVulnerabilityRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getFixes(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
dbVuln, err := ctx.Store.FindVulnerability(p.ByName("namespaceName"), p.ByName("vulnerabilityName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return getFixesRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return getFixesRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
vuln := VulnerabilityFromDatabaseModel(dbVuln, true)
|
||||
writeResponse(w, r, http.StatusOK, FeatureEnvelope{Features: &vuln.FixedIn})
|
||||
return getFixesRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func putFix(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
request := FeatureEnvelope{}
|
||||
err := decodeJSON(r, &request)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Feature == nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{"failed to provide feature"}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
if request.Feature.Name != p.ByName("fixName") {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{"feature name in URL and JSON do not match"}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
dbFix, err := request.Feature.DatabaseModel()
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
err = ctx.Store.InsertVulnerabilityFixes(p.ByName("vulnerabilityNamespace"), p.ByName("vulnerabilityName"), []database.FeatureVersion{dbFix})
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *cerrors.ErrBadRequest:
|
||||
writeResponse(w, r, http.StatusBadRequest, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusBadRequest
|
||||
default:
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusNotFound
|
||||
}
|
||||
writeResponse(w, r, http.StatusInternalServerError, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return putFixRoute, http.StatusInternalServerError
|
||||
}
|
||||
}
|
||||
|
||||
writeResponse(w, r, http.StatusOK, FeatureEnvelope{Feature: request.Feature})
|
||||
return putFixRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteFix(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteVulnerabilityFix(p.ByName("vulnerabilityNamespace"), p.ByName("vulnerabilityName"), p.ByName("fixName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteFixRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, FeatureEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteFixRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteFixRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getNotification(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
query := r.URL.Query()
|
||||
|
||||
limitStrs, limitExists := query["limit"]
|
||||
if !limitExists {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"must provide limit query parameter"}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
limit, err := strconv.Atoi(limitStrs[0])
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"invalid limit format: " + err.Error()}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
|
||||
var pageToken string
|
||||
page := database.VulnerabilityNotificationFirstPage
|
||||
pageStrs, pageExists := query["page"]
|
||||
if pageExists {
|
||||
err := tokenUnmarshal(pageStrs[0], ctx.Config.PaginationKey, &page)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"invalid page format: " + err.Error()}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
pageToken = pageStrs[0]
|
||||
} else {
|
||||
pageTokenBytes, err := tokenMarshal(page, ctx.Config.PaginationKey)
|
||||
if err != nil {
|
||||
writeResponse(w, r, http.StatusBadRequest, NotificationEnvelope{Error: &Error{"failed to marshal token: " + err.Error()}})
|
||||
return getNotificationRoute, http.StatusBadRequest
|
||||
}
|
||||
pageToken = string(pageTokenBytes)
|
||||
}
|
||||
|
||||
dbNotification, nextPage, err := ctx.Store.GetNotification(p.ByName("notificationName"), limit, page)
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return getNotificationRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return getNotificationRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
notification := NotificationFromDatabaseModel(dbNotification, limit, pageToken, nextPage, ctx.Config.PaginationKey)
|
||||
|
||||
writeResponse(w, r, http.StatusOK, NotificationEnvelope{Notification: ¬ification})
|
||||
return getNotificationRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func deleteNotification(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
err := ctx.Store.DeleteNotification(p.ByName("notificationName"))
|
||||
if err == cerrors.ErrNotFound {
|
||||
writeResponse(w, r, http.StatusNotFound, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteNotificationRoute, http.StatusNotFound
|
||||
} else if err != nil {
|
||||
writeResponse(w, r, http.StatusInternalServerError, NotificationEnvelope{Error: &Error{err.Error()}})
|
||||
return deleteNotificationRoute, http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return deleteNotificationRoute, http.StatusOK
|
||||
}
|
||||
|
||||
func getMetrics(w http.ResponseWriter, r *http.Request, p httprouter.Params, ctx *context.RouteContext) (string, int) {
|
||||
prometheus.Handler().ServeHTTP(w, r)
|
||||
return getMetricsRoute, 0
|
||||
}
|
128
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/config/config.go
generated
vendored
Normal file
@ -0,0 +1,128 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/fernet/fernet-go"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// File represents a YAML configuration file that namespaces all Clair
|
||||
// configuration under the top-level "clair" key.
|
||||
type File struct {
|
||||
Clair Config `yaml:"clair"`
|
||||
}
|
||||
|
||||
// Config is the global configuration for an instance of Clair.
|
||||
type Config struct {
|
||||
Database *DatabaseConfig
|
||||
Updater *UpdaterConfig
|
||||
Notifier *NotifierConfig
|
||||
API *APIConfig
|
||||
}
|
||||
|
||||
// DatabaseConfig is the configuration used to specify how Clair connects
|
||||
// to a database.
|
||||
type DatabaseConfig struct {
|
||||
Source string
|
||||
CacheSize int
|
||||
}
|
||||
|
||||
// UpdaterConfig is the configuration for the Updater service.
|
||||
type UpdaterConfig struct {
|
||||
Interval time.Duration
|
||||
}
|
||||
|
||||
// NotifierConfig is the configuration for the Notifier service and its registered notifiers.
|
||||
type NotifierConfig struct {
|
||||
Attempts int
|
||||
RenotifyInterval time.Duration
|
||||
Params map[string]interface{} `yaml:",inline"`
|
||||
}
|
||||
|
||||
// APIConfig is the configuration for the API service.
|
||||
type APIConfig struct {
|
||||
Port int
|
||||
HealthPort int
|
||||
Timeout time.Duration
|
||||
PaginationKey string
|
||||
CertFile, KeyFile, CAFile string
|
||||
}
|
||||
|
||||
// DefaultConfig is a configuration that can be used as a fallback value.
|
||||
var DefaultConfig = Config{
|
||||
Database: &DatabaseConfig{
|
||||
CacheSize: 16384,
|
||||
},
|
||||
Updater: &UpdaterConfig{
|
||||
Interval: 1 * time.Hour,
|
||||
},
|
||||
API: &APIConfig{
|
||||
Port: 6060,
|
||||
HealthPort: 6061,
|
||||
Timeout: 900 * time.Second,
|
||||
},
|
||||
Notifier: &NotifierConfig{
|
||||
Attempts: 5,
|
||||
RenotifyInterval: 2 * time.Hour,
|
||||
},
|
||||
}
|
||||
|
||||
// Load is a shortcut to open a file, read it, and generate a Config.
|
||||
// It supports relative and absolute paths. Given "", it returns DefaultConfig.
|
||||
func Load(path string) (config *Config, err error) {
|
||||
config = &DefaultConfig
|
||||
if path == "" {
|
||||
return
|
||||
}
|
||||
|
||||
f, err := os.Open(os.ExpandEnv(path))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
d, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var cfgFile File
|
||||
err = yaml.Unmarshal(d, &cfgFile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
config = &cfgFile.Clair
|
||||
|
||||
// Generate a pagination key if none is provided.
|
||||
if config.API.PaginationKey == "" {
|
||||
var key fernet.Key
|
||||
if err = key.Generate(); err != nil {
|
||||
return
|
||||
}
|
||||
config.API.PaginationKey = key.Encode()
|
||||
} else {
|
||||
_, err = fernet.DecodeKey(config.API.PaginationKey)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
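Load falls back to DefaultConfig only when the path is empty; once a file is parsed, sections missing from the YAML come back nil rather than defaulted, so callers should check them. A small usage sketch, with a hypothetical path and YAML layout:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/clair/config"
)

func main() {
	// The YAML nests everything under a top-level "clair" key, e.g.:
	//
	//   clair:
	//     database:
	//       source: postgresql://user:pass@localhost/clair
	//     api:
	//       port: 6060
	//
	// The path is hypothetical and is expanded with os.ExpandEnv before opening.
	cfg, err := config.Load("$HOME/clair/config.yaml")
	if err != nil {
		log.Fatal(err)
	}
	if cfg.API != nil {
		fmt.Println("API port:", cfg.API.Port)
	}
}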
|
155
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/database.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package database defines Clair's models and a common interface for database implementations.
|
||||
package database
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrBackendException is an error that occurs when the database backend does
|
||||
// not work properly (ie. unreachable).
|
||||
ErrBackendException = errors.New("database: an error occurred when querying the backend")
|
||||
|
||||
// ErrInconsistent is an error that occurs when a database consistency check
|
||||
// fails (ie. when an entity which is supposed to be unique is detected twice)
|
||||
ErrInconsistent = errors.New("database: inconsistent database")
|
||||
|
||||
// ErrCantOpen is an error that occurs when the database could not be opened
|
||||
ErrCantOpen = errors.New("database: could not open database")
|
||||
)
|
||||
|
||||
// Datastore is the interface that describes a database backend implementation.
|
||||
type Datastore interface {
|
||||
// # Namespace
|
||||
// ListNamespaces returns the entire list of known Namespaces.
|
||||
ListNamespaces() ([]Namespace, error)
|
||||
|
||||
// # Layer
|
||||
// InsertLayer stores a Layer in the database.
|
||||
// A Layer is uniquely identified by its Name. The Name and EngineVersion fields are mandatory.
|
||||
// If a Parent is specified, it is expected that it has been retrieved using FindLayer.
|
||||
// If a Layer that already exists is inserted and the EngineVersion of the given Layer is higher
|
||||
// than the stored one, the stored Layer should be updated.
|
||||
// The function has to be idempotent, inserting a layer that already exists shouldn't return an
|
||||
// error.
|
||||
InsertLayer(Layer) error
|
||||
|
||||
// FindLayer retrieves a Layer from the database.
|
||||
// withFeatures specifies whether the Features field should be filled. When withVulnerabilities is
|
||||
// true, the Features field should be filled and their AffectedBy fields should contain every
|
||||
// vulnerability that affects them.
|
||||
FindLayer(name string, withFeatures, withVulnerabilities bool) (Layer, error)
|
||||
|
||||
// DeleteLayer deletes a Layer from the database and every layer that is based on it,
|
||||
// recursively.
|
||||
DeleteLayer(name string) error
|
||||
|
||||
// # Vulnerability
|
||||
// ListVulnerabilities returns the list of vulnerabilities of a certain Namespace.
|
||||
// The Limit and page parameters are used to paginate the returned list.
|
||||
// The first given page should be 0. The function will then return the next available page.
|
||||
// If there is no more page, -1 has to be returned.
|
||||
ListVulnerabilities(namespaceName string, limit int, page int) ([]Vulnerability, int, error)
|
||||
|
||||
// InsertVulnerabilities stores the given Vulnerabilities in the database, updating them if
|
||||
// necessary. A vulnerability is uniquely identified by its Namespace and its Name.
|
||||
// The FixedIn field may only contain a partial list of Features that are affected by the
|
||||
// Vulnerability, along with the version in which the vulnerability is fixed. It is the
|
||||
// responsibility of the implementation to update the list properly. A version equals to
|
||||
// types.MinVersion means that the given Feature is not being affected by the Vulnerability at
|
||||
// all and thus, should be removed from the list. It is important that Features should be unique
|
||||
// in the FixedIn list. For example, it doesn't make sense to have two `openssl` Feature listed as
|
||||
// a Vulnerability can only be fixed in one Version. This is true because Vulnerabilities and
|
||||
// Features are Namespaced (i.e. specific to one operating system).
|
||||
// Each vulnerability insertion or update has to create a Notification that will contain the
|
||||
// old and the updated Vulnerability, unless createNotification equals to true.
|
||||
InsertVulnerabilities(vulnerabilities []Vulnerability, createNotification bool) error
|
||||
|
||||
// FindVulnerability retrieves a Vulnerability from the database, including the FixedIn list.
|
||||
FindVulnerability(namespaceName, name string) (Vulnerability, error)
|
||||
|
||||
// DeleteVulnerability removes a Vulnerability from the database.
|
||||
// It has to create a Notification that will contain the old Vulnerability.
|
||||
DeleteVulnerability(namespaceName, name string) error
|
||||
|
||||
// InsertVulnerabilityFixes adds new FixedIn Feature or update the Versions of existing ones to
|
||||
// the specified Vulnerability in the database.
|
||||
// It has to create a Notification that will contain the old and the updated Vulnerability.
|
||||
InsertVulnerabilityFixes(vulnerabilityNamespace, vulnerabilityName string, fixes []FeatureVersion) error
|
||||
|
||||
// DeleteVulnerabilityFix removes a FixedIn Feature from the specified Vulnerability in the
|
||||
// database. It can be used to store the fact that a Vulnerability no longer affects the given
|
||||
// Feature in any Version.
|
||||
// It has to create a Notification that will contain the old and the updated Vulnerability.
|
||||
DeleteVulnerabilityFix(vulnerabilityNamespace, vulnerabilityName, featureName string) error
|
||||
|
||||
// # Notification
|
||||
// GetAvailableNotification returns the Name, Created, Notified and Deleted fields of a
|
||||
// Notification that should be handled. The renotify interval defines how much time after being
|
||||
// marked as Notified by SetNotificationNotified, a Notification that hasn't been deleted should
|
||||
// be returned again by this function. A Notification for which there is a valid Lock with the
|
||||
// same Name should not be returned.
|
||||
GetAvailableNotification(renotifyInterval time.Duration) (VulnerabilityNotification, error)
|
||||
|
||||
// GetNotification returns a Notification, including its OldVulnerability and NewVulnerability
|
||||
// fields. On these Vulnerabilities, LayersIntroducingVulnerability should be filled with
|
||||
// every Layer that introduces the Vulnerability (i.e. adds at least one affected FeatureVersion).
|
||||
// The Limit and page parameters are used to paginate LayersIntroducingVulnerability. The first
|
||||
// given page should be VulnerabilityNotificationFirstPage. The function will then return the next
|
||||
// available page. If there is no more page, NoVulnerabilityNotificationPage has to be returned.
|
||||
GetNotification(name string, limit int, page VulnerabilityNotificationPageNumber) (VulnerabilityNotification, VulnerabilityNotificationPageNumber, error)
|
||||
|
||||
// SetNotificationNotified marks a Notification as notified and thus, makes it unavailable for
|
||||
// GetAvailableNotification, until the renotify duration is elapsed.
|
||||
SetNotificationNotified(name string) error
|
||||
|
||||
// DeleteNotification marks a Notification as deleted, and thus, makes it unavailable for
|
||||
// GetAvailableNotification.
|
||||
DeleteNotification(name string) error
|
||||
|
||||
// # Key/Value
|
||||
// InsertKeyValue stores or updates a simple key/value pair in the database.
|
||||
InsertKeyValue(key, value string) error
|
||||
|
||||
// GetKeyValue retrieves a value from the database from the given key.
|
||||
// It returns an empty string if there is no such key.
|
||||
GetKeyValue(key string) (string, error)
|
||||
|
||||
// # Lock
|
||||
// Lock creates or renews a Lock in the database with the given name, owner and duration.
|
||||
// After the specified duration, the Lock expires by itself if it hasn't been unlocked, and thus,
|
||||
// lets other users create a Lock with the same name. However, the owner can renew its Lock by
|
||||
// setting renew to true. Lock should not block; it should instead return whether the Lock has
|
||||
// been successfully acquired/renewed. If it's the case, the expiration time of that Lock is
|
||||
// returned as well.
|
||||
Lock(name string, owner string, duration time.Duration, renew bool) (bool, time.Time)
|
||||
|
||||
// Unlock releases an existing Lock.
|
||||
Unlock(name, owner string)
|
||||
|
||||
// FindLock returns the owner of a Lock specified by the name, and its expiration time if it
|
||||
// exists.
|
||||
FindLock(name string) (string, time.Time, error)
|
||||
|
||||
// # Miscellaneous
|
||||
// Ping returns the health status of the database.
|
||||
Ping() bool
|
||||
|
||||
// Close closes the database and free any allocated resource.
|
||||
Close()
|
||||
}
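ListVulnerabilities uses a cursor-style contract: callers start at page 0 and keep passing back the returned page until the implementation signals -1. A sketch of draining one namespace under that contract, where store is any Datastore implementation:

package example

import "github.com/coreos/clair/database"

// listAll follows the next-page cursor returned by ListVulnerabilities
// until the implementation signals -1, collecting every Vulnerability.
func listAll(store database.Datastore, namespace string) ([]database.Vulnerability, error) {
	var all []database.Vulnerability
	page := 0
	for page != -1 {
		vulns, next, err := store.ListVulnerabilities(namespace, 100, page)
		if err != nil {
			return nil, err
		}
		all = append(all, vulns...)
		page = next
	}
	return all, nil
}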
|
119
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/models.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/utils/types"
|
||||
)
|
||||
|
||||
// Model provides the ID primary key embedded by every database model. ID is only meant to be used by database implementations and should never be used for anything else.
|
||||
type Model struct {
|
||||
ID int
|
||||
}
|
||||
|
||||
type Layer struct {
|
||||
Model
|
||||
|
||||
Name string
|
||||
EngineVersion int
|
||||
Parent *Layer
|
||||
Namespace *Namespace
|
||||
Features []FeatureVersion
|
||||
}
|
||||
|
||||
type Namespace struct {
|
||||
Model
|
||||
|
||||
Name string
|
||||
}
|
||||
|
||||
type Feature struct {
|
||||
Model
|
||||
|
||||
Name string
|
||||
Namespace Namespace
|
||||
}
|
||||
|
||||
type FeatureVersion struct {
|
||||
Model
|
||||
|
||||
Feature Feature
|
||||
Version types.Version
|
||||
AffectedBy []Vulnerability
|
||||
|
||||
// For output purposes. Only makes sense when the feature version is in the context of an image.
|
||||
AddedBy Layer
|
||||
}
|
||||
|
||||
type Vulnerability struct {
|
||||
Model
|
||||
|
||||
Name string
|
||||
Namespace Namespace
|
||||
|
||||
Description string
|
||||
Link string
|
||||
Severity types.Priority
|
||||
|
||||
Metadata MetadataMap
|
||||
|
||||
FixedIn []FeatureVersion
|
||||
LayersIntroducingVulnerability []Layer
|
||||
|
||||
// For output purposes. Only makes sense when the vulnerability
|
||||
// is already about a specific Feature/FeatureVersion.
|
||||
FixedBy types.Version `json:",omitempty"`
|
||||
}
|
||||
|
||||
type MetadataMap map[string]interface{}
|
||||
|
||||
func (mm *MetadataMap) Scan(value interface{}) error {
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return json.Unmarshal(val, mm)
|
||||
}
|
||||
|
||||
func (mm *MetadataMap) Value() (driver.Value, error) {
|
||||
json, err := json.Marshal(*mm)
|
||||
return string(json), err
|
||||
}
|
||||
|
||||
type VulnerabilityNotification struct {
|
||||
Model
|
||||
|
||||
Name string
|
||||
|
||||
Created time.Time
|
||||
Notified time.Time
|
||||
Deleted time.Time
|
||||
|
||||
OldVulnerability *Vulnerability
|
||||
NewVulnerability *Vulnerability
|
||||
}
|
||||
|
||||
type VulnerabilityNotificationPageNumber struct {
|
||||
// -1 means that we reached the end already.
|
||||
OldVulnerability int
|
||||
NewVulnerability int
|
||||
}
|
||||
|
||||
var VulnerabilityNotificationFirstPage = VulnerabilityNotificationPageNumber{0, 0}
|
||||
var NoVulnerabilityNotificationPage = VulnerabilityNotificationPageNumber{-1, -1}
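MetadataMap satisfies driver.Valuer and sql.Scanner so vulnerability metadata travels as a JSON text column: Value marshals the map to a JSON string, and Scan only decodes when the driver hands back []byte. A round-trip sketch under those assumptions:

package example

import "github.com/coreos/clair/database"

// roundTrip writes a MetadataMap the way the driver would (Value) and reads
// it back (Scan), exercising the JSON column encoding.
func roundTrip() (database.MetadataMap, error) {
	in := database.MetadataMap{"NVD": map[string]interface{}{"CVSSv2": 5.0}}

	raw, err := in.Value() // returns the JSON string stored in the metadata column
	if err != nil {
		return nil, err
	}

	var out database.MetadataMap
	if err := out.Scan([]byte(raw.(string))); err != nil { // Scan expects []byte
		return nil, err
	}
	return out, nil
}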
|
43
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/namespace_mapping.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
// DebianReleasesMapping translates Debian code names and class names to version numbers
|
||||
var DebianReleasesMapping = map[string]string{
|
||||
// Code names
|
||||
"squeeze": "6",
|
||||
"wheezy": "7",
|
||||
"jessie": "8",
|
||||
"stretch": "9",
|
||||
"sid": "unstable",
|
||||
|
||||
// Class names
|
||||
"oldstable": "7",
|
||||
"stable": "8",
|
||||
"testing": "9",
|
||||
"unstable": "unstable",
|
||||
}
|
||||
|
||||
// UbuntuReleasesMapping translates Ubuntu code names to version numbers
|
||||
var UbuntuReleasesMapping = map[string]string{
|
||||
"precise": "12.04",
|
||||
"quantal": "12.10",
|
||||
"raring": "13.04",
|
||||
"trusty": "14.04",
|
||||
"utopic": "14.10",
|
||||
"vivid": "15.04",
|
||||
"wily": "15.10",
|
||||
"xenial": "16.04",
|
||||
}
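These maps let a detector translate an apt or os-release codename into the versioned namespace the rest of the schema keys on. A short sketch of that translation; the "debian:8" namespace format is an assumption for illustration, not something this file defines:

package example

import (
	"fmt"

	"github.com/coreos/clair/database"
)

// namespaceFor maps a Debian codename such as "jessie" to a versioned
// namespace name like "debian:8"; unknown codenames pass through unchanged.
func namespaceFor(codename string) string {
	if v, ok := database.DebianReleasesMapping[codename]; ok {
		return fmt.Sprintf("debian:%s", v)
	}
	return "debian:" + codename
}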
|
240
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/pgsql/feature.go
generated
vendored
Normal file
@ -0,0 +1,240 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
)
|
||||
|
||||
func (pgSQL *pgSQL) insertFeature(feature database.Feature) (int, error) {
|
||||
if feature.Name == "" {
|
||||
return 0, cerrors.NewBadRequestError("could not find/insert invalid Feature")
|
||||
}
|
||||
|
||||
// Do cache lookup.
|
||||
if pgSQL.cache != nil {
|
||||
promCacheQueriesTotal.WithLabelValues("feature").Inc()
|
||||
id, found := pgSQL.cache.Get("feature:" + feature.Namespace.Name + ":" + feature.Name)
|
||||
if found {
|
||||
promCacheHitsTotal.WithLabelValues("feature").Inc()
|
||||
return id.(int), nil
|
||||
}
|
||||
}
|
||||
|
||||
// We do `defer observeQueryTime` here because we don't want to observe cached features.
|
||||
defer observeQueryTime("insertFeature", "all", time.Now())
|
||||
|
||||
// Find or create Namespace.
|
||||
namespaceID, err := pgSQL.insertNamespace(feature.Namespace)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Find or create Feature.
|
||||
var id int
|
||||
err = pgSQL.QueryRow(soiFeature, feature.Name, namespaceID).Scan(&id)
|
||||
if err != nil {
|
||||
return 0, handleError("soiFeature", err)
|
||||
}
|
||||
|
||||
if pgSQL.cache != nil {
|
||||
pgSQL.cache.Add("feature:"+feature.Namespace.Name+":"+feature.Name, id)
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
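insertFeature is a cache-aside pattern over golang-lru keyed by "feature:<namespace>:<name>", with Prometheus counters for lookups and hits before falling back to the find-or-insert query. The same pattern in isolation (a sketch, not the vendored code):

package example

import lru "github.com/hashicorp/golang-lru"

// findOrInsert checks the LRU first and only runs the expensive query on a
// miss, caching the resulting id for later callers. A nil cache disables caching.
func findOrInsert(cache *lru.Cache, key string, query func() (int, error)) (int, error) {
	if cache != nil {
		if id, ok := cache.Get(key); ok {
			return id.(int), nil
		}
	}
	id, err := query()
	if err != nil {
		return 0, err
	}
	if cache != nil {
		cache.Add(key, id)
	}
	return id, nil
}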
|
||||
|
||||
func (pgSQL *pgSQL) insertFeatureVersion(featureVersion database.FeatureVersion) (id int, err error) {
|
||||
if featureVersion.Version.String() == "" {
|
||||
return 0, cerrors.NewBadRequestError("could not find/insert invalid FeatureVersion")
|
||||
}
|
||||
|
||||
// Do cache lookup.
|
||||
cacheIndex := "featureversion:" + featureVersion.Feature.Namespace.Name + ":" + featureVersion.Feature.Name + ":" + featureVersion.Version.String()
|
||||
if pgSQL.cache != nil {
|
||||
promCacheQueriesTotal.WithLabelValues("featureversion").Inc()
|
||||
id, found := pgSQL.cache.Get(cacheIndex)
|
||||
if found {
|
||||
promCacheHitsTotal.WithLabelValues("featureversion").Inc()
|
||||
return id.(int), nil
|
||||
}
|
||||
}
|
||||
|
||||
// We do `defer observeQueryTime` here because we don't want to observe cached featureversions.
|
||||
defer observeQueryTime("insertFeatureVersion", "all", time.Now())
|
||||
|
||||
// Find or create Feature first.
|
||||
t := time.Now()
|
||||
featureID, err := pgSQL.insertFeature(featureVersion.Feature)
|
||||
observeQueryTime("insertFeatureVersion", "insertFeature", t)
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
featureVersion.Feature.ID = featureID
|
||||
|
||||
// Try to find the FeatureVersion.
|
||||
//
|
||||
// In a populated database, the likelihood of the FeatureVersion already being there is high.
|
||||
// If we can find it here, we then avoid using a transaction and locking the database.
|
||||
err = pgSQL.QueryRow(searchFeatureVersion, featureID, &featureVersion.Version).
|
||||
Scan(&featureVersion.ID)
|
||||
if err != nil && err != sql.ErrNoRows {
|
||||
return 0, handleError("searchFeatureVersion", err)
|
||||
}
|
||||
if err == nil {
|
||||
if pgSQL.cache != nil {
|
||||
pgSQL.cache.Add(cacheIndex, featureVersion.ID)
|
||||
}
|
||||
|
||||
return featureVersion.ID, nil
|
||||
}
|
||||
|
||||
// Begin transaction.
|
||||
tx, err := pgSQL.Begin()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, handleError("insertFeatureVersion.Begin()", err)
|
||||
}
|
||||
|
||||
// Lock Vulnerability_Affects_FeatureVersion exclusively.
|
||||
// We want to prevent InsertVulnerability to modify it.
|
||||
promConcurrentLockVAFV.Inc()
|
||||
defer promConcurrentLockVAFV.Dec()
|
||||
t = time.Now()
|
||||
_, err = tx.Exec(lockVulnerabilityAffects)
|
||||
observeQueryTime("insertFeatureVersion", "lock", t)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, handleError("insertFeatureVersion.lockVulnerabilityAffects", err)
|
||||
}
|
||||
|
||||
// Find or create FeatureVersion.
|
||||
var newOrExisting string
|
||||
|
||||
t = time.Now()
|
||||
err = tx.QueryRow(soiFeatureVersion, featureID, &featureVersion.Version).
|
||||
Scan(&newOrExisting, &featureVersion.ID)
|
||||
observeQueryTime("insertFeatureVersion", "soiFeatureVersion", t)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, handleError("soiFeatureVersion", err)
|
||||
}
|
||||
|
||||
if newOrExisting == "exi" {
|
||||
// That featureVersion already exists, return its id.
|
||||
tx.Commit()
|
||||
|
||||
if pgSQL.cache != nil {
|
||||
pgSQL.cache.Add(cacheIndex, featureVersion.ID)
|
||||
}
|
||||
|
||||
return featureVersion.ID, nil
|
||||
}
|
||||
|
||||
// Link the new FeatureVersion with every vulnerability that affects it, by inserting into
|
||||
// Vulnerability_Affects_FeatureVersion.
|
||||
t = time.Now()
|
||||
err = linkFeatureVersionToVulnerabilities(tx, featureVersion)
|
||||
observeQueryTime("insertFeatureVersion", "linkFeatureVersionToVulnerabilities", t)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Commit transaction.
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
return 0, handleError("insertFeatureVersion.Commit()", err)
|
||||
}
|
||||
|
||||
if pgSQL.cache != nil {
|
||||
pgSQL.cache.Add(cacheIndex, featureVersion.ID)
|
||||
}
|
||||
|
||||
return featureVersion.ID, nil
|
||||
}
|
||||
|
||||
// TODO(Quentin-M): Batch me
|
||||
func (pgSQL *pgSQL) insertFeatureVersions(featureVersions []database.FeatureVersion) ([]int, error) {
|
||||
IDs := make([]int, 0, len(featureVersions))
|
||||
|
||||
for i := 0; i < len(featureVersions); i++ {
|
||||
id, err := pgSQL.insertFeatureVersion(featureVersions[i])
|
||||
if err != nil {
|
||||
return IDs, err
|
||||
}
|
||||
IDs = append(IDs, id)
|
||||
}
|
||||
|
||||
return IDs, nil
|
||||
}
|
||||
|
||||
type vulnerabilityAffectsFeatureVersion struct {
|
||||
vulnerabilityID int
|
||||
fixedInID int
|
||||
fixedInVersion types.Version
|
||||
}
|
||||
|
||||
func linkFeatureVersionToVulnerabilities(tx *sql.Tx, featureVersion database.FeatureVersion) error {
|
||||
// Select every vulnerability and the fixed version that affect this Feature.
|
||||
// TODO(Quentin-M): LIMIT
|
||||
rows, err := tx.Query(searchVulnerabilityFixedInFeature, featureVersion.Feature.ID)
|
||||
if err != nil {
|
||||
return handleError("searchVulnerabilityFixedInFeature", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var affects []vulnerabilityAffectsFeatureVersion
|
||||
for rows.Next() {
|
||||
var affect vulnerabilityAffectsFeatureVersion
|
||||
|
||||
err := rows.Scan(&affect.fixedInID, &affect.vulnerabilityID, &affect.fixedInVersion)
|
||||
if err != nil {
|
||||
return handleError("searchVulnerabilityFixedInFeature.Scan()", err)
|
||||
}
|
||||
|
||||
if featureVersion.Version.Compare(affect.fixedInVersion) < 0 {
|
||||
// The version of the FeatureVersion we are inserting is lower than the fixed version on this
|
||||
// Vulnerability, thus, this FeatureVersion is affected by it.
|
||||
affects = append(affects, affect)
|
||||
}
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return handleError("searchVulnerabilityFixedInFeature.Rows()", err)
|
||||
}
|
||||
rows.Close()
|
||||
|
||||
// Insert into Vulnerability_Affects_FeatureVersion.
|
||||
for _, affect := range affects {
|
||||
// TODO(Quentin-M): Batch me.
|
||||
_, err := tx.Exec(insertVulnerabilityAffectsFeatureVersion, affect.vulnerabilityID,
|
||||
featureVersion.ID, affect.fixedInID)
|
||||
if err != nil {
|
||||
return handleError("insertVulnerabilityAffectsFeatureVersion", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
83
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/pgsql/keyvalue.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
)
|
||||
|
||||
// InsertKeyValue stores (or updates) a single key / value tuple.
|
||||
func (pgSQL *pgSQL) InsertKeyValue(key, value string) (err error) {
|
||||
if key == "" || value == "" {
|
||||
log.Warning("could not insert a flag which has an empty name or value")
|
||||
return cerrors.NewBadRequestError("could not insert a flag which has an empty name or value")
|
||||
}
|
||||
|
||||
defer observeQueryTime("InsertKeyValue", "all", time.Now())
|
||||
|
||||
// Upsert.
|
||||
//
|
||||
// Note: UPSERT works only on >= PostgreSQL 9.5 which is not yet supported by AWS RDS.
|
||||
// The best solution is currently the use of http://dba.stackexchange.com/a/13477
|
||||
// but the key/value storage doesn't need to be super-efficient and super-safe at the
|
||||
// moment so we can just use a client-side solution with transactions, based on
|
||||
// http://postgresql.org/docs/current/static/plpgsql-control-structures.html.
|
||||
// TODO(Quentin-M): Enable Upsert as soon as 9.5 is stable.
|
||||
|
||||
for {
|
||||
// First, try to update.
|
||||
r, err := pgSQL.Exec(updateKeyValue, value, key)
|
||||
if err != nil {
|
||||
return handleError("updateKeyValue", err)
|
||||
}
|
||||
if n, _ := r.RowsAffected(); n > 0 {
|
||||
// Updated successfully.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Try to insert the key.
|
||||
// If someone else inserts the same key concurrently, we could get a unique-key violation error.
|
||||
_, err = pgSQL.Exec(insertKeyValue, key, value)
|
||||
if err != nil {
|
||||
if isErrUniqueViolation(err) {
|
||||
// Got unique constraint violation, retry.
|
||||
continue
|
||||
}
|
||||
return handleError("insertKeyValue", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
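The loop above is only needed because INSERT ... ON CONFLICT requires PostgreSQL 9.5. Once 9.5 is available, the upsert the TODO refers to collapses to a single statement; a sketch against the KeyValue table from the migration in this commit (not the vendored code):

package example

import "database/sql"

// upsertKeyValue sketches the PostgreSQL >= 9.5 replacement for the
// update-then-insert retry loop: a single atomic upsert on the unique key.
func upsertKeyValue(db *sql.DB, key, value string) error {
	_, err := db.Exec(
		`INSERT INTO KeyValue (key, value) VALUES ($1, $2)
		 ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value`,
		key, value,
	)
	return err
}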
|
||||
|
||||
// GetKeyValue reads a single key / value tuple and returns an empty string if the key doesn't exist.
|
||||
func (pgSQL *pgSQL) GetKeyValue(key string) (string, error) {
|
||||
defer observeQueryTime("GetKeyValue", "all", time.Now())
|
||||
|
||||
var value string
|
||||
err := pgSQL.QueryRow(searchKeyValue, key).Scan(&value)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return "", nil
|
||||
}
|
||||
if err != nil {
|
||||
return "", handleError("searchKeyValue", err)
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
406
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/pgsql/layer.go
generated
vendored
Normal file
@ -0,0 +1,406 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/guregu/null/zero"
|
||||
)
|
||||
|
||||
func (pgSQL *pgSQL) FindLayer(name string, withFeatures, withVulnerabilities bool) (database.Layer, error) {
|
||||
subquery := "all"
|
||||
if withFeatures {
|
||||
subquery += "/features"
|
||||
} else if withVulnerabilities {
|
||||
subquery += "/features+vulnerabilities"
|
||||
}
|
||||
defer observeQueryTime("FindLayer", subquery, time.Now())
|
||||
|
||||
// Find the layer
|
||||
var layer database.Layer
|
||||
var parentID zero.Int
|
||||
var parentName zero.String
|
||||
var namespaceID zero.Int
|
||||
var namespaceName sql.NullString
|
||||
|
||||
t := time.Now()
|
||||
err := pgSQL.QueryRow(searchLayer, name).
|
||||
Scan(&layer.ID, &layer.Name, &layer.EngineVersion, &parentID, &parentName, &namespaceID,
|
||||
&namespaceName)
|
||||
observeQueryTime("FindLayer", "searchLayer", t)
|
||||
|
||||
if err != nil {
|
||||
return layer, handleError("searchLayer", err)
|
||||
}
|
||||
|
||||
if !parentID.IsZero() {
|
||||
layer.Parent = &database.Layer{
|
||||
Model: database.Model{ID: int(parentID.Int64)},
|
||||
Name: parentName.String,
|
||||
}
|
||||
}
|
||||
if !namespaceID.IsZero() {
|
||||
layer.Namespace = &database.Namespace{
|
||||
Model: database.Model{ID: int(namespaceID.Int64)},
|
||||
Name: namespaceName.String,
|
||||
}
|
||||
}
|
||||
|
||||
// Find its features
|
||||
if withFeatures || withVulnerabilities {
|
||||
// Create a transaction to disable hash/merge joins as our experiments have shown that
|
||||
// PostgreSQL 9.4 makes bad planning decisions about:
|
||||
// - joining the layer tree to feature versions and feature
|
||||
// - joining the feature versions to affected/fixed feature version and vulnerabilities
|
||||
// It would for instance do a merge join between affected feature versions (300 rows, estimated
|
||||
// 3000 rows) and fixed in feature version (100k rows). In this case, it is much more
|
||||
// efficient to use a nested loop.
|
||||
tx, err := pgSQL.Begin()
|
||||
if err != nil {
|
||||
return layer, handleError("FindLayer.Begin()", err)
|
||||
}
|
||||
defer tx.Commit()
|
||||
|
||||
_, err = tx.Exec(disableHashJoin)
|
||||
if err != nil {
|
||||
log.Warningf("FindLayer: could not disable hash join: %s", err)
|
||||
}
|
||||
_, err = tx.Exec(disableMergeJoin)
|
||||
if err != nil {
|
||||
log.Warningf("FindLayer: could not disable merge join: %s", err)
|
||||
}
|
||||
|
||||
t = time.Now()
|
||||
featureVersions, err := getLayerFeatureVersions(tx, layer.ID)
|
||||
observeQueryTime("FindLayer", "getLayerFeatureVersions", t)
|
||||
|
||||
if err != nil {
|
||||
return layer, err
|
||||
}
|
||||
|
||||
layer.Features = featureVersions
|
||||
|
||||
if withVulnerabilities {
|
||||
// Load the vulnerabilities that affect the FeatureVersions.
|
||||
t = time.Now()
|
||||
err := loadAffectedBy(tx, layer.Features)
|
||||
observeQueryTime("FindLayer", "loadAffectedBy", t)
|
||||
|
||||
if err != nil {
|
||||
return layer, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return layer, nil
|
||||
}
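The disableHashJoin and disableMergeJoin statements are not part of this hunk; they are presumably SET LOCAL statements on the planner settings, so the nested-loop preference only lasts for this transaction. A sketch of that pattern, with the exact statements assumed:

package example

import "database/sql"

// withNestedLoopPlan runs fn inside a transaction in which hash and merge
// joins are disabled; SET LOCAL scopes the change to this transaction only.
func withNestedLoopPlan(db *sql.DB, fn func(*sql.Tx) error) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	// Assumed equivalents of the disableHashJoin / disableMergeJoin constants.
	if _, err := tx.Exec("SET LOCAL enable_hashjoin = off"); err != nil {
		tx.Rollback()
		return err
	}
	if _, err := tx.Exec("SET LOCAL enable_mergejoin = off"); err != nil {
		tx.Rollback()
		return err
	}
	if err := fn(tx); err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit()
}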
|
||||
|
||||
// getLayerFeatureVersions returns the list of database.FeatureVersion that a database.Layer has.
|
||||
func getLayerFeatureVersions(tx *sql.Tx, layerID int) ([]database.FeatureVersion, error) {
|
||||
var featureVersions []database.FeatureVersion
|
||||
|
||||
// Query.
|
||||
rows, err := tx.Query(searchLayerFeatureVersion, layerID)
|
||||
if err != nil {
|
||||
return featureVersions, handleError("searchLayerFeatureVersion", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
// Scan query.
|
||||
var modification string
|
||||
mapFeatureVersions := make(map[int]database.FeatureVersion)
|
||||
for rows.Next() {
|
||||
var featureVersion database.FeatureVersion
|
||||
|
||||
err = rows.Scan(&featureVersion.ID, &modification, &featureVersion.Feature.Namespace.ID,
|
||||
&featureVersion.Feature.Namespace.Name, &featureVersion.Feature.ID,
|
||||
&featureVersion.Feature.Name, &featureVersion.ID, &featureVersion.Version,
|
||||
&featureVersion.AddedBy.ID, &featureVersion.AddedBy.Name)
|
||||
if err != nil {
|
||||
return featureVersions, handleError("searchLayerFeatureVersion.Scan()", err)
|
||||
}
|
||||
|
||||
// Do transitive closure.
|
||||
switch modification {
|
||||
case "add":
|
||||
mapFeatureVersions[featureVersion.ID] = featureVersion
|
||||
case "del":
|
||||
delete(mapFeatureVersions, featureVersion.ID)
|
||||
default:
|
||||
log.Warningf("unknown Layer_diff_FeatureVersion's modification: %s", modification)
|
||||
return featureVersions, database.ErrInconsistent
|
||||
}
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return featureVersions, handleError("searchLayerFeatureVersion.Rows()", err)
|
||||
}
|
||||
|
||||
// Build result by converting our map to a slice.
|
||||
for _, featureVersion := range mapFeatureVersions {
|
||||
featureVersions = append(featureVersions, featureVersion)
|
||||
}
|
||||
|
||||
return featureVersions, nil
|
||||
}
|
||||
|
||||
// loadAffectedBy fills the AffectedBy field of each given FeatureVersion with the list of
|
||||
// database.Vulnerability that affect it.
|
||||
func loadAffectedBy(tx *sql.Tx, featureVersions []database.FeatureVersion) error {
|
||||
if len(featureVersions) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct list of FeatureVersion IDs, we will do a single query
|
||||
featureVersionIDs := make([]int, 0, len(featureVersions))
|
||||
for i := 0; i < len(featureVersions); i++ {
|
||||
featureVersionIDs = append(featureVersionIDs, featureVersions[i].ID)
|
||||
}
|
||||
|
||||
rows, err := tx.Query(searchFeatureVersionVulnerability,
|
||||
buildInputArray(featureVersionIDs))
|
||||
if err != nil && err != sql.ErrNoRows {
|
||||
return handleError("searchFeatureVersionVulnerability", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
vulnerabilities := make(map[int][]database.Vulnerability, len(featureVersions))
|
||||
var featureversionID int
|
||||
for rows.Next() {
|
||||
var vulnerability database.Vulnerability
|
||||
err := rows.Scan(&featureversionID, &vulnerability.ID, &vulnerability.Name,
|
||||
&vulnerability.Description, &vulnerability.Link, &vulnerability.Severity,
|
||||
&vulnerability.Metadata, &vulnerability.Namespace.Name, &vulnerability.FixedBy)
|
||||
if err != nil {
|
||||
return handleError("searchFeatureVersionVulnerability.Scan()", err)
|
||||
}
|
||||
vulnerabilities[featureversionID] = append(vulnerabilities[featureversionID], vulnerability)
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return handleError("searchFeatureVersionVulnerability.Rows()", err)
|
||||
}
|
||||
|
||||
// Assign vulnerabilities to every FeatureVersion
|
||||
for i := 0; i < len(featureVersions); i++ {
|
||||
featureVersions[i].AffectedBy = vulnerabilities[featureVersions[i].ID]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Internally, only Feature additions/removals are stored for each layer. If a layer has a parent,
|
||||
// the Feature list will be compared to the parent's Feature list and the difference will be stored.
|
||||
// Note that when the Namespace of a layer differs from its parent, it is expected that several
|
||||
// Features that were already included in a parent will have their Namespace updated as well
|
||||
// (happens when Feature detectors rely on the detected layer Namespace). However, if the listed
|
||||
// Feature has the same Name/Version as its parent, InsertLayer considers that the Feature hasn't
|
||||
// been modified.
|
||||
func (pgSQL *pgSQL) InsertLayer(layer database.Layer) error {
|
||||
tf := time.Now()
|
||||
|
||||
// Verify parameters
|
||||
if layer.Name == "" {
|
||||
log.Warning("could not insert a layer which has an empty Name")
|
||||
return cerrors.NewBadRequestError("could not insert a layer which has an empty Name")
|
||||
}
|
||||
|
||||
// Get a potentially existing layer.
|
||||
existingLayer, err := pgSQL.FindLayer(layer.Name, true, false)
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
return err
|
||||
} else if err == nil {
|
||||
if existingLayer.EngineVersion >= layer.EngineVersion {
|
||||
// The layer exists and has an equal or higher engine version, do nothing.
|
||||
return nil
|
||||
}
|
||||
|
||||
layer.ID = existingLayer.ID
|
||||
}
|
||||
|
||||
// We do `defer observeQueryTime` here because we don't want to observe existing layers.
|
||||
defer observeQueryTime("InsertLayer", "all", tf)
|
||||
|
||||
// Get parent ID.
|
||||
var parentID zero.Int
|
||||
if layer.Parent != nil {
|
||||
if layer.Parent.ID == 0 {
|
||||
log.Warning("Parent is expected to be retrieved from database when inserting a layer.")
|
||||
return cerrors.NewBadRequestError("Parent is expected to be retrieved from database when inserting a layer.")
|
||||
}
|
||||
|
||||
parentID = zero.IntFrom(int64(layer.Parent.ID))
|
||||
}
|
||||
|
||||
// Find or insert namespace if provided.
|
||||
var namespaceID zero.Int
|
||||
if layer.Namespace != nil {
|
||||
n, err := pgSQL.insertNamespace(*layer.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
namespaceID = zero.IntFrom(int64(n))
|
||||
} else if layer.Namespace == nil && layer.Parent != nil {
|
||||
// Import the Namespace from the parent if it has one and this layer doesn't specify one.
|
||||
if layer.Parent.Namespace != nil {
|
||||
namespaceID = zero.IntFrom(int64(layer.Parent.Namespace.ID))
|
||||
}
|
||||
}
|
||||
|
||||
// Begin transaction.
|
||||
tx, err := pgSQL.Begin()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("InsertLayer.Begin()", err)
|
||||
}
|
||||
|
||||
if layer.ID == 0 {
|
||||
// Insert a new layer.
|
||||
err = tx.QueryRow(insertLayer, layer.Name, layer.EngineVersion, parentID, namespaceID).
|
||||
Scan(&layer.ID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
|
||||
if isErrUniqueViolation(err) {
|
||||
// Ignore this error, another process collided.
|
||||
log.Debug("Attempted to insert duplicate layer.")
|
||||
return nil
|
||||
}
|
||||
return handleError("insertLayer", err)
|
||||
}
|
||||
} else {
|
||||
// Update an existing layer.
|
||||
_, err = tx.Exec(updateLayer, layer.ID, layer.EngineVersion, namespaceID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("updateLayer", err)
|
||||
}
|
||||
|
||||
// Remove all existing Layer_diff_FeatureVersion.
|
||||
_, err = tx.Exec(removeLayerDiffFeatureVersion, layer.ID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("removeLayerDiffFeatureVersion", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Update Layer_diff_FeatureVersion now.
|
||||
err = pgSQL.updateDiffFeatureVersions(tx, &layer, &existingLayer)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit transaction.
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("InsertLayer.Commit()", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) updateDiffFeatureVersions(tx *sql.Tx, layer, existingLayer *database.Layer) error {
|
||||
// add and del are the FeatureVersion diff we should insert.
|
||||
var add []database.FeatureVersion
|
||||
var del []database.FeatureVersion
|
||||
|
||||
if layer.Parent == nil {
|
||||
// There is no parent, so every Feature is added.
|
||||
add = append(add, layer.Features...)
|
||||
} else if layer.Parent != nil {
|
||||
// There is a parent, we need to diff the Features with it.
|
||||
|
||||
// Build name:version structures.
|
||||
layerFeaturesMapNV, layerFeaturesNV := createNV(layer.Features)
|
||||
parentLayerFeaturesMapNV, parentLayerFeaturesNV := createNV(layer.Parent.Features)
|
||||
|
||||
// Calculate the added and deleted FeatureVersions name:version.
|
||||
addNV := utils.CompareStringLists(layerFeaturesNV, parentLayerFeaturesNV)
|
||||
delNV := utils.CompareStringLists(parentLayerFeaturesNV, layerFeaturesNV)
|
||||
|
||||
// Fill the structures containing the added and deleted FeatureVersions
|
||||
for _, nv := range addNV {
|
||||
add = append(add, *layerFeaturesMapNV[nv])
|
||||
}
|
||||
for _, nv := range delNV {
|
||||
del = append(del, *parentLayerFeaturesMapNV[nv])
|
||||
}
|
||||
}
|
||||
|
||||
// Insert FeatureVersions in the database.
|
||||
addIDs, err := pgSQL.insertFeatureVersions(add)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
delIDs, err := pgSQL.insertFeatureVersions(del)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Insert diff in the database.
|
||||
if len(addIDs) > 0 {
|
||||
_, err = tx.Exec(insertLayerDiffFeatureVersion, layer.ID, "add", buildInputArray(addIDs))
|
||||
if err != nil {
|
||||
return handleError("insertLayerDiffFeatureVersion.Add", err)
|
||||
}
|
||||
}
|
||||
if len(delIDs) > 0 {
|
||||
_, err = tx.Exec(insertLayerDiffFeatureVersion, layer.ID, "del", buildInputArray(delIDs))
|
||||
if err != nil {
|
||||
return handleError("insertLayerDiffFeatureVersion.Del", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createNV(features []database.FeatureVersion) (map[string]*database.FeatureVersion, []string) {
|
||||
mapNV := make(map[string]*database.FeatureVersion, 0)
|
||||
sliceNV := make([]string, 0, len(features))
|
||||
|
||||
for i := 0; i < len(features); i++ {
|
||||
featureVersion := &features[i]
|
||||
nv := featureVersion.Feature.Name + ":" + featureVersion.Version.String()
|
||||
mapNV[nv] = featureVersion
|
||||
sliceNV = append(sliceNV, nv)
|
||||
}
|
||||
|
||||
return mapNV, sliceNV
|
||||
}
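updateDiffFeatureVersions reduces each layer to a set of name:version strings and only stores what was added to or removed from the parent. The set difference in isolation, written with a local helper instead of the utils.CompareStringLists call used above (a sketch of the idea, not the vendored helper):

package example

// missingFrom returns every string present in a but absent from b, which is
// the add/del computation applied to the name:version keys above.
func missingFrom(a, b []string) []string {
	seen := make(map[string]struct{}, len(b))
	for _, s := range b {
		seen[s] = struct{}{}
	}
	var out []string
	for _, s := range a {
		if _, ok := seen[s]; !ok {
			out = append(out, s)
		}
	}
	return out
}

// add corresponds to missingFrom(layerNV, parentNV); del to missingFrom(parentNV, layerNV).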
|
||||
|
||||
func (pgSQL *pgSQL) DeleteLayer(name string) error {
|
||||
defer observeQueryTime("DeleteLayer", "all", time.Now())
|
||||
|
||||
result, err := pgSQL.Exec(removeLayer, name)
|
||||
if err != nil {
|
||||
return handleError("removeLayer", err)
|
||||
}
|
||||
|
||||
affected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return handleError("removeLayer.RowsAffected()", err)
|
||||
}
|
||||
|
||||
if affected <= 0 {
|
||||
return cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
105
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/pgsql/lock.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
)
|
||||
|
||||
// Lock tries to set a temporary lock in the database.
|
||||
//
|
||||
// Lock does not block, instead, it returns true and its expiration time
|
||||
// if the lock has been successfully acquired, or false otherwise.
|
||||
func (pgSQL *pgSQL) Lock(name string, owner string, duration time.Duration, renew bool) (bool, time.Time) {
|
||||
if name == "" || owner == "" || duration == 0 {
|
||||
log.Warning("could not create an invalid lock")
|
||||
return false, time.Time{}
|
||||
}
|
||||
|
||||
defer observeQueryTime("Lock", "all", time.Now())
|
||||
|
||||
// Compute expiration.
|
||||
until := time.Now().Add(duration)
|
||||
|
||||
if renew {
|
||||
// Renew lock.
|
||||
r, err := pgSQL.Exec(updateLock, name, owner, until)
|
||||
if err != nil {
|
||||
handleError("updateLock", err)
|
||||
return false, until
|
||||
}
|
||||
if n, _ := r.RowsAffected(); n > 0 {
|
||||
// Updated successfully.
|
||||
return true, until
|
||||
}
|
||||
} else {
|
||||
// Prune locks.
|
||||
pgSQL.pruneLocks()
|
||||
}
|
||||
|
||||
// Lock.
|
||||
_, err := pgSQL.Exec(insertLock, name, owner, until)
|
||||
if err != nil {
|
||||
if !isErrUniqueViolation(err) {
|
||||
handleError("insertLock", err)
|
||||
}
|
||||
return false, until
|
||||
}
|
||||
|
||||
return true, until
|
||||
}
|
||||
|
||||
// Unlock unlocks a lock specified by its name if the given owner holds it
|
||||
func (pgSQL *pgSQL) Unlock(name, owner string) {
|
||||
if name == "" || owner == "" {
|
||||
log.Warning("could not delete an invalid lock")
|
||||
return
|
||||
}
|
||||
|
||||
defer observeQueryTime("Unlock", "all", time.Now())
|
||||
|
||||
pgSQL.Exec(removeLock, name, owner)
|
||||
}
|
||||
|
||||
// FindLock returns the owner of a lock specified by its name and its
|
||||
// expiration time.
|
||||
func (pgSQL *pgSQL) FindLock(name string) (string, time.Time, error) {
|
||||
if name == "" {
|
||||
log.Warning("could not find an invalid lock")
|
||||
return "", time.Time{}, cerrors.NewBadRequestError("could not find an invalid lock")
|
||||
}
|
||||
|
||||
defer observeQueryTime("FindLock", "all", time.Now())
|
||||
|
||||
var owner string
|
||||
var until time.Time
|
||||
err := pgSQL.QueryRow(searchLock, name).Scan(&owner, &until)
|
||||
if err != nil {
|
||||
return owner, until, handleError("searchLock", err)
|
||||
}
|
||||
|
||||
return owner, until, nil
|
||||
}
|
||||
|
||||
// pruneLocks removes every expired lock from the database
|
||||
func (pgSQL *pgSQL) pruneLocks() {
|
||||
defer observeQueryTime("pruneLocks", "all", time.Now())
|
||||
|
||||
if _, err := pgSQL.Exec(removeLockExpired); err != nil {
|
||||
handleError("removeLockExpired", err)
|
||||
}
|
||||
}
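The lock is advisory and non-blocking: acquisition inserts a row keyed by name, renewal updates it when the owner matches, and expired rows are pruned opportunistically. A sketch of a worker using this contract through the Datastore interface:

package example

import (
	"time"

	"github.com/coreos/clair/database"
)

// runExclusive performs work only while holding the named lock and tries to
// renew it afterwards; owner should be unique per process.
func runExclusive(store database.Datastore, name, owner string, work func()) bool {
	acquired, _ := store.Lock(name, owner, 10*time.Minute, false)
	if !acquired {
		return false // another owner currently holds the lock
	}
	defer store.Unlock(name, owner)

	work()

	renewed, _ := store.Lock(name, owner, 10*time.Minute, true)
	return renewed
}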
|
@ -0,0 +1,174 @@
|
||||
-- Copyright 2015 clair authors
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- +goose Up
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Namespace
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS Namespace (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(128) NULL);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Layer
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS Layer (
|
||||
id SERIAL PRIMARY KEY,
|
||||
name VARCHAR(128) NOT NULL UNIQUE,
|
||||
engineversion SMALLINT NOT NULL,
|
||||
parent_id INT NULL REFERENCES Layer ON DELETE CASCADE,
|
||||
namespace_id INT NULL REFERENCES Namespace,
|
||||
created_at TIMESTAMP WITH TIME ZONE);
|
||||
|
||||
CREATE INDEX ON Layer (parent_id);
|
||||
CREATE INDEX ON Layer (namespace_id);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Feature
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS Feature (
|
||||
id SERIAL PRIMARY KEY,
|
||||
namespace_id INT NOT NULL REFERENCES Namespace,
|
||||
name VARCHAR(128) NOT NULL,
|
||||
|
||||
UNIQUE (namespace_id, name));
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table FeatureVersion
|
||||
-- -----------------------------------------------------
|
||||
CREATE TABLE IF NOT EXISTS FeatureVersion (
|
||||
id SERIAL PRIMARY KEY,
|
||||
feature_id INT NOT NULL REFERENCES Feature,
|
||||
version VARCHAR(128) NOT NULL);
|
||||
|
||||
CREATE INDEX ON FeatureVersion (feature_id);
|
||||
|
||||
|
||||
-- -----------------------------------------------------
|
||||
-- Table Layer_diff_FeatureVersion
|
||||
-- -----------------------------------------------------
|
||||
CREATE TYPE modification AS ENUM ('add', 'del');

CREATE TABLE IF NOT EXISTS Layer_diff_FeatureVersion (
  id SERIAL PRIMARY KEY,
  layer_id INT NOT NULL REFERENCES Layer ON DELETE CASCADE,
  featureversion_id INT NOT NULL REFERENCES FeatureVersion,
  modification modification NOT NULL,

  UNIQUE (layer_id, featureversion_id));

CREATE INDEX ON Layer_diff_FeatureVersion (layer_id);
CREATE INDEX ON Layer_diff_FeatureVersion (featureversion_id);
CREATE INDEX ON Layer_diff_FeatureVersion (featureversion_id, layer_id);


-- -----------------------------------------------------
-- Table Vulnerability
-- -----------------------------------------------------
CREATE TYPE severity AS ENUM ('Unknown', 'Negligible', 'Low', 'Medium', 'High', 'Critical', 'Defcon1');

CREATE TABLE IF NOT EXISTS Vulnerability (
  id SERIAL PRIMARY KEY,
  namespace_id INT NOT NULL REFERENCES Namespace,
  name VARCHAR(128) NOT NULL,
  description TEXT NULL,
  link VARCHAR(128) NULL,
  severity severity NOT NULL,
  metadata TEXT NULL,
  created_at TIMESTAMP WITH TIME ZONE,
  deleted_at TIMESTAMP WITH TIME ZONE NULL);


-- -----------------------------------------------------
-- Table Vulnerability_FixedIn_Feature
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS Vulnerability_FixedIn_Feature (
  id SERIAL PRIMARY KEY,
  vulnerability_id INT NOT NULL REFERENCES Vulnerability ON DELETE CASCADE,
  feature_id INT NOT NULL REFERENCES Feature,
  version VARCHAR(128) NOT NULL,

  UNIQUE (vulnerability_id, feature_id));

CREATE INDEX ON Vulnerability_FixedIn_Feature (feature_id, vulnerability_id);


-- -----------------------------------------------------
-- Table Vulnerability_Affects_FeatureVersion
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS Vulnerability_Affects_FeatureVersion (
  id SERIAL PRIMARY KEY,
  vulnerability_id INT NOT NULL REFERENCES Vulnerability ON DELETE CASCADE,
  featureversion_id INT NOT NULL REFERENCES FeatureVersion,
  fixedin_id INT NOT NULL REFERENCES Vulnerability_FixedIn_Feature ON DELETE CASCADE,

  UNIQUE (vulnerability_id, featureversion_id));

CREATE INDEX ON Vulnerability_Affects_FeatureVersion (fixedin_id);
CREATE INDEX ON Vulnerability_Affects_FeatureVersion (featureversion_id, vulnerability_id);


-- -----------------------------------------------------
-- Table KeyValue
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS KeyValue (
  id SERIAL PRIMARY KEY,
  key VARCHAR(128) NOT NULL UNIQUE,
  value TEXT);


-- -----------------------------------------------------
-- Table Lock
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS Lock (
  id SERIAL PRIMARY KEY,
  name VARCHAR(64) NOT NULL UNIQUE,
  owner VARCHAR(64) NOT NULL,
  until TIMESTAMP WITH TIME ZONE);

CREATE INDEX ON Lock (owner);


-- -----------------------------------------------------
-- Table VulnerabilityNotification
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS Vulnerability_Notification (
  id SERIAL PRIMARY KEY,
  name VARCHAR(64) NOT NULL UNIQUE,
  created_at TIMESTAMP WITH TIME ZONE,
  notified_at TIMESTAMP WITH TIME ZONE NULL,
  deleted_at TIMESTAMP WITH TIME ZONE NULL,
  old_vulnerability_id INT NULL REFERENCES Vulnerability ON DELETE CASCADE,
  new_vulnerability_id INT NULL REFERENCES Vulnerability ON DELETE CASCADE);

CREATE INDEX ON Vulnerability_Notification (notified_at);

-- +goose Down

DROP TABLE IF EXISTS Namespace,
                     Layer,
                     Feature,
                     FeatureVersion,
                     Layer_diff_FeatureVersion,
                     Vulnerability,
                     Vulnerability_FixedIn_Feature,
                     Vulnerability_Affects_FeatureVersion,
                     Vulnerability_Notification,
                     KeyValue,
                     Lock
            CASCADE;
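The `modification` enum is what lets Clair store only per-layer diffs: each layer records the feature versions it adds to or removes from its parent. As a rough illustration only (the helper name and the `db` handle are hypothetical; the schema above is the real part), recording one diff row could look like this sketch:

package example // illustrative sketch only, not part of the migration

import "database/sql"

// recordLayerDiff stores one diff entry stating that a layer adds ('add') or
// removes ('del') a given FeatureVersion relative to its parent layer.
func recordLayerDiff(db *sql.DB, layerID, featureVersionID int, added bool) error {
    modification := "del"
    if added {
        modification = "add"
    }
    _, err := db.Exec(
        `INSERT INTO Layer_diff_FeatureVersion(layer_id, featureversion_id, modification)
         VALUES ($1, $2, $3)`,
        layerID, featureVersionID, modification)
    return err
}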
75
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/pgsql/namespace.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pgsql

import (
    "time"

    "github.com/coreos/clair/database"
    cerrors "github.com/coreos/clair/utils/errors"
)

func (pgSQL *pgSQL) insertNamespace(namespace database.Namespace) (int, error) {
    if namespace.Name == "" {
        return 0, cerrors.NewBadRequestError("could not find/insert invalid Namespace")
    }

    if pgSQL.cache != nil {
        promCacheQueriesTotal.WithLabelValues("namespace").Inc()
        if id, found := pgSQL.cache.Get("namespace:" + namespace.Name); found {
            promCacheHitsTotal.WithLabelValues("namespace").Inc()
            return id.(int), nil
        }
    }

    // We do `defer observeQueryTime` here because we don't want to observe cached namespaces.
    defer observeQueryTime("insertNamespace", "all", time.Now())

    var id int
    err := pgSQL.QueryRow(soiNamespace, namespace.Name).Scan(&id)
    if err != nil {
        return 0, handleError("soiNamespace", err)
    }

    if pgSQL.cache != nil {
        pgSQL.cache.Add("namespace:"+namespace.Name, id)
    }

    return id, nil
}

func (pgSQL *pgSQL) ListNamespaces() (namespaces []database.Namespace, err error) {
    rows, err := pgSQL.Query(listNamespace)
    if err != nil {
        return namespaces, handleError("listNamespace", err)
    }
    defer rows.Close()

    for rows.Next() {
        var namespace database.Namespace

        err = rows.Scan(&namespace.ID, &namespace.Name)
        if err != nil {
            return namespaces, handleError("listNamespace.Scan()", err)
        }

        namespaces = append(namespaces, namespace)
    }
    if err = rows.Err(); err != nil {
        return namespaces, handleError("listNamespace.Rows()", err)
    }

    return namespaces, err
}
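insertNamespace relies on the soiNamespace query (defined later in queries.go): a CTE that inserts the name only when it is missing and returns the id either way, so the common case is a single round trip. A minimal sketch of the same select-or-insert pattern against a hypothetical `tags(id, name)` table — the table and helper names are illustrative, not Clair's:

package example

import "database/sql"

const soiTag = `
    WITH new_tag AS (
        INSERT INTO tags(name)
        SELECT CAST($1 AS VARCHAR)
        WHERE NOT EXISTS (SELECT name FROM tags WHERE name = $1)
        RETURNING id
    )
    SELECT id FROM tags WHERE name = $1
    UNION
    SELECT id FROM new_tag`

// findOrInsertTag returns the id of the named tag, creating the row if needed.
func findOrInsertTag(db *sql.DB, name string) (id int, err error) {
    err = db.QueryRow(soiTag, name).Scan(&id)
    return id, err
}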
214
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/pgsql/notification.go
generated
vendored
Normal file
@ -0,0 +1,214 @@
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/guregu/null/zero"
|
||||
"github.com/pborman/uuid"
|
||||
)
|
||||
|
||||
// do it in tx so we won't insert/update a vuln without notification and vice-versa.
|
||||
// name and created don't matter.
|
||||
func createNotification(tx *sql.Tx, oldVulnerabilityID, newVulnerabilityID int) error {
|
||||
defer observeQueryTime("createNotification", "all", time.Now())
|
||||
|
||||
// Insert Notification.
|
||||
oldVulnerabilityNullableID := sql.NullInt64{Int64: int64(oldVulnerabilityID), Valid: oldVulnerabilityID != 0}
|
||||
newVulnerabilityNullableID := sql.NullInt64{Int64: int64(newVulnerabilityID), Valid: newVulnerabilityID != 0}
|
||||
_, err := tx.Exec(insertNotification, uuid.New(), oldVulnerabilityNullableID, newVulnerabilityNullableID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("insertNotification", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get one available notification name (!locked && !deleted && (!notified || notified_but_timed-out)).
|
||||
// Does not fill new/old vuln.
|
||||
func (pgSQL *pgSQL) GetAvailableNotification(renotifyInterval time.Duration) (database.VulnerabilityNotification, error) {
|
||||
defer observeQueryTime("GetAvailableNotification", "all", time.Now())
|
||||
|
||||
before := time.Now().Add(-renotifyInterval)
|
||||
row := pgSQL.QueryRow(searchNotificationAvailable, before)
|
||||
notification, err := pgSQL.scanNotification(row, false)
|
||||
|
||||
return notification, handleError("searchNotificationAvailable", err)
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) GetNotification(name string, limit int, page database.VulnerabilityNotificationPageNumber) (database.VulnerabilityNotification, database.VulnerabilityNotificationPageNumber, error) {
|
||||
defer observeQueryTime("GetNotification", "all", time.Now())
|
||||
|
||||
// Get Notification.
|
||||
notification, err := pgSQL.scanNotification(pgSQL.QueryRow(searchNotification, name), true)
|
||||
if err != nil {
|
||||
return notification, page, handleError("searchNotification", err)
|
||||
}
|
||||
|
||||
// Load vulnerabilities' LayersIntroducingVulnerability.
|
||||
page.OldVulnerability, err = pgSQL.loadLayerIntroducingVulnerability(
|
||||
notification.OldVulnerability,
|
||||
limit,
|
||||
page.OldVulnerability,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return notification, page, err
|
||||
}
|
||||
|
||||
page.NewVulnerability, err = pgSQL.loadLayerIntroducingVulnerability(
|
||||
notification.NewVulnerability,
|
||||
limit,
|
||||
page.NewVulnerability,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return notification, page, err
|
||||
}
|
||||
|
||||
return notification, page, nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) scanNotification(row *sql.Row, hasVulns bool) (database.VulnerabilityNotification, error) {
|
||||
var notification database.VulnerabilityNotification
|
||||
var created zero.Time
|
||||
var notified zero.Time
|
||||
var deleted zero.Time
|
||||
var oldVulnerabilityNullableID sql.NullInt64
|
||||
var newVulnerabilityNullableID sql.NullInt64
|
||||
|
||||
// Scan notification.
|
||||
if hasVulns {
|
||||
err := row.Scan(
|
||||
&notification.ID,
|
||||
&notification.Name,
|
||||
&created,
|
||||
&notified,
|
||||
&deleted,
|
||||
&oldVulnerabilityNullableID,
|
||||
&newVulnerabilityNullableID,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return notification, err
|
||||
}
|
||||
} else {
|
||||
err := row.Scan(&notification.ID, &notification.Name, &created, &notified, &deleted)
|
||||
|
||||
if err != nil {
|
||||
return notification, err
|
||||
}
|
||||
}
|
||||
|
||||
notification.Created = created.Time
|
||||
notification.Notified = notified.Time
|
||||
notification.Deleted = deleted.Time
|
||||
|
||||
if hasVulns {
|
||||
if oldVulnerabilityNullableID.Valid {
|
||||
vulnerability, err := pgSQL.findVulnerabilityByIDWithDeleted(int(oldVulnerabilityNullableID.Int64))
|
||||
if err != nil {
|
||||
return notification, err
|
||||
}
|
||||
|
||||
notification.OldVulnerability = &vulnerability
|
||||
}
|
||||
|
||||
if newVulnerabilityNullableID.Valid {
|
||||
vulnerability, err := pgSQL.findVulnerabilityByIDWithDeleted(int(newVulnerabilityNullableID.Int64))
|
||||
if err != nil {
|
||||
return notification, err
|
||||
}
|
||||
|
||||
notification.NewVulnerability = &vulnerability
|
||||
}
|
||||
}
|
||||
|
||||
return notification, nil
|
||||
}
|
||||
|
||||
// Fills Vulnerability.LayersIntroducingVulnerability.
|
||||
// limit -1: won't do anything
|
||||
// limit 0: will just get the startID of the second page
|
||||
func (pgSQL *pgSQL) loadLayerIntroducingVulnerability(vulnerability *database.Vulnerability, limit, startID int) (int, error) {
|
||||
tf := time.Now()
|
||||
|
||||
if vulnerability == nil {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
// A startID equal to -1 means that we already reached the end.
|
||||
if startID == -1 || limit == -1 {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
// We do `defer observeQueryTime` here because we don't want to observe invalid calls.
|
||||
defer observeQueryTime("loadLayerIntroducingVulnerability", "all", tf)
|
||||
|
||||
// Query with limit + 1, the last item will be used to know the next starting ID.
|
||||
rows, err := pgSQL.Query(searchNotificationLayerIntroducingVulnerability,
|
||||
vulnerability.ID, startID, limit+1)
|
||||
if err != nil {
|
||||
return 0, handleError("searchVulnerabilityFixedInFeature", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var layers []database.Layer
|
||||
for rows.Next() {
|
||||
var layer database.Layer
|
||||
|
||||
if err := rows.Scan(&layer.ID, &layer.Name); err != nil {
|
||||
return -1, handleError("searchNotificationLayerIntroducingVulnerability.Scan()", err)
|
||||
}
|
||||
|
||||
layers = append(layers, layer)
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return -1, handleError("searchNotificationLayerIntroducingVulnerability.Rows()", err)
|
||||
}
|
||||
|
||||
size := limit
|
||||
if len(layers) < limit {
|
||||
size = len(layers)
|
||||
}
|
||||
vulnerability.LayersIntroducingVulnerability = layers[:size]
|
||||
|
||||
nextID := -1
|
||||
if len(layers) > limit {
|
||||
nextID = layers[limit].ID
|
||||
}
|
||||
|
||||
return nextID, nil
|
||||
}
|
||||
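loadLayerIntroducingVulnerability pages with the common "fetch limit+1 rows" trick: the extra row is never returned to the caller, it only tells the pager where the next page starts. A generic, hypothetical sketch of that idea (not Clair code):

package example

// nextPage trims a result set that was fetched with "limit+1" rows: it returns
// at most limit IDs plus the ID the next page should start from, or -1 when
// there is no further page.
func nextPage(ids []int, limit int) (page []int, nextID int) {
    nextID = -1
    if len(ids) > limit {
        nextID = ids[limit] // the extra row only marks where the next page starts
        ids = ids[:limit]
    }
    return ids, nextID
}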
|
||||
func (pgSQL *pgSQL) SetNotificationNotified(name string) error {
|
||||
defer observeQueryTime("SetNotificationNotified", "all", time.Now())
|
||||
|
||||
if _, err := pgSQL.Exec(updatedNotificationNotified, name); err != nil {
|
||||
return handleError("updatedNotificationNotified", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) DeleteNotification(name string) error {
|
||||
defer observeQueryTime("DeleteNotification", "all", time.Now())
|
||||
|
||||
result, err := pgSQL.Exec(removeNotification, name)
|
||||
if err != nil {
|
||||
return handleError("removeNotification", err)
|
||||
}
|
||||
|
||||
affected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return handleError("removeNotification.RowsAffected()", err)
|
||||
}
|
||||
|
||||
if affected <= 0 {
|
||||
return cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
287
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/pgsql/pgsql.go
generated
vendored
Normal file
@ -0,0 +1,287 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package pgsql implements database.Datastore with PostgreSQL.
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"bitbucket.org/liamstask/goose/lib/goose"
|
||||
"github.com/coreos/clair/config"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/hashicorp/golang-lru"
|
||||
"github.com/lib/pq"
|
||||
"github.com/pborman/uuid"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "pgsql")
|
||||
|
||||
promErrorsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "clair_pgsql_errors_total",
|
||||
Help: "Number of errors that PostgreSQL requests generated.",
|
||||
}, []string{"request"})
|
||||
|
||||
promCacheHitsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "clair_pgsql_cache_hits_total",
|
||||
Help: "Number of cache hits that the PostgreSQL backend did.",
|
||||
}, []string{"object"})
|
||||
|
||||
promCacheQueriesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "clair_pgsql_cache_queries_total",
|
||||
Help: "Number of cache queries that the PostgreSQL backend did.",
|
||||
}, []string{"object"})
|
||||
|
||||
promQueryDurationMilliseconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Name: "clair_pgsql_query_duration_milliseconds",
|
||||
Help: "Time it takes to execute the database query.",
|
||||
}, []string{"query", "subquery"})
|
||||
|
||||
promConcurrentLockVAFV = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "clair_pgsql_concurrent_lock_vafv_total",
|
||||
Help: "Number of transactions trying to hold the exclusive Vulnerability_Affects_FeatureVersion lock.",
|
||||
})
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(promErrorsTotal)
|
||||
prometheus.MustRegister(promCacheHitsTotal)
|
||||
prometheus.MustRegister(promCacheQueriesTotal)
|
||||
prometheus.MustRegister(promQueryDurationMilliseconds)
|
||||
prometheus.MustRegister(promConcurrentLockVAFV)
|
||||
}
|
||||
|
||||
type Queryer interface {
|
||||
Query(query string, args ...interface{}) (*sql.Rows, error)
|
||||
QueryRow(query string, args ...interface{}) *sql.Row
|
||||
}
|
||||
|
||||
type pgSQL struct {
|
||||
*sql.DB
|
||||
cache *lru.ARCCache
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) Close() {
|
||||
pgSQL.DB.Close()
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) Ping() bool {
|
||||
return pgSQL.DB.Ping() == nil
|
||||
}
|
||||
|
||||
// Open creates a Datastore backed by a PostgreSQL database.
|
||||
//
|
||||
// It immediately runs every necessary migration on the database.
|
||||
func Open(config *config.DatabaseConfig) (database.Datastore, error) {
|
||||
// Run migrations.
|
||||
if err := migrate(config.Source); err != nil {
|
||||
log.Error(err)
|
||||
return nil, database.ErrCantOpen
|
||||
}
|
||||
|
||||
// Open database.
|
||||
db, err := sql.Open("postgres", config.Source)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return nil, database.ErrCantOpen
|
||||
}
|
||||
|
||||
// Initialize cache.
|
||||
// TODO(Quentin-M): Benchmark with a simple LRU Cache.
|
||||
var cache *lru.ARCCache
|
||||
if config.CacheSize > 0 {
|
||||
cache, _ = lru.NewARC(config.CacheSize)
|
||||
}
|
||||
|
||||
return &pgSQL{DB: db, cache: cache}, nil
|
||||
}
|
||||
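A hedged sketch of how a caller is expected to open the datastore — the connection string is a placeholder, and Close on the returned Datastore is assumed to behave like the concrete pgSQL.Close shown above:

package example

import (
    "log"

    "github.com/coreos/clair/config"
    "github.com/coreos/clair/database/pgsql"
)

func openStore() {
    // The DSN is a placeholder; CacheSize bounds the ARC cache used for
    // namespace/feature lookups (0 disables it).
    store, err := pgsql.Open(&config.DatabaseConfig{
        Source:    "postgresql://clair@localhost:5432/clair?sslmode=disable",
        CacheSize: 16384,
    })
    if err != nil {
        log.Fatal(err)
    }
    defer store.Close()
}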
|
||||
// migrate runs all available migrations on a pgSQL database.
|
||||
func migrate(dataSource string) error {
|
||||
log.Info("running database migrations")
|
||||
|
||||
_, filename, _, _ := runtime.Caller(1)
|
||||
migrationDir := path.Join(path.Dir(filename), "/migrations/")
|
||||
conf := &goose.DBConf{
|
||||
MigrationsDir: migrationDir,
|
||||
Driver: goose.DBDriver{
|
||||
Name: "postgres",
|
||||
OpenStr: dataSource,
|
||||
Import: "github.com/lib/pq",
|
||||
Dialect: &goose.PostgresDialect{},
|
||||
},
|
||||
}
|
||||
|
||||
// Determine the most recent revision available from the migrations folder.
|
||||
target, err := goose.GetMostRecentDBVersion(conf.MigrationsDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Run migrations
|
||||
err = goose.RunMigrations(conf, conf.MigrationsDir, target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("database migration ran successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
// createDatabase creates a new database.
|
||||
// The dataSource parameter should not contain a dbname.
|
||||
func createDatabase(dataSource, databaseName string) error {
|
||||
// Open database.
|
||||
db, err := sql.Open("postgres", dataSource)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not open database (CreateDatabase): %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create database.
|
||||
_, err = db.Exec("CREATE DATABASE " + databaseName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create database: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dropDatabase drops an existing database.
|
||||
// The dataSource parameter should not contain a dbname.
|
||||
func dropDatabase(dataSource, databaseName string) error {
|
||||
// Open database.
|
||||
db, err := sql.Open("postgres", dataSource)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not open database (DropDatabase): %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Kill any opened connection.
|
||||
if _, err := db.Exec(`
|
||||
SELECT pg_terminate_backend(pg_stat_activity.pid)
|
||||
FROM pg_stat_activity
|
||||
WHERE pg_stat_activity.datname = $1
|
||||
AND pid <> pg_backend_pid()`, databaseName); err != nil {
|
||||
return fmt.Errorf("could not drop database: %v", err)
|
||||
}
|
||||
|
||||
// Drop database.
|
||||
if _, err = db.Exec("DROP DATABASE " + databaseName); err != nil {
|
||||
return fmt.Errorf("could not drop database: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pgSQLTest wraps pgSQL for testing purposes.
|
||||
// Its Close() method drops the database.
|
||||
type pgSQLTest struct {
|
||||
*pgSQL
|
||||
dataSourceDefaultDatabase string
|
||||
dbName string
|
||||
}
|
||||
|
||||
// OpenForTest creates a test Datastore backed by a new PostgreSQL database.
|
||||
// It creates a new unique and prefixed ("test_") database.
|
||||
// Using Close() will drop the database.
|
||||
func OpenForTest(name string, withTestData bool) (*pgSQLTest, error) {
|
||||
// Define the PostgreSQL connection strings.
|
||||
dataSource := "host=127.0.0.1 sslmode=disable user=postgres dbname="
|
||||
if dataSourceEnv := os.Getenv("CLAIR_TEST_PGSQL"); dataSourceEnv != "" {
|
||||
dataSource = dataSourceEnv + " dbname="
|
||||
}
|
||||
dbName := "test_" + strings.ToLower(name) + "_" + strings.Replace(uuid.New(), "-", "_", -1)
|
||||
dataSourceDefaultDatabase := dataSource + "postgres"
|
||||
dataSourceTestDatabase := dataSource + dbName
|
||||
|
||||
// Create database.
|
||||
if err := createDatabase(dataSourceDefaultDatabase, dbName); err != nil {
|
||||
log.Error(err)
|
||||
return nil, database.ErrCantOpen
|
||||
}
|
||||
|
||||
// Open database.
|
||||
db, err := Open(&config.DatabaseConfig{Source: dataSourceTestDatabase, CacheSize: 0})
|
||||
if err != nil {
|
||||
dropDatabase(dataSourceDefaultDatabase, dbName)
|
||||
log.Error(err)
|
||||
return nil, database.ErrCantOpen
|
||||
}
|
||||
|
||||
// Load test data if specified.
|
||||
if withTestData {
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
d, _ := ioutil.ReadFile(path.Join(path.Dir(filename)) + "/testdata/data.sql")
|
||||
_, err = db.(*pgSQL).Exec(string(d))
|
||||
if err != nil {
|
||||
dropDatabase(dataSourceDefaultDatabase, dbName)
|
||||
log.Error(err)
|
||||
return nil, database.ErrCantOpen
|
||||
}
|
||||
}
|
||||
|
||||
return &pgSQLTest{
|
||||
pgSQL: db.(*pgSQL),
|
||||
dataSourceDefaultDatabase: dataSourceDefaultDatabase,
|
||||
dbName: dbName}, nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQLTest) Close() {
|
||||
pgSQL.DB.Close()
|
||||
dropDatabase(pgSQL.dataSourceDefaultDatabase, pgSQL.dbName)
|
||||
}
|
||||
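OpenForTest is what the package's own tests build on; a hypothetical test sketch (the test name and assertions are invented for illustration, and it assumes the package's existing symbols):

package pgsql

import "testing"

// TestExampleOpen is an illustrative sketch only, not a vendored test.
func TestExampleOpen(t *testing.T) {
    datastore, err := OpenForTest("ExampleOpen", false)
    if err != nil {
        t.Fatal(err)
    }
    defer datastore.Close() // drops the temporary test_* database

    if !datastore.Ping() {
        t.Error("expected the test database to be reachable")
    }
}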
|
||||
// handleError logs an error with an extra description and masks the error if it's an SQL one.
|
||||
// This ensures we never return plain SQL errors and leak anything.
|
||||
func handleError(desc string, err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
log.Errorf("%s: %v", desc, err)
|
||||
promErrorsTotal.WithLabelValues(desc).Inc()
|
||||
|
||||
if _, o := err.(*pq.Error); o || err == sql.ErrTxDone || strings.HasPrefix(err.Error(), "sql:") {
|
||||
return database.ErrBackendException
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// isErrUniqueViolation determines whether the given error is a unique constraint violation.
|
||||
func isErrUniqueViolation(err error) bool {
|
||||
pqErr, ok := err.(*pq.Error)
|
||||
return ok && pqErr.Code == "23505"
|
||||
}
|
||||
|
||||
func observeQueryTime(query, subquery string, start time.Time) {
|
||||
utils.PrometheusObserveTimeMilliseconds(promQueryDurationMilliseconds.WithLabelValues(query, subquery), start)
|
||||
}
|
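Most query methods in this package follow the same skeleton: defer observeQueryTime for metrics, run the query, and wrap any failure with handleError so raw SQL errors never leave the package. A condensed, illustrative sketch of that pattern (countLayers is not part of the vendored file, and it assumes the file's existing imports):

// countLayers is an illustrative sketch of the package's usual query pattern:
// time the query, then mask errors before returning them.
func (pgSQL *pgSQL) countLayers() (count int, err error) {
    defer observeQueryTime("countLayers", "all", time.Now())

    err = pgSQL.QueryRow(`SELECT COUNT(*) FROM Layer`).Scan(&count)
    if err != nil {
        return 0, handleError("countLayers", err)
    }
    return count, nil
}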
239
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/pgsql/queries.go
generated
vendored
Normal file
@ -0,0 +1,239 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import "strconv"
|
||||
|
||||
const (
|
||||
lockVulnerabilityAffects = `LOCK Vulnerability_Affects_FeatureVersion IN SHARE ROW EXCLUSIVE MODE`
|
||||
disableHashJoin = `SET LOCAL enable_hashjoin = off`
|
||||
disableMergeJoin = `SET LOCAL enable_mergejoin = off`
|
||||
|
||||
// keyvalue.go
|
||||
updateKeyValue = `UPDATE KeyValue SET value = $1 WHERE key = $2`
|
||||
insertKeyValue = `INSERT INTO KeyValue(key, value) VALUES($1, $2)`
|
||||
searchKeyValue = `SELECT value FROM KeyValue WHERE key = $1`
|
||||
|
||||
// namespace.go
|
||||
soiNamespace = `
|
||||
WITH new_namespace AS (
|
||||
INSERT INTO Namespace(name)
|
||||
SELECT CAST($1 AS VARCHAR)
|
||||
WHERE NOT EXISTS (SELECT name FROM Namespace WHERE name = $1)
|
||||
RETURNING id
|
||||
)
|
||||
SELECT id FROM Namespace WHERE name = $1
|
||||
UNION
|
||||
SELECT id FROM new_namespace`
|
||||
|
||||
searchNamespace = `SELECT id FROM Namespace WHERE name = $1`
|
||||
listNamespace = `SELECT id, name FROM Namespace`
|
||||
|
||||
// feature.go
|
||||
soiFeature = `
|
||||
WITH new_feature AS (
|
||||
INSERT INTO Feature(name, namespace_id)
|
||||
SELECT CAST($1 AS VARCHAR), CAST($2 AS INTEGER)
|
||||
WHERE NOT EXISTS (SELECT id FROM Feature WHERE name = $1 AND namespace_id = $2)
|
||||
RETURNING id
|
||||
)
|
||||
SELECT id FROM Feature WHERE name = $1 AND namespace_id = $2
|
||||
UNION
|
||||
SELECT id FROM new_feature`
|
||||
|
||||
searchFeatureVersion = `
|
||||
SELECT id FROM FeatureVersion WHERE feature_id = $1 AND version = $2`
|
||||
|
||||
soiFeatureVersion = `
|
||||
WITH new_featureversion AS (
|
||||
INSERT INTO FeatureVersion(feature_id, version)
|
||||
SELECT CAST($1 AS INTEGER), CAST($2 AS VARCHAR)
|
||||
WHERE NOT EXISTS (SELECT id FROM FeatureVersion WHERE feature_id = $1 AND version = $2)
|
||||
RETURNING id
|
||||
)
|
||||
SELECT 'exi', id FROM FeatureVersion WHERE feature_id = $1 AND version = $2
|
||||
UNION
|
||||
SELECT 'new', id FROM new_featureversion`
|
||||
|
||||
searchVulnerabilityFixedInFeature = `
|
||||
SELECT id, vulnerability_id, version FROM Vulnerability_FixedIn_Feature
|
||||
WHERE feature_id = $1`
|
||||
|
||||
insertVulnerabilityAffectsFeatureVersion = `
|
||||
INSERT INTO Vulnerability_Affects_FeatureVersion(vulnerability_id,
|
||||
featureversion_id, fixedin_id) VALUES($1, $2, $3)`
|
||||
|
||||
// layer.go
|
||||
searchLayer = `
|
||||
SELECT l.id, l.name, l.engineversion, p.id, p.name, n.id, n.name
|
||||
FROM Layer l
|
||||
LEFT JOIN Layer p ON l.parent_id = p.id
|
||||
LEFT JOIN Namespace n ON l.namespace_id = n.id
|
||||
WHERE l.name = $1;`
|
||||
|
||||
searchLayerFeatureVersion = `
|
||||
WITH RECURSIVE layer_tree(id, name, parent_id, depth, path, cycle) AS(
|
||||
SELECT l.id, l.name, l.parent_id, 1, ARRAY[l.id], false
|
||||
FROM Layer l
|
||||
WHERE l.id = $1
|
||||
UNION ALL
|
||||
SELECT l.id, l.name, l.parent_id, lt.depth + 1, path || l.id, l.id = ANY(path)
|
||||
FROM Layer l, layer_tree lt
|
||||
WHERE l.id = lt.parent_id
|
||||
)
|
||||
SELECT ldf.featureversion_id, ldf.modification, fn.id, fn.name, f.id, f.name, fv.id, fv.version, ltree.id, ltree.name
|
||||
FROM Layer_diff_FeatureVersion ldf
|
||||
JOIN (
|
||||
SELECT row_number() over (ORDER BY depth DESC), id, name FROM layer_tree
|
||||
) AS ltree (ordering, id, name) ON ldf.layer_id = ltree.id, FeatureVersion fv, Feature f, Namespace fn
|
||||
WHERE ldf.featureversion_id = fv.id AND fv.feature_id = f.id AND f.namespace_id = fn.id
|
||||
ORDER BY ltree.ordering`
|
||||
|
||||
searchFeatureVersionVulnerability = `
|
||||
SELECT vafv.featureversion_id, v.id, v.name, v.description, v.link, v.severity, v.metadata,
|
||||
vn.name, vfif.version
|
||||
FROM Vulnerability_Affects_FeatureVersion vafv, Vulnerability v,
|
||||
Namespace vn, Vulnerability_FixedIn_Feature vfif
|
||||
WHERE vafv.featureversion_id = ANY($1::integer[])
|
||||
AND vfif.vulnerability_id = v.id
|
||||
AND vafv.fixedin_id = vfif.id
|
||||
AND v.namespace_id = vn.id
|
||||
AND v.deleted_at IS NULL`
|
||||
|
||||
insertLayer = `
|
||||
INSERT INTO Layer(name, engineversion, parent_id, namespace_id, created_at)
|
||||
VALUES($1, $2, $3, $4, CURRENT_TIMESTAMP)
|
||||
RETURNING id`
|
||||
|
||||
updateLayer = `UPDATE LAYER SET engineversion = $2, namespace_id = $3 WHERE id = $1`
|
||||
|
||||
removeLayerDiffFeatureVersion = `
|
||||
DELETE FROM Layer_diff_FeatureVersion
|
||||
WHERE layer_id = $1`
|
||||
|
||||
insertLayerDiffFeatureVersion = `
|
||||
INSERT INTO Layer_diff_FeatureVersion(layer_id, featureversion_id, modification)
|
||||
SELECT $1, fv.id, $2
|
||||
FROM FeatureVersion fv
|
||||
WHERE fv.id = ANY($3::integer[])`
|
||||
|
||||
removeLayer = `DELETE FROM Layer WHERE name = $1`
|
||||
|
||||
// lock.go
|
||||
insertLock = `INSERT INTO Lock(name, owner, until) VALUES($1, $2, $3)`
|
||||
searchLock = `SELECT owner, until FROM Lock WHERE name = $1`
|
||||
updateLock = `UPDATE Lock SET until = $3 WHERE name = $1 AND owner = $2`
|
||||
removeLock = `DELETE FROM Lock WHERE name = $1 AND owner = $2`
|
||||
removeLockExpired = `DELETE FROM LOCK WHERE until < CURRENT_TIMESTAMP`
|
||||
|
||||
// vulnerability.go
|
||||
searchVulnerabilityBase = `
|
||||
SELECT v.id, v.name, n.id, n.name, v.description, v.link, v.severity, v.metadata
|
||||
FROM Vulnerability v JOIN Namespace n ON v.namespace_id = n.id`
|
||||
searchVulnerabilityForUpdate = ` FOR UPDATE OF v`
|
||||
searchVulnerabilityByNamespaceAndName = ` WHERE n.name = $1 AND v.name = $2 AND v.deleted_at IS NULL`
|
||||
searchVulnerabilityByID = ` WHERE v.id = $1`
|
||||
searchVulnerabilityByNamespace = ` WHERE n.name = $1 AND v.deleted_at IS NULL
|
||||
AND v.id >= $2
|
||||
ORDER BY v.id
|
||||
LIMIT $3`
|
||||
|
||||
searchVulnerabilityFixedIn = `
|
||||
SELECT vfif.version, f.id, f.Name
|
||||
FROM Vulnerability_FixedIn_Feature vfif JOIN Feature f ON vfif.feature_id = f.id
|
||||
WHERE vfif.vulnerability_id = $1`
|
||||
|
||||
insertVulnerability = `
|
||||
INSERT INTO Vulnerability(namespace_id, name, description, link, severity, metadata, created_at)
|
||||
VALUES($1, $2, $3, $4, $5, $6, CURRENT_TIMESTAMP)
|
||||
RETURNING id`
|
||||
|
||||
insertVulnerabilityFixedInFeature = `
|
||||
INSERT INTO Vulnerability_FixedIn_Feature(vulnerability_id, feature_id, version)
|
||||
VALUES($1, $2, $3)
|
||||
RETURNING id`
|
||||
|
||||
searchFeatureVersionByFeature = `SELECT id, version FROM FeatureVersion WHERE feature_id = $1`
|
||||
|
||||
removeVulnerability = `
|
||||
UPDATE Vulnerability
|
||||
SET deleted_at = CURRENT_TIMESTAMP
|
||||
WHERE namespace_id = (SELECT id FROM Namespace WHERE name = $1)
|
||||
AND name = $2
|
||||
AND deleted_at IS NULL
|
||||
RETURNING id`
|
||||
|
||||
// notification.go
|
||||
insertNotification = `
|
||||
INSERT INTO Vulnerability_Notification(name, created_at, old_vulnerability_id, new_vulnerability_id)
|
||||
VALUES($1, CURRENT_TIMESTAMP, $2, $3)`
|
||||
|
||||
updatedNotificationNotified = `
|
||||
UPDATE Vulnerability_Notification
|
||||
SET notified_at = CURRENT_TIMESTAMP
|
||||
WHERE name = $1`
|
||||
|
||||
removeNotification = `
|
||||
UPDATE Vulnerability_Notification
|
||||
SET deleted_at = CURRENT_TIMESTAMP
|
||||
WHERE name = $1`
|
||||
|
||||
searchNotificationAvailable = `
|
||||
SELECT id, name, created_at, notified_at, deleted_at
|
||||
FROM Vulnerability_Notification
|
||||
WHERE (notified_at IS NULL OR notified_at < $1)
|
||||
AND deleted_at IS NULL
|
||||
AND name NOT IN (SELECT name FROM Lock)
|
||||
ORDER BY Random()
|
||||
LIMIT 1`
|
||||
|
||||
searchNotification = `
|
||||
SELECT id, name, created_at, notified_at, deleted_at, old_vulnerability_id, new_vulnerability_id
|
||||
FROM Vulnerability_Notification
|
||||
WHERE name = $1`
|
||||
|
||||
searchNotificationLayerIntroducingVulnerability = `
|
||||
SELECT l.ID, l.name
|
||||
FROM Vulnerability v, Vulnerability_Affects_FeatureVersion vafv, FeatureVersion fv, Layer_diff_FeatureVersion ldfv, Layer l
|
||||
WHERE v.id = $1
|
||||
AND v.id = vafv.vulnerability_id
|
||||
AND vafv.featureversion_id = fv.id
|
||||
AND fv.id = ldfv.featureversion_id
|
||||
AND ldfv.modification = 'add'
|
||||
AND ldfv.layer_id = l.id
|
||||
AND l.id >= $2
|
||||
ORDER BY l.ID
|
||||
LIMIT $3`
|
||||
|
||||
// complex_test.go
|
||||
searchComplexTestFeatureVersionAffects = `
|
||||
SELECT v.name
|
||||
FROM FeatureVersion fv
|
||||
LEFT JOIN Vulnerability_Affects_FeatureVersion vaf ON fv.id = vaf.featureversion_id
|
||||
JOIN Vulnerability v ON vaf.vulnerability_id = v.id
|
||||
WHERE featureversion_id = $1`
|
||||
)
|
||||
|
||||
// buildInputArray constructs a PostgreSQL input array from the specified integers.
|
||||
// Useful with the `= ANY($1::integer[])` syntax, which lets us use an IN clause while using
|
||||
// a single placeholder.
|
||||
func buildInputArray(ints []int) string {
|
||||
str := "{"
|
||||
for i := 0; i < len(ints)-1; i++ {
|
||||
str = str + strconv.Itoa(ints[i]) + ","
|
||||
}
|
||||
str = str + strconv.Itoa(ints[len(ints)-1]) + "}"
|
||||
return str
|
||||
}
|
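buildInputArray simply renders a PostgreSQL array literal, e.g. []int{1, 2, 3} becomes "{1,2,3}", which is then passed as the single parameter of an `= ANY($1::integer[])` clause. As written it would panic on an empty slice (ints[len(ints)-1] with len 0), so callers are expected to guard against that; a defensive variant could look like this sketch (not vendored code, assumes the package context):

// buildInputArraySafe is an illustrative, defensive variant: it returns the
// empty Postgres array literal "{}" for an empty input instead of panicking.
func buildInputArraySafe(ints []int) string {
    if len(ints) == 0 {
        return "{}"
    }
    return buildInputArray(ints)
}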
569
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/database/pgsql/vulnerability.go
generated
vendored
Normal file
@ -0,0 +1,569 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pgsql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/guregu/null/zero"
|
||||
)
|
||||
|
||||
func (pgSQL *pgSQL) ListVulnerabilities(namespaceName string, limit int, startID int) ([]database.Vulnerability, int, error) {
|
||||
defer observeQueryTime("listVulnerabilities", "all", time.Now())
|
||||
|
||||
// Query Namespace.
|
||||
var id int
|
||||
err := pgSQL.QueryRow(searchNamespace, namespaceName).Scan(&id)
|
||||
if err != nil {
|
||||
return nil, -1, handleError("searchNamespace", err)
|
||||
} else if id == 0 {
|
||||
return nil, -1, cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
// Query.
|
||||
query := searchVulnerabilityBase + searchVulnerabilityByNamespace
|
||||
rows, err := pgSQL.Query(query, namespaceName, startID, limit+1)
|
||||
if err != nil {
|
||||
return nil, -1, handleError("searchVulnerabilityByNamespace", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var vulns []database.Vulnerability
|
||||
nextID := -1
|
||||
size := 0
|
||||
// Scan query.
|
||||
for rows.Next() {
|
||||
var vulnerability database.Vulnerability
|
||||
|
||||
err := rows.Scan(
|
||||
&vulnerability.ID,
|
||||
&vulnerability.Name,
|
||||
&vulnerability.Namespace.ID,
|
||||
&vulnerability.Namespace.Name,
|
||||
&vulnerability.Description,
|
||||
&vulnerability.Link,
|
||||
&vulnerability.Severity,
|
||||
&vulnerability.Metadata,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, -1, handleError("searchVulnerabilityByNamespace.Scan()", err)
|
||||
}
|
||||
size++
|
||||
if size > limit {
|
||||
nextID = vulnerability.ID
|
||||
} else {
|
||||
vulns = append(vulns, vulnerability)
|
||||
}
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, -1, handleError("searchVulnerabilityByNamespace.Rows()", err)
|
||||
}
|
||||
|
||||
return vulns, nextID, nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) FindVulnerability(namespaceName, name string) (database.Vulnerability, error) {
|
||||
return findVulnerability(pgSQL, namespaceName, name, false)
|
||||
}
|
||||
|
||||
func findVulnerability(queryer Queryer, namespaceName, name string, forUpdate bool) (database.Vulnerability, error) {
|
||||
defer observeQueryTime("findVulnerability", "all", time.Now())
|
||||
|
||||
queryName := "searchVulnerabilityBase+searchVulnerabilityByNamespaceAndName"
|
||||
query := searchVulnerabilityBase + searchVulnerabilityByNamespaceAndName
|
||||
if forUpdate {
|
||||
queryName = queryName + "+searchVulnerabilityForUpdate"
|
||||
query = query + searchVulnerabilityForUpdate
|
||||
}
|
||||
|
||||
return scanVulnerability(queryer, queryName, queryer.QueryRow(query, namespaceName, name))
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) findVulnerabilityByIDWithDeleted(id int) (database.Vulnerability, error) {
|
||||
defer observeQueryTime("findVulnerabilityByIDWithDeleted", "all", time.Now())
|
||||
|
||||
queryName := "searchVulnerabilityBase+searchVulnerabilityByID"
|
||||
query := searchVulnerabilityBase + searchVulnerabilityByID
|
||||
|
||||
return scanVulnerability(pgSQL, queryName, pgSQL.QueryRow(query, id))
|
||||
}
|
||||
|
||||
func scanVulnerability(queryer Queryer, queryName string, vulnerabilityRow *sql.Row) (database.Vulnerability, error) {
|
||||
var vulnerability database.Vulnerability
|
||||
|
||||
err := vulnerabilityRow.Scan(
|
||||
&vulnerability.ID,
|
||||
&vulnerability.Name,
|
||||
&vulnerability.Namespace.ID,
|
||||
&vulnerability.Namespace.Name,
|
||||
&vulnerability.Description,
|
||||
&vulnerability.Link,
|
||||
&vulnerability.Severity,
|
||||
&vulnerability.Metadata,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return vulnerability, handleError(queryName+".Scan()", err)
|
||||
}
|
||||
|
||||
if vulnerability.ID == 0 {
|
||||
return vulnerability, cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
// Query the FixedIn FeatureVersion now.
|
||||
rows, err := queryer.Query(searchVulnerabilityFixedIn, vulnerability.ID)
|
||||
if err != nil {
|
||||
return vulnerability, handleError("searchVulnerabilityFixedIn.Scan()", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var featureVersionID zero.Int
|
||||
var featureVersionVersion zero.String
|
||||
var featureVersionFeatureName zero.String
|
||||
|
||||
err := rows.Scan(
|
||||
&featureVersionVersion,
|
||||
&featureVersionID,
|
||||
&featureVersionFeatureName,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return vulnerability, handleError("searchVulnerabilityFixedIn.Scan()", err)
|
||||
}
|
||||
|
||||
if !featureVersionID.IsZero() {
|
||||
// Note that the ID we fill in featureVersion is actually a Feature ID, and not
|
||||
// a FeatureVersion ID.
|
||||
featureVersion := database.FeatureVersion{
|
||||
Model: database.Model{ID: int(featureVersionID.Int64)},
|
||||
Feature: database.Feature{
|
||||
Model: database.Model{ID: int(featureVersionID.Int64)},
|
||||
Namespace: vulnerability.Namespace,
|
||||
Name: featureVersionFeatureName.String,
|
||||
},
|
||||
Version: types.NewVersionUnsafe(featureVersionVersion.String),
|
||||
}
|
||||
vulnerability.FixedIn = append(vulnerability.FixedIn, featureVersion)
|
||||
}
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return vulnerability, handleError("searchVulnerabilityFixedIn.Rows()", err)
|
||||
}
|
||||
|
||||
return vulnerability, nil
|
||||
}
|
||||
|
||||
// FixedIn.Namespace is not necessary; it is overwritten by the Vulnerability's Namespace.
|
||||
// By setting the fixed version to MinVersion, we state that the vulnerability no longer affects the feature.
|
||||
func (pgSQL *pgSQL) InsertVulnerabilities(vulnerabilities []database.Vulnerability, generateNotifications bool) error {
|
||||
for _, vulnerability := range vulnerabilities {
|
||||
err := pgSQL.insertVulnerability(vulnerability, false, generateNotifications)
|
||||
if err != nil {
|
||||
fmt.Printf("%#v\n", vulnerability)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) insertVulnerability(vulnerability database.Vulnerability, onlyFixedIn, generateNotification bool) error {
|
||||
tf := time.Now()
|
||||
|
||||
// Verify parameters
|
||||
if vulnerability.Name == "" || vulnerability.Namespace.Name == "" {
|
||||
return cerrors.NewBadRequestError("insertVulnerability needs at least the Name and the Namespace")
|
||||
}
|
||||
if !onlyFixedIn && !vulnerability.Severity.IsValid() {
|
||||
msg := fmt.Sprintf("could not insert a vulnerability that has an invalid Severity: %s", vulnerability.Severity)
|
||||
log.Warning(msg)
|
||||
return cerrors.NewBadRequestError(msg)
|
||||
}
|
||||
for i := 0; i < len(vulnerability.FixedIn); i++ {
|
||||
fifv := &vulnerability.FixedIn[i]
|
||||
|
||||
if fifv.Feature.Namespace.Name == "" {
|
||||
// As there is no Namespace on that FixedIn FeatureVersion, set it to the Vulnerability's
|
||||
// Namespace.
|
||||
fifv.Feature.Namespace.Name = vulnerability.Namespace.Name
|
||||
} else if fifv.Feature.Namespace.Name != vulnerability.Namespace.Name {
|
||||
msg := "could not insert an invalid vulnerability that contains FixedIn FeatureVersion that are not in the same namespace as the Vulnerability"
|
||||
log.Warning(msg)
|
||||
return cerrors.NewBadRequestError(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// We do `defer observeQueryTime` here because we don't want to observe invalid vulnerabilities.
|
||||
defer observeQueryTime("insertVulnerability", "all", tf)
|
||||
|
||||
// Begin transaction.
|
||||
tx, err := pgSQL.Begin()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("insertVulnerability.Begin()", err)
|
||||
}
|
||||
|
||||
// Find existing vulnerability and its Vulnerability_FixedIn_Features (for update).
|
||||
existingVulnerability, err := findVulnerability(tx, vulnerability.Namespace.Name, vulnerability.Name, true)
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
if onlyFixedIn {
|
||||
// Because this call tries to update FixedIn FeatureVersion, import all other data from the
|
||||
// existing one.
|
||||
if existingVulnerability.ID == 0 {
|
||||
return cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
fixedIn := vulnerability.FixedIn
|
||||
vulnerability = existingVulnerability
|
||||
vulnerability.FixedIn = fixedIn
|
||||
}
|
||||
|
||||
if existingVulnerability.ID != 0 {
|
||||
updateMetadata := vulnerability.Description != existingVulnerability.Description ||
|
||||
vulnerability.Link != existingVulnerability.Link ||
|
||||
vulnerability.Severity != existingVulnerability.Severity ||
|
||||
!reflect.DeepEqual(castMetadata(vulnerability.Metadata), existingVulnerability.Metadata)
|
||||
|
||||
// Construct the entire list of FixedIn FeatureVersion, by using
|
||||
// the FixedIn list of the old vulnerability.
|
||||
//
|
||||
// TODO(Quentin-M): We could use !updateFixedIn to just copy FixedIn/Affects rows from the
|
||||
// existing vulnerability in order to make metadata updates much faster.
|
||||
var updateFixedIn bool
|
||||
vulnerability.FixedIn, updateFixedIn = applyFixedInDiff(existingVulnerability.FixedIn, vulnerability.FixedIn)
|
||||
|
||||
if !updateMetadata && !updateFixedIn {
|
||||
tx.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mark the old vulnerability as non latest.
|
||||
_, err = tx.Exec(removeVulnerability, vulnerability.Namespace.Name, vulnerability.Name)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("removeVulnerability", err)
|
||||
}
|
||||
} else {
|
||||
// The vulnerability is new; we don't want any types.MinVersion entries, as they are only used
|
||||
// for diffing existing vulnerabilities.
|
||||
var fixedIn []database.FeatureVersion
|
||||
for _, fv := range vulnerability.FixedIn {
|
||||
if fv.Version != types.MinVersion {
|
||||
fixedIn = append(fixedIn, fv)
|
||||
}
|
||||
}
|
||||
vulnerability.FixedIn = fixedIn
|
||||
}
|
||||
|
||||
// Find or insert Vulnerability's Namespace.
|
||||
namespaceID, err := pgSQL.insertNamespace(vulnerability.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Insert vulnerability.
|
||||
err = tx.QueryRow(
|
||||
insertVulnerability,
|
||||
namespaceID,
|
||||
vulnerability.Name,
|
||||
vulnerability.Description,
|
||||
vulnerability.Link,
|
||||
&vulnerability.Severity,
|
||||
&vulnerability.Metadata,
|
||||
).Scan(&vulnerability.ID)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("insertVulnerability", err)
|
||||
}
|
||||
|
||||
// Update Vulnerability_FixedIn_Feature and Vulnerability_Affects_FeatureVersion now.
|
||||
err = pgSQL.insertVulnerabilityFixedInFeatureVersions(tx, vulnerability.ID, vulnerability.FixedIn)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a notification.
|
||||
if generateNotification {
|
||||
err = createNotification(tx, existingVulnerability.ID, vulnerability.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Commit transaction.
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("insertVulnerability.Commit()", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// castMetadata marshals the given database.MetadataMap and unmarshals it again to make sure that
|
||||
// everything has the interface{} type.
|
||||
// It is required when comparing crafted MetadataMap against MetadataMap that we get from the
|
||||
// database.
|
||||
func castMetadata(m database.MetadataMap) database.MetadataMap {
|
||||
c := make(database.MetadataMap)
|
||||
j, _ := json.Marshal(m)
|
||||
json.Unmarshal(j, &c)
|
||||
return c
|
||||
}
|
||||
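The round trip matters because encoding/json decodes every JSON number into float64 when the target is interface{}: without castMetadata, a freshly crafted map holding an int would never DeepEqual the map read back from the database. A tiny illustrative check (not vendored code, assuming MetadataMap is a map[string]interface{} alias as its usage here suggests):

// castMetadataExample shows why both sides must be normalized before comparing.
func castMetadataExample() bool {
    crafted := database.MetadataMap{"NVD": map[string]interface{}{"CVSSv2Score": 5}} // int value
    fromDB := castMetadata(crafted)                                                  // 5 becomes float64(5)

    directlyEqual := reflect.DeepEqual(crafted, fromDB)                 // false: int vs float64
    normalizedEqual := reflect.DeepEqual(castMetadata(crafted), fromDB) // true: both round-tripped
    return !directlyEqual && normalizedEqual
}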
|
||||
// applyFixedInDiff applies a FeatureVersion diff on a FeatureVersion list and returns the result.
|
||||
func applyFixedInDiff(currentList, diff []database.FeatureVersion) ([]database.FeatureVersion, bool) {
|
||||
currentMap, currentNames := createFeatureVersionNameMap(currentList)
|
||||
diffMap, diffNames := createFeatureVersionNameMap(diff)
|
||||
|
||||
addedNames := utils.CompareStringLists(diffNames, currentNames)
|
||||
inBothNames := utils.CompareStringListsInBoth(diffNames, currentNames)
|
||||
|
||||
different := false
|
||||
|
||||
for _, name := range addedNames {
|
||||
if diffMap[name].Version == types.MinVersion {
|
||||
// MinVersion only makes sense when a Feature is already fixed in some version,
|
||||
// in which case we would be in the "inBothNames".
|
||||
continue
|
||||
}
|
||||
|
||||
currentMap[name] = diffMap[name]
|
||||
different = true
|
||||
}
|
||||
|
||||
for _, name := range inBothNames {
|
||||
fv := diffMap[name]
|
||||
|
||||
if fv.Version == types.MinVersion {
|
||||
// MinVersion means that the Feature doesn't affect the Vulnerability anymore.
|
||||
delete(currentMap, name)
|
||||
different = true
|
||||
} else if fv.Version != currentMap[name].Version {
|
||||
// The version got updated.
|
||||
currentMap[name] = diffMap[name]
|
||||
different = true
|
||||
}
|
||||
}
|
||||
|
||||
// Convert currentMap to a slice and return it.
|
||||
var newList []database.FeatureVersion
|
||||
for _, fv := range currentMap {
|
||||
newList = append(newList, fv)
|
||||
}
|
||||
|
||||
return newList, different
|
||||
}
|
||||
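In plain terms: diff entries carrying types.MinVersion delete the corresponding fixed-in feature, entries with a different version overwrite it, and anything new is appended. A worked example, hedged as an illustration of intent rather than a vendored test (it assumes the file's existing imports):

// applyFixedInDiffExample is an illustrative sketch of the diff semantics:
// MinVersion removes an entry, any other version upserts it.
func applyFixedInDiffExample() ([]database.FeatureVersion, bool) {
    current := []database.FeatureVersion{
        {Feature: database.Feature{Name: "openssl"}, Version: types.NewVersionUnsafe("1.0.1e")},
    }
    diff := []database.FeatureVersion{
        {Feature: database.Feature{Name: "openssl"}, Version: types.MinVersion},           // remove
        {Feature: database.Feature{Name: "nginx"}, Version: types.NewVersionUnsafe("1.9.3")}, // add
    }
    // Expected: only nginx remains, and the boolean reports that something changed.
    return applyFixedInDiff(current, diff)
}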
|
||||
func createFeatureVersionNameMap(features []database.FeatureVersion) (map[string]database.FeatureVersion, []string) {
|
||||
m := make(map[string]database.FeatureVersion, 0)
|
||||
s := make([]string, 0, len(features))
|
||||
|
||||
for i := 0; i < len(features); i++ {
|
||||
featureVersion := features[i]
|
||||
m[featureVersion.Feature.Name] = featureVersion
|
||||
s = append(s, featureVersion.Feature.Name)
|
||||
}
|
||||
|
||||
return m, s
|
||||
}
|
||||
|
||||
// insertVulnerabilityFixedInFeatureVersions populates Vulnerability_FixedIn_Feature for the given
|
||||
// vulnerability with the specified database.FeatureVersion list and uses
|
||||
// linkVulnerabilityToFeatureVersions to propagate the changes on Vulnerability_FixedIn_Feature to
|
||||
// Vulnerability_Affects_FeatureVersion.
|
||||
func (pgSQL *pgSQL) insertVulnerabilityFixedInFeatureVersions(tx *sql.Tx, vulnerabilityID int, fixedIn []database.FeatureVersion) error {
|
||||
defer observeQueryTime("insertVulnerabilityFixedInFeatureVersions", "all", time.Now())
|
||||
|
||||
// Insert or find the Features.
|
||||
// TODO(Quentin-M): Batch me.
|
||||
var err error
|
||||
var features []*database.Feature
|
||||
for i := 0; i < len(fixedIn); i++ {
|
||||
features = append(features, &fixedIn[i].Feature)
|
||||
}
|
||||
for _, feature := range features {
|
||||
if feature.ID == 0 {
|
||||
if feature.ID, err = pgSQL.insertFeature(*feature); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Lock Vulnerability_Affects_FeatureVersion exclusively.
|
||||
// We want to prevent InsertFeatureVersion to modify it.
|
||||
promConcurrentLockVAFV.Inc()
|
||||
defer promConcurrentLockVAFV.Dec()
|
||||
t := time.Now()
|
||||
_, err = tx.Exec(lockVulnerabilityAffects)
|
||||
observeQueryTime("insertVulnerability", "lock", t)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("insertVulnerability.lockVulnerabilityAffects", err)
|
||||
}
|
||||
|
||||
for _, fv := range fixedIn {
|
||||
var fixedInID int
|
||||
|
||||
// Insert Vulnerability_FixedIn_Feature.
|
||||
err = tx.QueryRow(
|
||||
insertVulnerabilityFixedInFeature,
|
||||
vulnerabilityID, fv.Feature.ID,
|
||||
&fv.Version,
|
||||
).Scan(&fixedInID)
|
||||
|
||||
if err != nil {
|
||||
return handleError("insertVulnerabilityFixedInFeature", err)
|
||||
}
|
||||
|
||||
// Insert Vulnerability_Affects_FeatureVersion.
|
||||
err = linkVulnerabilityToFeatureVersions(tx, fixedInID, vulnerabilityID, fv.Feature.ID, fv.Version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func linkVulnerabilityToFeatureVersions(tx *sql.Tx, fixedInID, vulnerabilityID, featureID int, fixedInVersion types.Version) error {
|
||||
// Find every FeatureVersions of the Feature that the vulnerability affects.
|
||||
// TODO(Quentin-M): LIMIT
|
||||
rows, err := tx.Query(searchFeatureVersionByFeature, featureID)
|
||||
if err != nil {
|
||||
return handleError("searchFeatureVersionByFeature", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var affecteds []database.FeatureVersion
|
||||
for rows.Next() {
|
||||
var affected database.FeatureVersion
|
||||
|
||||
err := rows.Scan(&affected.ID, &affected.Version)
|
||||
if err != nil {
|
||||
return handleError("searchFeatureVersionByFeature.Scan()", err)
|
||||
}
|
||||
|
||||
if affected.Version.Compare(fixedInVersion) < 0 {
|
||||
// The version of the FeatureVersion is lower than the fixed version of this vulnerability,
|
||||
// thus, this FeatureVersion is affected by it.
|
||||
affecteds = append(affecteds, affected)
|
||||
}
|
||||
}
|
||||
if err = rows.Err(); err != nil {
|
||||
return handleError("searchFeatureVersionByFeature.Rows()", err)
|
||||
}
|
||||
rows.Close()
|
||||
|
||||
// Insert into Vulnerability_Affects_FeatureVersion.
|
||||
for _, affected := range affecteds {
|
||||
// TODO(Quentin-M): Batch me.
|
||||
_, err := tx.Exec(insertVulnerabilityAffectsFeatureVersion, vulnerabilityID,
|
||||
affected.ID, fixedInID)
|
||||
if err != nil {
|
||||
return handleError("insertVulnerabilityAffectsFeatureVersion", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) InsertVulnerabilityFixes(vulnerabilityNamespace, vulnerabilityName string, fixes []database.FeatureVersion) error {
|
||||
defer observeQueryTime("InsertVulnerabilityFixes", "all", time.Now())
|
||||
|
||||
v := database.Vulnerability{
|
||||
Name: vulnerabilityName,
|
||||
Namespace: database.Namespace{
|
||||
Name: vulnerabilityNamespace,
|
||||
},
|
||||
FixedIn: fixes,
|
||||
}
|
||||
|
||||
return pgSQL.insertVulnerability(v, true, true)
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) DeleteVulnerabilityFix(vulnerabilityNamespace, vulnerabilityName, featureName string) error {
|
||||
defer observeQueryTime("DeleteVulnerabilityFix", "all", time.Now())
|
||||
|
||||
v := database.Vulnerability{
|
||||
Name: vulnerabilityName,
|
||||
Namespace: database.Namespace{
|
||||
Name: vulnerabilityNamespace,
|
||||
},
|
||||
FixedIn: []database.FeatureVersion{
|
||||
{
|
||||
Feature: database.Feature{
|
||||
Name: featureName,
|
||||
Namespace: database.Namespace{
|
||||
Name: vulnerabilityNamespace,
|
||||
},
|
||||
},
|
||||
Version: types.MinVersion,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return pgSQL.insertVulnerability(v, true, true)
|
||||
}
|
||||
|
||||
func (pgSQL *pgSQL) DeleteVulnerability(namespaceName, name string) error {
|
||||
defer observeQueryTime("DeleteVulnerability", "all", time.Now())
|
||||
|
||||
// Begin transaction.
|
||||
tx, err := pgSQL.Begin()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("DeleteVulnerability.Begin()", err)
|
||||
}
|
||||
|
||||
var vulnerabilityID int
|
||||
err = tx.QueryRow(removeVulnerability, namespaceName, name).Scan(&vulnerabilityID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("removeVulnerability", err)
|
||||
}
|
||||
|
||||
// Create a notification.
|
||||
err = createNotification(tx, vulnerabilityID, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit transaction.
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return handleError("DeleteVulnerability.Commit()", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
46
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/utils/errors/errors.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package errors defines error types that are used in several modules
package errors

import "errors"

var (
    // ErrFilesystem occurs when a filesystem interaction fails.
    ErrFilesystem = errors.New("something went wrong when interacting with the fs")

    // ErrCouldNotDownload occurs when a download fails.
    ErrCouldNotDownload = errors.New("could not download requested resource")

    // ErrNotFound occurs when a resource could not be found.
    ErrNotFound = errors.New("the resource cannot be found")

    // ErrCouldNotParse is returned when a fetcher fails to parse the update data.
    ErrCouldNotParse = errors.New("updater/fetchers: could not parse")
)

// ErrBadRequest occurs when a method has been passed an inappropriate argument.
type ErrBadRequest struct {
    s string
}

// NewBadRequestError instantiates a ErrBadRequest with the specified message.
func NewBadRequestError(message string) error {
    return &ErrBadRequest{s: message}
}

func (e *ErrBadRequest) Error() string {
    return e.s
}
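Callers distinguish bad input from other failures with a type assertion on *ErrBadRequest, which is what the HTTP helpers further down do. A minimal illustrative sketch:

package example

import cerrors "github.com/coreos/clair/utils/errors"

// isBadRequest reports whether err was produced by NewBadRequestError.
// Illustrative sketch, not vendored code.
func isBadRequest(err error) bool {
    _, ok := err.(*cerrors.ErrBadRequest)
    return ok
}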
39
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/utils/exec.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package utils simply defines utility functions and types.
package utils

import (
    "bytes"
    "os/exec"
)

// Exec runs the given binary with arguments
func Exec(dir string, bin string, args ...string) ([]byte, error) {
    _, err := exec.LookPath(bin)
    if err != nil {
        return nil, err
    }

    cmd := exec.Command(bin, args...)
    cmd.Dir = dir

    var buf bytes.Buffer
    cmd.Stdout = &buf
    cmd.Stderr = &buf

    err = cmd.Run()
    return buf.Bytes(), err
}
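A hedged usage sketch — the command and working directory are placeholders, and both stdout and stderr come back in the same byte slice:

package example

import (
    "fmt"

    "github.com/coreos/clair/utils"
)

func listTmp() {
    // "ls -l" is a placeholder command; output is combined stdout+stderr.
    out, err := utils.Exec("/tmp", "ls", "-l")
    if err != nil {
        fmt.Println("command failed:", err)
    }
    fmt.Printf("%s", out)
}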
77
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/utils/http/http.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package http provides utility functions for HTTP servers and clients.
|
||||
package http
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/worker"
|
||||
)
|
||||
|
||||
// MaxBodySize is the maximum number of bytes that ParseHTTPBody reads from an http.Request.Body.
|
||||
const MaxBodySize int64 = 1048576
|
||||
|
||||
// WriteHTTP writes a JSON-encoded object to an http.ResponseWriter, as well as
// an HTTP status code.
|
||||
func WriteHTTP(w http.ResponseWriter, httpStatus int, v interface{}) {
|
||||
w.WriteHeader(httpStatus)
|
||||
if v != nil {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
result, _ := json.Marshal(v)
|
||||
w.Write(result)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteHTTPError writes an error, wrapped in the Message field of a JSON-encoded
|
||||
// object to an http.ResponseWriter, as well as an HTTP status code.
|
||||
// If the status code is 0, WriteHTTPError tries to guess the proper HTTP status
|
||||
// code from the error type.
|
||||
func WriteHTTPError(w http.ResponseWriter, httpStatus int, err error) {
|
||||
if httpStatus == 0 {
|
||||
httpStatus = http.StatusInternalServerError
|
||||
// Try to guess the http status code from the error type
|
||||
if _, isBadRequestError := err.(*cerrors.ErrBadRequest); isBadRequestError {
|
||||
httpStatus = http.StatusBadRequest
|
||||
} else {
|
||||
switch err {
|
||||
case cerrors.ErrNotFound:
|
||||
httpStatus = http.StatusNotFound
|
||||
case database.ErrBackendException:
|
||||
httpStatus = http.StatusServiceUnavailable
|
||||
case worker.ErrParentUnknown, worker.ErrUnsupported, utils.ErrCouldNotExtract, utils.ErrExtractedFileTooBig:
|
||||
httpStatus = http.StatusBadRequest
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
WriteHTTP(w, httpStatus, struct{ Message string }{Message: err.Error()})
|
||||
}
|
||||
|
||||
// ParseHTTPBody reads a JSON-encoded body from a http.Request and unmarshals it
|
||||
// into the provided object.
|
||||
func ParseHTTPBody(r *http.Request, v interface{}) (int, error) {
|
||||
defer r.Body.Close()
|
||||
err := json.NewDecoder(io.LimitReader(r.Body, MaxBodySize)).Decode(v)
|
||||
if err != nil {
|
||||
return http.StatusUnsupportedMediaType, err
|
||||
}
|
||||
return 0, nil
|
||||
}
|
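A hedged sketch of how ParseHTTPBody, WriteHTTP and WriteHTTPError fit together in a handler; the request struct and route below are hypothetical, not taken from Clair's API:

package main

import (
	"net/http"

	httputils "github.com/coreos/clair/utils/http"
)

// layerEnvelope is a hypothetical request body used only for this sketch.
type layerEnvelope struct {
	Name string
}

func addLayer(w http.ResponseWriter, r *http.Request) {
	var body layerEnvelope
	if status, err := httputils.ParseHTTPBody(r, &body); err != nil {
		// status suggests an HTTP status code; 0 lets WriteHTTPError guess one.
		httputils.WriteHTTPError(w, status, err)
		return
	}
	httputils.WriteHTTP(w, http.StatusCreated, body)
}

func main() {
	http.HandleFunc("/v1/layers", addLayer)
	http.ListenAndServe(":6060", nil)
}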
13
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/utils/prometheus.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// PrometheusObserveTimeMilliseconds observes the elapsed time since start, in milliseconds,
|
||||
// on the specified Prometheus Histogram.
|
||||
func PrometheusObserveTimeMilliseconds(h prometheus.Histogram, start time.Time) {
|
||||
h.Observe(float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond))
|
||||
}
|
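A small sketch of the intended call pattern (the histogram name here is made up for illustration):

package main

import (
	"time"

	"github.com/coreos/clair/utils"
	"github.com/prometheus/client_golang/prometheus"
)

var workDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name: "example_work_duration_milliseconds",
	Help: "Duration of a hypothetical unit of work, in milliseconds.",
})

func doWork() {
	// Observe the elapsed time in milliseconds when the function returns.
	defer utils.PrometheusObserveTimeMilliseconds(workDuration, time.Now())
	time.Sleep(50 * time.Millisecond) // stand-in for real work
}

func main() {
	prometheus.MustRegister(workDuration)
	doWork()
}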
65
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/utils/stopper.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Stopper eases the graceful termination of a group of goroutines
|
||||
type Stopper struct {
|
||||
wg sync.WaitGroup
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
// NewStopper initializes a new Stopper instance
|
||||
func NewStopper() *Stopper {
|
||||
return &Stopper{stop: make(chan struct{}, 0)}
|
||||
}
|
||||
|
||||
// Begin indicates that a new goroutine has started.
|
||||
func (s *Stopper) Begin() {
|
||||
s.wg.Add(1)
|
||||
}
|
||||
|
||||
// End indicates that a goroutine has stopped.
|
||||
func (s *Stopper) End() {
|
||||
s.wg.Done()
|
||||
}
|
||||
|
||||
// Chan returns the channel on which goroutines could listen to determine if
|
||||
// they should stop. The channel is closed when Stop() is called.
|
||||
func (s *Stopper) Chan() chan struct{} {
|
||||
return s.stop
|
||||
}
|
||||
|
||||
// Sleep puts the current goroutine to sleep for the duration d.
// Sleep is interrupted if the goroutine is asked to stop,
// in which case it returns false.
|
||||
func (s *Stopper) Sleep(d time.Duration) bool {
|
||||
select {
|
||||
case <-time.After(d):
|
||||
return true
|
||||
case <-s.stop:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Stop asks every goroutine to end.
|
||||
func (s *Stopper) Stop() {
|
||||
close(s.stop)
|
||||
s.wg.Wait()
|
||||
}
|
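A sketch of the intended usage, assuming a single background worker (not part of the vendored code):

package main

import (
	"log"
	"time"

	"github.com/coreos/clair/utils"
)

func worker(st *utils.Stopper) {
	defer st.End()
	for {
		log.Println("working")
		// Sleep returns false as soon as Stop() is called, telling the
		// goroutine to terminate.
		if !st.Sleep(time.Second) {
			return
		}
	}
}

func main() {
	st := utils.NewStopper()
	st.Begin()
	go worker(st)

	time.Sleep(3 * time.Second)
	st.Stop() // closes the stop channel and waits for the worker to end
}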
75
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/utils/string.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import "regexp"
|
||||
|
||||
var urlParametersRegexp = regexp.MustCompile(`(\?|\&)([^=]+)\=([^ &]+)`)
|
||||
|
||||
// CleanURL removes all query parameters from a URL.
|
||||
func CleanURL(str string) string {
|
||||
return urlParametersRegexp.ReplaceAllString(str, "")
|
||||
}
|
||||
|
||||
// Contains looks for a string in a slice of strings and returns whether
// the string exists.
|
||||
func Contains(needle string, haystack []string) bool {
|
||||
for _, h := range haystack {
|
||||
if h == needle {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CompareStringLists returns the strings that are present in X but not in Y.
|
||||
func CompareStringLists(X, Y []string) []string {
|
||||
m := make(map[string]bool)
|
||||
|
||||
for _, y := range Y {
|
||||
m[y] = true
|
||||
}
|
||||
|
||||
diff := []string{}
|
||||
for _, x := range X {
|
||||
if m[x] {
|
||||
continue
|
||||
}
|
||||
|
||||
diff = append(diff, x)
|
||||
m[x] = true
|
||||
}
|
||||
|
||||
return diff
|
||||
}
|
||||
|
||||
// CompareStringListsInBoth returns the strings that are present in both X and Y.
|
||||
func CompareStringListsInBoth(X, Y []string) []string {
|
||||
m := make(map[string]struct{})
|
||||
|
||||
for _, y := range Y {
|
||||
m[y] = struct{}{}
|
||||
}
|
||||
|
||||
diff := []string{}
|
||||
for _, x := range X {
|
||||
if _, e := m[x]; e {
|
||||
diff = append(diff, x)
|
||||
delete(m, x)
|
||||
}
|
||||
}
|
||||
|
||||
return diff
|
||||
}
|
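For illustration, how these helpers behave on small inputs (the values are made up):

package main

import (
	"fmt"

	"github.com/coreos/clair/utils"
)

func main() {
	// Query parameters are stripped.
	fmt.Println(utils.CleanURL("https://example.com/db?user=x&password=y")) // https://example.com/db

	fmt.Println(utils.Contains("b", []string{"a", "b"})) // true

	// Elements of X that are absent from Y.
	fmt.Println(utils.CompareStringLists([]string{"a", "b"}, []string{"b"})) // [a]

	// Elements present in both X and Y.
	fmt.Println(utils.CompareStringListsInBoth([]string{"a", "b"}, []string{"b", "c"})) // [b]
}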
181
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/utils/tar.go
generated
vendored
Normal file
@ -0,0 +1,181 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCouldNotExtract occurs when an extraction fails.
|
||||
ErrCouldNotExtract = errors.New("utils: could not extract the archive")
|
||||
|
||||
// ErrExtractedFileTooBig occurs when a file to extract is too big.
|
||||
ErrExtractedFileTooBig = errors.New("utils: could not extract one or more files from the archive: file too big")
|
||||
|
||||
readLen = 6 // max bytes to sniff
|
||||
|
||||
gzipHeader = []byte{0x1f, 0x8b}
|
||||
bzip2Header = []byte{0x42, 0x5a, 0x68}
|
||||
xzHeader = []byte{0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00}
|
||||
)
|
||||
|
||||
// XzReader is an io.ReadCloser which decompresses xz compressed data.
|
||||
type XzReader struct {
|
||||
io.ReadCloser
|
||||
cmd *exec.Cmd
|
||||
closech chan error
|
||||
}
|
||||
|
||||
// NewXzReader shells out to a command line xz executable (if
|
||||
// available) to decompress the given io.Reader using the xz
|
||||
// compression format and returns an *XzReader.
|
||||
// It is the caller's responsibility to call Close on the XzReader when done.
|
||||
func NewXzReader(r io.Reader) (*XzReader, error) {
|
||||
rpipe, wpipe := io.Pipe()
|
||||
ex, err := exec.LookPath("xz")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cmd := exec.Command(ex, "--decompress", "--stdout")
|
||||
|
||||
closech := make(chan error)
|
||||
|
||||
cmd.Stdin = r
|
||||
cmd.Stdout = wpipe
|
||||
|
||||
go func() {
|
||||
err := cmd.Run()
|
||||
wpipe.CloseWithError(err)
|
||||
closech <- err
|
||||
}()
|
||||
|
||||
return &XzReader{rpipe, cmd, closech}, nil
|
||||
}
|
||||
|
||||
func (r *XzReader) Close() error {
|
||||
r.ReadCloser.Close()
|
||||
r.cmd.Process.Kill()
|
||||
return <-r.closech
|
||||
}
|
||||
|
||||
// TarReadCloser embeds a *tar.Reader and the related io.Closer
|
||||
// It is the caller's responsibility to call Close on TarReadCloser when
|
||||
// done.
|
||||
type TarReadCloser struct {
|
||||
*tar.Reader
|
||||
io.Closer
|
||||
}
|
||||
|
||||
func (r *TarReadCloser) Close() error {
|
||||
return r.Closer.Close()
|
||||
}
|
||||
|
||||
// SelectivelyExtractArchive extracts the specified files and folders
// from tar data (plain or gzip/bzip2/xz compressed) read from the given reader and stores them in a map indexed by file path
|
||||
func SelectivelyExtractArchive(r io.Reader, prefix string, toExtract []string, maxFileSize int64) (map[string][]byte, error) {
|
||||
data := make(map[string][]byte)
|
||||
|
||||
// Create a tar reader, transparently handling gzip, bzip2 and xz compression
|
||||
tr, err := getTarReader(r)
|
||||
if err != nil {
|
||||
return data, ErrCouldNotExtract
|
||||
}
|
||||
defer tr.Close()
|
||||
|
||||
// For each element in the archive
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return data, ErrCouldNotExtract
|
||||
}
|
||||
|
||||
// Get element filename
|
||||
filename := hdr.Name
|
||||
filename = strings.TrimPrefix(filename, "./")
|
||||
if prefix != "" {
|
||||
filename = strings.TrimPrefix(filename, prefix)
|
||||
}
|
||||
|
||||
// Determine if we should extract the element
|
||||
toBeExtracted := false
|
||||
for _, s := range toExtract {
|
||||
if strings.HasPrefix(filename, s) {
|
||||
toBeExtracted = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if toBeExtracted {
|
||||
// File size limit
|
||||
if maxFileSize > 0 && hdr.Size > maxFileSize {
|
||||
return data, ErrExtractedFileTooBig
|
||||
}
|
||||
|
||||
// Extract the element
|
||||
if hdr.Typeflag == tar.TypeSymlink || hdr.Typeflag == tar.TypeLink || hdr.Typeflag == tar.TypeReg {
|
||||
d, _ := ioutil.ReadAll(tr)
|
||||
data[filename] = d
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// getTarReader returns a TarReadCloser associated with the specified io.Reader.
|
||||
//
|
||||
// Gzip/Bzip2/XZ detection is done by using the magic numbers:
|
||||
// Gzip: the first two bytes should be 0x1f and 0x8b. Defined in the RFC1952.
|
||||
// Bzip2: the first three bytes should be 0x42, 0x5a and 0x68. No RFC.
|
||||
// XZ: the first six bytes should be 0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00. No RFC.
|
||||
func getTarReader(r io.Reader) (*TarReadCloser, error) {
|
||||
br := bufio.NewReader(r)
|
||||
header, err := br.Peek(readLen)
|
||||
if err == nil {
|
||||
switch {
|
||||
case bytes.HasPrefix(header, gzipHeader):
|
||||
gr, err := gzip.NewReader(br)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &TarReadCloser{tar.NewReader(gr), gr}, nil
|
||||
case bytes.HasPrefix(header, bzip2Header):
|
||||
bzip2r := ioutil.NopCloser(bzip2.NewReader(br))
|
||||
return &TarReadCloser{tar.NewReader(bzip2r), bzip2r}, nil
|
||||
case bytes.HasPrefix(header, xzHeader):
|
||||
xzr, err := NewXzReader(br)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &TarReadCloser{tar.NewReader(xzr), xzr}, nil
|
||||
}
|
||||
}
|
||||
|
||||
dr := ioutil.NopCloser(br)
|
||||
return &TarReadCloser{tar.NewReader(dr), dr}, nil
|
||||
}
|
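A usage sketch, assuming a local layer tarball at an illustrative path:

package main

import (
	"fmt"
	"os"

	"github.com/coreos/clair/utils"
)

func main() {
	f, err := os.Open("/tmp/layer.tar.gz") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Extract only the dpkg status file, rejecting any file bigger than 32 MiB.
	data, err := utils.SelectivelyExtractArchive(f, "", []string{"var/lib/dpkg/status"}, 32*1024*1024)
	if err != nil {
		panic(err)
	}
	fmt.Printf("extracted %d bytes\n", len(data["var/lib/dpkg/status"]))
}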
110
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/utils/types/priority.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package types defines useful types that are used in database models.
|
||||
package types
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Priority defines a vulnerability priority
|
||||
type Priority string
|
||||
|
||||
const (
|
||||
// Unknown is either a security problem that has not been
|
||||
// assigned to a priority yet or a priority that our system
|
||||
// did not recognize
|
||||
Unknown Priority = "Unknown"
|
||||
// Negligible is technically a security problem, but is
|
||||
// only theoretical in nature, requires a very special
|
||||
// situation, has almost no install base, or does no real
|
||||
// damage. These tend not to get backported from upstreams,
|
||||
// and will likely not be included in security updates unless
|
||||
// there is an easy fix and some other issue causes an update.
|
||||
Negligible Priority = "Negligible"
|
||||
// Low is a security problem, but is hard to
|
||||
// exploit due to environment, requires a user-assisted
|
||||
// attack, a small install base, or does very little damage.
|
||||
// These tend to be included in security updates only when
|
||||
// higher priority issues require an update, or if many
|
||||
// low priority issues have built up.
|
||||
Low Priority = "Low"
|
||||
// Medium is a real security problem, and is exploitable
|
||||
// for many people. Includes network daemon denial of service
|
||||
// attacks, cross-site scripting, and gaining user privileges.
|
||||
// Updates should be made soon for this priority of issue.
|
||||
Medium Priority = "Medium"
|
||||
// High is a real problem, exploitable for many people in a default
|
||||
// installation. Includes serious remote denial of services,
|
||||
// local root privilege escalations, or data loss.
|
||||
High Priority = "High"
|
||||
// Critical is a world-burning problem, exploitable for nearly all people
|
||||
// in a default installation of Linux. Includes remote root
|
||||
// privilege escalations, or massive data loss.
|
||||
Critical Priority = "Critical"
|
||||
// Defcon1 is a Critical problem which has been manually highlighted by
|
||||
// the team. It requires an immediate attention.
|
||||
Defcon1 Priority = "Defcon1"
|
||||
)
|
||||
|
||||
// Priorities lists all known priorities, ordered from lower to higher
|
||||
var Priorities = []Priority{Unknown, Negligible, Low, Medium, High, Critical, Defcon1}
|
||||
|
||||
// IsValid determines if the priority is a valid one
|
||||
func (p Priority) IsValid() bool {
|
||||
for _, pp := range Priorities {
|
||||
if p == pp {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Compare compares two priorities
|
||||
func (p Priority) Compare(p2 Priority) int {
|
||||
var i1, i2 int
|
||||
|
||||
for i1 = 0; i1 < len(Priorities); i1 = i1 + 1 {
|
||||
if p == Priorities[i1] {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i2 = 0; i2 < len(Priorities); i2 = i2 + 1 {
|
||||
if p2 == Priorities[i2] {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return i1 - i2
|
||||
}
|
||||
|
||||
func (p *Priority) Scan(value interface{}) error {
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return errors.New("could not scan a Priority from a non-string input")
|
||||
}
|
||||
*p = Priority(string(val))
|
||||
if !p.IsValid() {
|
||||
return fmt.Errorf("could not scan an invalid Priority (%v)", p)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Priority) Value() (driver.Value, error) {
|
||||
return string(*p), nil
|
||||
}
|
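The ordering defined above can be exercised like this (a small sketch, not vendored code):

package main

import (
	"fmt"

	"github.com/coreos/clair/utils/types"
)

func main() {
	fmt.Println(types.Priority("High").IsValid())            // true
	fmt.Println(types.High.Compare(types.Low) > 0)           // true: High is more severe than Low
	fmt.Println(types.Unknown.Compare(types.Negligible) < 0) // true: Unknown sorts first
}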
296
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/utils/types/version.go
generated
vendored
Normal file
@ -0,0 +1,296 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// Version represents a package version
|
||||
type Version struct {
|
||||
epoch int
|
||||
version string
|
||||
revision string
|
||||
}
|
||||
|
||||
var (
|
||||
// MinVersion is a special package version which is always sorted first
|
||||
MinVersion = Version{version: "#MINV#"}
|
||||
// MaxVersion is a special package version which is always sorted last
|
||||
MaxVersion = Version{version: "#MAXV#"}
|
||||
|
||||
versionAllowedSymbols = []rune{'.', '-', '+', '~', ':', '_'}
|
||||
revisionAllowedSymbols = []rune{'.', '+', '~', '_'}
|
||||
)
|
||||
|
||||
// NewVersion function parses a string into a Version struct which can be compared
|
||||
//
|
||||
// The implementation is based on http://man.he.net/man5/deb-version
|
||||
// on https://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version
|
||||
//
|
||||
// It uses the dpkg-1.17.25's algorithm (lib/parsehelp.c)
|
||||
func NewVersion(str string) (Version, error) {
|
||||
var version Version
|
||||
|
||||
// Trim leading and trailing space
|
||||
str = strings.TrimSpace(str)
|
||||
|
||||
if len(str) == 0 {
|
||||
return Version{}, errors.New("Version string is empty")
|
||||
}
|
||||
|
||||
// Max/Min versions
|
||||
if str == MaxVersion.String() {
|
||||
return MaxVersion, nil
|
||||
}
|
||||
if str == MinVersion.String() {
|
||||
return MinVersion, nil
|
||||
}
|
||||
|
||||
// Find epoch
|
||||
sepepoch := strings.Index(str, ":")
|
||||
if sepepoch > -1 {
|
||||
intepoch, err := strconv.Atoi(str[:sepepoch])
|
||||
if err == nil {
|
||||
version.epoch = intepoch
|
||||
} else {
|
||||
return Version{}, errors.New("epoch in version is not a number")
|
||||
}
|
||||
if intepoch < 0 {
|
||||
return Version{}, errors.New("epoch in version is negative")
|
||||
}
|
||||
} else {
|
||||
version.epoch = 0
|
||||
}
|
||||
|
||||
// Find version / revision
|
||||
seprevision := strings.LastIndex(str, "-")
|
||||
if seprevision > -1 {
|
||||
version.version = str[sepepoch+1 : seprevision]
|
||||
version.revision = str[seprevision+1:]
|
||||
} else {
|
||||
version.version = str[sepepoch+1:]
|
||||
version.revision = ""
|
||||
}
|
||||
// Verify format
|
||||
if len(version.version) == 0 {
|
||||
return Version{}, errors.New("No version")
|
||||
}
|
||||
|
||||
if !unicode.IsDigit(rune(version.version[0])) {
|
||||
return Version{}, errors.New("version does not start with digit")
|
||||
}
|
||||
|
||||
for i := 0; i < len(version.version); i = i + 1 {
|
||||
r := rune(version.version[i])
|
||||
if !unicode.IsDigit(r) && !unicode.IsLetter(r) && !containsRune(versionAllowedSymbols, r) {
|
||||
return Version{}, errors.New("invalid character in version")
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(version.revision); i = i + 1 {
|
||||
r := rune(version.revision[i])
|
||||
if !unicode.IsDigit(r) && !unicode.IsLetter(r) && !containsRune(revisionAllowedSymbols, r) {
|
||||
return Version{}, errors.New("invalid character in revision")
|
||||
}
|
||||
}
|
||||
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// NewVersionUnsafe is just a wrapper around NewVersion that ignores potential
// parsing errors. Useful for testing purposes.
|
||||
func NewVersionUnsafe(str string) Version {
|
||||
v, _ := NewVersion(str)
|
||||
return v
|
||||
}
|
||||
|
||||
// Compare compares two Debian-like package versions
|
||||
//
|
||||
// The implementation is based on http://man.he.net/man5/deb-version
|
||||
// on https://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version
|
||||
//
|
||||
// It uses the dpkg-1.17.25's algorithm (lib/version.c)
|
||||
func (a Version) Compare(b Version) int {
|
||||
// Quick check
|
||||
if a == b {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Max/Min comparison
|
||||
if a == MinVersion || b == MaxVersion {
|
||||
return -1
|
||||
}
|
||||
if b == MinVersion || a == MaxVersion {
|
||||
return 1
|
||||
}
|
||||
|
||||
// Compare epochs
|
||||
if a.epoch > b.epoch {
|
||||
return 1
|
||||
}
|
||||
if a.epoch < b.epoch {
|
||||
return -1
|
||||
}
|
||||
|
||||
// Compare version
|
||||
rc := verrevcmp(a.version, b.version)
|
||||
if rc != 0 {
|
||||
return signum(rc)
|
||||
}
|
||||
|
||||
// Compare revision
|
||||
return signum(verrevcmp(a.revision, b.revision))
|
||||
}
|
||||
|
||||
// String returns the string representation of a Version
|
||||
func (v Version) String() (s string) {
|
||||
if v.epoch != 0 {
|
||||
s = strconv.Itoa(v.epoch) + ":"
|
||||
}
|
||||
s += v.version
|
||||
if v.revision != "" {
|
||||
s += "-" + v.revision
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (v Version) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(v.String())
|
||||
}
|
||||
|
||||
func (v *Version) UnmarshalJSON(b []byte) (err error) {
|
||||
var str string
|
||||
json.Unmarshal(b, &str)
|
||||
vp := NewVersionUnsafe(str)
|
||||
*v = vp
|
||||
return
|
||||
}
|
||||
|
||||
func (v *Version) Scan(value interface{}) (err error) {
|
||||
val, ok := value.([]byte)
|
||||
if !ok {
|
||||
return errors.New("could not scan a Version from a non-string input")
|
||||
}
|
||||
*v, err = NewVersion(string(val))
|
||||
return
|
||||
}
|
||||
|
||||
func (v *Version) Value() (driver.Value, error) {
|
||||
return v.String(), nil
|
||||
}
|
||||
|
||||
func verrevcmp(t1, t2 string) int {
|
||||
t1, rt1 := nextRune(t1)
|
||||
t2, rt2 := nextRune(t2)
|
||||
|
||||
for rt1 != nil || rt2 != nil {
|
||||
firstDiff := 0
|
||||
|
||||
for (rt1 != nil && !unicode.IsDigit(*rt1)) || (rt2 != nil && !unicode.IsDigit(*rt2)) {
|
||||
ac := 0
|
||||
bc := 0
|
||||
if rt1 != nil {
|
||||
ac = order(*rt1)
|
||||
}
|
||||
if rt2 != nil {
|
||||
bc = order(*rt2)
|
||||
}
|
||||
|
||||
if ac != bc {
|
||||
return ac - bc
|
||||
}
|
||||
|
||||
t1, rt1 = nextRune(t1)
|
||||
t2, rt2 = nextRune(t2)
|
||||
}
|
||||
for rt1 != nil && *rt1 == '0' {
|
||||
t1, rt1 = nextRune(t1)
|
||||
}
|
||||
for rt2 != nil && *rt2 == '0' {
|
||||
t2, rt2 = nextRune(t2)
|
||||
}
|
||||
for rt1 != nil && unicode.IsDigit(*rt1) && rt2 != nil && unicode.IsDigit(*rt2) {
|
||||
if firstDiff == 0 {
|
||||
firstDiff = int(*rt1) - int(*rt2)
|
||||
}
|
||||
t1, rt1 = nextRune(t1)
|
||||
t2, rt2 = nextRune(t2)
|
||||
}
|
||||
if rt1 != nil && unicode.IsDigit(*rt1) {
|
||||
return 1
|
||||
}
|
||||
if rt2 != nil && unicode.IsDigit(*rt2) {
|
||||
return -1
|
||||
}
|
||||
if firstDiff != 0 {
|
||||
return firstDiff
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// order compares runes using a modified ASCII table
|
||||
// so that letters are sorted earlier than non-letters
|
||||
// and so that a tilde sorts before anything else
|
||||
func order(r rune) int {
|
||||
if unicode.IsDigit(r) {
|
||||
return 0
|
||||
}
|
||||
|
||||
if unicode.IsLetter(r) {
|
||||
return int(r)
|
||||
}
|
||||
|
||||
if r == '~' {
|
||||
return -1
|
||||
}
|
||||
|
||||
return int(r) + 256
|
||||
}
|
||||
|
||||
func nextRune(str string) (string, *rune) {
|
||||
if len(str) >= 1 {
|
||||
r := rune(str[0])
|
||||
return str[1:], &r
|
||||
}
|
||||
return str, nil
|
||||
}
|
||||
|
||||
func containsRune(s []rune, e rune) bool {
|
||||
for _, a := range s {
|
||||
if a == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func signum(a int) int {
|
||||
switch {
|
||||
case a < 0:
|
||||
return -1
|
||||
case a > 0:
|
||||
return +1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
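A short sketch exercising the dpkg-style comparison rules described above (an epoch outranks everything, and a tilde sorts before the final release):

package main

import (
	"fmt"

	"github.com/coreos/clair/utils/types"
)

func main() {
	a := types.NewVersionUnsafe("2.7.6-8")
	b := types.NewVersionUnsafe("1:1.0")       // epoch 1 outranks any epoch-0 version
	c := types.NewVersionUnsafe("2.7.6~rc1-1") // a tilde sorts before the final release

	fmt.Println(a.Compare(b) < 0)                // true
	fmt.Println(c.Compare(a) < 0)                // true
	fmt.Println(types.MinVersion.Compare(a) < 0) // true: MinVersion always sorts first
}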
105
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/worker/detectors/data.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package detectors exposes functions to register and use container
|
||||
// information extractors.
|
||||
package detectors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
// The DataDetector interface defines a way to detect the required data from input path
|
||||
type DataDetector interface {
|
||||
// Supported checks if the input path and format are supported by the underlying detector
|
||||
Supported(path string, format string) bool
|
||||
// Detect detects the required data from input path
|
||||
Detect(layerReader io.ReadCloser, toExtract []string, maxFileSize int64) (data map[string][]byte, err error)
|
||||
}
|
||||
|
||||
var (
|
||||
dataDetectorsLock sync.Mutex
|
||||
dataDetectors = make(map[string]DataDetector)
|
||||
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "detectors")
|
||||
|
||||
// ErrCouldNotFindLayer is returned when we could not download or open the layer file.
|
||||
ErrCouldNotFindLayer = cerrors.NewBadRequestError("could not find layer")
|
||||
)
|
||||
|
||||
// RegisterDataDetector provides a way to dynamically register an implementation of a
|
||||
// DataDetector.
|
||||
//
|
||||
// If RegisterDataDetector is called twice with the same name, if the DataDetector is nil,
|
||||
// or if the name is blank, it panics.
|
||||
func RegisterDataDetector(name string, f DataDetector) {
|
||||
if name == "" {
|
||||
panic("Could not register a DataDetector with an empty name")
|
||||
}
|
||||
if f == nil {
|
||||
panic("Could not register a nil DataDetector")
|
||||
}
|
||||
|
||||
dataDetectorsLock.Lock()
|
||||
defer dataDetectorsLock.Unlock()
|
||||
|
||||
if _, alreadyExists := dataDetectors[name]; alreadyExists {
|
||||
panic(fmt.Sprintf("Detector '%s' is already registered", name))
|
||||
}
|
||||
dataDetectors[name] = f
|
||||
}
|
||||
|
||||
// DetectData finds the Data of the layer by using every registered DataDetector
|
||||
func DetectData(path string, format string, toExtract []string, maxFileSize int64) (data map[string][]byte, err error) {
|
||||
var layerReader io.ReadCloser
|
||||
if strings.HasPrefix(path, "http://") || strings.HasPrefix(path, "https://") {
|
||||
r, err := http.Get(path)
|
||||
if err != nil {
|
||||
log.Warningf("could not download layer: %s", err)
|
||||
return nil, ErrCouldNotFindLayer
|
||||
}
|
||||
if math.Floor(float64(r.StatusCode/100)) != 2 {
|
||||
log.Warningf("could not download layer: got status code %d, expected 2XX", r.StatusCode)
|
||||
return nil, ErrCouldNotFindLayer
|
||||
}
|
||||
layerReader = r.Body
|
||||
} else {
|
||||
layerReader, err = os.Open(path)
|
||||
if err != nil {
|
||||
return nil, ErrCouldNotFindLayer
|
||||
}
|
||||
}
|
||||
defer layerReader.Close()
|
||||
|
||||
for _, detector := range dataDetectors {
|
||||
if detector.Supported(path, format) {
|
||||
data, err = detector.Detect(layerReader, toExtract, maxFileSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, cerrors.NewBadRequestError(fmt.Sprintf("unsupported image format '%s'", format))
|
||||
}
|
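A sketch of how a caller might use this registry; the blank imports and the layer path are assumptions made for the example:

package main

import (
	"fmt"

	"github.com/coreos/clair/worker/detectors"

	// Blank imports register the ACI and Docker data detectors via their init().
	_ "github.com/coreos/clair/worker/detectors/data/aci"
	_ "github.com/coreos/clair/worker/detectors/data/docker"
)

func main() {
	// The path may also be an http(s) URL; this local path is illustrative.
	data, err := detectors.DetectData("/tmp/layer.tar", "Docker", []string{"var/lib/dpkg/status"}, 32*1024*1024)
	if err != nil {
		panic(err)
	}
	fmt.Printf("extracted %d file(s)\n", len(data))
}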
41
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/worker/detectors/data/aci/aci.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package aci
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
)
|
||||
|
||||
// ACIDataDetector implements DataDetector and detects layer data in 'aci' format
|
||||
type ACIDataDetector struct{}
|
||||
|
||||
func init() {
|
||||
detectors.RegisterDataDetector("aci", &ACIDataDetector{})
|
||||
}
|
||||
|
||||
func (detector *ACIDataDetector) Supported(path string, format string) bool {
|
||||
if strings.EqualFold(format, "ACI") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (detector *ACIDataDetector) Detect(layerReader io.ReadCloser, toExtract []string, maxFileSize int64) (map[string][]byte, error) {
|
||||
return utils.SelectivelyExtractArchive(layerReader, "rootfs/", toExtract, maxFileSize)
|
||||
}
|
41
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/worker/detectors/data/docker/docker.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
)
|
||||
|
||||
// DockerDataDetector implements DataDetector and detects layer data in 'Docker' format
|
||||
type DockerDataDetector struct{}
|
||||
|
||||
func init() {
|
||||
detectors.RegisterDataDetector("Docker", &DockerDataDetector{})
|
||||
}
|
||||
|
||||
func (detector *DockerDataDetector) Supported(path string, format string) bool {
|
||||
if strings.EqualFold(format, "Docker") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (detector *DockerDataDetector) Detect(layerReader io.ReadCloser, toExtract []string, maxFileSize int64) (map[string][]byte, error) {
|
||||
return utils.SelectivelyExtractArchive(layerReader, "", toExtract, maxFileSize)
|
||||
}
|
115
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/worker/detectors/feature/dpkg/dpkg.go
generated
vendored
Normal file
@ -0,0 +1,115 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dpkg
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "worker/detectors/packages")
|
||||
|
||||
dpkgSrcCaptureRegexp = regexp.MustCompile(`Source: (?P<name>[^\s]*)( \((?P<version>.*)\))?`)
|
||||
dpkgSrcCaptureRegexpNames = dpkgSrcCaptureRegexp.SubexpNames()
|
||||
)
|
||||
|
||||
// DpkgFeaturesDetector implements FeaturesDetector and detects dpkg packages
|
||||
type DpkgFeaturesDetector struct{}
|
||||
|
||||
func init() {
|
||||
detectors.RegisterFeaturesDetector("dpkg", &DpkgFeaturesDetector{})
|
||||
}
|
||||
|
||||
// Detect detects packages using var/lib/dpkg/status from the input data
|
||||
func (detector *DpkgFeaturesDetector) Detect(data map[string][]byte) ([]database.FeatureVersion, error) {
|
||||
f, hasFile := data["var/lib/dpkg/status"]
|
||||
if !hasFile {
|
||||
return []database.FeatureVersion{}, nil
|
||||
}
|
||||
|
||||
// Create a map to store packages and ensure their uniqueness
|
||||
packagesMap := make(map[string]database.FeatureVersion)
|
||||
|
||||
var pkg database.FeatureVersion
|
||||
var err error
|
||||
scanner := bufio.NewScanner(strings.NewReader(string(f)))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
if strings.HasPrefix(line, "Package: ") {
|
||||
// Package line
|
||||
// Defines the name of the package
|
||||
|
||||
pkg.Feature.Name = strings.TrimSpace(strings.TrimPrefix(line, "Package: "))
|
||||
pkg.Version = types.Version{}
|
||||
} else if strings.HasPrefix(line, "Source: ") {
|
||||
// Source line (Optional)
|
||||
// Gives the name of the source package
|
||||
// May also specify a version
|
||||
|
||||
srcCapture := dpkgSrcCaptureRegexp.FindAllStringSubmatch(line, -1)[0]
|
||||
md := map[string]string{}
|
||||
for i, n := range srcCapture {
|
||||
md[dpkgSrcCaptureRegexpNames[i]] = strings.TrimSpace(n)
|
||||
}
|
||||
|
||||
pkg.Feature.Name = md["name"]
|
||||
if md["version"] != "" {
|
||||
pkg.Version, err = types.NewVersion(md["version"])
|
||||
if err != nil {
|
||||
log.Warningf("could not parse package version '%s': %s. skipping", line[1], err.Error())
|
||||
}
|
||||
}
|
||||
} else if strings.HasPrefix(line, "Version: ") && pkg.Version.String() == "" {
|
||||
// Version line
|
||||
// Defines the version of the package
|
||||
// This version is less important than a version retrieved from a Source line
// because the Debian vulnerabilities often skip the epoch in the Version field,
// which is not present in the Source version, and because +bX revisions don't matter
|
||||
pkg.Version, err = types.NewVersion(strings.TrimPrefix(line, "Version: "))
|
||||
if err != nil {
|
||||
log.Warningf("could not parse package version '%s': %s. skipping", line[1], err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Add the package to the result map if we have all the information
|
||||
if pkg.Feature.Name != "" && pkg.Version.String() != "" {
|
||||
packagesMap[pkg.Feature.Name+"#"+pkg.Version.String()] = pkg
|
||||
pkg.Feature.Name = ""
|
||||
pkg.Version = types.Version{}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the map to a slice
|
||||
packages := make([]database.FeatureVersion, 0, len(packagesMap))
|
||||
for _, pkg := range packagesMap {
|
||||
packages = append(packages, pkg)
|
||||
}
|
||||
|
||||
return packages, nil
|
||||
}
|
||||
|
||||
// GetRequiredFiles returns the list of files required for Detect, without
|
||||
// leading /
|
||||
func (detector *DpkgFeaturesDetector) GetRequiredFiles() []string {
|
||||
return []string{"var/lib/dpkg/status"}
|
||||
}
|
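A sketch that feeds a hand-written two-package excerpt of a dpkg status database to the detector; the package names and versions are illustrative:

package main

import (
	"fmt"

	"github.com/coreos/clair/worker/detectors/feature/dpkg"
)

func main() {
	status := []byte(`Package: libpam-runtime
Status: install ok installed
Source: pam
Version: 1.1.8-3.1ubuntu3

Package: makedev
Status: install ok installed
Version: 2.3.1-93ubuntu1
`)

	detector := &dpkg.DpkgFeaturesDetector{}
	features, err := detector.Detect(map[string][]byte{"var/lib/dpkg/status": status})
	if err != nil {
		panic(err)
	}
	for _, fv := range features {
		// libpam-runtime is reported under its source package name, "pam".
		fmt.Printf("%s %s\n", fv.Feature.Name, fv.Version)
	}
}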
120
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/worker/detectors/feature/rpm/rpm.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rpm
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "rpm")
|
||||
|
||||
// RpmFeaturesDetector implements FeaturesDetector and detects rpm packages
|
||||
// It requires the "rpm" binary to be in the PATH
|
||||
type RpmFeaturesDetector struct{}
|
||||
|
||||
func init() {
|
||||
detectors.RegisterFeaturesDetector("rpm", &RpmFeaturesDetector{})
|
||||
}
|
||||
|
||||
// Detect detects packages using var/lib/rpm/Packages from the input data
|
||||
func (detector *RpmFeaturesDetector) Detect(data map[string][]byte) ([]database.FeatureVersion, error) {
|
||||
f, hasFile := data["var/lib/rpm/Packages"]
|
||||
if !hasFile {
|
||||
return []database.FeatureVersion{}, nil
|
||||
}
|
||||
|
||||
// Create a map to store packages and ensure their uniqueness
|
||||
packagesMap := make(map[string]database.FeatureVersion)
|
||||
|
||||
// Write the required "Packages" file to disk
|
||||
tmpDir, err := ioutil.TempDir(os.TempDir(), "rpm")
|
||||
defer os.RemoveAll(tmpDir)
|
||||
if err != nil {
|
||||
log.Errorf("could not create temporary folder for RPM detection: %s", err)
|
||||
return []database.FeatureVersion{}, cerrors.ErrFilesystem
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(tmpDir+"/Packages", f, 0700)
|
||||
if err != nil {
|
||||
log.Errorf("could not create temporary file for RPM detection: %s", err)
|
||||
return []database.FeatureVersion{}, cerrors.ErrFilesystem
|
||||
}
|
||||
|
||||
// Query RPM
|
||||
// We actually extract binary package names instead of source package names here because RHSAs refer to binary package names
|
||||
// In the dpkg system, we extract the source instead
|
||||
out, err := utils.Exec(tmpDir, "rpm", "--dbpath", tmpDir, "-qa", "--qf", "%{NAME} %{EPOCH}:%{VERSION}-%{RELEASE}\n")
|
||||
if err != nil {
|
||||
log.Errorf("could not query RPM: %s. output: %s", err, string(out))
|
||||
// Do not bubble up because we probably won't be able to fix it,
|
||||
// the database must be corrupted
|
||||
return []database.FeatureVersion{}, nil
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(strings.NewReader(string(out)))
|
||||
for scanner.Scan() {
|
||||
line := strings.Split(scanner.Text(), " ")
|
||||
if len(line) != 2 {
|
||||
// We may see warnings on some RPM versions:
|
||||
// "warning: Generating 12 missing index(es), please wait..."
|
||||
continue
|
||||
}
|
||||
|
||||
// Ignore gpg-pubkey packages which are fake packages used to store GPG keys - they are not versioned properly.
|
||||
if line[0] == "gpg-pubkey" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse version
|
||||
version, err := types.NewVersion(strings.Replace(line[1], "(none):", "", -1))
|
||||
if err != nil {
|
||||
log.Warningf("could not parse package version '%s': %s. skipping", line[1], err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
// Add package
|
||||
pkg := database.FeatureVersion{
|
||||
Feature: database.Feature{
|
||||
Name: line[0],
|
||||
},
|
||||
Version: version,
|
||||
}
|
||||
packagesMap[pkg.Feature.Name+"#"+pkg.Version.String()] = pkg
|
||||
}
|
||||
|
||||
// Convert the map to a slice
|
||||
packages := make([]database.FeatureVersion, 0, len(packagesMap))
|
||||
for _, pkg := range packagesMap {
|
||||
packages = append(packages, pkg)
|
||||
}
|
||||
|
||||
return packages, nil
|
||||
}
|
||||
|
||||
// GetRequiredFiles returns the list of files required for Detect, without
|
||||
// leading /
|
||||
func (detector *RpmFeaturesDetector) GetRequiredFiles() []string {
|
||||
return []string{"var/lib/rpm/Packages"}
|
||||
}
|
48
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/worker/detectors/feature/test.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package feature
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type FeatureVersionTest struct {
|
||||
FeatureVersions []database.FeatureVersion
|
||||
Data map[string][]byte
|
||||
}
|
||||
|
||||
func LoadFileForTest(name string) []byte {
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
d, _ := ioutil.ReadFile(path.Join(path.Dir(filename)) + "/" + name)
|
||||
return d
|
||||
}
|
||||
|
||||
func TestFeaturesDetector(t *testing.T, detector detectors.FeaturesDetector, tests []FeatureVersionTest) {
|
||||
for _, test := range tests {
|
||||
featureVersions, err := detector.Detect(test.Data)
|
||||
if assert.Nil(t, err) && assert.Len(t, featureVersions, len(test.FeatureVersions)) {
|
||||
for _, expectedFeatureVersion := range test.FeatureVersions {
|
||||
assert.Contains(t, featureVersions, expectedFeatureVersion)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
79
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/worker/detectors/features.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package detectors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
)
|
||||
|
||||
// The FeaturesDetector interface defines a way to detect packages from input data.
|
||||
type FeaturesDetector interface {
|
||||
// Detect detects a list of FeatureVersion from the input data.
|
||||
Detect(map[string][]byte) ([]database.FeatureVersion, error)
|
||||
// GetRequiredFiles returns the list of files required for Detect, without
|
||||
// leading /.
|
||||
GetRequiredFiles() []string
|
||||
}
|
||||
|
||||
var (
|
||||
featuresDetectorsLock sync.Mutex
|
||||
featuresDetectors = make(map[string]FeaturesDetector)
|
||||
)
|
||||
|
||||
// RegisterFeaturesDetector makes a FeaturesDetector available for DetectFeatures.
|
||||
func RegisterFeaturesDetector(name string, f FeaturesDetector) {
|
||||
if name == "" {
|
||||
panic("Could not register a FeaturesDetector with an empty name")
|
||||
}
|
||||
if f == nil {
|
||||
panic("Could not register a nil FeaturesDetector")
|
||||
}
|
||||
|
||||
featuresDetectorsLock.Lock()
|
||||
defer featuresDetectorsLock.Unlock()
|
||||
|
||||
if _, alreadyExists := featuresDetectors[name]; alreadyExists {
|
||||
panic(fmt.Sprintf("Detector '%s' is already registered", name))
|
||||
}
|
||||
featuresDetectors[name] = f
|
||||
}
|
||||
|
||||
// DetectFeatures detects a list of FeatureVersion using every registered FeaturesDetector.
|
||||
func DetectFeatures(data map[string][]byte) ([]database.FeatureVersion, error) {
|
||||
var packages []database.FeatureVersion
|
||||
|
||||
for _, detector := range featuresDetectors {
|
||||
pkgs, err := detector.Detect(data)
|
||||
if err != nil {
|
||||
return []database.FeatureVersion{}, err
|
||||
}
|
||||
packages = append(packages, pkgs...)
|
||||
}
|
||||
|
||||
return packages, nil
|
||||
}
|
||||
|
||||
// GetRequiredFilesFeatures returns the list of files required for Detect for every
|
||||
// registered FeaturesDetector, without leading /.
|
||||
func GetRequiredFilesFeatures() (files []string) {
|
||||
for _, detector := range featuresDetectors {
|
||||
files = append(files, detector.GetRequiredFiles()...)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
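To illustrate the registration contract, a hypothetical detector (not part of Clair) that would plug into this registry:

package main

import (
	"github.com/coreos/clair/database"
	"github.com/coreos/clair/worker/detectors"
)

// apkFeaturesDetector is a hypothetical detector used only for this sketch.
type apkFeaturesDetector struct{}

// Detect would parse lib/apk/db/installed; this stub returns nothing.
func (d *apkFeaturesDetector) Detect(data map[string][]byte) ([]database.FeatureVersion, error) {
	return []database.FeatureVersion{}, nil
}

// GetRequiredFiles lists the files Detect needs, without a leading /.
func (d *apkFeaturesDetector) GetRequiredFiles() []string {
	return []string{"lib/apk/db/installed"}
}

func init() {
	// Panics if the name is empty, the detector is nil, or the name is already taken.
	detectors.RegisterFeaturesDetector("apk", &apkFeaturesDetector{})
}

func main() {}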
82
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/worker/detectors/namespace.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package detectors exposes functions to register and use container
|
||||
// information extractors.
|
||||
package detectors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
)
|
||||
|
||||
// The NamespaceDetector interface defines a way to detect a Namespace from input data.
|
||||
// A namespace is usually made of an Operating System name and its version.
|
||||
type NamespaceDetector interface {
|
||||
// Detect detects a Namespace and its version from input data.
|
||||
Detect(map[string][]byte) *database.Namespace
|
||||
// GetRequiredFiles returns the list of files required for Detect, without
|
||||
// leading /.
|
||||
GetRequiredFiles() []string
|
||||
}
|
||||
|
||||
var (
|
||||
namespaceDetectorsLock sync.Mutex
|
||||
namespaceDetectors = make(map[string]NamespaceDetector)
|
||||
)
|
||||
|
||||
// RegisterNamespaceDetector provides a way to dynamically register an implementation of a
|
||||
// NamespaceDetector.
|
||||
//
|
||||
// If RegisterNamespaceDetector is called twice with the same name, if the NamespaceDetector is nil,
|
||||
// or if the name is blank, it panics.
|
||||
func RegisterNamespaceDetector(name string, f NamespaceDetector) {
|
||||
if name == "" {
|
||||
panic("Could not register a NamespaceDetector with an empty name")
|
||||
}
|
||||
if f == nil {
|
||||
panic("Could not register a nil NamespaceDetector")
|
||||
}
|
||||
|
||||
namespaceDetectorsLock.Lock()
|
||||
defer namespaceDetectorsLock.Unlock()
|
||||
|
||||
if _, alreadyExists := namespaceDetectors[name]; alreadyExists {
|
||||
panic(fmt.Sprintf("Detector '%s' is already registered", name))
|
||||
}
|
||||
namespaceDetectors[name] = f
|
||||
}
|
||||
|
||||
// DetectNamespace finds the OS of the layer by using every registered NamespaceDetector.
|
||||
func DetectNamespace(data map[string][]byte) *database.Namespace {
|
||||
for _, detector := range namespaceDetectors {
|
||||
if namespace := detector.Detect(data); namespace != nil {
|
||||
return namespace
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRequiredFilesNamespace returns the list of files required for DetectNamespace for every
|
||||
// registered NamespaceDetector, without leading /.
|
||||
func GetRequiredFilesNamespace() (files []string) {
|
||||
for _, detector := range namespaceDetectors {
|
||||
files = append(files, detector.GetRequiredFiles()...)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
@ -0,0 +1,85 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package aptsources
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
)
|
||||
|
||||
// AptSourcesNamespaceDetector implements NamespaceDetector and detects the Namespace from the
|
||||
// /etc/apt/sources.list file.
|
||||
//
|
||||
// This detector is necessary to determine the precise Debian version when it is
// an unstable version, for instance.
|
||||
type AptSourcesNamespaceDetector struct{}
|
||||
|
||||
func init() {
|
||||
detectors.RegisterNamespaceDetector("apt-sources", &AptSourcesNamespaceDetector{})
|
||||
}
|
||||
|
||||
func (detector *AptSourcesNamespaceDetector) Detect(data map[string][]byte) *database.Namespace {
|
||||
f, hasFile := data["etc/apt/sources.list"]
|
||||
if !hasFile {
|
||||
return nil
|
||||
}
|
||||
|
||||
var OS, version string
|
||||
|
||||
scanner := bufio.NewScanner(strings.NewReader(string(f)))
|
||||
for scanner.Scan() {
|
||||
// Format: man sources.list | https://wiki.debian.org/SourcesList
|
||||
// deb uri distribution component1 component2 component3
|
||||
// deb-src uri distribution component1 component2 component3
|
||||
line := strings.Split(scanner.Text(), " ")
|
||||
if len(line) > 3 {
|
||||
// Only consider main component
|
||||
isMainComponent := false
|
||||
for _, component := range line[3:] {
|
||||
if component == "main" {
|
||||
isMainComponent = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isMainComponent {
|
||||
continue
|
||||
}
|
||||
|
||||
var found bool
|
||||
version, found = database.DebianReleasesMapping[line[2]]
|
||||
if found {
|
||||
OS = "debian"
|
||||
break
|
||||
}
|
||||
version, found = database.UbuntuReleasesMapping[line[2]]
|
||||
if found {
|
||||
OS = "ubuntu"
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if OS != "" && version != "" {
|
||||
return &database.Namespace{Name: OS + ":" + version}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (detector *AptSourcesNamespaceDetector) GetRequiredFiles() []string {
|
||||
return []string{"etc/apt/sources.list"}
|
||||
}
|
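To make the parsing above concrete (illustrative input, not part of the commit; the resulting namespace depends on database.DebianReleasesMapping):

data := map[string][]byte{
	"etc/apt/sources.list": []byte("deb http://httpredir.debian.org/debian jessie main contrib non-free"),
}
// line[2] is "jessie"; if DebianReleasesMapping maps it to "8", Detect returns
// &database.Namespace{Name: "debian:8"}.
ns := (&AptSourcesNamespaceDetector{}).Detect(data)
_ = ns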
@ -0,0 +1,81 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package lsbrelease
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
)
|
||||
|
||||
var (
|
||||
lsbReleaseOSRegexp = regexp.MustCompile(`^DISTRIB_ID=(.*)`)
|
||||
lsbReleaseVersionRegexp = regexp.MustCompile(`^DISTRIB_RELEASE=(.*)`)
|
||||
)
|
||||
|
||||
// LsbReleaseNamespaceDetector implements NamespaceDetector and detects the Namespace from the
|
||||
// /etc/lsb-release file.
|
||||
//
|
||||
// This detector is necessary for Ubuntu Precise.
|
||||
type LsbReleaseNamespaceDetector struct{}
|
||||
|
||||
func init() {
|
||||
detectors.RegisterNamespaceDetector("lsb-release", &LsbReleaseNamespaceDetector{})
|
||||
}
|
||||
|
||||
func (detector *LsbReleaseNamespaceDetector) Detect(data map[string][]byte) *database.Namespace {
|
||||
f, hasFile := data["etc/lsb-release"]
|
||||
if !hasFile {
|
||||
return nil
|
||||
}
|
||||
|
||||
var OS, version string
|
||||
|
||||
scanner := bufio.NewScanner(strings.NewReader(string(f)))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
r := lsbReleaseOSRegexp.FindStringSubmatch(line)
|
||||
if len(r) == 2 {
|
||||
OS = strings.Replace(strings.ToLower(r[1]), "\"", "", -1)
|
||||
}
|
||||
|
||||
r = lsbReleaseVersionRegexp.FindStringSubmatch(line)
|
||||
if len(r) == 2 {
|
||||
version = strings.Replace(strings.ToLower(r[1]), "\"", "", -1)
|
||||
|
||||
// We care about the .04 for Ubuntu but not for Debian / CentOS
|
||||
if OS == "centos" || OS == "debian" {
|
||||
i := strings.Index(version, ".")
|
||||
if i >= 0 {
|
||||
version = version[:i]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if OS != "" && version != "" {
|
||||
return &database.Namespace{Name: OS + ":" + version}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRequiredFiles returns the list of files that are required for Detect()
|
||||
func (detector *LsbReleaseNamespaceDetector) GetRequiredFiles() []string {
|
||||
return []string{"etc/lsb-release"}
|
||||
}
|
@ -0,0 +1,76 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package osrelease
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
)
|
||||
|
||||
var (
|
||||
osReleaseOSRegexp = regexp.MustCompile(`^ID=(.*)`)
|
||||
osReleaseVersionRegexp = regexp.MustCompile(`^VERSION_ID=(.*)`)
|
||||
)
|
||||
|
||||
// OsReleaseNamespaceDetector implements NamespaceDetector and detects the OS from the
|
||||
// /etc/os-release and usr/lib/os-release files.
|
||||
type OsReleaseNamespaceDetector struct{}
|
||||
|
||||
func init() {
|
||||
detectors.RegisterNamespaceDetector("os-release", &OsReleaseNamespaceDetector{})
|
||||
}
|
||||
|
||||
// Detect tries to detect OS/Version using "/etc/os-release" and "/usr/lib/os-release"
|
||||
// Typically for Debian / Ubuntu
|
||||
// /etc/debian_version can't be used: it does not distinguish between testing and unstable, as it contains stretch/sid for both
|
||||
func (detector *OsReleaseNamespaceDetector) Detect(data map[string][]byte) *database.Namespace {
|
||||
var OS, version string
|
||||
|
||||
for _, filePath := range detector.GetRequiredFiles() {
|
||||
f, hasFile := data[filePath]
|
||||
if !hasFile {
|
||||
continue
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(strings.NewReader(string(f)))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
r := osReleaseOSRegexp.FindStringSubmatch(line)
|
||||
if len(r) == 2 {
|
||||
OS = strings.Replace(strings.ToLower(r[1]), "\"", "", -1)
|
||||
}
|
||||
|
||||
r = osReleaseVersionRegexp.FindStringSubmatch(line)
|
||||
if len(r) == 2 {
|
||||
version = strings.Replace(strings.ToLower(r[1]), "\"", "", -1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if OS != "" && version != "" {
|
||||
return &database.Namespace{Name: OS + ":" + version}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRequiredFiles returns the list of files that are required for Detect()
|
||||
func (detector *OsReleaseNamespaceDetector) GetRequiredFiles() []string {
|
||||
return []string{"etc/os-release", "usr/lib/os-release"}
|
||||
}
|
@ -0,0 +1,59 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package redhatrelease
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
)
|
||||
|
||||
var redhatReleaseRegexp = regexp.MustCompile(`(?P<os>[^\s]*) (Linux release|release) (?P<version>[\d]+)`)
|
||||
|
||||
// RedhatReleaseNamespaceDetector implements NamespaceDetector and detects the OS from the
|
||||
// /etc/centos-release, /etc/redhat-release and /etc/system-release files.
|
||||
//
|
||||
// Typically for CentOS and Red-Hat like systems
|
||||
// eg. CentOS release 5.11 (Final)
|
||||
// eg. CentOS release 6.6 (Final)
|
||||
// eg. CentOS Linux release 7.1.1503 (Core)
|
||||
type RedhatReleaseNamespaceDetector struct{}
|
||||
|
||||
func init() {
|
||||
detectors.RegisterNamespaceDetector("redhat-release", &RedhatReleaseNamespaceDetector{})
|
||||
}
|
||||
|
||||
func (detector *RedhatReleaseNamespaceDetector) Detect(data map[string][]byte) *database.Namespace {
|
||||
for _, filePath := range detector.GetRequiredFiles() {
|
||||
f, hasFile := data[filePath]
|
||||
if !hasFile {
|
||||
continue
|
||||
}
|
||||
|
||||
r := redhatReleaseRegexp.FindStringSubmatch(string(f))
|
||||
if len(r) == 4 {
|
||||
return &database.Namespace{Name: strings.ToLower(r[1]) + ":" + r[3]}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRequiredFiles returns the list of files that are required for Detect()
|
||||
func (detector *RedhatReleaseNamespaceDetector) GetRequiredFiles() []string {
|
||||
return []string{"etc/centos-release", "etc/redhat-release", "etc/system-release"}
|
||||
}
|
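A worked example of the regular expression above, using one of the sample strings from the comment (not part of the commit):

r := redhatReleaseRegexp.FindStringSubmatch("CentOS release 6.6 (Final)")
// r == []string{"CentOS release 6", "CentOS", "release", "6"}, so Detect
// returns &database.Namespace{Name: "centos:6"}.
_ = r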
34
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/worker/detectors/namespace/test.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package namespace
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type NamespaceTest struct {
|
||||
Data map[string][]byte
|
||||
ExpectedNamespace database.Namespace
|
||||
}
|
||||
|
||||
func TestNamespaceDetector(t *testing.T, detector detectors.NamespaceDetector, tests []NamespaceTest) {
|
||||
for _, test := range tests {
|
||||
assert.Equal(t, test.ExpectedNamespace, *detector.Detect(test.Data))
|
||||
}
|
||||
}
|
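Not part of the commit, but a sketch of how a detector package is expected to drive this helper; the os-release content and expected namespace are invented, and the import path mirrors where this file is vendored:

package osrelease

import (
	"testing"

	"github.com/coreos/clair/database"
	"github.com/coreos/clair/worker/detectors/namespace"
)

func TestOsReleaseNamespaceDetection(t *testing.T) {
	namespace.TestNamespaceDetector(t, &OsReleaseNamespaceDetector{}, []namespace.NamespaceTest{
		{
			// A minimal os-release payload that should map to debian:8.
			Data:              map[string][]byte{"etc/os-release": []byte("ID=debian\nVERSION_ID=8")},
			ExpectedNamespace: database.Namespace{Name: "debian:8"},
		},
	})
}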
192
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/clair/worker/worker.go
generated
vendored
Normal file
@ -0,0 +1,192 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package worker implements the logic to extract useful information from a
|
||||
// container layer and store it in the database.
|
||||
package worker
|
||||
|
||||
import (
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/worker/detectors"
|
||||
)
|
||||
|
||||
const (
|
||||
// Version (integer) represents the worker version.
|
||||
// Increased each time the engine changes.
|
||||
Version = 2
|
||||
|
||||
// maxFileSize is the maximum size of a single file we should extract.
|
||||
maxFileSize = 200 * 1024 * 1024 // 200 MiB
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "worker")
|
||||
|
||||
// ErrUnsupported is the error that should be raised when an OS or package
|
||||
// manager is not supported.
|
||||
ErrUnsupported = cerrors.NewBadRequestError("worker: OS and/or package manager are not supported")
|
||||
|
||||
// ErrParentUnknown is the error that should be raised when a parent layer
|
||||
// has yet to be processed for the current layer.
|
||||
ErrParentUnknown = cerrors.NewBadRequestError("worker: parent layer is unknown, it must be processed first")
|
||||
)
|
||||
|
||||
// Process detects the Namespace of a layer, the features it adds/removes, and
|
||||
// then stores everything in the database.
|
||||
// TODO(Quentin-M): We could have a goroutine that looks for layers that have been analyzed with an
|
||||
// older engine version and that processes them.
|
||||
func Process(datastore database.Datastore, name, parentName, path, imageFormat string) error {
|
||||
// Verify parameters.
|
||||
if name == "" {
|
||||
return cerrors.NewBadRequestError("could not process a layer which does not have a name")
|
||||
}
|
||||
|
||||
if path == "" {
|
||||
return cerrors.NewBadRequestError("could not process a layer which does not have a path")
|
||||
}
|
||||
|
||||
if imageFormat == "" {
|
||||
return cerrors.NewBadRequestError("could not process a layer which does not have a format")
|
||||
}
|
||||
|
||||
log.Debugf("layer %s: processing (Location: %s, Engine version: %d, Parent: %s, Format: %s)",
|
||||
name, utils.CleanURL(path), Version, parentName, imageFormat)
|
||||
|
||||
// Check to see if the layer is already in the database.
|
||||
layer, err := datastore.FindLayer(name, false, false)
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
if err == cerrors.ErrNotFound {
|
||||
// New layer case.
|
||||
layer = database.Layer{Name: name, EngineVersion: Version}
|
||||
|
||||
// Retrieve the parent if it has one.
|
||||
// We need to get it with its Features in order to diff them.
|
||||
if parentName != "" {
|
||||
parent, err := datastore.FindLayer(parentName, true, false)
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
if err == cerrors.ErrNotFound {
|
||||
log.Warningf("layer %s: the parent layer (%s) is unknown. it must be processed first", name,
|
||||
parentName)
|
||||
return ErrParentUnknown
|
||||
}
|
||||
layer.Parent = &parent
|
||||
}
|
||||
} else {
|
||||
// The layer is already in the database, check if we need to update it.
|
||||
if layer.EngineVersion >= Version {
|
||||
log.Debugf(`layer %s: layer content has already been processed in the past with engine %d.
|
||||
Current engine is %d. skipping analysis`, name, layer.EngineVersion, Version)
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Debugf(`layer %s: layer content has been analyzed in the past with engine %d. Current
|
||||
engine is %d. analyzing again`, name, layer.EngineVersion, Version)
|
||||
}
|
||||
|
||||
// Analyze the content.
|
||||
layer.Namespace, layer.Features, err = detectContent(name, path, imageFormat, layer.Parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return datastore.InsertLayer(layer)
|
||||
}
|
||||
|
||||
// detectContent downloads a layer's archive and extracts its Namespace and Features.
|
||||
func detectContent(name, path, imageFormat string, parent *database.Layer) (namespace *database.Namespace, features []database.FeatureVersion, err error) {
|
||||
data, err := detectors.DetectData(path, imageFormat, append(detectors.GetRequiredFilesFeatures(),
|
||||
detectors.GetRequiredFilesNamespace()...), maxFileSize)
|
||||
if err != nil {
|
||||
log.Errorf("layer %s: failed to extract data from %s: %s", name, utils.CleanURL(path), err)
|
||||
return
|
||||
}
|
||||
|
||||
// Detect namespace.
|
||||
namespace, err = detectNamespace(data, parent)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if namespace != nil {
|
||||
log.Debugf("layer %s: Namespace is %s.", name, namespace.Name)
|
||||
} else {
|
||||
log.Debugf("layer %s: OS is unknown.", name)
|
||||
}
|
||||
|
||||
// Detect features.
|
||||
features, err = detectFeatures(name, data, namespace)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// If there are no features detected, use the parent's features if possible.
|
||||
// TODO(Quentin-M): We eventually want to give each detector the choice to use none/some of the
|
||||
// parent's Features. It would be useful for detectors that can't find their entire result using
|
||||
// one Layer.
|
||||
if len(features) == 0 && parent != nil {
|
||||
features = parent.Features
|
||||
}
|
||||
|
||||
log.Debugf("layer %s: detected %d features", name, len(features))
|
||||
return
|
||||
}
|
||||
|
||||
func detectNamespace(data map[string][]byte, parent *database.Layer) (namespace *database.Namespace, err error) {
|
||||
namespace = detectors.DetectNamespace(data)
|
||||
|
||||
// Attempt to detect the OS from the parent layer.
|
||||
if namespace == nil && parent != nil {
|
||||
namespace = parent.Namespace
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func detectFeatures(name string, data map[string][]byte, namespace *database.Namespace) (features []database.FeatureVersion, err error) {
|
||||
// TODO(Quentin-M): We need to pass the parent image to DetectFeatures because it's possible that
|
||||
// some detectors would need it in order to produce the entire feature list (if they can only
|
||||
// detect a diff). Also, we should probably pass the detected namespace so detectors could
|
||||
// make their own decision.
|
||||
features, err = detectors.DetectFeatures(data)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that every feature has a Namespace associated, otherwise associate the detected
|
||||
// namespace. If there is no detected namespace, we'll throw an error.
|
||||
for i := 0; i < len(features); i++ {
|
||||
if features[i].Feature.Namespace.Name == "" {
|
||||
if namespace != nil {
|
||||
features[i].Feature.Namespace = *namespace
|
||||
} else {
|
||||
log.Warningf("layer %s: Layer's namespace is unknown but non-namespaced features have been detected", name)
|
||||
err = ErrUnsupported
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
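Not part of the commit: a hedged sketch of a call site for Process, assuming an open database.Datastore and a layer tarball on local disk (the layer names, path, and "Docker" format string are illustrative):

// Analyze one layer on top of its already-processed parent.
err := worker.Process(datastore, "sha256:aaaa", "sha256:pppp", "/tmp/layers/aaaa.tar", "Docker")
if err == worker.ErrParentUnknown {
	// The parent layer has to be analyzed before this one can be.
}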
191
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/go-systemd/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
178
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/go-systemd/journal/journal.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package journal provides write bindings to the local systemd journal.
|
||||
// It is implemented in pure Go and connects to the journal directly over its
|
||||
// unix socket.
|
||||
//
|
||||
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||
// sd-journal C API.
|
||||
//
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||
package journal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Priority of a journal message
|
||||
type Priority int
|
||||
|
||||
const (
|
||||
PriEmerg Priority = iota
|
||||
PriAlert
|
||||
PriCrit
|
||||
PriErr
|
||||
PriWarning
|
||||
PriNotice
|
||||
PriInfo
|
||||
PriDebug
|
||||
)
|
||||
|
||||
var conn net.Conn
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
|
||||
if err != nil {
|
||||
conn = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Enabled returns true if the local systemd journal is available for logging
|
||||
func Enabled() bool {
|
||||
return conn != nil
|
||||
}
|
||||
|
||||
// Send a message to the local systemd journal. vars is a map of journald
|
||||
// fields to values. Fields must be composed of uppercase letters, numbers,
|
||||
// and underscores, but must not start with an underscore. Within these
|
||||
// restrictions, any arbitrary field name may be used. Some names have special
|
||||
// significance: see the journalctl documentation
|
||||
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
|
||||
// for more details. vars may be nil.
|
||||
func Send(message string, priority Priority, vars map[string]string) error {
|
||||
if conn == nil {
|
||||
return journalError("could not connect to journald socket")
|
||||
}
|
||||
|
||||
data := new(bytes.Buffer)
|
||||
appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
|
||||
appendVariable(data, "MESSAGE", message)
|
||||
for k, v := range vars {
|
||||
appendVariable(data, k, v)
|
||||
}
|
||||
|
||||
_, err := io.Copy(conn, data)
|
||||
if err != nil && isSocketSpaceError(err) {
|
||||
file, err := tempFd()
|
||||
if err != nil {
|
||||
return journalError(err.Error())
|
||||
}
|
||||
_, err = io.Copy(file, data)
|
||||
if err != nil {
|
||||
return journalError(err.Error())
|
||||
}
|
||||
|
||||
rights := syscall.UnixRights(int(file.Fd()))
|
||||
|
||||
/* this connection should always be a UnixConn, but better safe than sorry */
|
||||
unixConn, ok := conn.(*net.UnixConn)
|
||||
if !ok {
|
||||
return journalError("can't send file through non-Unix connection")
|
||||
}
|
||||
unixConn.WriteMsgUnix([]byte{}, rights, nil)
|
||||
} else if err != nil {
|
||||
return journalError(err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Print prints a message to the local systemd journal using Send().
|
||||
func Print(priority Priority, format string, a ...interface{}) error {
|
||||
return Send(fmt.Sprintf(format, a...), priority, nil)
|
||||
}
|
||||
|
||||
func appendVariable(w io.Writer, name, value string) {
|
||||
if !validVarName(name) {
|
||||
journalError("variable name contains invalid character, ignoring")
|
||||
}
|
||||
if strings.ContainsRune(value, '\n') {
|
||||
/* When the value contains a newline, we write:
|
||||
* - the variable name, followed by a newline
|
||||
* - the size (in 64bit little endian format)
|
||||
* - the data, followed by a newline
|
||||
*/
|
||||
fmt.Fprintln(w, name)
|
||||
binary.Write(w, binary.LittleEndian, uint64(len(value)))
|
||||
fmt.Fprintln(w, value)
|
||||
} else {
|
||||
/* just write the variable and value all on one line */
|
||||
fmt.Fprintf(w, "%s=%s\n", name, value)
|
||||
}
|
||||
}
|
||||
|
||||
func validVarName(name string) bool {
|
||||
/* The variable name must be in uppercase and consist only of characters,
|
||||
* numbers and underscores, and may not begin with an underscore. (from the docs)
|
||||
*/
|
||||
|
||||
valid := name[0] != '_'
|
||||
for _, c := range name {
|
||||
valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_'
|
||||
}
|
||||
return valid
|
||||
}
|
||||
|
||||
func isSocketSpaceError(err error) bool {
|
||||
opErr, ok := err.(*net.OpError)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
sysErr, ok := opErr.Err.(syscall.Errno)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
|
||||
}
|
||||
|
||||
func tempFd() (*os.File, error) {
|
||||
file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
syscall.Unlink(file.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func journalError(s string) error {
|
||||
s = "journal error: " + s
|
||||
fmt.Fprintln(os.Stderr, s)
|
||||
return errors.New(s)
|
||||
}
|
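A minimal usage sketch for this package (the field name and message are invented); Enabled reports whether the journald socket could be opened, and Send returns an error otherwise:

if journal.Enabled() {
	_ = journal.Print(journal.PriInfo, "analyzed layer %s", "sha256:aaaa")
	_ = journal.Send("analyzed layer", journal.PriInfo, map[string]string{
		// Custom fields: uppercase letters, digits and underscores only.
		"CLAIR_LAYER": "sha256:aaaa",
	})
}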
202
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
5
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/NOTICE
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
CoreOS Project
|
||||
Copyright 2014 CoreOS, Inc
|
||||
|
||||
This product includes software developed at CoreOS, Inc.
|
||||
(http://www.coreos.com/).
|
39
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/README.md
generated
vendored
Normal file
@ -0,0 +1,39 @@
# capnslog, the CoreOS logging package

There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks), or that are just a pain to use (lack of `Fatalln()`?).
capnslog provides a simple but consistent logging interface suitable for all kinds of projects.

### Design Principles

##### `package main` is the place where logging gets turned on and routed

A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.

##### All log options are runtime-configurable.

It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.

##### There is one log object per package. It is registered under its repository and package name.

`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.

##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.

Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.

Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.

##### Log objects are an interface

An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.

##### Log levels have specific meanings:

* Critical: Unrecoverable. Must fail.
* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
* Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
* Notice: Normal, but important (uncommon) log information.
* Info: Normal, working log information; everything is fine, but helpful notices for auditing or common operations.
* Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices.
* Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
57
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/example/hello_dolly.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
oldlog "log"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
var logLevel = capnslog.INFO
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/pkg/capnslog/cmd", "main")
|
||||
var dlog = capnslog.NewPackageLogger("github.com/coreos/pkg/capnslog/cmd", "dolly")
|
||||
|
||||
func init() {
|
||||
flag.Var(&logLevel, "log-level", "Global log level.")
|
||||
}
|
||||
|
||||
func main() {
|
||||
rl := capnslog.MustRepoLogger("github.com/coreos/pkg/capnslog/cmd")
|
||||
|
||||
// We can parse the log level configs from the command line
|
||||
flag.Parse()
|
||||
if flag.NArg() > 1 {
|
||||
cfg, err := rl.ParseLogLevelConfig(flag.Arg(1))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
rl.SetLogLevel(cfg)
|
||||
log.Infof("Setting output to %s", flag.Arg(1))
|
||||
}
|
||||
|
||||
// Send some messages at different levels to the different packages
|
||||
dlog.Infof("Hello Dolly")
|
||||
dlog.Warningf("Well hello, Dolly")
|
||||
log.Errorf("It's so nice to have you back where you belong")
|
||||
dlog.Debugf("You're looking swell, Dolly")
|
||||
dlog.Tracef("I can tell, Dolly")
|
||||
|
||||
// We also have control over the built-in "log" package.
|
||||
capnslog.SetGlobalLogLevel(logLevel)
|
||||
oldlog.Println("You're still glowin', you're still crowin', you're still lookin' strong")
|
||||
log.Fatalf("Dolly'll never go away again")
|
||||
}
|
106
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/formatters.go
generated
vendored
Normal file
@ -0,0 +1,106 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Formatter interface {
|
||||
Format(pkg string, level LogLevel, depth int, entries ...interface{})
|
||||
Flush()
|
||||
}
|
||||
|
||||
func NewStringFormatter(w io.Writer) *StringFormatter {
|
||||
return &StringFormatter{
|
||||
w: bufio.NewWriter(w),
|
||||
}
|
||||
}
|
||||
|
||||
type StringFormatter struct {
|
||||
w *bufio.Writer
|
||||
}
|
||||
|
||||
func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
|
||||
now := time.Now().UTC()
|
||||
s.w.WriteString(now.Format(time.RFC3339))
|
||||
s.w.WriteByte(' ')
|
||||
writeEntries(s.w, pkg, l, i, entries...)
|
||||
s.Flush()
|
||||
}
|
||||
|
||||
func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
|
||||
if pkg != "" {
|
||||
w.WriteString(pkg + ": ")
|
||||
}
|
||||
str := fmt.Sprint(entries...)
|
||||
endsInNL := strings.HasSuffix(str, "\n")
|
||||
w.WriteString(str)
|
||||
if !endsInNL {
|
||||
w.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StringFormatter) Flush() {
|
||||
s.w.Flush()
|
||||
}
|
||||
|
||||
func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
|
||||
return &PrettyFormatter{
|
||||
w: bufio.NewWriter(w),
|
||||
debug: debug,
|
||||
}
|
||||
}
|
||||
|
||||
type PrettyFormatter struct {
|
||||
w *bufio.Writer
|
||||
debug bool
|
||||
}
|
||||
|
||||
func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
|
||||
now := time.Now()
|
||||
ts := now.Format("2006-01-02 15:04:05")
|
||||
c.w.WriteString(ts)
|
||||
ms := now.Nanosecond() / 1000
|
||||
c.w.WriteString(fmt.Sprintf(".%06d", ms))
|
||||
if c.debug {
|
||||
_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
|
||||
if !ok {
|
||||
file = "???"
|
||||
line = 1
|
||||
} else {
|
||||
slash := strings.LastIndex(file, "/")
|
||||
if slash >= 0 {
|
||||
file = file[slash+1:]
|
||||
}
|
||||
}
|
||||
if line < 0 {
|
||||
line = 0 // not a real line number
|
||||
}
|
||||
c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
|
||||
}
|
||||
c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
|
||||
writeEntries(c.w, pkg, l, depth, entries...)
|
||||
c.Flush()
|
||||
}
|
||||
|
||||
func (c *PrettyFormatter) Flush() {
|
||||
c.w.Flush()
|
||||
}
|
96
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var pid = os.Getpid()
|
||||
|
||||
type GlogFormatter struct {
|
||||
StringFormatter
|
||||
}
|
||||
|
||||
func NewGlogFormatter(w io.Writer) *GlogFormatter {
|
||||
g := &GlogFormatter{}
|
||||
g.w = bufio.NewWriter(w)
|
||||
return g
|
||||
}
|
||||
|
||||
func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
|
||||
g.w.Write(GlogHeader(level, depth+1))
|
||||
g.StringFormatter.Format(pkg, level, depth+1, entries...)
|
||||
}
|
||||
|
||||
func GlogHeader(level LogLevel, depth int) []byte {
|
||||
// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
|
||||
now := time.Now().UTC()
|
||||
_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
|
||||
if !ok {
|
||||
file = "???"
|
||||
line = 1
|
||||
} else {
|
||||
slash := strings.LastIndex(file, "/")
|
||||
if slash >= 0 {
|
||||
file = file[slash+1:]
|
||||
}
|
||||
}
|
||||
if line < 0 {
|
||||
line = 0 // not a real line number
|
||||
}
|
||||
buf := &bytes.Buffer{}
|
||||
buf.Grow(30)
|
||||
_, month, day := now.Date()
|
||||
hour, minute, second := now.Clock()
|
||||
buf.WriteString(level.Char())
|
||||
twoDigits(buf, int(month))
|
||||
twoDigits(buf, day)
|
||||
buf.WriteByte(' ')
|
||||
twoDigits(buf, hour)
|
||||
buf.WriteByte(':')
|
||||
twoDigits(buf, minute)
|
||||
buf.WriteByte(':')
|
||||
twoDigits(buf, second)
|
||||
buf.WriteByte('.')
|
||||
buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
|
||||
buf.WriteByte('Z')
|
||||
buf.WriteByte(' ')
|
||||
buf.WriteString(strconv.Itoa(pid))
|
||||
buf.WriteByte(' ')
|
||||
buf.WriteString(file)
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(strconv.Itoa(line))
|
||||
buf.WriteByte(']')
|
||||
buf.WriteByte(' ')
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
const digits = "0123456789"
|
||||
|
||||
func twoDigits(b *bytes.Buffer, d int) {
|
||||
c2 := digits[d%10]
|
||||
d /= 10
|
||||
c1 := digits[d%10]
|
||||
b.WriteByte(c1)
|
||||
b.WriteByte(c2)
|
||||
}
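For reference, a header emitted by GlogHeader above comes out roughly like the line below (date, time, pid, and file:line are illustrative; note that this implementation does not zero-pad the microsecond field):

I0602 15:04:05.123456Z 4242 main.go:42] example message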
|
49
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/init.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// +build !windows
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Here's where the opinionation comes in. We need some sensible defaults,
|
||||
// especially after taking over the log package. Your project (whatever it may
|
||||
// be) may see things differently. That's okay; there should be no defaults in
|
||||
// the main package that cannot be controlled or overridden programmatically,
|
||||
// otherwise it's a bug. The way to do so is to create your own init_log.go file, much
|
||||
// like this one.
|
||||
|
||||
func init() {
|
||||
initHijack()
|
||||
|
||||
// Go `log` package uses os.Stderr.
|
||||
SetFormatter(NewDefaultFormatter(os.Stderr))
|
||||
SetGlobalLogLevel(INFO)
|
||||
}
|
||||
|
||||
func NewDefaultFormatter(out io.Writer) Formatter {
|
||||
if syscall.Getppid() == 1 {
|
||||
// We're running under init, which may be systemd.
|
||||
f, err := NewJournaldFormatter()
|
||||
if err == nil {
|
||||
return f
|
||||
}
|
||||
}
|
||||
return NewPrettyFormatter(out, false)
|
||||
}
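A minimal sketch of how an application could override these defaults after importing the package, using only the exported calls shown in this vendored file (the formatter choice and DEBUG level are illustrative, not anything this commit prescribes):

package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

func init() {
	// Both knobs are exported, so the opinionated defaults above can be
	// replaced programmatically by the importing application.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, true))
	capnslog.SetGlobalLogLevel(capnslog.DEBUG)
}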
|
25
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/init_windows.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import "os"
|
||||
|
||||
func init() {
|
||||
initHijack()
|
||||
|
||||
// Go `log` package uses os.Stderr.
|
||||
SetFormatter(NewPrettyFormatter(os.Stderr, false))
|
||||
SetGlobalLogLevel(INFO)
|
||||
}
|
68
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// +build !windows
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/coreos/go-systemd/journal"
|
||||
)
|
||||
|
||||
func NewJournaldFormatter() (Formatter, error) {
|
||||
if !journal.Enabled() {
|
||||
return nil, errors.New("No systemd detected")
|
||||
}
|
||||
return &journaldFormatter{}, nil
|
||||
}
|
||||
|
||||
type journaldFormatter struct{}
|
||||
|
||||
func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
|
||||
var pri journal.Priority
|
||||
switch l {
|
||||
case CRITICAL:
|
||||
pri = journal.PriCrit
|
||||
case ERROR:
|
||||
pri = journal.PriErr
|
||||
case WARNING:
|
||||
pri = journal.PriWarning
|
||||
case NOTICE:
|
||||
pri = journal.PriNotice
|
||||
case INFO:
|
||||
pri = journal.PriInfo
|
||||
case DEBUG:
|
||||
pri = journal.PriDebug
|
||||
case TRACE:
|
||||
pri = journal.PriDebug
|
||||
default:
|
||||
panic("Unhandled loglevel")
|
||||
}
|
||||
msg := fmt.Sprint(entries...)
|
||||
tags := map[string]string{
|
||||
"PACKAGE": pkg,
|
||||
"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
|
||||
}
|
||||
err := journal.Send(msg, pri, tags)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (j *journaldFormatter) Flush() {}
|
39
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"log"
|
||||
)
|
||||
|
||||
func initHijack() {
|
||||
pkg := NewPackageLogger("log", "")
|
||||
w := packageWriter{pkg}
|
||||
log.SetFlags(0)
|
||||
log.SetPrefix("")
|
||||
log.SetOutput(w)
|
||||
}
|
||||
|
||||
type packageWriter struct {
|
||||
pl *PackageLogger
|
||||
}
|
||||
|
||||
func (p packageWriter) Write(b []byte) (int, error) {
|
||||
if p.pl.level < INFO {
|
||||
return 0, nil
|
||||
}
|
||||
p.pl.internalLog(calldepth+2, INFO, string(b))
|
||||
return len(b), nil
|
||||
}
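A small, hedged illustration of what the hijack does in practice: once capnslog's init has run, plain standard-library log calls flow through a PackageLogger named "log" (and are dropped when that logger sits below INFO). The program is illustrative only:

package main

import (
	"log"

	_ "github.com/coreos/pkg/capnslog" // importing runs initHijack via the package's init()
)

func main() {
	// Rendered by capnslog's configured Formatter rather than the stdlib's
	// own output path.
	log.Println("hello from the hijacked stdlib logger")
}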
|
240
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/logmap.go
generated
vendored
Normal file
@ -0,0 +1,240 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// LogLevel is the set of all log levels.
|
||||
type LogLevel int8
|
||||
|
||||
const (
|
||||
// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
|
||||
CRITICAL LogLevel = iota - 1
|
||||
// ERROR is for errors that are not fatal but lead to troubling behavior.
|
||||
ERROR
|
||||
// WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
|
||||
WARNING
|
||||
// NOTICE is for normal but significant conditions.
|
||||
NOTICE
|
||||
// INFO is a log level for common, everyday log updates.
|
||||
INFO
|
||||
// DEBUG is the default hidden level for more verbose updates about internal processes.
|
||||
DEBUG
|
||||
// TRACE is for (potentially) call by call tracing of programs.
|
||||
TRACE
|
||||
)
|
||||
|
||||
// Char returns a single-character representation of the log level.
|
||||
func (l LogLevel) Char() string {
|
||||
switch l {
|
||||
case CRITICAL:
|
||||
return "C"
|
||||
case ERROR:
|
||||
return "E"
|
||||
case WARNING:
|
||||
return "W"
|
||||
case NOTICE:
|
||||
return "N"
|
||||
case INFO:
|
||||
return "I"
|
||||
case DEBUG:
|
||||
return "D"
|
||||
case TRACE:
|
||||
return "T"
|
||||
default:
|
||||
panic("Unhandled loglevel")
|
||||
}
|
||||
}
|
||||
|
||||
// String returns a multi-character representation of the log level.
|
||||
func (l LogLevel) String() string {
|
||||
switch l {
|
||||
case CRITICAL:
|
||||
return "CRITICAL"
|
||||
case ERROR:
|
||||
return "ERROR"
|
||||
case WARNING:
|
||||
return "WARNING"
|
||||
case NOTICE:
|
||||
return "NOTICE"
|
||||
case INFO:
|
||||
return "INFO"
|
||||
case DEBUG:
|
||||
return "DEBUG"
|
||||
case TRACE:
|
||||
return "TRACE"
|
||||
default:
|
||||
panic("Unhandled loglevel")
|
||||
}
|
||||
}
|
||||
|
||||
// Update using the given string value. Fulfills the flag.Value interface.
|
||||
func (l *LogLevel) Set(s string) error {
|
||||
value, err := ParseLevel(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*l = value
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseLevel translates some potential loglevel strings into their corresponding levels.
|
||||
func ParseLevel(s string) (LogLevel, error) {
|
||||
switch s {
|
||||
case "CRITICAL", "C":
|
||||
return CRITICAL, nil
|
||||
case "ERROR", "0", "E":
|
||||
return ERROR, nil
|
||||
case "WARNING", "1", "W":
|
||||
return WARNING, nil
|
||||
case "NOTICE", "2", "N":
|
||||
return NOTICE, nil
|
||||
case "INFO", "3", "I":
|
||||
return INFO, nil
|
||||
case "DEBUG", "4", "D":
|
||||
return DEBUG, nil
|
||||
case "TRACE", "5", "T":
|
||||
return TRACE, nil
|
||||
}
|
||||
return CRITICAL, errors.New("couldn't parse log level " + s)
|
||||
}
|
||||
|
||||
type RepoLogger map[string]*PackageLogger
|
||||
|
||||
type loggerStruct struct {
|
||||
sync.Mutex
|
||||
repoMap map[string]RepoLogger
|
||||
formatter Formatter
|
||||
}
|
||||
|
||||
// logger is the global logger
|
||||
var logger = new(loggerStruct)
|
||||
|
||||
// SetGlobalLogLevel sets the log level for all packages in all repositories
|
||||
// registered with capnslog.
|
||||
func SetGlobalLogLevel(l LogLevel) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
for _, r := range logger.repoMap {
|
||||
r.setRepoLogLevelInternal(l)
|
||||
}
|
||||
}
|
||||
|
||||
// GetRepoLogger may return the handle to the repository's set of packages' loggers.
|
||||
func GetRepoLogger(repo string) (RepoLogger, error) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
r, ok := logger.repoMap[repo]
|
||||
if !ok {
|
||||
return nil, errors.New("no packages registered for repo " + repo)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// MustRepoLogger returns the handle to the repository's packages' loggers.
|
||||
func MustRepoLogger(repo string) RepoLogger {
|
||||
r, err := GetRepoLogger(repo)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// SetRepoLogLevel sets the log level for all packages in the repository.
|
||||
func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
r.setRepoLogLevelInternal(l)
|
||||
}
|
||||
|
||||
func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
|
||||
for _, v := range r {
|
||||
v.level = l
|
||||
}
|
||||
}
|
||||
|
||||
// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
|
||||
// order, and returns a map of the results, for use in SetLogLevel.
|
||||
func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
|
||||
setlist := strings.Split(conf, ",")
|
||||
out := make(map[string]LogLevel)
|
||||
for _, setstring := range setlist {
|
||||
setting := strings.Split(setstring, "=")
|
||||
if len(setting) != 2 {
|
||||
return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
|
||||
}
|
||||
l, err := ParseLevel(setting[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out[setting[0]] = l
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// SetLogLevel takes a map of package names within a repository to their desired
|
||||
// loglevel, and sets the levels appropriately. Unknown packages are ignored.
|
||||
// "*" is a special package name that corresponds to all packages, and will be
|
||||
// processed first.
|
||||
func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
if l, ok := m["*"]; ok {
|
||||
r.setRepoLogLevelInternal(l)
|
||||
}
|
||||
for k, v := range m {
|
||||
l, ok := r[k]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
l.level = v
|
||||
}
|
||||
}
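A short usage sketch tying ParseLogLevelConfig and SetLogLevel together; the repo path and package name are hypothetical and only serve to show the "pkg=level" syntax:

package main

import (
	stdlog "log"

	"github.com/coreos/pkg/capnslog"
)

var plog = capnslog.NewPackageLogger("github.com/example/project", "database") // hypothetical repo/pkg

func main() {
	rl := capnslog.MustRepoLogger("github.com/example/project")
	// "*" is applied first, then per-package overrides; unknown packages are ignored.
	cfg, err := rl.ParseLogLevelConfig("*=NOTICE,database=DEBUG")
	if err != nil {
		stdlog.Fatal(err)
	}
	rl.SetLogLevel(cfg)
	plog.Debugf("now visible at DEBUG")
}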
|
||||
|
||||
// SetFormatter sets the formatting function for all logs.
|
||||
func SetFormatter(f Formatter) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
logger.formatter = f
|
||||
}
|
||||
|
||||
// NewPackageLogger creates a package logger object.
|
||||
// This should be defined as a global var in your package, referencing your repo.
|
||||
func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
if logger.repoMap == nil {
|
||||
logger.repoMap = make(map[string]RepoLogger)
|
||||
}
|
||||
r, rok := logger.repoMap[repo]
|
||||
if !rok {
|
||||
logger.repoMap[repo] = make(RepoLogger)
|
||||
r = logger.repoMap[repo]
|
||||
}
|
||||
p, pok := r[pkg]
|
||||
if !pok {
|
||||
r[pkg] = &PackageLogger{
|
||||
pkg: pkg,
|
||||
level: INFO,
|
||||
}
|
||||
p = r[pkg]
|
||||
}
|
||||
return
|
||||
}
|
158
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
generated
vendored
Normal file
@ -0,0 +1,158 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
type PackageLogger struct {
|
||||
pkg string
|
||||
level LogLevel
|
||||
}
|
||||
|
||||
const calldepth = 2
|
||||
|
||||
func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
|
||||
if inLevel != CRITICAL && p.level < inLevel {
|
||||
return
|
||||
}
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
if logger.formatter != nil {
|
||||
logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PackageLogger) LevelAt(l LogLevel) bool {
|
||||
return p.level >= l
|
||||
}
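One idiomatic use of LevelAt, sketched as a fragment: guard log statements whose arguments are expensive to build (plog is any *PackageLogger; expensiveDump is a hypothetical helper):

if plog.LevelAt(capnslog.DEBUG) {
	plog.Debugf("state dump: %s", expensiveDump())
}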
|
||||
|
||||
// Log a formatted string at any level between ERROR and TRACE
|
||||
func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
|
||||
p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Log a message at any level between ERROR and TRACE
|
||||
func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
|
||||
p.internalLog(calldepth, l, fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// log stdlib compatibility
|
||||
|
||||
func (p *PackageLogger) Println(args ...interface{}) {
|
||||
p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Printf(format string, args ...interface{}) {
|
||||
p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Print(args ...interface{}) {
|
||||
p.internalLog(calldepth, INFO, fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Panic and fatal
|
||||
|
||||
func (p *PackageLogger) Panicf(format string, args ...interface{}) {
|
||||
s := fmt.Sprintf(format, args...)
|
||||
p.internalLog(calldepth, CRITICAL, s)
|
||||
panic(s)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Panic(args ...interface{}) {
|
||||
s := fmt.Sprint(args...)
|
||||
p.internalLog(calldepth, CRITICAL, s)
|
||||
panic(s)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
|
||||
s := fmt.Sprintf(format, args...)
|
||||
p.internalLog(calldepth, CRITICAL, s)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Fatal(args ...interface{}) {
|
||||
s := fmt.Sprint(args...)
|
||||
p.internalLog(calldepth, CRITICAL, s)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Error Functions
|
||||
|
||||
func (p *PackageLogger) Errorf(format string, args ...interface{}) {
|
||||
p.internalLog(calldepth, ERROR, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Error(entries ...interface{}) {
|
||||
p.internalLog(calldepth, ERROR, entries...)
|
||||
}
|
||||
|
||||
// Warning Functions
|
||||
|
||||
func (p *PackageLogger) Warningf(format string, args ...interface{}) {
|
||||
p.internalLog(calldepth, WARNING, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Warning(entries ...interface{}) {
|
||||
p.internalLog(calldepth, WARNING, entries...)
|
||||
}
|
||||
|
||||
// Notice Functions
|
||||
|
||||
func (p *PackageLogger) Noticef(format string, args ...interface{}) {
|
||||
p.internalLog(calldepth, NOTICE, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Notice(entries ...interface{}) {
|
||||
p.internalLog(calldepth, NOTICE, entries...)
|
||||
}
|
||||
|
||||
// Info Functions
|
||||
|
||||
func (p *PackageLogger) Infof(format string, args ...interface{}) {
|
||||
p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Info(entries ...interface{}) {
|
||||
p.internalLog(calldepth, INFO, entries...)
|
||||
}
|
||||
|
||||
// Debug Functions
|
||||
|
||||
func (p *PackageLogger) Debugf(format string, args ...interface{}) {
|
||||
p.internalLog(calldepth, DEBUG, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Debug(entries ...interface{}) {
|
||||
p.internalLog(calldepth, DEBUG, entries...)
|
||||
}
|
||||
|
||||
// Trace Functions
|
||||
|
||||
func (p *PackageLogger) Tracef(format string, args ...interface{}) {
|
||||
p.internalLog(calldepth, TRACE, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Trace(entries ...interface{}) {
|
||||
p.internalLog(calldepth, TRACE, entries...)
|
||||
}
|
||||
|
||||
func (p *PackageLogger) Flush() {
|
||||
logger.Lock()
|
||||
defer logger.Unlock()
|
||||
logger.formatter.Flush()
|
||||
}
|
65
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// +build !windows
|
||||
|
||||
package capnslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
func NewSyslogFormatter(w *syslog.Writer) Formatter {
|
||||
return &syslogFormatter{w}
|
||||
}
|
||||
|
||||
func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
|
||||
w, err := syslog.New(syslog.LOG_DEBUG, tag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewSyslogFormatter(w), nil
|
||||
}
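A hedged sketch of wiring this formatter in at startup (non-Windows only, matching the build tag above); the "myapp" syslog tag is an illustrative assumption:

package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

func main() {
	f, err := capnslog.NewDefaultSyslogFormatter("myapp")
	if err != nil {
		// syslog may be unavailable (e.g. in minimal containers); fall back to stderr.
		f = capnslog.NewPrettyFormatter(os.Stderr, false)
	}
	capnslog.SetFormatter(f)
}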
|
||||
|
||||
type syslogFormatter struct {
|
||||
w *syslog.Writer
|
||||
}
|
||||
|
||||
func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
|
||||
for _, entry := range entries {
|
||||
str := fmt.Sprint(entry)
|
||||
switch l {
|
||||
case CRITICAL:
|
||||
s.w.Crit(str)
|
||||
case ERROR:
|
||||
s.w.Err(str)
|
||||
case WARNING:
|
||||
s.w.Warning(str)
|
||||
case NOTICE:
|
||||
s.w.Notice(str)
|
||||
case INFO:
|
||||
s.w.Info(str)
|
||||
case DEBUG:
|
||||
s.w.Debug(str)
|
||||
case TRACE:
|
||||
s.w.Debug(str)
|
||||
default:
|
||||
panic("Unhandled loglevel")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *syslogFormatter) Flush() {
|
||||
}
|
5
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/fatih/color/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.6
|
||||
- tip
|
||||
|
20
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/fatih/color/LICENSE.md
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Fatih Arslan
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
154
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/fatih/color/README.md
generated
vendored
Normal file
@ -0,0 +1,154 @@
|
||||
# Color [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/color) [![Build Status](http://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color)
|
||||
|
||||
|
||||
|
||||
Color lets you use colorized outputs in terms of [ANSI Escape
|
||||
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
|
||||
has support for Windows too! The API can be used in several ways, pick one that
|
||||
suits you.
|
||||
|
||||
|
||||
|
||||
![Color](http://i.imgur.com/c1JI0lA.png)
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
go get github.com/fatih/color
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Standard colors
|
||||
|
||||
```go
|
||||
// Print with default helper functions
|
||||
color.Cyan("Prints text in cyan.")
|
||||
|
||||
// A newline will be appended automatically
|
||||
color.Blue("Prints %s in blue.", "text")
|
||||
|
||||
// These are using the default foreground colors
|
||||
color.Red("We have red")
|
||||
color.Magenta("And many others ..")
|
||||
|
||||
```
|
||||
|
||||
### Mix and reuse colors
|
||||
|
||||
```go
|
||||
// Create a new color object
|
||||
c := color.New(color.FgCyan).Add(color.Underline)
|
||||
c.Println("Prints cyan text with an underline.")
|
||||
|
||||
// Or just add them to New()
|
||||
d := color.New(color.FgCyan, color.Bold)
|
||||
d.Printf("This prints bold cyan %s\n", "too!.")
|
||||
|
||||
// Mix up foreground and background colors, create new mixes!
|
||||
red := color.New(color.FgRed)
|
||||
|
||||
boldRed := red.Add(color.Bold)
|
||||
boldRed.Println("This will print text in bold red.")
|
||||
|
||||
whiteBackground := red.Add(color.BgWhite)
|
||||
whiteBackground.Println("Red text with white background.")
|
||||
```
|
||||
|
||||
### Custom print functions (PrintFunc)
|
||||
|
||||
```go
|
||||
// Create a custom print function for convenience
|
||||
red := color.New(color.FgRed).PrintfFunc()
|
||||
red("Warning")
|
||||
red("Error: %s", err)
|
||||
|
||||
// Mix up multiple attributes
|
||||
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
|
||||
notice("Don't forget this...")
|
||||
```
|
||||
|
||||
### Insert into noncolor strings (SprintFunc)
|
||||
|
||||
```go
|
||||
// Create SprintXxx functions to mix strings with other non-colorized strings:
|
||||
yellow := color.New(color.FgYellow).SprintFunc()
|
||||
red := color.New(color.FgRed).SprintFunc()
|
||||
fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
|
||||
|
||||
info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
|
||||
fmt.Printf("This %s rocks!\n", info("package"))
|
||||
|
||||
// Use helper functions
|
||||
fmt.Printf("This", color.RedString("warning"), "should be not neglected.")
|
||||
fmt.Println(color.GreenString("Info:"), "an important message.")
|
||||
|
||||
// Windows supported too! Just don't forget to change the output to color.Output
|
||||
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
|
||||
```
|
||||
|
||||
### Plug into existing code
|
||||
|
||||
```go
|
||||
// Use handy standard colors
|
||||
color.Set(color.FgYellow)
|
||||
|
||||
fmt.Println("Existing text will now be in yellow")
|
||||
fmt.Printf("This one %s\n", "too")
|
||||
|
||||
color.Unset() // Don't forget to unset
|
||||
|
||||
// You can mix up parameters
|
||||
color.Set(color.FgMagenta, color.Bold)
|
||||
defer color.Unset() // Use it in your function
|
||||
|
||||
fmt.Println("All text will now be bold magenta.")
|
||||
```
|
||||
|
||||
### Disable color
|
||||
|
||||
There might be a case where you want to disable color output (for example to
|
||||
pipe the standard output of your app to somewhere else). `Color` has support to
|
||||
disable colors both globally and for single color definition. For example
|
||||
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
|
||||
the color output with:
|
||||
|
||||
```go
|
||||
|
||||
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
|
||||
|
||||
if *flagNoColor {
|
||||
color.NoColor = true // disables colorized output
|
||||
}
|
||||
```
|
||||
|
||||
It also has support for single color definitions (local). You can
|
||||
disable/enable color output on the fly:
|
||||
|
||||
```go
|
||||
c := color.New(color.FgCyan)
|
||||
c.Println("Prints cyan text")
|
||||
|
||||
c.DisableColor()
|
||||
c.Println("This is printed without any color")
|
||||
|
||||
c.EnableColor()
|
||||
c.Println("This prints again cyan...")
|
||||
```
|
||||
|
||||
## Todo
|
||||
|
||||
* Save/Return previous values
|
||||
* Evaluate fmt.Formatter interface
|
||||
|
||||
|
||||
## Credits
|
||||
|
||||
* [Fatih Arslan](https://github.com/fatih)
|
||||
* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
|
||||
|
||||
## License
|
||||
|
||||
The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
|
||||
|
402
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/fatih/color/color.go
generated
vendored
Normal file
@ -0,0 +1,402 @@
|
||||
package color
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/mattn/go-colorable"
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
// NoColor defines if the output is colorized or not. It's dynamically set to
|
||||
// false or true based on the stdout's file descriptor referring to a terminal
|
||||
// or not. This is a global option and affects all colors. For more control
|
||||
// over each color block use the methods DisableColor() individually.
|
||||
var NoColor = !isatty.IsTerminal(os.Stdout.Fd())
|
||||
|
||||
// Color defines a custom color object which is defined by SGR parameters.
|
||||
type Color struct {
|
||||
params []Attribute
|
||||
noColor *bool
|
||||
}
|
||||
|
||||
// Attribute defines a single SGR Code
|
||||
type Attribute int
|
||||
|
||||
const escape = "\x1b"
|
||||
|
||||
// Base attributes
|
||||
const (
|
||||
Reset Attribute = iota
|
||||
Bold
|
||||
Faint
|
||||
Italic
|
||||
Underline
|
||||
BlinkSlow
|
||||
BlinkRapid
|
||||
ReverseVideo
|
||||
Concealed
|
||||
CrossedOut
|
||||
)
|
||||
|
||||
// Foreground text colors
|
||||
const (
|
||||
FgBlack Attribute = iota + 30
|
||||
FgRed
|
||||
FgGreen
|
||||
FgYellow
|
||||
FgBlue
|
||||
FgMagenta
|
||||
FgCyan
|
||||
FgWhite
|
||||
)
|
||||
|
||||
// Foreground Hi-Intensity text colors
|
||||
const (
|
||||
FgHiBlack Attribute = iota + 90
|
||||
FgHiRed
|
||||
FgHiGreen
|
||||
FgHiYellow
|
||||
FgHiBlue
|
||||
FgHiMagenta
|
||||
FgHiCyan
|
||||
FgHiWhite
|
||||
)
|
||||
|
||||
// Background text colors
|
||||
const (
|
||||
BgBlack Attribute = iota + 40
|
||||
BgRed
|
||||
BgGreen
|
||||
BgYellow
|
||||
BgBlue
|
||||
BgMagenta
|
||||
BgCyan
|
||||
BgWhite
|
||||
)
|
||||
|
||||
// Background Hi-Intensity text colors
|
||||
const (
|
||||
BgHiBlack Attribute = iota + 100
|
||||
BgHiRed
|
||||
BgHiGreen
|
||||
BgHiYellow
|
||||
BgHiBlue
|
||||
BgHiMagenta
|
||||
BgHiCyan
|
||||
BgHiWhite
|
||||
)
|
||||
|
||||
// New returns a newly created color object.
|
||||
func New(value ...Attribute) *Color {
|
||||
c := &Color{params: make([]Attribute, 0)}
|
||||
c.Add(value...)
|
||||
return c
|
||||
}
|
||||
|
||||
// Set sets the given parameters immediately. It will change the color of
|
||||
// output with the given SGR parameters until color.Unset() is called.
|
||||
func Set(p ...Attribute) *Color {
|
||||
c := New(p...)
|
||||
c.Set()
|
||||
return c
|
||||
}
|
||||
|
||||
// Unset resets all escape attributes and clears the output. Usually should
|
||||
// be called after Set().
|
||||
func Unset() {
|
||||
if NoColor {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
|
||||
}
|
||||
|
||||
// Set sets the SGR sequence.
|
||||
func (c *Color) Set() *Color {
|
||||
if c.isNoColorSet() {
|
||||
return c
|
||||
}
|
||||
|
||||
fmt.Fprintf(Output, c.format())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Color) unset() {
|
||||
if c.isNoColorSet() {
|
||||
return
|
||||
}
|
||||
|
||||
Unset()
|
||||
}
|
||||
|
||||
// Add is used to chain SGR parameters. Use as many parameters as needed to combine
|
||||
// and create custom color objects. Example: Add(color.FgRed, color.Underline).
|
||||
func (c *Color) Add(value ...Attribute) *Color {
|
||||
c.params = append(c.params, value...)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Color) prepend(value Attribute) {
|
||||
c.params = append(c.params, 0)
|
||||
copy(c.params[1:], c.params[0:])
|
||||
c.params[0] = value
|
||||
}
|
||||
|
||||
// Output defines the standard output of the print functions. By default
|
||||
// os.Stdout is used.
|
||||
var Output = colorable.NewColorableStdout()
|
||||
|
||||
// Print formats using the default formats for its operands and writes to
|
||||
// standard output. Spaces are added between operands when neither is a
|
||||
// string. It returns the number of bytes written and any write error
|
||||
// encountered. This is the standard fmt.Print() method wrapped with the given
|
||||
// color.
|
||||
func (c *Color) Print(a ...interface{}) (n int, err error) {
|
||||
c.Set()
|
||||
defer c.unset()
|
||||
|
||||
return fmt.Fprint(Output, a...)
|
||||
}
|
||||
|
||||
// Printf formats according to a format specifier and writes to standard output.
|
||||
// It returns the number of bytes written and any write error encountered.
|
||||
// This is the standard fmt.Printf() method wrapped with the given color.
|
||||
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
c.Set()
|
||||
defer c.unset()
|
||||
|
||||
return fmt.Fprintf(Output, format, a...)
|
||||
}
|
||||
|
||||
// Println formats using the default formats for its operands and writes to
|
||||
// standard output. Spaces are always added between operands and a newline is
|
||||
// appended. It returns the number of bytes written and any write error
|
||||
// encountered. This is the standard fmt.Print() method wrapped with the given
|
||||
// color.
|
||||
func (c *Color) Println(a ...interface{}) (n int, err error) {
|
||||
c.Set()
|
||||
defer c.unset()
|
||||
|
||||
return fmt.Fprintln(Output, a...)
|
||||
}
|
||||
|
||||
// PrintFunc returns a new function that prints the passed arguments as
|
||||
// colorized with color.Print().
|
||||
func (c *Color) PrintFunc() func(a ...interface{}) {
|
||||
return func(a ...interface{}) { c.Print(a...) }
|
||||
}
|
||||
|
||||
// PrintfFunc returns a new function that prints the passed arguments as
|
||||
// colorized with color.Printf().
|
||||
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
|
||||
return func(format string, a ...interface{}) { c.Printf(format, a...) }
|
||||
}
|
||||
|
||||
// PrintlnFunc returns a new function that prints the passed arguments as
|
||||
// colorized with color.Println().
|
||||
func (c *Color) PrintlnFunc() func(a ...interface{}) {
|
||||
return func(a ...interface{}) { c.Println(a...) }
|
||||
}
|
||||
|
||||
// SprintFunc returns a new function that returns colorized strings for the
|
||||
// given arguments with fmt.Sprint(). Useful to put into or mix into other
|
||||
// strings. Windows users should use this in conjunction with color.Output, example:
|
||||
//
|
||||
// put := New(FgYellow).SprintFunc()
|
||||
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
|
||||
func (c *Color) SprintFunc() func(a ...interface{}) string {
|
||||
return func(a ...interface{}) string {
|
||||
return c.wrap(fmt.Sprint(a...))
|
||||
}
|
||||
}
|
||||
|
||||
// SprintfFunc returns a new function that returns colorized strings for the
|
||||
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
|
||||
// strings. Windows users should use this in conjunction with color.Output.
|
||||
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
|
||||
return func(format string, a ...interface{}) string {
|
||||
return c.wrap(fmt.Sprintf(format, a...))
|
||||
}
|
||||
}
|
||||
|
||||
// SprintlnFunc returns a new function that returns colorized strings for the
|
||||
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
|
||||
// strings. Windows users should use this in conjunction with color.Output.
|
||||
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
|
||||
return func(a ...interface{}) string {
|
||||
return c.wrap(fmt.Sprintln(a...))
|
||||
}
|
||||
}
|
||||
|
||||
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
|
||||
// an example output might be: "1;36" -> bold cyan
|
||||
func (c *Color) sequence() string {
|
||||
format := make([]string, len(c.params))
|
||||
for i, v := range c.params {
|
||||
format[i] = strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
return strings.Join(format, ";")
|
||||
}
|
||||
|
||||
// wrap wraps the s string with the colors attributes. The string is ready to
|
||||
// be printed.
|
||||
func (c *Color) wrap(s string) string {
|
||||
if c.isNoColorSet() {
|
||||
return s
|
||||
}
|
||||
|
||||
return c.format() + s + c.unformat()
|
||||
}
|
||||
|
||||
func (c *Color) format() string {
|
||||
return fmt.Sprintf("%s[%sm", escape, c.sequence())
|
||||
}
|
||||
|
||||
func (c *Color) unformat() string {
|
||||
return fmt.Sprintf("%s[%dm", escape, Reset)
|
||||
}
|
||||
|
||||
// DisableColor disables the color output. Useful when you want to keep existing
|
||||
// code unchanged while still controlling output. Can be used for flags like
|
||||
// "--no-color". To enable back use EnableColor() method.
|
||||
func (c *Color) DisableColor() {
|
||||
c.noColor = boolPtr(true)
|
||||
}
|
||||
|
||||
// EnableColor enables the color output. Use it in conjunction with
|
||||
// DisableColor(). Otherwise this method has no side effects.
|
||||
func (c *Color) EnableColor() {
|
||||
c.noColor = boolPtr(false)
|
||||
}
|
||||
|
||||
func (c *Color) isNoColorSet() bool {
|
||||
// check first whether the user set it explicitly
|
||||
if c.noColor != nil {
|
||||
return *c.noColor
|
||||
}
|
||||
|
||||
// if not return the global option, which is disabled by default
|
||||
return NoColor
|
||||
}
|
||||
|
||||
// Equals returns a boolean value indicating whether two colors are equal.
|
||||
func (c *Color) Equals(c2 *Color) bool {
|
||||
if len(c.params) != len(c2.params) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, attr := range c.params {
|
||||
if !c2.attrExists(attr) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *Color) attrExists(a Attribute) bool {
|
||||
for _, attr := range c.params {
|
||||
if attr == a {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func boolPtr(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Black is a convenient helper function to print with black foreground. A
|
||||
// newline is appended to format by default.
|
||||
func Black(format string, a ...interface{}) { printColor(format, FgBlack, a...) }
|
||||
|
||||
// Red is a convenient helper function to print with red foreground. A
|
||||
// newline is appended to format by default.
|
||||
func Red(format string, a ...interface{}) { printColor(format, FgRed, a...) }
|
||||
|
||||
// Green is a convenient helper function to print with green foreground. A
|
||||
// newline is appended to format by default.
|
||||
func Green(format string, a ...interface{}) { printColor(format, FgGreen, a...) }
|
||||
|
||||
// Yellow is a convenient helper function to print with yellow foreground.
|
||||
// A newline is appended to format by default.
|
||||
func Yellow(format string, a ...interface{}) { printColor(format, FgYellow, a...) }
|
||||
|
||||
// Blue is a convenient helper function to print with blue foreground. A
|
||||
// newline is appended to format by default.
|
||||
func Blue(format string, a ...interface{}) { printColor(format, FgBlue, a...) }
|
||||
|
||||
// Magenta is a convenient helper function to print with magenta foreground.
|
||||
// A newline is appended to format by default.
|
||||
func Magenta(format string, a ...interface{}) { printColor(format, FgMagenta, a...) }
|
||||
|
||||
// Cyan is a convenient helper function to print with cyan foreground. A
|
||||
// newline is appended to format by default.
|
||||
func Cyan(format string, a ...interface{}) { printColor(format, FgCyan, a...) }
|
||||
|
||||
// White is a convenient helper function to print with white foreground. A
|
||||
// newline is appended to format by default.
|
||||
func White(format string, a ...interface{}) { printColor(format, FgWhite, a...) }
|
||||
|
||||
func printColor(format string, p Attribute, a ...interface{}) {
|
||||
if !strings.HasSuffix(format, "\n") {
|
||||
format += "\n"
|
||||
}
|
||||
|
||||
c := &Color{params: []Attribute{p}}
|
||||
c.Printf(format, a...)
|
||||
}
|
||||
|
||||
// BlackString is a convenient helper function to return a string with black
|
||||
// foreground.
|
||||
func BlackString(format string, a ...interface{}) string {
|
||||
return New(FgBlack).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// RedString is a convenient helper function to return a string with red
|
||||
// foreground.
|
||||
func RedString(format string, a ...interface{}) string {
|
||||
return New(FgRed).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// GreenString is a convenient helper function to return a string with green
|
||||
// foreground.
|
||||
func GreenString(format string, a ...interface{}) string {
|
||||
return New(FgGreen).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// YellowString is a convenient helper function to return a string with yellow
|
||||
// foreground.
|
||||
func YellowString(format string, a ...interface{}) string {
|
||||
return New(FgYellow).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// BlueString is a convenient helper function to return a string with blue
|
||||
// foreground.
|
||||
func BlueString(format string, a ...interface{}) string {
|
||||
return New(FgBlue).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// MagentaString is a convenient helper function to return a string with magenta
|
||||
// foreground.
|
||||
func MagentaString(format string, a ...interface{}) string {
|
||||
return New(FgMagenta).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// CyanString is a convenient helper function to return a string with cyan
|
||||
// foreground.
|
||||
func CyanString(format string, a ...interface{}) string {
|
||||
return New(FgCyan).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// WhiteString is a convenient helper function to return a string with white
|
||||
// foreground.
|
||||
func WhiteString(format string, a ...interface{}) string {
|
||||
return New(FgWhite).SprintfFunc()(format, a...)
|
||||
}
|
114
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/fatih/color/doc.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
|
||||
/*
|
||||
Package color is an ANSI color package to output colorized or SGR defined
|
||||
output to the standard output. The API can be used in several way, pick one
|
||||
that suits you.
|
||||
|
||||
Use simple and default helper functions with predefined foreground colors:
|
||||
|
||||
color.Cyan("Prints text in cyan.")
|
||||
|
||||
// a newline will be appended automatically
|
||||
color.Blue("Prints %s in blue.", "text")
|
||||
|
||||
// More default foreground colors..
|
||||
color.Red("We have red")
|
||||
color.Yellow("Yellow color too!")
|
||||
color.Magenta("And many others ..")
|
||||
|
||||
However there are times where custom color mixes are required. Below are some
|
||||
examples to create custom color objects and use the print functions of each
|
||||
separate color object.
|
||||
|
||||
// Create a new color object
|
||||
c := color.New(color.FgCyan).Add(color.Underline)
|
||||
c.Println("Prints cyan text with an underline.")
|
||||
|
||||
// Or just add them to New()
|
||||
d := color.New(color.FgCyan, color.Bold)
|
||||
d.Printf("This prints bold cyan %s\n", "too!.")
|
||||
|
||||
|
||||
// Mix up foreground and background colors, create new mixes!
|
||||
red := color.New(color.FgRed)
|
||||
|
||||
boldRed := red.Add(color.Bold)
|
||||
boldRed.Println("This will print text in bold red.")
|
||||
|
||||
whiteBackground := red.Add(color.BgWhite)
|
||||
whiteBackground.Println("Red text with White background.")
|
||||
|
||||
|
||||
You can create PrintXxx functions to simplify even more:
|
||||
|
||||
// Create a custom print function for convenience
|
||||
red := color.New(color.FgRed).PrintfFunc()
|
||||
red("warning")
|
||||
red("error: %s", err)
|
||||
|
||||
// Mix up multiple attributes
|
||||
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
|
||||
notice("don't forget this...")
|
||||
|
||||
|
||||
Or create SprintXxx functions to mix strings with other non-colorized strings:
|
||||
|
||||
yellow := New(FgYellow).SprintFunc()
|
||||
red := New(FgRed).SprintFunc()
|
||||
|
||||
fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
|
||||
|
||||
info := New(FgWhite, BgGreen).SprintFunc()
|
||||
fmt.Printf("this %s rocks!\n", info("package"))
|
||||
|
||||
Windows support is enabled by default. All Print functions work as intended.
|
||||
However, for the color.SprintXXX functions only, users should use fmt.FprintXXX and
|
||||
set the output to color.Output:
|
||||
|
||||
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
|
||||
|
||||
info := New(FgWhite, BgGreen).SprintFunc()
|
||||
fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
|
||||
|
||||
Using with existing code is possible. Just use the Set() method to set the
|
||||
standard output to the given parameters. That way a rewrite of an existing
|
||||
code is not required.
|
||||
|
||||
// Use handy standard colors.
|
||||
color.Set(color.FgYellow)
|
||||
|
||||
fmt.Println("Existing text will be now in Yellow")
|
||||
fmt.Printf("This one %s\n", "too")
|
||||
|
||||
color.Unset() // don't forget to unset
|
||||
|
||||
// You can mix up parameters
|
||||
color.Set(color.FgMagenta, color.Bold)
|
||||
defer color.Unset() // use it in your function
|
||||
|
||||
fmt.Println("All text will be now bold magenta.")
|
||||
|
||||
There might be a case where you want to disable color output (for example to
|
||||
pipe the standard output of your app to somewhere else). `Color` has support to
|
||||
disable colors both globally and for single color definition. For example
|
||||
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
|
||||
the color output with:
|
||||
|
||||
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
|
||||
|
||||
if *flagNoColor {
|
||||
color.NoColor = true // disables colorized output
|
||||
}
|
||||
|
||||
It also has support for single color definitions (local). You can
|
||||
disable/enable color output on the fly:
|
||||
|
||||
c := color.New(color.FgCyan)
|
||||
c.Println("Prints cyan text")
|
||||
|
||||
c.DisableColor()
|
||||
c.Println("This is printed without any color")
|
||||
|
||||
c.EnableColor()
|
||||
c.Println("This prints again cyan...")
|
||||
*/
|
||||
package color
|
21
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/fatih/color/vendor/github.com/mattn/go-colorable/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Yasuhiro Matsumoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
43
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/fatih/color/vendor/github.com/mattn/go-colorable/README.md
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
# go-colorable
|
||||
|
||||
Colorable writer for windows.
|
||||
|
||||
For example, most logger packages don't show colors on Windows. (I know this can be done with ansicon, but I'd rather not require it.)
|
||||
This package makes it possible to handle ANSI color escape sequences on Windows.
|
||||
|
||||
## Too Bad!
|
||||
|
||||
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
|
||||
|
||||
|
||||
## So Good!
|
||||
|
||||
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
|
||||
logrus.SetOutput(colorable.NewColorableStdout())
|
||||
|
||||
logrus.Info("succeeded")
|
||||
logrus.Warn("not correct")
|
||||
logrus.Error("something error")
|
||||
logrus.Fatal("panic")
|
||||
```
|
||||
|
||||
You can compile the above code on non-Windows OSes as well.
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
$ go get github.com/mattn/go-colorable
|
||||
```
|
||||
|
||||
# License
|
||||
|
||||
MIT
|
||||
|
||||
# Author
|
||||
|
||||
Yasuhiro Matsumoto (a.k.a mattn)
|
@ -0,0 +1,24 @@
|
||||
// +build !windows
|
||||
|
||||
package colorable
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
func NewColorable(file *os.File) io.Writer {
|
||||
if file == nil {
|
||||
panic("nil passed instead of *os.File to NewColorable()")
|
||||
}
|
||||
|
||||
return file
|
||||
}
|
||||
|
||||
func NewColorableStdout() io.Writer {
|
||||
return os.Stdout
|
||||
}
|
||||
|
||||
func NewColorableStderr() io.Writer {
|
||||
return os.Stderr
|
||||
}
|
@ -0,0 +1,783 @@
|
||||
package colorable
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
const (
|
||||
foregroundBlue = 0x1
|
||||
foregroundGreen = 0x2
|
||||
foregroundRed = 0x4
|
||||
foregroundIntensity = 0x8
|
||||
foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
|
||||
backgroundBlue = 0x10
|
||||
backgroundGreen = 0x20
|
||||
backgroundRed = 0x40
|
||||
backgroundIntensity = 0x80
|
||||
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
|
||||
)
|
||||
|
||||
type wchar uint16
|
||||
type short int16
|
||||
type dword uint32
|
||||
type word uint16
|
||||
|
||||
type coord struct {
|
||||
x short
|
||||
y short
|
||||
}
|
||||
|
||||
type smallRect struct {
|
||||
left short
|
||||
top short
|
||||
right short
|
||||
bottom short
|
||||
}
|
||||
|
||||
type consoleScreenBufferInfo struct {
|
||||
size coord
|
||||
cursorPosition coord
|
||||
attributes word
|
||||
window smallRect
|
||||
maximumWindowSize coord
|
||||
}
|
||||
|
||||
var (
|
||||
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
|
||||
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
|
||||
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
|
||||
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
|
||||
procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
|
||||
)
|
||||
|
||||
type Writer struct {
|
||||
out io.Writer
|
||||
handle syscall.Handle
|
||||
lastbuf bytes.Buffer
|
||||
oldattr word
|
||||
}
|
||||
|
||||
func NewColorable(file *os.File) io.Writer {
|
||||
if file == nil {
|
||||
panic("nil passed instead of *os.File to NewColorable()")
|
||||
}
|
||||
|
||||
if isatty.IsTerminal(file.Fd()) {
|
||||
var csbi consoleScreenBufferInfo
|
||||
handle := syscall.Handle(file.Fd())
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
|
||||
} else {
|
||||
return file
|
||||
}
|
||||
}
|
||||
|
||||
func NewColorableStdout() io.Writer {
|
||||
return NewColorable(os.Stdout)
|
||||
}
|
||||
|
||||
func NewColorableStderr() io.Writer {
|
||||
return NewColorable(os.Stderr)
|
||||
}
|
||||
|
||||
var color256 = map[int]int{
|
||||
0: 0x000000,
|
||||
1: 0x800000,
|
||||
2: 0x008000,
|
||||
3: 0x808000,
|
||||
4: 0x000080,
|
||||
5: 0x800080,
|
||||
6: 0x008080,
|
||||
7: 0xc0c0c0,
|
||||
8: 0x808080,
|
||||
9: 0xff0000,
|
||||
10: 0x00ff00,
|
||||
11: 0xffff00,
|
||||
12: 0x0000ff,
|
||||
13: 0xff00ff,
|
||||
14: 0x00ffff,
|
||||
15: 0xffffff,
|
||||
16: 0x000000,
|
||||
17: 0x00005f,
|
||||
18: 0x000087,
|
||||
19: 0x0000af,
|
||||
20: 0x0000d7,
|
||||
21: 0x0000ff,
|
||||
22: 0x005f00,
|
||||
23: 0x005f5f,
|
||||
24: 0x005f87,
|
||||
25: 0x005faf,
|
||||
26: 0x005fd7,
|
||||
27: 0x005fff,
|
||||
28: 0x008700,
|
||||
29: 0x00875f,
|
||||
30: 0x008787,
|
||||
31: 0x0087af,
|
||||
32: 0x0087d7,
|
||||
33: 0x0087ff,
|
||||
34: 0x00af00,
|
||||
35: 0x00af5f,
|
||||
36: 0x00af87,
|
||||
37: 0x00afaf,
|
||||
38: 0x00afd7,
|
||||
39: 0x00afff,
|
||||
40: 0x00d700,
|
||||
41: 0x00d75f,
|
||||
42: 0x00d787,
|
||||
43: 0x00d7af,
|
||||
44: 0x00d7d7,
|
||||
45: 0x00d7ff,
|
||||
46: 0x00ff00,
|
||||
47: 0x00ff5f,
|
||||
48: 0x00ff87,
|
||||
49: 0x00ffaf,
|
||||
50: 0x00ffd7,
|
||||
51: 0x00ffff,
|
||||
52: 0x5f0000,
|
||||
53: 0x5f005f,
|
||||
54: 0x5f0087,
|
||||
55: 0x5f00af,
|
||||
56: 0x5f00d7,
|
||||
57: 0x5f00ff,
|
||||
58: 0x5f5f00,
|
||||
59: 0x5f5f5f,
|
||||
60: 0x5f5f87,
|
||||
61: 0x5f5faf,
|
||||
62: 0x5f5fd7,
|
||||
63: 0x5f5fff,
|
||||
64: 0x5f8700,
|
||||
65: 0x5f875f,
|
||||
66: 0x5f8787,
|
||||
67: 0x5f87af,
|
||||
68: 0x5f87d7,
|
||||
69: 0x5f87ff,
|
||||
70: 0x5faf00,
|
||||
71: 0x5faf5f,
|
||||
72: 0x5faf87,
|
||||
73: 0x5fafaf,
|
||||
74: 0x5fafd7,
|
||||
75: 0x5fafff,
|
||||
76: 0x5fd700,
|
||||
77: 0x5fd75f,
|
||||
78: 0x5fd787,
|
||||
79: 0x5fd7af,
|
||||
80: 0x5fd7d7,
|
||||
81: 0x5fd7ff,
|
||||
82: 0x5fff00,
|
||||
83: 0x5fff5f,
|
||||
84: 0x5fff87,
|
||||
85: 0x5fffaf,
|
||||
86: 0x5fffd7,
|
||||
87: 0x5fffff,
|
||||
88: 0x870000,
|
||||
89: 0x87005f,
|
||||
90: 0x870087,
|
||||
91: 0x8700af,
|
||||
92: 0x8700d7,
|
||||
93: 0x8700ff,
|
||||
94: 0x875f00,
|
||||
95: 0x875f5f,
|
||||
96: 0x875f87,
|
||||
97: 0x875faf,
|
||||
98: 0x875fd7,
|
||||
99: 0x875fff,
|
||||
100: 0x878700,
|
||||
101: 0x87875f,
|
||||
102: 0x878787,
|
||||
103: 0x8787af,
|
||||
104: 0x8787d7,
|
||||
105: 0x8787ff,
|
||||
106: 0x87af00,
|
||||
107: 0x87af5f,
|
||||
108: 0x87af87,
|
||||
109: 0x87afaf,
|
||||
110: 0x87afd7,
|
||||
111: 0x87afff,
|
||||
112: 0x87d700,
|
||||
113: 0x87d75f,
|
||||
114: 0x87d787,
|
||||
115: 0x87d7af,
|
||||
116: 0x87d7d7,
|
||||
117: 0x87d7ff,
|
||||
118: 0x87ff00,
|
||||
119: 0x87ff5f,
|
||||
120: 0x87ff87,
|
||||
121: 0x87ffaf,
|
||||
122: 0x87ffd7,
|
||||
123: 0x87ffff,
|
||||
124: 0xaf0000,
|
||||
125: 0xaf005f,
|
||||
126: 0xaf0087,
|
||||
127: 0xaf00af,
|
||||
128: 0xaf00d7,
|
||||
129: 0xaf00ff,
|
||||
130: 0xaf5f00,
|
||||
131: 0xaf5f5f,
|
||||
132: 0xaf5f87,
|
||||
133: 0xaf5faf,
|
||||
134: 0xaf5fd7,
|
||||
135: 0xaf5fff,
|
||||
136: 0xaf8700,
|
||||
137: 0xaf875f,
|
||||
138: 0xaf8787,
|
||||
139: 0xaf87af,
|
||||
140: 0xaf87d7,
|
||||
141: 0xaf87ff,
|
||||
142: 0xafaf00,
|
||||
143: 0xafaf5f,
|
||||
144: 0xafaf87,
|
||||
145: 0xafafaf,
|
||||
146: 0xafafd7,
|
||||
147: 0xafafff,
|
||||
148: 0xafd700,
|
||||
149: 0xafd75f,
|
||||
150: 0xafd787,
|
||||
151: 0xafd7af,
|
||||
152: 0xafd7d7,
|
||||
153: 0xafd7ff,
|
||||
154: 0xafff00,
|
||||
155: 0xafff5f,
|
||||
156: 0xafff87,
|
||||
157: 0xafffaf,
|
||||
158: 0xafffd7,
|
||||
159: 0xafffff,
|
||||
160: 0xd70000,
|
||||
161: 0xd7005f,
|
||||
162: 0xd70087,
|
||||
163: 0xd700af,
|
||||
164: 0xd700d7,
|
||||
165: 0xd700ff,
|
||||
166: 0xd75f00,
|
||||
167: 0xd75f5f,
|
||||
168: 0xd75f87,
|
||||
169: 0xd75faf,
|
||||
170: 0xd75fd7,
|
||||
171: 0xd75fff,
|
||||
172: 0xd78700,
|
||||
173: 0xd7875f,
|
||||
174: 0xd78787,
|
||||
175: 0xd787af,
|
||||
176: 0xd787d7,
|
||||
177: 0xd787ff,
|
||||
178: 0xd7af00,
|
||||
179: 0xd7af5f,
|
||||
180: 0xd7af87,
|
||||
181: 0xd7afaf,
|
||||
182: 0xd7afd7,
|
||||
183: 0xd7afff,
|
||||
184: 0xd7d700,
|
||||
185: 0xd7d75f,
|
||||
186: 0xd7d787,
|
||||
187: 0xd7d7af,
|
||||
188: 0xd7d7d7,
|
||||
189: 0xd7d7ff,
|
||||
190: 0xd7ff00,
|
||||
191: 0xd7ff5f,
|
||||
192: 0xd7ff87,
|
||||
193: 0xd7ffaf,
|
||||
194: 0xd7ffd7,
|
||||
195: 0xd7ffff,
|
||||
196: 0xff0000,
|
||||
197: 0xff005f,
|
||||
198: 0xff0087,
|
||||
199: 0xff00af,
|
||||
200: 0xff00d7,
|
||||
201: 0xff00ff,
|
||||
202: 0xff5f00,
|
||||
203: 0xff5f5f,
|
||||
204: 0xff5f87,
|
||||
205: 0xff5faf,
|
||||
206: 0xff5fd7,
|
||||
207: 0xff5fff,
|
||||
208: 0xff8700,
|
||||
209: 0xff875f,
|
||||
210: 0xff8787,
|
||||
211: 0xff87af,
|
||||
212: 0xff87d7,
|
||||
213: 0xff87ff,
|
||||
214: 0xffaf00,
|
||||
215: 0xffaf5f,
|
||||
216: 0xffaf87,
|
||||
217: 0xffafaf,
|
||||
218: 0xffafd7,
|
||||
219: 0xffafff,
|
||||
220: 0xffd700,
|
||||
221: 0xffd75f,
|
||||
222: 0xffd787,
|
||||
223: 0xffd7af,
|
||||
224: 0xffd7d7,
|
||||
225: 0xffd7ff,
|
||||
226: 0xffff00,
|
||||
227: 0xffff5f,
|
||||
228: 0xffff87,
|
||||
229: 0xffffaf,
|
||||
230: 0xffffd7,
|
||||
231: 0xffffff,
|
||||
232: 0x080808,
|
||||
233: 0x121212,
|
||||
234: 0x1c1c1c,
|
||||
235: 0x262626,
|
||||
236: 0x303030,
|
||||
237: 0x3a3a3a,
|
||||
238: 0x444444,
|
||||
239: 0x4e4e4e,
|
||||
240: 0x585858,
|
||||
241: 0x626262,
|
||||
242: 0x6c6c6c,
|
||||
243: 0x767676,
|
||||
244: 0x808080,
|
||||
245: 0x8a8a8a,
|
||||
246: 0x949494,
|
||||
247: 0x9e9e9e,
|
||||
248: 0xa8a8a8,
|
||||
249: 0xb2b2b2,
|
||||
250: 0xbcbcbc,
|
||||
251: 0xc6c6c6,
|
||||
252: 0xd0d0d0,
|
||||
253: 0xdadada,
|
||||
254: 0xe4e4e4,
|
||||
255: 0xeeeeee,
|
||||
}
|
||||
|
||||
func (w *Writer) Write(data []byte) (n int, err error) {
|
||||
var csbi consoleScreenBufferInfo
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
|
||||
er := bytes.NewBuffer(data)
|
||||
loop:
|
||||
for {
|
||||
r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
if r1 == 0 {
|
||||
break loop
|
||||
}
|
||||
|
||||
c1, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
if c1 != 0x1b {
|
||||
fmt.Fprint(w.out, string(c1))
|
||||
continue
|
||||
}
|
||||
c2, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
break loop
|
||||
}
|
||||
if c2 != 0x5b {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
continue
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
var m rune
|
||||
for {
|
||||
c, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
w.lastbuf.Write(buf.Bytes())
|
||||
break loop
|
||||
}
|
||||
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
|
||||
m = c
|
||||
break
|
||||
}
|
||||
buf.Write([]byte(string(c)))
|
||||
}
|
||||
|
||||
var csbi consoleScreenBufferInfo
|
||||
switch m {
|
||||
case 'A':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.y -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'B':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.y += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'C':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x += short(n) // ANSI "CSI n C" moves the cursor forward (right)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'D':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// ANSI "CSI n D" moves the cursor back (left); n was already parsed above.
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x -= short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'E':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = 0
|
||||
csbi.cursorPosition.y += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'F':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = 0
|
||||
csbi.cursorPosition.y -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'G':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'H':
|
||||
token := strings.Split(buf.String(), ";")
|
||||
if len(token) != 2 {
|
||||
continue
|
||||
}
|
||||
n1, err := strconv.Atoi(token[0])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
n2, err := strconv.Atoi(token[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
csbi.cursorPosition.x = short(n2)
csbi.cursorPosition.y = short(n1)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'J':
|
||||
n, err := strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var cursor coord
|
||||
switch n {
|
||||
case 0:
|
||||
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
|
||||
case 1:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top}
|
||||
case 2:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top}
|
||||
}
|
||||
var count, written dword
|
||||
count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
|
||||
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
case 'K':
|
||||
n, err := strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var cursor coord
|
||||
switch n {
|
||||
case 0:
|
||||
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
|
||||
case 1:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
|
||||
case 2:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
|
||||
}
|
||||
var count, written dword
|
||||
count = dword(csbi.size.x - csbi.cursorPosition.x)
|
||||
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
case 'm':
|
||||
attr := csbi.attributes
|
||||
cs := buf.String()
|
||||
if cs == "" {
|
||||
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
|
||||
continue
|
||||
}
|
||||
token := strings.Split(cs, ";")
|
||||
for i := 0; i < len(token); i++ {
|
||||
ns := token[i]
|
||||
if n, err = strconv.Atoi(ns); err == nil {
|
||||
switch {
|
||||
case n == 0 || n == 100:
|
||||
attr = w.oldattr
|
||||
case 1 <= n && n <= 5:
|
||||
attr |= foregroundIntensity
|
||||
case n == 7:
|
||||
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
|
||||
case n == 22 || n == 25:
|
||||
attr |= foregroundIntensity
|
||||
case n == 27:
|
||||
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
|
||||
case 30 <= n && n <= 37:
|
||||
attr = (attr & backgroundMask)
|
||||
if (n-30)&1 != 0 {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if (n-30)&2 != 0 {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if (n-30)&4 != 0 {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
case n == 38: // set foreground color.
|
||||
if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
|
||||
if n256, err := strconv.Atoi(token[i+2]); err == nil {
|
||||
if n256foreAttr == nil {
|
||||
n256setup()
|
||||
}
|
||||
attr &= backgroundMask
|
||||
attr |= n256foreAttr[n256]
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
attr = attr & (w.oldattr & backgroundMask)
|
||||
}
|
||||
case n == 39: // reset foreground color.
|
||||
attr &= backgroundMask
|
||||
attr |= w.oldattr & foregroundMask
|
||||
case 40 <= n && n <= 47:
|
||||
attr = (attr & foregroundMask)
|
||||
if (n-40)&1 != 0 {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if (n-40)&2 != 0 {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if (n-40)&4 != 0 {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
case n == 48: // set background color.
|
||||
if i < len(token)-2 && token[i+1] == "5" {
|
||||
if n256, err := strconv.Atoi(token[i+2]); err == nil {
|
||||
if n256backAttr == nil {
|
||||
n256setup()
|
||||
}
|
||||
attr &= foregroundMask
|
||||
attr |= n256backAttr[n256]
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
attr = attr & (w.oldattr & foregroundMask)
|
||||
}
|
||||
case n == 49: // reset background color.
|
||||
attr &= foregroundMask
|
||||
attr |= w.oldattr & backgroundMask
|
||||
case 90 <= n && n <= 97:
|
||||
attr = (attr & backgroundMask)
|
||||
attr |= foregroundIntensity
|
||||
if (n-90)&1 != 0 {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if (n-90)&2 != 0 {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if (n-90)&4 != 0 {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
case 100 <= n && n <= 107:
|
||||
attr = (attr & foregroundMask)
|
||||
attr |= backgroundIntensity
|
||||
if (n-100)&1 != 0 {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if (n-100)&2 != 0 {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if (n-100)&4 != 0 {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
}
|
||||
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(data) - w.lastbuf.Len(), nil
|
||||
}
|
||||
|
||||
type consoleColor struct {
|
||||
rgb int
|
||||
red bool
|
||||
green bool
|
||||
blue bool
|
||||
intensity bool
|
||||
}
|
||||
|
||||
func (c consoleColor) foregroundAttr() (attr word) {
|
||||
if c.red {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if c.green {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if c.blue {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
if c.intensity {
|
||||
attr |= foregroundIntensity
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c consoleColor) backgroundAttr() (attr word) {
|
||||
if c.red {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if c.green {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if c.blue {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
if c.intensity {
|
||||
attr |= backgroundIntensity
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var color16 = []consoleColor{
|
||||
consoleColor{0x000000, false, false, false, false},
|
||||
consoleColor{0x000080, false, false, true, false},
|
||||
consoleColor{0x008000, false, true, false, false},
|
||||
consoleColor{0x008080, false, true, true, false},
|
||||
consoleColor{0x800000, true, false, false, false},
|
||||
consoleColor{0x800080, true, false, true, false},
|
||||
consoleColor{0x808000, true, true, false, false},
|
||||
consoleColor{0xc0c0c0, true, true, true, false},
|
||||
consoleColor{0x808080, false, false, false, true},
|
||||
consoleColor{0x0000ff, false, false, true, true},
|
||||
consoleColor{0x00ff00, false, true, false, true},
|
||||
consoleColor{0x00ffff, false, true, true, true},
|
||||
consoleColor{0xff0000, true, false, false, true},
|
||||
consoleColor{0xff00ff, true, false, true, true},
|
||||
consoleColor{0xffff00, true, true, false, true},
|
||||
consoleColor{0xffffff, true, true, true, true},
|
||||
}
|
||||
|
||||
type hsv struct {
|
||||
h, s, v float32
|
||||
}
|
||||
|
||||
func (a hsv) dist(b hsv) float32 {
|
||||
dh := a.h - b.h
|
||||
switch {
|
||||
case dh > 0.5:
|
||||
dh = 1 - dh
|
||||
case dh < -0.5:
|
||||
dh = -1 - dh
|
||||
}
|
||||
ds := a.s - b.s
|
||||
dv := a.v - b.v
|
||||
return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
|
||||
}
|
||||
|
||||
func toHSV(rgb int) hsv {
|
||||
r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
|
||||
float32((rgb&0x00FF00)>>8)/256.0,
|
||||
float32(rgb&0x0000FF)/256.0
|
||||
min, max := minmax3f(r, g, b)
|
||||
h := max - min
|
||||
if h > 0 {
|
||||
if max == r {
|
||||
h = (g - b) / h
|
||||
if h < 0 {
|
||||
h += 6
|
||||
}
|
||||
} else if max == g {
|
||||
h = 2 + (b-r)/h
|
||||
} else {
|
||||
h = 4 + (r-g)/h
|
||||
}
|
||||
}
|
||||
h /= 6.0
|
||||
s := max - min
|
||||
if max != 0 {
|
||||
s /= max
|
||||
}
|
||||
v := max
|
||||
return hsv{h: h, s: s, v: v}
|
||||
}
|
||||
|
||||
type hsvTable []hsv
|
||||
|
||||
func toHSVTable(rgbTable []consoleColor) hsvTable {
|
||||
t := make(hsvTable, len(rgbTable))
|
||||
for i, c := range rgbTable {
|
||||
t[i] = toHSV(c.rgb)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (t hsvTable) find(rgb int) consoleColor {
|
||||
hsv := toHSV(rgb)
|
||||
n := 7
|
||||
l := float32(5.0)
|
||||
for i, p := range t {
|
||||
d := hsv.dist(p)
|
||||
if d < l {
|
||||
l, n = d, i
|
||||
}
|
||||
}
|
||||
return color16[n]
|
||||
}
|
||||
|
||||
func minmax3f(a, b, c float32) (min, max float32) {
|
||||
if a < b {
|
||||
if b < c {
|
||||
return a, c
|
||||
} else if a < c {
|
||||
return a, b
|
||||
} else {
|
||||
return c, b
|
||||
}
|
||||
} else {
|
||||
if a < c {
|
||||
return b, c
|
||||
} else if b < c {
|
||||
return b, a
|
||||
} else {
|
||||
return c, a
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var n256foreAttr []word
|
||||
var n256backAttr []word
|
||||
|
||||
func n256setup() {
|
||||
n256foreAttr = make([]word, 256)
|
||||
n256backAttr = make([]word, 256)
|
||||
t := toHSVTable(color16)
|
||||
for i, rgb := range color256 {
|
||||
c := t.find(rgb)
|
||||
n256foreAttr[i] = c.foregroundAttr()
|
||||
n256backAttr[i] = c.backgroundAttr()
|
||||
}
|
||||
}
|
9
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/fatih/color/vendor/github.com/mattn/go-isatty/LICENSE
generated
vendored
Normal file
@ -0,0 +1,9 @@
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>

MIT License (Expat)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
37
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/fatih/color/vendor/github.com/mattn/go-isatty/README.md
generated
vendored
Normal file
@ -0,0 +1,37 @@
# go-isatty

isatty for golang

## Usage

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	if isatty.IsTerminal(os.Stdout.Fd()) {
		fmt.Println("Is Terminal")
	} else {
		fmt.Println("Is Not Terminal")
	}
}
```

## Installation

```
$ go get github.com/mattn/go-isatty
```

## License

MIT

## Author

Yasuhiro Matsumoto (a.k.a mattn)
2
vendor/github.com/coreos/clair/contrib/analyze-local-images/vendor/github.com/fatih/color/vendor/github.com/mattn/go-isatty/doc.go
generated
vendored
Normal file
@ -0,0 +1,2 @@
// Package isatty reports whether a given file descriptor is attached to a terminal.
package isatty
Some files were not shown because too many files have changed in this diff.