Initial commit
commit 3ec262dd51
.dockerignore (Normal file, 7 lines)
@@ -0,0 +1,7 @@
.*
*.md
DCO
LICENSE
NOTICE
docs
cloudconfig
CONTRIBUTING.md (Executable file, 71 lines)
@@ -0,0 +1,71 @@
# How to Contribute

CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
GitHub pull requests. This document outlines some of the conventions on
development workflow, commit message formatting, contact points and other
resources to make it easier to get your contribution accepted.

# Certificate of Origin

By contributing to this project you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution. See the [DCO](DCO) file for details.

# Email and Chat

The project currently uses the general CoreOS email list and IRC channel:
- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org

Please avoid emailing maintainers found in the MAINTAINERS file directly. They
are very busy and read the mailing lists.

## Getting Started

- Fork the repository on GitHub
- Read the [README](README.md) for build and test instructions
- Play with the project, submit bugs, submit patches!

## Contribution Flow

This is a rough outline of what a contributor's workflow looks like:

- Create a topic branch from where you want to base your work (usually master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- Make sure the tests pass, and add any new tests as appropriate.
- Submit a pull request to the original repository.

Thanks for your contributions!

### Format of the Commit Message

We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.

```
scripts: add the test-cluster command

this uses tmux to setup a test cluster that you can easily kill and
start for debugging.

Fixes #38
```

The format can be described more formally as follows:

```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```

The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.
DCO (Executable file, 36 lines)
@@ -0,0 +1,36 @@
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
Dockerfile (Normal file, 18 lines)
@@ -0,0 +1,18 @@
FROM golang:1.5
MAINTAINER Quentin Machu <quentin.machu@coreos.com>

RUN apt-get update && apt-get install -y bzr rpm && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

RUN mkdir /db
VOLUME /db

EXPOSE 6060 6061

ADD . /go/src/github.com/coreos/clair/
WORKDIR /go/src/github.com/coreos/clair/

ENV GO15VENDOREXPERIMENT 1
RUN go install -v
RUN go test $(go list ./... | grep -v /vendor/) # https://github.com/golang/go/issues/11659

ENTRYPOINT ["clair"]
Godeps/Godeps.json (generated, Normal file, 94 lines)
@ -0,0 +1,94 @@
|
||||
{
|
||||
"ImportPath": "github.com/coreos/clair",
|
||||
"GoVersion": "go1.5.1",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "github.com/alecthomas/template",
|
||||
"Rev": "b867cc6ab45cece8143cfcc6fc9c77cf3f2c23c0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/alecthomas/units",
|
||||
"Rev": "6b4e7dc5e3143b85ea77909c72caf89416fc2915"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/barakmich/glog",
|
||||
"Rev": "fafcb6128a8a2e6360ff034091434d547397d54a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/boltdb/bolt",
|
||||
"Comment": "v1.0-98-gafceb31",
|
||||
"Rev": "afceb316b96ea97cbac6d23afbdf69543d80748a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-systemd/journal",
|
||||
"Comment": "v3-15-gcfa48f3",
|
||||
"Rev": "cfa48f34d8dc4ff58f9b48725181a09f9092dc3c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/pkg/capnslog",
|
||||
"Rev": "42a8c3b1a6f917bb8346ef738f32712a7ca0ede7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/pkg/timeutil",
|
||||
"Rev": "42a8c3b1a6f917bb8346ef738f32712a7ca0ede7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gogo/protobuf/proto",
|
||||
"Rev": "58bbd41c1a2d1b7154f5d99a8d0d839b3093301a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cayley",
|
||||
"Comment": "v0.4.1-160-gcdf0154",
|
||||
"Rev": "cdf0154d1a34019651eb4f46ce666b31f4d8cae7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/julienschmidt/httprouter",
|
||||
"Comment": "v1.1",
|
||||
"Rev": "8c199fb6259ffc1af525cc3ad52ee60ba8359669"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/lib/pq",
|
||||
"Comment": "go1.0-cutoff-56-gdc50b6a",
|
||||
"Rev": "dc50b6ad2d3ee836442cf3389009c7cd1e64bb43"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pborman/uuid",
|
||||
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/testify/assert",
|
||||
"Comment": "v1.0-17-g089c718",
|
||||
"Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
|
||||
"Rev": "315fcfb05d4d46d4354b313d146ef688dda272a9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/gosnappy/snappy",
|
||||
"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tylerb/graceful",
|
||||
"Comment": "v1.2.3",
|
||||
"Rev": "48afeb21e2fcbcff0f30bd5ad6b97747b0fae38e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/netutil",
|
||||
"Rev": "7654728e381988afd88e58cabfd6363a5ea91810"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/alecthomas/kingpin.v2",
|
||||
"Comment": "v2.0.10",
|
||||
"Rev": "e1f37920c1d0ced4d1c92f9526a2a433183f02e9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/mgo.v2",
|
||||
"Comment": "r2015.05.29",
|
||||
"Rev": "01ee097136da162d1dd3c9b44fbdf3abf4fd6552"
|
||||
}
|
||||
]
|
||||
}
|
Godeps/Readme (generated, Normal file, 5 lines)
@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
LICENSE (Executable file, 202 lines)
@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
NOTICE (Executable file, 5 lines)
@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2015 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
README.md (Normal file, 81 lines)
@@ -0,0 +1,81 @@
Clair
=====

Clair is a container vulnerability analysis service. It provides the list of vulnerabilities that threaten each container and can send notifications whenever new vulnerabilities that affect existing containers are released.

We named the project « Clair », French for *clear*, *bright*, *transparent*, because we believe it gives users clear insight into the security of their container infrastructure.

## Why should I use Clair?

Clair is a single-binary server that exposes a JSON HTTP API. It does not require an agent to sit in your containers, nor does it need any container-specific tweaks. It has been designed to perform massive analysis on the [Quay.io Container Registry](https://quay.io).

Whether you host a container registry, a continuous-integration system, or build anywhere from dozens to thousands of containers, you would benefit from Clair. More generally, if you think that container security matters (and, honestly, you should), you should give it a shot.

## How Clair Detects Vulnerabilities

Clair has been designed to analyze a container layer only once, without running the container. The analysis must extract all the data required to detect not only the currently known vulnerabilities that may affect a layer, but also any future ones.

Detecting vulnerabilities can be achieved by several techniques. One possibility is to compute hashes of the binaries present in a layer and compare them against a database. However, building such a database becomes tricky considering the number of different packages and library versions.

To detect vulnerabilities, Clair instead takes advantage of package managers, which quickly and comprehensively provide lists of installed binary and source packages. Package lists are extracted for each layer that composes your container image, and only the difference between a layer's package list and its parent's is stored. Not only is this method storage-efficient, it also lets us scan a layer that may be used in many images only once. Coupled with vulnerability databases such as Debian's Security Bug Tracker, Clair is able to tell which vulnerabilities threaten a container, and which layer and package introduced them.

### Graph

Clair internally stores and queries its data in a graph, whose model is described in the [associated doc](docs/Model.md). Below is a non-exhaustive example graph that corresponds to the following *Dockerfile*.

```
1. MAINTAINER Quentin Machu <quentin.machu@coreos.com>
2. FROM ubuntu:trusty
3. RUN apt-get update && apt-get upgrade -y
4. EXPOSE 22
5. CMD ["/usr/sbin/sshd", "-D"]
```

![Example graph](docs/Model.png)

The above image shows five layers represented by the purple nodes, associated with their IDs and parents. Because the second layer imports *Ubuntu Trusty* into the container, Clair can detect the operating system and some packages, shown in green (we only show one here for the sake of simplicity). The third layer upgrades packages, so the graph reflects that this layer removes the previous version and installs the new one. Finally, the graph knows about a vulnerability, drawn in red, which is fixed by a particular package. Note that two synthetic package versions exist (0 and ∞): they ensure database consistency during parallel modification. ∞ also makes it easy to state that a vulnerability is not yet fixed and therefore affects every package version.

Querying this particular graph will tell us that our image is not vulnerable at all because none of the successor versions of its only package fix any vulnerability. However, an image based on the second layer could be vulnerable.

### Architecture

Clair is divided into six main modules (which correspond to Go packages):

- **api** defines how users interact with Clair and exposes a [documented HTTP API](docs/API.md).
- **worker** extracts useful information from layers and stores everything in the database.
- **updater** periodically updates Clair's vulnerability database from known vulnerability sources.
- **notifier** dispatches [notifications](docs/Notifications.md) about vulnerable containers when vulnerabilities are released or updated.
- **database** persists layer information and vulnerabilities in the [Cayley graph database](https://github.com/google/cayley).
- **health** summarizes the health checks of all of Clair's services.

Multiple database backends are supported: a testing deployment would use in-memory storage, while a production deployment should use [Bolt](https://github.com/boltdb/bolt) (single-instance deployment) or PostgreSQL (distributed deployment, probably behind a load-balancer). To learn more about how to run Clair, take a look at the [doc](docs/Run.md).

#### Detectors & Fetchers

Clair currently supports three operating systems and their package managers, which we believe are the most common ones: *Debian* (dpkg), *Ubuntu* (dpkg), *CentOS* (yum).

Supporting an operating system means being able to extract the operating system's name and version from a layer, along with the list of packages it has. This is done inside the *worker/detectors* package, and extending it is straightforward.

All of this is useless if no vulnerabilities are known for any of these packages. The *updater/fetchers* package defines trusted sources of vulnerabilities and how to fetch and parse them. For now, Clair uses three databases, one for each supported operating system:
- [Ubuntu CVE Tracker](https://launchpad.net/ubuntu-cve-tracker)
- [Debian Security Bug Tracker](https://security-tracker.debian.org/tracker/)
- [Red Hat Security Data](https://www.redhat.com/security/data/metrics/)

Using these distro-specific sources gives us confidence that Clair can take into consideration *all* the different package implementations and backports without ever reporting anything possibly inaccurate.

# Coming Soon

- Improved performance
- Extended detection system
  - More package managers
  - Generic features such as detecting the presence/absence of files
  - ...
- Expose more information about vulnerabilities
  - Access vector
  - Access complexity
  - ...

# Related links

- Talk @ ContainerDays NYC 2015 [[Slides]](https://docs.google.com/presentation/d/1toUKgqLyy1b-pZlDgxONLduiLmt2yaLR0GliBB7b3L0/pub?start=false&loop=false&slide=id.p) [[Video]](https://www.youtube.com/watch?v=PA3oBAgjnkU)
- [Quay](https://quay.io): First container registry using Clair.
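As a rough illustration of the workflow the README describes, here is a hedged Go sketch of a client that asks Clair to analyze a layer and then fetches its vulnerabilities. The parameter names (`ID`, `ParentID`, `Path`) and the `minimumPriority` query parameter come from `api/logic/layers.go` in this same commit; the `/v1/...` routes, the port, and the response field names are assumptions made for illustration only (docs/API.md, not included in this excerpt, is the authoritative reference).

```go
// Illustrative client sketch; routes and port are assumptions.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Ask clair to analyze a layer stored as a tarball readable by the server.
	body, _ := json.Marshal(map[string]string{
		"ID":       "my-layer-id",
		"ParentID": "my-parent-layer-id",
		"Path":     "/tmp/layer.tar",
	})
	resp, err := http.Post("http://localhost:6060/v1/layers", "application/json", bytes.NewReader(body)) // assumed route
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("analysis status:", resp.Status)

	// Then fetch the vulnerabilities affecting that layer, filtered by priority.
	resp, err = http.Get("http://localhost:6060/v1/layers/my-layer-id/vulnerabilities?minimumPriority=Low") // assumed route
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Field names mirror the selected vulnerability fields in api/logic/layers.go;
	// the exact JSON shape is an assumption here.
	var result struct {
		Vulnerabilities []struct{ ID, Link, Priority, Description string }
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", result)
}
```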
api/api.go (Normal file, 126 lines)
@@ -0,0 +1,126 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package api provides a RESTful HTTP API, enabling external apps to interact
// with clair.
package api

import (
	"io/ioutil"
	"net"
	"net/http"
	"strconv"
	"time"

	"crypto/tls"
	"crypto/x509"

	"github.com/coreos/pkg/capnslog"
	"github.com/coreos/clair/utils"
	"github.com/tylerb/graceful"
)

var log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")

// Config represents the configuration for the Main API.
type Config struct {
	Port                      int
	TimeOut                   time.Duration
	CertFile, KeyFile, CAFile string
}

// RunMain launches the main API, which exposes every possible interaction
// with clair.
func RunMain(conf *Config, st *utils.Stopper) {
	log.Infof("starting API on port %d.", conf.Port)
	defer func() {
		log.Info("API stopped")
		st.End()
	}()

	srv := &graceful.Server{
		Timeout:          0,    // Already handled by our TimeOut middleware
		NoSignalHandling: true, // We want to use our own Stopper
		Server: &http.Server{
			Addr:      ":" + strconv.Itoa(conf.Port),
			TLSConfig: setupClientCert(conf.CAFile),
			Handler:   NewVersionRouter(conf.TimeOut),
		},
	}
	listenAndServeWithStopper(srv, st, conf.CertFile, conf.KeyFile)
}

// RunHealth launches the Health API, which only exposes a method to fetch
// clair's health without any security or authentication mechanism.
func RunHealth(port int, st *utils.Stopper) {
	log.Infof("starting Health API on port %d.", port)
	defer func() {
		log.Info("Health API stopped")
		st.End()
	}()

	srv := &graceful.Server{
		Timeout:          10 * time.Second, // Interrupt health checks when stopping
		NoSignalHandling: true,             // We want to use our own Stopper
		Server: &http.Server{
			Addr:    ":" + strconv.Itoa(port),
			Handler: NewHealthRouter(),
		},
	}
	listenAndServeWithStopper(srv, st, "", "")
}

// listenAndServeWithStopper wraps graceful.Server's
// ListenAndServe/ListenAndServeTLS and adds the ability to interrupt them with
// the provided utils.Stopper.
func listenAndServeWithStopper(srv *graceful.Server, st *utils.Stopper, certFile, keyFile string) {
	go func() {
		<-st.Chan()
		srv.Stop(0)
	}()

	var err error
	if certFile != "" && keyFile != "" {
		log.Info("API: TLS Enabled")
		err = srv.ListenAndServeTLS(certFile, keyFile)
	} else {
		err = srv.ListenAndServe()
	}

	if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
		log.Fatal(err)
	}
}

// setupClientCert creates a tls.Config instance using a CA file path
// (if provided) and calls log.Fatal if it cannot be read.
func setupClientCert(caFile string) *tls.Config {
	if len(caFile) > 0 {
		log.Info("API: Client Certificate Authentication Enabled")
		caCert, err := ioutil.ReadFile(caFile)
		if err != nil {
			log.Fatal(err)
		}
		caCertPool := x509.NewCertPool()
		caCertPool.AppendCertsFromPEM(caCert)
		return &tls.Config{
			ClientCAs:  caCertPool,
			ClientAuth: tls.RequireAndVerifyClientCert,
		}
	}

	return &tls.Config{
		ClientAuth: tls.NoClientCert,
	}
}
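When `CAFile`, `CertFile` and `KeyFile` are all set, the main API above serves TLS and requires a verified client certificate (`tls.RequireAndVerifyClientCert`). The following standard-library sketch shows what a matching client could look like. The file names, the port (6060, taken from the Dockerfile's `EXPOSE`), the `/v1/versions` path, and the assumption that one CA signs both the server and client certificates are all illustrative, not taken from this commit.

```go
// Illustrative mutual-TLS client for the main API; paths and port are assumptions.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Client key pair, signed by the CA the server was given via CAFile.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		panic(err)
	}

	// CA used to verify the server's certificate (assumed to be the same CA here).
	caCert, err := ioutil.ReadFile("ca.crt")
	if err != nil {
		panic(err)
	}
	caPool := x509.NewCertPool()
	caPool.AppendCertsFromPEM(caCert)

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{cert},
				RootCAs:      caPool,
			},
		},
	}

	resp, err := client.Get("https://localhost:6060/v1/versions") // assumed route
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```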
api/jsonhttp/json.go (Normal file, 78 lines)
@@ -0,0 +1,78 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package jsonhttp provides helper functions to write JSON responses to
// an http.ResponseWriter and read JSON bodies from an http.Request.
package jsonhttp

import (
	"encoding/json"
	"io"
	"net/http"

	"github.com/coreos/clair/database"
	cerrors "github.com/coreos/clair/utils/errors"
	"github.com/coreos/clair/worker"
)

// MaxPostSize is the maximum number of bytes that ParseBody reads from an
// http.Request.Body.
var MaxPostSize int64 = 1048576

// Render writes a JSON-encoded object to an http.ResponseWriter, as well as
// an HTTP status code.
func Render(w http.ResponseWriter, httpStatus int, v interface{}) {
	if v != nil {
		// Headers must be set before WriteHeader is called.
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
	}
	w.WriteHeader(httpStatus)
	if v != nil {
		result, _ := json.Marshal(v)
		w.Write(result)
	}
}

// RenderError writes an error, wrapped in the Message field of a JSON-encoded
// object, to an http.ResponseWriter, as well as an HTTP status code.
// If the status code is 0, RenderError tries to guess the proper HTTP status
// code from the error type.
func RenderError(w http.ResponseWriter, httpStatus int, err error) {
	if httpStatus == 0 {
		httpStatus = http.StatusInternalServerError
		// Try to guess the http status code from the error type
		if _, isBadRequestError := err.(*cerrors.ErrBadRequest); isBadRequestError {
			httpStatus = http.StatusBadRequest
		} else {
			switch err {
			case cerrors.ErrNotFound:
				httpStatus = http.StatusNotFound
			case database.ErrTransaction, database.ErrBackendException:
				httpStatus = http.StatusServiceUnavailable
			case worker.ErrParentUnknown, worker.ErrUnsupported:
				httpStatus = http.StatusBadRequest
			}
		}
	}

	Render(w, httpStatus, struct{ Message string }{Message: err.Error()})
}

// ParseBody reads a JSON-encoded body from an http.Request and unmarshals it
// into the provided object.
func ParseBody(r *http.Request, v interface{}) (int, error) {
	defer r.Body.Close()
	err := json.NewDecoder(io.LimitReader(r.Body, MaxPostSize)).Decode(v)
	if err != nil {
		return http.StatusUnsupportedMediaType, err
	}
	return 0, nil
}
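The three helpers above are meant to be composed inside httprouter handlers, as the logic package does. Here is a minimal, self-contained sketch built only on the signatures shown above; the `/echo` route, the `EchoParameters` type, and the port are invented for illustration and are not part of this commit.

```go
// Illustrative use of jsonhttp.ParseBody / Render / RenderError in a handler.
package main

import (
	"net/http"

	"github.com/coreos/clair/api/jsonhttp"
	"github.com/julienschmidt/httprouter"
)

// EchoParameters is a hypothetical request body.
type EchoParameters struct {
	Message string
}

// postEcho parses a JSON body and echoes it back, letting ParseBody pick the
// status code (http.StatusUnsupportedMediaType) when decoding fails.
func postEcho(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	var params EchoParameters
	if s, err := jsonhttp.ParseBody(r, &params); err != nil {
		jsonhttp.RenderError(w, s, err)
		return
	}
	jsonhttp.Render(w, http.StatusOK, params)
}

func main() {
	router := httprouter.New()
	router.POST("/echo", postEcho) // invented route
	http.ListenAndServe(":8080", router)
}
```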
api/logic/general.go (Normal file, 54 lines)
@@ -0,0 +1,54 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package logic implements all the available API methods.
// Every method is documented in docs/API.md.
package logic

import (
	"net/http"
	"strconv"

	"github.com/coreos/clair/api/jsonhttp"
	"github.com/coreos/clair/health"
	"github.com/coreos/clair/worker"
	"github.com/julienschmidt/httprouter"
)

// Version is an integer representing the API version.
const Version = 1

// GETVersions returns API and Engine versions.
func GETVersions(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	jsonhttp.Render(w, http.StatusOK, struct {
		APIVersion    string
		EngineVersion string
	}{
		APIVersion:    strconv.Itoa(Version),
		EngineVersion: strconv.Itoa(worker.Version),
	})
}

// GETHealth sums up the health of all the registered services.
func GETHealth(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	globalHealth, statuses := health.Healthcheck()

	httpStatus := http.StatusOK
	if !globalHealth {
		httpStatus = http.StatusServiceUnavailable
	}

	jsonhttp.Render(w, httpStatus, statuses)
	return
}
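Because these handlers use the plain httprouter signature, they can be exercised directly without a running server. The following test-style sketch (not part of the commit) shows the JSON shape GETVersions renders, roughly `{"APIVersion":"1","EngineVersion":"..."}`; the request path is irrelevant when calling the handler directly, and GETHealth can be driven the same way.

```go
// Illustrative httptest snippet for the handlers above (would live in package logic).
package logic

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/julienschmidt/httprouter"
)

func TestGETVersions(t *testing.T) {
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "/v1/versions", nil) // path is only informative here
	GETVersions(w, r, httprouter.Params{})

	if w.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", w.Code)
	}
	t.Logf("body: %s", w.Body.String())
}
```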
api/logic/layers.go (Normal file, 365 lines)
@ -0,0 +1,365 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logic
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/coreos/clair/api/jsonhttp"
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/coreos/clair/worker"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
)
|
||||
|
||||
// POSTLayersParameters represents the expected parameters for POSTLayers.
|
||||
type POSTLayersParameters struct {
|
||||
ID, Path, ParentID string
|
||||
}
|
||||
|
||||
// POSTLayers analyzes a layer and returns the engine version that has been used
|
||||
// for the analysis.
|
||||
func POSTLayers(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
|
||||
var parameters POSTLayersParameters
|
||||
if s, err := jsonhttp.ParseBody(r, ¶meters); err != nil {
|
||||
jsonhttp.RenderError(w, s, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Process data.
|
||||
if err := worker.Process(parameters.ID, parameters.ParentID, parameters.Path); err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get engine version and return.
|
||||
jsonhttp.Render(w, http.StatusCreated, struct{ Version string }{Version: strconv.Itoa(worker.Version)})
|
||||
}
|
||||
|
||||
// GETLayersOS returns the operating system of a layer if it exists.
|
||||
// It uses not only the specified layer but also its parent layers if necessary.
|
||||
// An empty OS string is returned if no OS has been detected.
|
||||
func GETLayersOS(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find layer.
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent, database.FieldLayerOS})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get OS.
|
||||
os, err := layer.OperatingSystem()
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusOK, struct{ OS string }{OS: os})
|
||||
}
|
||||
|
||||
// GETLayersParent returns the parent ID of a layer if it exists.
|
||||
// An empty ID string is returned if the layer has no parent.
|
||||
func GETLayersParent(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find layer
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get layer's parent.
|
||||
parent, err := layer.Parent([]string{database.FieldLayerID})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
ID := ""
|
||||
if parent != nil {
|
||||
ID = parent.ID
|
||||
}
|
||||
jsonhttp.Render(w, http.StatusOK, struct{ ID string }{ID: ID})
|
||||
}
|
||||
|
||||
// GETLayersPackages returns the complete list of packages that a layer has
|
||||
// if it exists.
|
||||
func GETLayersPackages(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find layer
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent, database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer's packages.
|
||||
packagesNodes, err := layer.AllPackages()
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
packages := []*database.Package{}
|
||||
if len(packagesNodes) > 0 {
|
||||
packages, err = database.FindAllPackagesByNodes(packagesNodes, []string{database.FieldPackageOS, database.FieldPackageName, database.FieldPackageVersion})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusOK, struct{ Packages []*database.Package }{Packages: packages})
|
||||
}
|
||||
|
||||
// GETLayersPackagesDiff returns the list of packages that a layer installs and
|
||||
// removes if it exists.
|
||||
func GETLayersPackagesDiff(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find layer.
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer's packages.
|
||||
installedPackages, removedPackages := make([]*database.Package, 0), make([]*database.Package, 0)
|
||||
if len(layer.InstalledPackagesNodes) > 0 {
|
||||
installedPackages, err = database.FindAllPackagesByNodes(layer.InstalledPackagesNodes, []string{database.FieldPackageOS, database.FieldPackageName, database.FieldPackageVersion})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
if len(layer.RemovedPackagesNodes) > 0 {
|
||||
removedPackages, err = database.FindAllPackagesByNodes(layer.RemovedPackagesNodes, []string{database.FieldPackageOS, database.FieldPackageName, database.FieldPackageVersion})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusOK, struct{ InstalledPackages, RemovedPackages []*database.Package }{InstalledPackages: installedPackages, RemovedPackages: removedPackages})
|
||||
}
|
||||
|
||||
// GETLayersVulnerabilities returns the complete list of vulnerabilities that
|
||||
// a layer has if it exists.
|
||||
func GETLayersVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Get minimum priority parameter.
|
||||
minimumPriority := types.Priority(r.URL.Query().Get("minimumPriority"))
|
||||
if minimumPriority == "" {
|
||||
minimumPriority = "High" // Set default priority to High
|
||||
} else if !minimumPriority.IsValid() {
|
||||
jsonhttp.RenderError(w, 0, cerrors.NewBadRequestError("invalid priority"))
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerParent, database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer's packages.
|
||||
packagesNodes, err := layer.AllPackages()
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find vulnerabilities.
|
||||
vulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(packagesNodes, minimumPriority, []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusOK, struct{ Vulnerabilities []*database.Vulnerability }{Vulnerabilities: vulnerabilities})
|
||||
}
|
||||
|
||||
// GETLayersVulnerabilitiesDiff returns the list of vulnerabilities that a layer
|
||||
// adds and removes if it exists.
|
||||
func GETLayersVulnerabilitiesDiff(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Get minimum priority parameter.
|
||||
minimumPriority := types.Priority(r.URL.Query().Get("minimumPriority"))
|
||||
if minimumPriority == "" {
|
||||
minimumPriority = "High" // Set default priority to High
|
||||
} else if !minimumPriority.IsValid() {
|
||||
jsonhttp.RenderError(w, 0, cerrors.NewBadRequestError("invalid priority"))
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer.
|
||||
layer, err := database.FindOneLayerByID(p.ByName("id"), []string{database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Selected fields for vulnerabilities.
|
||||
selectedFields := []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription}
|
||||
|
||||
// Find vulnerabilities for installed packages.
|
||||
addedVulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(layer.InstalledPackagesNodes, minimumPriority, selectedFields)
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find vulnerabilities for removed packages.
|
||||
removedVulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(layer.RemovedPackagesNodes, minimumPriority, selectedFields)
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove vulnerabilities that appear in both the added and removed lists (e.g. updated packages that are still vulnerable).
|
||||
for ia, a := range addedVulnerabilities {
|
||||
for ir, r := range removedVulnerabilities {
|
||||
if a.ID == r.ID {
|
||||
addedVulnerabilities = append(addedVulnerabilities[:ia], addedVulnerabilities[ia+1:]...)
|
||||
removedVulnerabilities = append(removedVulnerabilities[:ir], removedVulnerabilities[ir+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusOK, struct{ Adds, Removes []*database.Vulnerability }{Adds: addedVulnerabilities, Removes: removedVulnerabilities})
|
||||
}
|
||||
|
||||
// POSTBatchLayersVulnerabilitiesParameters represents the expected parameters
|
||||
// for POSTBatchLayersVulnerabilities.
|
||||
type POSTBatchLayersVulnerabilitiesParameters struct {
|
||||
LayersIDs []string
|
||||
}
|
||||
|
||||
// POSTBatchLayersVulnerabilities returns the complete list of vulnerabilities
|
||||
// that the provided layers have, if they all exist.
|
||||
func POSTBatchLayersVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Parse body
|
||||
var parameters POSTBatchLayersVulnerabilitiesParameters
|
||||
if s, err := jsonhttp.ParseBody(r, ¶meters); err != nil {
|
||||
jsonhttp.RenderError(w, s, err)
|
||||
return
|
||||
}
|
||||
if len(parameters.LayersIDs) == 0 {
|
||||
jsonhttp.RenderError(w, http.StatusBadRequest, errors.New("at least one LayerID query parameter must be provided"))
|
||||
return
|
||||
}
|
||||
|
||||
// Get minimum priority parameter.
|
||||
minimumPriority := types.Priority(r.URL.Query().Get("minimumPriority"))
|
||||
if minimumPriority == "" {
|
||||
minimumPriority = "High" // Set default priority to High
|
||||
} else if !minimumPriority.IsValid() {
|
||||
jsonhttp.RenderError(w, 0, cerrors.NewBadRequestError("invalid priority"))
|
||||
return
|
||||
}
|
||||
|
||||
response := make(map[string]interface{})
|
||||
// For each LayerID parameter
|
||||
for _, layerID := range parameters.LayersIDs {
|
||||
// Find layer
|
||||
layer, err := database.FindOneLayerByID(layerID, []string{database.FieldLayerParent, database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer's packages.
|
||||
packagesNodes, err := layer.AllPackages()
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find vulnerabilities.
|
||||
vulnerabilities, err := getVulnerabilitiesFromLayerPackagesNodes(packagesNodes, minimumPriority, []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
response[layerID] = struct{ Vulnerabilities []*database.Vulnerability }{Vulnerabilities: vulnerabilities}
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusOK, response)
|
||||
}
|
||||
|
||||
// getSuccessorsFromPackagesNodes returns the node list of packages that have
|
||||
// versions following the versions of the provided packages.
|
||||
func getSuccessorsFromPackagesNodes(packagesNodes []string) ([]string, error) {
|
||||
if len(packagesNodes) == 0 {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
// Get packages.
|
||||
packages, err := database.FindAllPackagesByNodes(packagesNodes, []string{database.FieldPackageNextVersion})
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
// Find all packages' successors.
|
||||
var packagesNextVersions []string
|
||||
for _, pkg := range packages {
|
||||
nextVersions, err := pkg.NextVersions([]string{})
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
for _, version := range nextVersions {
|
||||
packagesNextVersions = append(packagesNextVersions, version.Node)
|
||||
}
|
||||
}
|
||||
|
||||
return packagesNextVersions, nil
|
||||
}
|
||||
|
||||
// getVulnerabilitiesFromLayerPackagesNodes returns the list of vulnerabilities
|
||||
// affecting the provided package nodes, filtered by Priority.
|
||||
func getVulnerabilitiesFromLayerPackagesNodes(packagesNodes []string, minimumPriority types.Priority, selectedFields []string) ([]*database.Vulnerability, error) {
|
||||
if len(packagesNodes) == 0 {
|
||||
return []*database.Vulnerability{}, nil
|
||||
}
|
||||
|
||||
// Get successors of the packages.
|
||||
packagesNextVersions, err := getSuccessorsFromPackagesNodes(packagesNodes)
|
||||
if err != nil {
|
||||
return []*database.Vulnerability{}, err
|
||||
}
|
||||
if len(packagesNextVersions) == 0 {
|
||||
return []*database.Vulnerability{}, nil
|
||||
}
|
||||
|
||||
// Find vulnerabilities fixed in these successors.
|
||||
vulnerabilities, err := database.FindAllVulnerabilitiesByFixedIn(packagesNextVersions, selectedFields)
|
||||
if err != nil {
|
||||
return []*database.Vulnerability{}, err
|
||||
}
|
||||
|
||||
// Filter vulnerabilities depending on their priority and remove duplicates.
|
||||
filteredVulnerabilities := []*database.Vulnerability{}
|
||||
seen := map[string]struct{}{}
|
||||
for _, v := range vulnerabilities {
|
||||
if minimumPriority.Compare(v.Priority) <= 0 {
|
||||
if _, alreadySeen := seen[v.ID]; !alreadySeen {
|
||||
filteredVulnerabilities = append(filteredVulnerabilities, v)
|
||||
seen[v.ID] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return filteredVulnerabilities, nil
|
||||
}
|
api/logic/vulnerabilities.go (Normal file, 247 lines)
@ -0,0 +1,247 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logic
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/coreos/clair/api/jsonhttp"
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
)
|
||||
|
||||
// GETVulnerabilities returns a vulnerability identified by an ID if it exists.
|
||||
func GETVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find vulnerability.
|
||||
vulnerability, err := database.FindOneVulnerability(p.ByName("id"), []string{database.FieldVulnerabilityID, database.FieldVulnerabilityLink, database.FieldVulnerabilityPriority, database.FieldVulnerabilityDescription, database.FieldVulnerabilityFixedIn})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusOK, abstractVulnerability)
|
||||
}
|
||||
|
||||
// POSTVulnerabilities manually inserts a vulnerability into the database if it
|
||||
// does not exist yet.
|
||||
func POSTVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
var parameters *database.AbstractVulnerability
|
||||
if s, err := jsonhttp.ParseBody(r, ¶meters); err != nil {
|
||||
jsonhttp.RenderError(w, s, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure that the vulnerability does not exist.
|
||||
vulnerability, err := database.FindOneVulnerability(parameters.ID, []string{})
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
if vulnerability != nil {
|
||||
jsonhttp.RenderError(w, 0, cerrors.NewBadRequestError("vulnerability already exists"))
|
||||
return
|
||||
}
|
||||
|
||||
// Insert packages.
|
||||
packages := database.AbstractPackagesToPackages(parameters.AffectedPackages)
|
||||
err = database.InsertPackages(packages)
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
var pkgNodes []string
|
||||
for _, p := range packages {
|
||||
pkgNodes = append(pkgNodes, p.Node)
|
||||
}
|
||||
|
||||
// Insert vulnerability.
|
||||
notifications, err := database.InsertVulnerabilities([]*database.Vulnerability{parameters.ToVulnerability(pkgNodes)})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Insert notifications.
|
||||
err = database.InsertNotifications(notifications, database.GetDefaultNotificationWrapper())
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusCreated, nil)
|
||||
}
|
||||
|
||||
// PUTVulnerabilities updates a vulnerability if it exists.
|
||||
func PUTVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
var parameters *database.AbstractVulnerability
|
||||
if s, err := jsonhttp.ParseBody(r, ¶meters); err != nil {
|
||||
jsonhttp.RenderError(w, s, err)
|
||||
return
|
||||
}
|
||||
parameters.ID = p.ByName("id")
|
||||
|
||||
// Ensure that the vulnerability exists.
|
||||
_, err := database.FindOneVulnerability(parameters.ID, []string{})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Insert packages.
|
||||
packages := database.AbstractPackagesToPackages(parameters.AffectedPackages)
|
||||
err = database.InsertPackages(packages)
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
var pkgNodes []string
|
||||
for _, p := range packages {
|
||||
pkgNodes = append(pkgNodes, p.Node)
|
||||
}
|
||||
|
||||
// Insert vulnerability.
|
||||
notifications, err := database.InsertVulnerabilities([]*database.Vulnerability{parameters.ToVulnerability(pkgNodes)})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Insert notifications.
|
||||
err = database.InsertNotifications(notifications, database.GetDefaultNotificationWrapper())
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusCreated, nil)
|
||||
}
|
||||
|
||||
// DELVulnerabilities deletes a vulnerability if it exists.
|
||||
func DELVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
err := database.DeleteVulnerability(p.ByName("id"))
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusNoContent, nil)
|
||||
}
|
||||
|
||||
// GETVulnerabilitiesIntroducingLayers returns the list of layers that
|
||||
// introduce a given vulnerability, if it exists.
|
||||
// To clarify, it does not return the list of every layer that has
|
||||
// the vulnerability.
|
||||
func GETVulnerabilitiesIntroducingLayers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Find vulnerability to verify that it exists.
|
||||
_, err := database.FindOneVulnerability(p.ByName("id"), []string{})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
layers, err := database.FindAllLayersIntroducingVulnerability(p.ByName("id"), []string{database.FieldLayerID})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
layersIDs := []string{}
|
||||
for _, l := range layers {
|
||||
layersIDs = append(layersIDs, l.ID)
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusOK, struct{ IntroducingLayersIDs []string }{IntroducingLayersIDs: layersIDs})
|
||||
}
|
||||
|
||||
// POSTVulnerabilitiesAffectedLayersParameters represents the expected
|
||||
// parameters for POSTVulnerabilitiesAffectedLayers.
|
||||
type POSTVulnerabilitiesAffectedLayersParameters struct {
|
||||
LayersIDs []string
|
||||
}
|
||||
|
||||
// POSTVulnerabilitiesAffectedLayers returns whether the specified layers
|
||||
// (by their IDs) are vulnerable to the given Vulnerability or not.
|
||||
func POSTVulnerabilitiesAffectedLayers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
// Parse body.
|
||||
var parameters POSTVulnerabilitiesAffectedLayersParameters
|
||||
if s, err := jsonhttp.ParseBody(r, ¶meters); err != nil {
|
||||
jsonhttp.RenderError(w, s, err)
|
||||
return
|
||||
}
|
||||
if len(parameters.LayersIDs) == 0 {
|
||||
jsonhttp.RenderError(w, http.StatusBadRequest, errors.New("getting the entire list of affected layers is not supported yet: at least one LayerID query parameter must be provided"))
|
||||
return
|
||||
}
|
||||
|
||||
// Find vulnerability.
|
||||
vulnerability, err := database.FindOneVulnerability(p.ByName("id"), []string{database.FieldVulnerabilityFixedIn})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Save the fixed in nodes into a map for fast check.
|
||||
fixedInPackagesMap := make(map[string]struct{})
|
||||
for _, fixedInNode := range vulnerability.FixedInNodes {
|
||||
fixedInPackagesMap[fixedInNode] = struct{}{}
|
||||
}
|
||||
|
||||
response := make(map[string]interface{})
|
||||
// For each LayerID parameter.
|
||||
for _, layerID := range parameters.LayersIDs {
|
||||
// Find layer
|
||||
layer, err := database.FindOneLayerByID(layerID, []string{database.FieldLayerParent, database.FieldLayerPackages})
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Find layer's packages.
|
||||
packagesNodes, err := layer.AllPackages()
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get successor packages of the layer's packages.
|
||||
successors, err := getSuccessorsFromPackagesNodes(packagesNodes)
|
||||
if err != nil {
|
||||
jsonhttp.RenderError(w, 0, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Determine if the layer is vulnerable by verifying if one of the successors
|
||||
// of its packages is fixed by the vulnerability.
|
||||
vulnerable := false
|
||||
for _, p := range successors {
|
||||
if _, fixed := fixedInPackagesMap[p]; fixed {
|
||||
vulnerable = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
response[layerID] = struct{ Vulnerable bool }{Vulnerable: vulnerable}
|
||||
}
|
||||
|
||||
jsonhttp.Render(w, http.StatusOK, response)
|
||||
}
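As a usage sketch, the affected-layers endpoint above can be exercised with a small client; the listen address, vulnerability ID, and layer IDs below are assumptions for illustration only, not values defined in this commit.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical Clair address and identifiers; adjust to your deployment.
	url := "http://localhost:6060/v1/vulnerabilities/CVE-2014-0001/affected-layers"

	// The body mirrors the LayersIDs parameter expected by the handler.
	body, _ := json.Marshal(struct{ LayersIDs []string }{
		LayersIDs: []string{"layer-a", "layer-b"},
	})

	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	// Expected response shape: {"layer-a": {"Vulnerable": true}, ...}
	var result map[string]struct{ Vulnerable bool }
	json.NewDecoder(resp.Body).Decode(&result)
	fmt.Println(result)
}
```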
|
96
api/router.go
Normal file
@ -0,0 +1,96 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/api/logic"
|
||||
"github.com/coreos/clair/api/wrappers"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
)
|
||||
|
||||
// VersionRouter is an HTTP router that forwards requests to the appropriate
|
||||
// router depending on the API version specified in the requested URI.
|
||||
type VersionRouter map[string]*httprouter.Router
|
||||
|
||||
// NewVersionRouter instantiates a VersionRouter and every sub-router that is
|
||||
// necessary to handle supported API versions.
|
||||
func NewVersionRouter(to time.Duration) *VersionRouter {
|
||||
return &VersionRouter{
|
||||
"/v1": NewRouterV1(to),
|
||||
}
|
||||
}
|
||||
|
||||
// ServeHTTP forwards requests to the appropriate router depending on the API
|
||||
// version specified in the requested URI and removes the version information
|
||||
// from the request URL.Path, without modifying the RequestURI.
|
||||
func (vs VersionRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
urlStr := r.URL.String()
|
||||
var version string
|
||||
if len(urlStr) >= 3 {
|
||||
version = urlStr[:3]
|
||||
}
|
||||
if router := vs[version]; router != nil {
|
||||
// Remove the version number from the request path to let the router do its
|
||||
// job but do not update the RequestURI
|
||||
r.URL.Path = strings.Replace(r.URL.Path, version, "", 1)
|
||||
router.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
http.NotFound(w, r)
|
||||
}
|
||||
|
||||
// NewRouterV1 creates a new router for the API (Version 1)
|
||||
func NewRouterV1(to time.Duration) *httprouter.Router {
|
||||
router := httprouter.New()
|
||||
wrap := func(fn httprouter.Handle) httprouter.Handle {
|
||||
return wrappers.Log(wrappers.TimeOut(to, fn))
|
||||
}
|
||||
|
||||
// General
|
||||
router.GET("/versions", wrap(logic.GETVersions))
|
||||
router.GET("/health", wrap(logic.GETHealth))
|
||||
|
||||
// Layers
|
||||
router.POST("/layers", wrap(logic.POSTLayers))
|
||||
router.GET("/layers/:id/os", wrap(logic.GETLayersOS))
|
||||
router.GET("/layers/:id/parent", wrap(logic.GETLayersParent))
|
||||
router.GET("/layers/:id/packages", wrap(logic.GETLayersPackages))
|
||||
router.GET("/layers/:id/packages/diff", wrap(logic.GETLayersPackagesDiff))
|
||||
router.GET("/layers/:id/vulnerabilities", wrap(logic.GETLayersVulnerabilities))
|
||||
router.GET("/layers/:id/vulnerabilities/diff", wrap(logic.GETLayersVulnerabilitiesDiff))
|
||||
// # Batch version of "/layers/:id/vulnerabilities"
|
||||
router.POST("/batch/layers/vulnerabilities", wrap(logic.POSTBatchLayersVulnerabilities))
|
||||
|
||||
// Vulnerabilities
|
||||
router.POST("/vulnerabilities", wrap(logic.POSTVulnerabilities))
|
||||
router.PUT("/vulnerabilities/:id", wrap(logic.PUTVulnerabilities))
|
||||
router.GET("/vulnerabilities/:id", wrap(logic.GETVulnerabilities))
|
||||
router.DELETE("/vulnerabilities/:id", wrap(logic.DELVulnerabilities))
|
||||
router.GET("/vulnerabilities/:id/introducing-layers", wrap(logic.GETVulnerabilitiesIntroducingLayers))
|
||||
router.POST("/vulnerabilities/:id/affected-layers", wrap(logic.POSTVulnerabilitiesAffectedLayers))
|
||||
|
||||
return router
|
||||
}
|
||||
|
||||
// NewHealthRouter creates a new router that only serves the health handler on /
|
||||
func NewHealthRouter() *httprouter.Router {
|
||||
router := httprouter.New()
|
||||
router.GET("/", logic.GETHealth)
|
||||
return router
|
||||
}
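A minimal sketch of how these constructors might be wired into HTTP listeners; the ports and the 10-second timeout are assumptions, not values taken from this commit.

```go
package main

import (
	"net/http"
	"time"

	"github.com/coreos/clair/api"
)

func main() {
	// Versioned API: requests to /v1/... are dispatched to NewRouterV1,
	// each handler being wrapped with logging and a timeout (value assumed).
	go http.ListenAndServe(":6060", api.NewVersionRouter(10*time.Second))

	// Separate health listener serving only GETHealth on /.
	http.ListenAndServe(":6061", api.NewHealthRouter())
}
```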
|
75
api/wrappers/log.go
Normal file
@ -0,0 +1,75 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package wrappers contains httprouter.Handle wrappers that are used in the API.
|
||||
package wrappers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
)
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "api")
|
||||
|
||||
type logWriter struct {
|
||||
http.ResponseWriter
|
||||
status int
|
||||
size int
|
||||
}
|
||||
|
||||
func (lw *logWriter) Header() http.Header {
|
||||
return lw.ResponseWriter.Header()
|
||||
}
|
||||
|
||||
func (lw *logWriter) Write(b []byte) (int, error) {
|
||||
if !lw.Written() {
|
||||
lw.WriteHeader(http.StatusOK)
|
||||
}
|
||||
size, err := lw.ResponseWriter.Write(b)
|
||||
lw.size += size
|
||||
return size, err
|
||||
}
|
||||
|
||||
func (lw *logWriter) WriteHeader(s int) {
|
||||
lw.status = s
|
||||
lw.ResponseWriter.WriteHeader(s)
|
||||
}
|
||||
|
||||
func (lw *logWriter) Size() int {
|
||||
return lw.size
|
||||
}
|
||||
|
||||
func (lw *logWriter) Written() bool {
|
||||
return lw.status != 0
|
||||
}
|
||||
|
||||
func (lw *logWriter) Status() int {
|
||||
return lw.status
|
||||
}
|
||||
|
||||
// Log wraps an httprouter.Handle and logs the API call
|
||||
func Log(fn httprouter.Handle) httprouter.Handle {
|
||||
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
lw := &logWriter{ResponseWriter: w}
|
||||
start := time.Now()
|
||||
fn(lw, r, p)
|
||||
log.Infof("%d %s %s (%s)", lw.Status(), r.Method, r.RequestURI, time.Since(start))
|
||||
}
|
||||
}
|
105
api/wrappers/timeout.go
Normal file
@ -0,0 +1,105 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package wrappers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/api/jsonhttp"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
)
|
||||
|
||||
// ErrHandlerTimeout is returned on ResponseWriter Write calls
|
||||
// in handlers which have timed out.
|
||||
var ErrHandlerTimeout = errors.New("http: Handler timeout")
|
||||
|
||||
type timeoutWriter struct {
|
||||
http.ResponseWriter
|
||||
|
||||
mu sync.Mutex
|
||||
timedOut bool
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
func (tw *timeoutWriter) Header() http.Header {
|
||||
return tw.ResponseWriter.Header()
|
||||
}
|
||||
|
||||
func (tw *timeoutWriter) Write(p []byte) (int, error) {
|
||||
tw.mu.Lock()
|
||||
defer tw.mu.Unlock()
|
||||
tw.wroteHeader = true // implicitly at least
|
||||
if tw.timedOut {
|
||||
return 0, ErrHandlerTimeout
|
||||
}
|
||||
return tw.ResponseWriter.Write(p)
|
||||
}
|
||||
|
||||
func (tw *timeoutWriter) WriteHeader(status int) {
|
||||
tw.mu.Lock()
|
||||
defer tw.mu.Unlock()
|
||||
if tw.timedOut || tw.wroteHeader {
|
||||
return
|
||||
}
|
||||
tw.wroteHeader = true
|
||||
tw.ResponseWriter.WriteHeader(status)
|
||||
}
|
||||
|
||||
// TimeOut wraps an httprouter.Handle and ensures that a response is given within
|
||||
// the specified duration.
|
||||
//
|
||||
// If the handler takes longer than the time limit, the wrapper responds with
|
||||
// a Service Unavailable error and an error message; the handler response that
|
||||
// may come later is ignored.
|
||||
//
|
||||
// After a timeout, any write the handler makes to its ResponseWriter will return
|
||||
// ErrHandlerTimeout.
|
||||
//
|
||||
// If the duration is 0, the wrapper does nothing.
|
||||
func TimeOut(d time.Duration, fn httprouter.Handle) httprouter.Handle {
|
||||
if d == 0 {
|
||||
// A zero duration disables the timeout wrapper.
|
||||
return fn
|
||||
}
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
|
||||
done := make(chan bool)
|
||||
tw := &timeoutWriter{ResponseWriter: w}
|
||||
|
||||
go func() {
|
||||
fn(tw, r, p)
|
||||
done <- true
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case <-time.After(d):
|
||||
tw.mu.Lock()
|
||||
defer tw.mu.Unlock()
|
||||
if !tw.wroteHeader {
|
||||
jsonhttp.RenderError(tw.ResponseWriter, http.StatusServiceUnavailable, ErrHandlerTimeout)
|
||||
}
|
||||
tw.timedOut = true
|
||||
}
|
||||
}
|
||||
}
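For illustration, the two wrappers compose exactly as the wrap closure in NewRouterV1 does; the route, port, and 5-second limit below are assumptions.

```go
package main

import (
	"net/http"
	"time"

	"github.com/coreos/clair/api/wrappers"
	"github.com/julienschmidt/httprouter"
)

func hello(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	w.Write([]byte("hello"))
}

func main() {
	router := httprouter.New()
	// Log every call and abort with 503 if the handler takes more than 5 seconds.
	router.GET("/hello", wrappers.Log(wrappers.TimeOut(5*time.Second, hello)))
	http.ListenAndServe(":8080", router)
}
```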
|
182
database/database.go
Normal file
@ -0,0 +1,182 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package database implements all the database models and the functions that
|
||||
// manipulate them.
|
||||
package database
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/barakmich/glog"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/coreos/clair/health"
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/google/cayley/graph/path"
|
||||
|
||||
// Load all supported backends.
|
||||
_ "github.com/google/cayley/graph/bolt"
|
||||
_ "github.com/google/cayley/graph/leveldb"
|
||||
_ "github.com/google/cayley/graph/memstore"
|
||||
_ "github.com/google/cayley/graph/mongo"
|
||||
_ "github.com/google/cayley/graph/sql"
|
||||
)
|
||||
|
||||
const (
|
||||
// FieldIs is the graph predicate defining the type of an entity.
|
||||
FieldIs = "is"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "database")
|
||||
|
||||
// ErrTransaction is an error that occurs when a database transaction fails.
|
||||
ErrTransaction = errors.New("database: transaction failed (concurrent modification?)")
|
||||
// ErrBackendException is an error that occurs when the database backend does
|
||||
// not work properly (i.e. unreachable).
|
||||
ErrBackendException = errors.New("database: could not query backend")
|
||||
// ErrInconsistent is an error that occurs when a database consistency check
|
||||
// fails (i.e. when an entity which is supposed to be unique is detected twice).
|
||||
ErrInconsistent = errors.New("database: inconsistent database")
|
||||
// ErrCantOpen is an error that occurs when the database could not be opened
|
||||
ErrCantOpen = errors.New("database: could not open database")
|
||||
|
||||
store *cayley.Handle
|
||||
)
|
||||
|
||||
func init() {
|
||||
health.RegisterHealthchecker("database", Healthcheck)
|
||||
}
|
||||
|
||||
// Open opens a Cayley database, creating it if necessary, and returns its handle.
|
||||
func Open(dbType, dbPath string) error {
|
||||
if store != nil {
|
||||
log.Errorf("could not open database at %s : a database is already opened", dbPath)
|
||||
return ErrCantOpen
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
// Try to create database if necessary
|
||||
if dbType == "bolt" || dbType == "leveldb" {
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
// No, initialize it if possible
|
||||
log.Infof("database at %s does not exist yet, creating it", dbPath)
|
||||
|
||||
if err = graph.InitQuadStore(dbType, dbPath, nil); err != nil {
|
||||
log.Errorf("could not create database at %s : %s", dbPath, err)
|
||||
return ErrCantOpen
|
||||
}
|
||||
}
|
||||
} else if dbType == "sql" {
|
||||
graph.InitQuadStore(dbType, dbPath, nil)
|
||||
}
|
||||
|
||||
store, err = cayley.NewGraph(dbType, dbPath, nil)
|
||||
if err != nil {
|
||||
log.Errorf("could not open database at %s : %s", dbPath, err)
|
||||
return ErrCantOpen
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes a Cayley database
|
||||
func Close() {
|
||||
if store != nil {
|
||||
store.Close()
|
||||
store = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Healthcheck simply adds and then removes a quad in Cayley to ensure it is working.
|
||||
// It reports a healthy status when everything is ok.
|
||||
func Healthcheck() health.Status {
|
||||
var err error
|
||||
if store != nil {
|
||||
t := cayley.NewTransaction()
|
||||
q := cayley.Quad("cayley", "is", "healthy", "")
|
||||
t.AddQuad(q)
|
||||
t.RemoveQuad(q)
|
||||
glog.SetStderrThreshold("FATAL") // TODO REMOVE ME
|
||||
err = store.ApplyTransaction(t)
|
||||
glog.SetStderrThreshold("ERROR") // TODO REMOVE ME
|
||||
}
|
||||
|
||||
return health.Status{IsEssential: true, IsHealthy: err == nil, Details: nil}
|
||||
}
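As a usage sketch of the lifecycle above (open, check, close); the memstore backend is the same one the package tests use, and the example itself is an assumption rather than code from this commit.

```go
package main

import (
	"fmt"

	"github.com/coreos/clair/database"
)

func main() {
	// An in-memory graph; "bolt" or "leveldb" with an on-disk path also work.
	if err := database.Open("memstore", ""); err != nil {
		panic(err)
	}
	defer database.Close()

	// Healthcheck adds and removes a probe quad in a single transaction.
	status := database.Healthcheck()
	fmt.Println("database healthy:", status.IsHealthy)
}
```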
|
||||
|
||||
// toValue returns a single value from a path
|
||||
// If the path does not lead to a value, an empty string is returned
|
||||
// If the path leads to multiple values or if a database error occurs, an empty string and an error are returned
|
||||
func toValue(p *path.Path) (string, error) {
|
||||
var value string
|
||||
|
||||
it, _ := p.BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
if value != "" {
|
||||
log.Error("failed query in toValue: used on an iterator containing multiple values")
|
||||
return "", ErrInconsistent
|
||||
}
|
||||
|
||||
if it.Result() != nil {
|
||||
value = store.NameOf(it.Result())
|
||||
}
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in toValue: %s", it.Err())
|
||||
return "", ErrBackendException
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// toValues returns multiple values from a path
|
||||
// If the path does not lead to any value, an empty array is returned
|
||||
// If a database error occurs, an empty array and an error are returned
|
||||
func toValues(p *path.Path) ([]string, error) {
|
||||
var values []string
|
||||
|
||||
it, _ := p.BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
if it.Result() != nil {
|
||||
value := store.NameOf(it.Result())
|
||||
if value != "" {
|
||||
values = append(values, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in toValues: %s", it.Err())
|
||||
return []string{}, ErrBackendException
|
||||
}
|
||||
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// saveFields appends cayley's Save method to a path for each field in
|
||||
// selectedFields, except the ones that also appear in exceptFields.
|
||||
func saveFields(p *path.Path, selectedFields []string, exceptFields []string) {
|
||||
for _, selectedField := range selectedFields {
|
||||
if utils.Contains(selectedField, exceptFields) {
|
||||
continue
|
||||
}
|
||||
p = p.Save(selectedField, selectedField)
|
||||
}
|
||||
}
|
81
database/database_test.go
Normal file
@ -0,0 +1,81 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/cayley"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestHealthcheck(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
b := Healthcheck()
|
||||
assert.True(t, b.IsHealthy, "Healthcheck failed")
|
||||
}
|
||||
|
||||
func TestToValue(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
// toValue()
|
||||
v, err := toValue(cayley.StartPath(store, "tests").Out("are"))
|
||||
assert.Nil(t, err, "toValue should work even if the requested path leads to nothing")
|
||||
assert.Equal(t, "", v, "toValue should return an empty string if the requested path leads to nothing")
|
||||
|
||||
store.AddQuad(cayley.Quad("tests", "are", "awesome", ""))
|
||||
v, err = toValue(cayley.StartPath(store, "tests").Out("are"))
|
||||
assert.Nil(t, err, "toValue should have worked")
|
||||
assert.Equal(t, "awesome", v, "toValue did not return the expected value")
|
||||
|
||||
store.AddQuad(cayley.Quad("tests", "are", "running", ""))
|
||||
v, err = toValue(cayley.StartPath(store, "tests").Out("are"))
|
||||
assert.NotNil(t, err, "toValue should return an error and an empty string if the path leads to multiple values")
|
||||
assert.Equal(t, "", v, "toValue should return an error and an empty string if the path leads to multiple values")
|
||||
|
||||
// toValues()
|
||||
vs, err := toValues(cayley.StartPath(store, "CoreOS").Out(FieldIs))
|
||||
assert.Nil(t, err, "toValues should work even if the requested path leads to nothing")
|
||||
assert.Len(t, vs, 0, "toValue should return an empty array if the requested path leads to nothing")
|
||||
words := []string{"powerful", "lightweight"}
|
||||
for i, word := range words {
|
||||
store.AddQuad(cayley.Quad("CoreOS", FieldIs, word, ""))
|
||||
v, err := toValues(cayley.StartPath(store, "CoreOS").Out(FieldIs))
|
||||
assert.Nil(t, err, "toValues should have worked")
|
||||
assert.Len(t, v, i+1, "toValues did not return the right amount of values")
|
||||
for _, e := range words[:i+1] {
|
||||
assert.Contains(t, v, e, "toValues did not return the values we expected")
|
||||
}
|
||||
}
|
||||
|
||||
// toValue(s)() and empty values
|
||||
store.AddQuad(cayley.Quad("bob", "likes", "", ""))
|
||||
v, err = toValue(cayley.StartPath(store, "bob").Out("likes"))
|
||||
assert.Nil(t, err, "toValue should work even if the requested path leads to nothing")
|
||||
assert.Equal(t, "", v, "toValue should return an empty string if the requested path leads to nothing")
|
||||
|
||||
store.AddQuad(cayley.Quad("bob", "likes", "running", ""))
|
||||
v, err = toValue(cayley.StartPath(store, "bob").Out("likes"))
|
||||
assert.Nil(t, err, "toValue should have worked")
|
||||
assert.Equal(t, "running", v, "toValue did not return the expected value")
|
||||
|
||||
store.AddQuad(cayley.Quad("bob", "likes", "swimming", ""))
|
||||
va, err := toValues(cayley.StartPath(store, "bob").Out("likes"))
|
||||
assert.Nil(t, err, "toValues should have worked")
|
||||
assert.Len(t, va, 2, "toValues should have returned 2 values")
|
||||
}
|
58
database/flag.go
Normal file
@ -0,0 +1,58 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/google/cayley"
|
||||
)
|
||||
|
||||
// UpdateFlag creates a flag or updates an existing flag's value.
|
||||
func UpdateFlag(name, value string) error {
|
||||
if name == "" || value == "" {
|
||||
log.Warning("could not insert a flag which has an empty name or value")
|
||||
return cerrors.NewBadRequestError("could not insert a flag which has an empty name or value")
|
||||
}
|
||||
|
||||
// Initialize transaction
|
||||
t := cayley.NewTransaction()
|
||||
|
||||
// Get current flag value
|
||||
currentValue, err := GetFlagValue(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Build transaction
|
||||
name = "flag:" + name
|
||||
if currentValue != "" {
|
||||
t.RemoveQuad(cayley.Quad(name, "value", currentValue, ""))
|
||||
}
|
||||
t.AddQuad(cayley.Quad(name, "value", value, ""))
|
||||
|
||||
// Apply transaction
|
||||
if err = store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (UpdateFlag): %s", err)
|
||||
return ErrTransaction
|
||||
}
|
||||
|
||||
// Return
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetFlagValue returns the value of the flag given by its name (or an empty string if the flag does not exist)
|
||||
func GetFlagValue(name string) (string, error) {
|
||||
return toValue(cayley.StartPath(store, "flag:"+name).Out("value"))
|
||||
}
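A short sketch of the flag helpers; the flag name and value are hypothetical (e.g. an updater checkpoint), not values from this commit.

```go
package main

import (
	"fmt"

	"github.com/coreos/clair/database"
)

func main() {
	if err := database.Open("memstore", ""); err != nil {
		panic(err)
	}
	defer database.Close()

	// Store a checkpoint, then read it back.
	if err := database.UpdateFlag("updater/last-run", "2015-11-13"); err != nil {
		panic(err)
	}
	value, err := database.GetFlagValue("updater/last-run")
	if err != nil {
		panic(err)
	}
	fmt.Println("updater/last-run =", value) // "2015-11-13"
}
```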
|
48
database/flag_test.go
Normal file
@ -0,0 +1,48 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFlag(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
// Get non existing flag
|
||||
f, err := GetFlagValue("test")
|
||||
assert.Nil(t, err, "GetFlagValue should have worked")
|
||||
assert.Empty(t, "", f, "Getting a non-existing flag should return an empty string")
|
||||
|
||||
// Try to insert invalid flags
|
||||
assert.Error(t, UpdateFlag("test", ""), "It should not accept a flag with an empty name or value")
|
||||
assert.Error(t, UpdateFlag("", "test"), "It should not accept a flag with an empty name or value")
|
||||
assert.Error(t, UpdateFlag("", ""), "It should not accept a flag with an empty name or value")
|
||||
|
||||
// Insert a flag and verify its value
|
||||
assert.Nil(t, UpdateFlag("test", "test1"))
|
||||
f, err = GetFlagValue("test")
|
||||
assert.Nil(t, err, "GetFlagValue should have worked")
|
||||
assert.Equal(t, "test1", f, "GetFlagValue did not return the expected value")
|
||||
|
||||
// Update a flag and verify its value
|
||||
assert.Nil(t, UpdateFlag("test", "test2"))
|
||||
f, err = GetFlagValue("test")
|
||||
assert.Nil(t, err, "GetFlagValue should have worked")
|
||||
assert.Equal(t, "test2", f, "GetFlagValue did not return the expected value")
|
||||
}
|
377
database/layer.go
Normal file
@ -0,0 +1,377 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/google/cayley/graph/path"
|
||||
)
|
||||
|
||||
const (
|
||||
FieldLayerIsValue = "layer"
|
||||
FieldLayerID = "id"
|
||||
FieldLayerParent = "parent"
|
||||
FieldLayerSuccessors = "successors"
|
||||
FieldLayerOS = "os"
|
||||
FieldLayerInstalledPackages = "adds"
|
||||
FieldLayerRemovedPackages = "removes"
|
||||
FieldLayerEngineVersion = "engineVersion"
|
||||
|
||||
FieldLayerPackages = "adds/removes"
|
||||
)
|
||||
|
||||
var FieldLayerAll = []string{FieldLayerID, FieldLayerParent, FieldLayerSuccessors, FieldLayerOS, FieldLayerPackages, FieldLayerEngineVersion}
|
||||
|
||||
// Layer represents a unique container layer.
|
||||
type Layer struct {
|
||||
Node string `json:"-"`
|
||||
ID string
|
||||
ParentNode string `json:"-"`
|
||||
SuccessorsNodes []string `json:"-"`
|
||||
OS string
|
||||
InstalledPackagesNodes []string `json:"-"`
|
||||
RemovedPackagesNodes []string `json:"-"`
|
||||
EngineVersion int
|
||||
}
|
||||
|
||||
// GetNode returns the node name of a Layer
|
||||
// Requires the key field: ID
|
||||
func (l *Layer) GetNode() string {
|
||||
return FieldLayerIsValue + ":" + utils.Hash(l.ID)
|
||||
}
|
||||
|
||||
// InsertLayer inserts a single layer in the database.
|
||||
//
|
||||
// ID and EngineVersion fields are required.
|
||||
// ParentNode, OS, InstalledPackagesNodes and RemovedPackagesNodes are optional,
|
||||
// SuccessorsNodes is unnecessary.
|
||||
//
|
||||
// The ID MUST differ between two different layers.
|
||||
//
|
||||
//
|
||||
// If the Layer already exists, nothing is done, except if the provided engine
|
||||
// version is higher than the existing one, in which case, the OS,
|
||||
// InstalledPackagesNodes and RemovedPackagesNodes fields will be replaced.
|
||||
//
|
||||
// The layer should only contain the newly installed/removed packages.
|
||||
// There is no safeguard that prevents marking a package as newly installed
|
||||
// while it has already been installed in one of its parents.
|
||||
func InsertLayer(layer *Layer) error {
|
||||
// Verify parameters
|
||||
if layer.ID == "" {
|
||||
log.Warning("could not insert a layer which has an empty ID")
|
||||
return cerrors.NewBadRequestError("could not insert a layer which has an empty ID")
|
||||
}
|
||||
|
||||
// Create required data structures
|
||||
t := cayley.NewTransaction()
|
||||
layer.Node = layer.GetNode()
|
||||
|
||||
// Try to find an existing layer
|
||||
existingLayer, err := FindOneLayerByNode(layer.Node, FieldLayerAll)
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
if existingLayer != nil && existingLayer.EngineVersion >= layer.EngineVersion {
|
||||
// The layer exists and has an equal or higher engine version, do nothing
|
||||
return nil
|
||||
}
|
||||
|
||||
if existingLayer == nil {
|
||||
// Create case: add permanent nodes
|
||||
t.AddQuad(cayley.Quad(layer.Node, FieldIs, FieldLayerIsValue, ""))
|
||||
t.AddQuad(cayley.Quad(layer.Node, FieldLayerID, layer.ID, ""))
|
||||
t.AddQuad(cayley.Quad(layer.Node, FieldLayerParent, layer.ParentNode, ""))
|
||||
} else {
|
||||
// Update case: remove everything before we add updated data
|
||||
t.RemoveQuad(cayley.Quad(layer.Node, FieldLayerOS, existingLayer.OS, ""))
|
||||
for _, pkg := range existingLayer.InstalledPackagesNodes {
|
||||
t.RemoveQuad(cayley.Quad(layer.Node, FieldLayerInstalledPackages, pkg, ""))
|
||||
}
|
||||
for _, pkg := range existingLayer.RemovedPackagesNodes {
|
||||
t.RemoveQuad(cayley.Quad(layer.Node, FieldLayerRemovedPackages, pkg, ""))
|
||||
}
|
||||
t.RemoveQuad(cayley.Quad(layer.Node, FieldLayerEngineVersion, strconv.Itoa(existingLayer.EngineVersion), ""))
|
||||
}
|
||||
|
||||
// Add OS/Packages
|
||||
t.AddQuad(cayley.Quad(layer.Node, FieldLayerOS, layer.OS, ""))
|
||||
for _, pkg := range layer.InstalledPackagesNodes {
|
||||
t.AddQuad(cayley.Quad(layer.Node, FieldLayerInstalledPackages, pkg, ""))
|
||||
}
|
||||
for _, pkg := range layer.RemovedPackagesNodes {
|
||||
t.AddQuad(cayley.Quad(layer.Node, FieldLayerRemovedPackages, pkg, ""))
|
||||
}
|
||||
t.AddQuad(cayley.Quad(layer.Node, FieldLayerEngineVersion, strconv.Itoa(layer.EngineVersion), ""))
|
||||
|
||||
// Apply transaction
|
||||
if err = store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (InsertLayer): %s", err)
|
||||
return ErrTransaction
|
||||
}
|
||||
|
||||
return nil
|
||||
}
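To make the insert/update semantics above concrete, a small sketch that stores a two-layer chain and resolves the child's operating system through its parent; the layer IDs, OS string, and package nodes are made up for the example.

```go
package main

import (
	"fmt"

	"github.com/coreos/clair/database"
)

func main() {
	if err := database.Open("memstore", ""); err != nil {
		panic(err)
	}
	defer database.Close()

	// A base layer that defines the OS, and a child that installs two packages.
	base := &database.Layer{ID: "layer-base", OS: "debian:8", EngineVersion: 1}
	child := &database.Layer{
		ID:                     "layer-child",
		ParentNode:             base.GetNode(),
		InstalledPackagesNodes: []string{"p1", "p2"},
		EngineVersion:          1,
	}
	for _, l := range []*database.Layer{base, child} {
		if err := database.InsertLayer(l); err != nil {
			panic(err)
		}
	}

	found, err := database.FindOneLayerByID("layer-child", database.FieldLayerAll)
	if err != nil {
		panic(err)
	}
	os, err := found.OperatingSystem()
	if err != nil {
		panic(err)
	}
	fmt.Println(os) // "debian:8", inherited from the parent layer
}
```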
|
||||
|
||||
// FindOneLayerByID finds and returns a single layer having the given ID,
|
||||
// selecting the specified fields and hardcoding its ID
|
||||
func FindOneLayerByID(ID string, selectedFields []string) (*Layer, error) {
|
||||
t := &Layer{ID: ID}
|
||||
l, err := FindOneLayerByNode(t.GetNode(), selectedFields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l.ID = ID
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// FindOneLayerByNode finds and returns a single layer by its node, selecting the specified fields
|
||||
func FindOneLayerByNode(node string, selectedFields []string) (*Layer, error) {
|
||||
l, err := toLayers(cayley.StartPath(store, node).Has(FieldIs, FieldLayerIsValue), selectedFields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(l) == 1 {
|
||||
return l[0], nil
|
||||
}
|
||||
if len(l) > 1 {
|
||||
log.Errorf("found multiple layers with identical node [Node: %s]", node)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
|
||||
return nil, cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
// FindAllLayersByAddedPackageNodes finds and returns all layers that add the
|
||||
// given packages (by their nodes), selecting the specified fields
|
||||
func FindAllLayersByAddedPackageNodes(nodes []string, selectedFields []string) ([]*Layer, error) {
|
||||
layers, err := toLayers(cayley.StartPath(store, nodes...).In(FieldLayerInstalledPackages), selectedFields)
|
||||
if err != nil {
|
||||
return []*Layer{}, err
|
||||
}
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
// FindAllLayersByPackageNode finds and returns all layers that have the given package (by its node), selecting the specified fields
|
||||
// func FindAllLayersByPackageNode(node string, only map[string]struct{}) ([]*Layer, error) {
|
||||
// var layers []*Layer
|
||||
//
|
||||
// // We need the successors field
|
||||
// if only != nil {
|
||||
// only[FieldLayerSuccessors] = struct{}{}
|
||||
// }
|
||||
//
|
||||
// // Get all the layers which remove the package
|
||||
// layersNodesRemoving, err := toValues(cayley.StartPath(store, node).In(FieldLayerRemovedPackages).Has(FieldIs, FieldLayerIsValue))
|
||||
// if err != nil {
|
||||
// return []*Layer{}, err
|
||||
// }
|
||||
// layersNodesRemovingMap := make(map[string]struct{})
|
||||
// for _, l := range layersNodesRemoving {
|
||||
// layersNodesRemovingMap[l] = struct{}{}
|
||||
// }
|
||||
//
|
||||
// layersToBrowse, err := toLayers(cayley.StartPath(store, node).In(FieldLayerInstalledPackages).Has(FieldIs, FieldLayerIsValue), only)
|
||||
// if err != nil {
|
||||
// return []*Layer{}, err
|
||||
// }
|
||||
// for len(layersToBrowse) > 0 {
|
||||
// var newLayersToBrowse []*Layer
|
||||
// for _, layerToBrowse := range layersToBrowse {
|
||||
// if _, layerRemovesPackage := layersNodesRemovingMap[layerToBrowse.Node]; !layerRemovesPackage {
|
||||
// layers = append(layers, layerToBrowse)
|
||||
// successors, err := layerToBrowse.Successors(only)
|
||||
// if err != nil {
|
||||
// return []*Layer{}, err
|
||||
// }
|
||||
// newLayersToBrowse = append(newLayersToBrowse, successors...)
|
||||
// }
|
||||
// layersToBrowse = newLayersToBrowse
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// return layers, nil
|
||||
// }
|
||||
|
||||
// toLayers converts a path leading to one or multiple layers to Layer structs,
|
||||
// selecting the specified fields
|
||||
func toLayers(path *path.Path, selectedFields []string) ([]*Layer, error) {
|
||||
var layers []*Layer
|
||||
|
||||
saveFields(path, selectedFields, []string{FieldLayerSuccessors, FieldLayerPackages, FieldLayerInstalledPackages, FieldLayerRemovedPackages})
|
||||
it, _ := path.BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
layer := Layer{Node: store.NameOf(it.Result())}
|
||||
for _, selectedField := range selectedFields {
|
||||
switch selectedField {
|
||||
case FieldLayerID:
|
||||
layer.ID = store.NameOf(tags[FieldLayerID])
|
||||
case FieldLayerParent:
|
||||
layer.ParentNode = store.NameOf(tags[FieldLayerParent])
|
||||
case FieldLayerSuccessors:
|
||||
var err error
|
||||
layer.SuccessorsNodes, err = toValues(cayley.StartPath(store, layer.Node).In(FieldLayerParent))
|
||||
if err != nil {
|
||||
log.Errorf("could not get successors of layer %s: %s.", layer.Node, err.Error())
|
||||
return nil, err
|
||||
}
|
||||
case FieldLayerOS:
|
||||
layer.OS = store.NameOf(tags[FieldLayerOS])
|
||||
case FieldLayerPackages:
|
||||
var err error
|
||||
it, _ := cayley.StartPath(store, layer.Node).OutWithTags([]string{"predicate"}, FieldLayerInstalledPackages, FieldLayerRemovedPackages).BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
predicate := store.NameOf(tags["predicate"])
|
||||
if predicate == FieldLayerInstalledPackages {
|
||||
layer.InstalledPackagesNodes = append(layer.InstalledPackagesNodes, store.NameOf(it.Result()))
|
||||
} else if predicate == FieldLayerRemovedPackages {
|
||||
layer.RemovedPackagesNodes = append(layer.RemovedPackagesNodes, store.NameOf(it.Result()))
|
||||
}
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("could not get installed/removed packages of layer %s: %s.", layer.Node, it.Err())
|
||||
return nil, err
|
||||
}
|
||||
case FieldLayerEngineVersion:
|
||||
layer.EngineVersion, _ = strconv.Atoi(store.NameOf(tags[FieldLayerEngineVersion]))
|
||||
default:
|
||||
panic("unknown selectedField")
|
||||
}
|
||||
}
|
||||
layers = append(layers, &layer)
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in toLayers: %s", it.Err())
|
||||
return []*Layer{}, ErrBackendException
|
||||
}
|
||||
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
// Successors find and returns all layers that define l as their parent,
|
||||
// selecting the specified fields
|
||||
// It requires that FieldLayerSuccessors field has been selected on l
|
||||
// func (l *Layer) Successors(selectedFields []string) ([]*Layer, error) {
|
||||
// if len(l.SuccessorsNodes) == 0 {
|
||||
// return []*Layer{}, nil
|
||||
// }
|
||||
//
|
||||
// return toLayers(cayley.StartPath(store, l.SuccessorsNodes...), only)
|
||||
// }
|
||||
|
||||
// Parent finds and returns the parent layer of l, selecting the specified fields
|
||||
// It requires that the FieldLayerParent field has been selected on l
|
||||
func (l *Layer) Parent(selectedFields []string) (*Layer, error) {
|
||||
if l.ParentNode == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
parent, err := toLayers(cayley.StartPath(store, l.ParentNode), selectedFields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(parent) == 1 {
|
||||
return parent[0], nil
|
||||
}
|
||||
if len(parent) > 1 {
|
||||
log.Errorf("found multiple layers when getting parent layer of layer %s", l.ParentNode)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Sublayers find and returns all layers that compose l, selecting the specified
|
||||
// fields
|
||||
// It requires that FieldLayerParent field has been selected on l
|
||||
// The base image comes first, and l is last
|
||||
// func (l *Layer) Sublayers(selectedFields []string) ([]*Layer, error) {
|
||||
// var sublayers []*Layer
|
||||
//
|
||||
// // We need the parent field
|
||||
// if only != nil {
|
||||
// only[FieldLayerParent] = struct{}{}
|
||||
// }
|
||||
//
|
||||
// parent, err := l.Parent(only)
|
||||
// if err != nil {
|
||||
// return []*Layer{}, err
|
||||
// }
|
||||
// if parent != nil {
|
||||
// parentSublayers, err := parent.Sublayers(only)
|
||||
// if err != nil {
|
||||
// return []*Layer{}, err
|
||||
// }
|
||||
// sublayers = append(sublayers, parentSublayers...)
|
||||
// }
|
||||
//
|
||||
// sublayers = append(sublayers, l)
|
||||
//
|
||||
// return sublayers, nil
|
||||
// }
|
||||
|
||||
// AllPackages computes the full list of packages that l has and returns them as
|
||||
// nodes.
|
||||
// It requires that the FieldLayerParent and FieldLayerPackages
|
||||
// fields have been selected on l
|
||||
func (l *Layer) AllPackages() ([]string, error) {
|
||||
var allPackages []string
|
||||
|
||||
parent, err := l.Parent([]string{FieldLayerParent, FieldLayerPackages})
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
if parent != nil {
|
||||
allPackages, err = parent.AllPackages()
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return append(utils.CompareStringLists(allPackages, l.RemovedPackagesNodes), l.InstalledPackagesNodes...), nil
|
||||
}
|
||||
|
||||
// OperatingSystem tries to find the Operating System of a layer using its
|
||||
// parents.
|
||||
// It requires that the FieldLayerParent and FieldLayerOS fields have been
|
||||
// selected on l
|
||||
func (l *Layer) OperatingSystem() (string, error) {
|
||||
if l.OS != "" {
|
||||
return l.OS, nil
|
||||
}
|
||||
|
||||
// Try from the parent
|
||||
parent, err := l.Parent([]string{FieldLayerParent, FieldLayerOS})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if parent != nil {
|
||||
return parent.OperatingSystem()
|
||||
}
|
||||
return "", nil
|
||||
}
|
162
database/layer_test.go
Normal file
@ -0,0 +1,162 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestInvalidLayers tries to insert invalid layers
|
||||
func TestInvalidLayers(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
assert.Error(t, InsertLayer(&Layer{ID: ""})) // No ID
|
||||
}
|
||||
|
||||
// TestLayerSimple inserts a single layer and ensures it can be retrieved and
|
||||
// that its methods work
|
||||
func TestLayerSimple(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
// Insert a layer and find it back
|
||||
l1 := &Layer{ID: "l1", OS: "os1", InstalledPackagesNodes: []string{"p1", "p2"}, EngineVersion: 1}
|
||||
if assert.Nil(t, InsertLayer(l1)) {
|
||||
fl1, err := FindOneLayerByID("l1", FieldLayerAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, fl1) {
|
||||
// Saved = found
|
||||
assert.True(t, layerEqual(l1, fl1), "layers are not equal, expected %v, have %s", l1, fl1)
|
||||
|
||||
// No parent
|
||||
p, err := fl1.Parent(FieldLayerAll)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, p)
|
||||
|
||||
// AllPackages()
|
||||
pk, err := fl1.AllPackages()
|
||||
assert.Nil(t, err)
|
||||
if assert.Len(t, pk, 2) {
|
||||
assert.Contains(t, pk, l1.InstalledPackagesNodes[0])
|
||||
assert.Contains(t, pk, l1.InstalledPackagesNodes[1])
|
||||
}
|
||||
// OS()
|
||||
o, err := fl1.OperatingSystem()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, l1.OS, o)
|
||||
}
|
||||
|
||||
// FindAllLayersByAddedPackageNodes
|
||||
al1, err := FindAllLayersByAddedPackageNodes([]string{"p1", "p3"}, FieldLayerAll)
|
||||
if assert.Nil(t, err) && assert.Len(t, al1, 1) {
|
||||
assert.Equal(t, al1[0].Node, l1.Node)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestLayerTree inserts a tree of layers and ensures that the tree logic works
|
||||
func TestLayerTree(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
var layers []*Layer
|
||||
layers = append(layers, &Layer{ID: "l1"})
|
||||
layers = append(layers, &Layer{ID: "l2", ParentNode: layers[0].GetNode(), OS: "os2", InstalledPackagesNodes: []string{"p1", "p2"}})
|
||||
layers = append(layers, &Layer{ID: "l3", ParentNode: layers[1].GetNode()}) // Repeat an empty layer archive (l1)
|
||||
layers = append(layers, &Layer{ID: "l4a", ParentNode: layers[2].GetNode(), InstalledPackagesNodes: []string{"p3"}, RemovedPackagesNodes: []string{"p1", "p4"}}) // p4 does not exists and thu can't actually be removed
|
||||
layers = append(layers, &Layer{ID: "l4b", ParentNode: layers[2].GetNode(), InstalledPackagesNodes: []string{}, RemovedPackagesNodes: []string{"p2", "p1"}})
|
||||
|
||||
var flayers []*Layer
|
||||
ok := true
|
||||
for _, l := range layers {
|
||||
ok = ok && assert.Nil(t, InsertLayer(l))
|
||||
|
||||
fl, err := FindOneLayerByID(l.ID, FieldLayerAll)
|
||||
ok = ok && assert.Nil(t, err)
|
||||
ok = ok && assert.NotNil(t, fl)
|
||||
flayers = append(flayers, fl)
|
||||
}
|
||||
if assert.True(t, ok) {
|
||||
// Start testing
|
||||
|
||||
// l4a
|
||||
// Parent()
|
||||
fl4ap, err := flayers[3].Parent(FieldLayerAll)
|
||||
assert.Nil(t, err, "l4a should has l3 as parent")
|
||||
if assert.NotNil(t, fl4ap, "l4a should have l3 as parent") {
|
||||
assert.Equal(t, "l3", fl4ap.ID, "l4a should has l3 as parent")
|
||||
}
|
||||
|
||||
// OS()
|
||||
fl4ao, err := flayers[3].OperatingSystem()
|
||||
assert.Nil(t, err, "l4a should inherits its OS from l2")
|
||||
assert.Equal(t, "os2", fl4ao, "l4a should inherits its OS from l2")
|
||||
// AllPackages()
|
||||
fl4apkg, err := flayers[3].AllPackages()
|
||||
assert.Nil(t, err)
|
||||
if assert.Len(t, fl4apkg, 2) {
|
||||
assert.Contains(t, fl4apkg, "p2")
|
||||
assert.Contains(t, fl4apkg, "p3")
|
||||
}
|
||||
|
||||
// l4b
|
||||
// AllPackages()
|
||||
fl4bpkg, err := flayers[4].AllPackages()
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, fl4bpkg, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLayerUpdate(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
l1 := &Layer{ID: "l1", OS: "os1", InstalledPackagesNodes: []string{"p1", "p2"}, RemovedPackagesNodes: []string{"p3", "p4"}, EngineVersion: 1}
|
||||
if assert.Nil(t, InsertLayer(l1)) {
|
||||
// Do not update layer content if the engine versions are equal
|
||||
l1b := &Layer{ID: "l1", OS: "os2", InstalledPackagesNodes: []string{"p1"}, RemovedPackagesNodes: []string{""}, EngineVersion: 1}
|
||||
if assert.Nil(t, InsertLayer(l1b)) {
|
||||
fl1b, err := FindOneLayerByID(l1.ID, FieldLayerAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, fl1b) {
|
||||
assert.True(t, layerEqual(l1, fl1b), "layer contents are not equal, expected %v, have %s", l1, fl1b)
|
||||
}
|
||||
}
|
||||
|
||||
// Update the layer content with new data and a higher engine version
|
||||
l1c := &Layer{ID: "l1", OS: "os2", InstalledPackagesNodes: []string{"p1", "p5"}, RemovedPackagesNodes: []string{"p6", "p7"}, EngineVersion: 2}
|
||||
if assert.Nil(t, InsertLayer(l1c)) {
|
||||
fl1c, err := FindOneLayerByID(l1c.ID, FieldLayerAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, fl1c) {
|
||||
assert.True(t, layerEqual(l1c, fl1c), "layer contents are not equal, expected %v, have %s", l1c, fl1c)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func layerEqual(expected, actual *Layer) bool {
|
||||
eq := true
|
||||
eq = eq && expected.Node == actual.Node
|
||||
eq = eq && expected.ID == actual.ID
|
||||
eq = eq && expected.ParentNode == actual.ParentNode
|
||||
eq = eq && expected.OS == actual.OS
|
||||
eq = eq && expected.EngineVersion == actual.EngineVersion
|
||||
eq = eq && len(utils.CompareStringLists(actual.SuccessorsNodes, expected.SuccessorsNodes)) == 0 && len(utils.CompareStringLists(expected.SuccessorsNodes, actual.SuccessorsNodes)) == 0
|
||||
eq = eq && len(utils.CompareStringLists(actual.RemovedPackagesNodes, expected.RemovedPackagesNodes)) == 0 && len(utils.CompareStringLists(expected.RemovedPackagesNodes, actual.RemovedPackagesNodes)) == 0
|
||||
eq = eq && len(utils.CompareStringLists(actual.InstalledPackagesNodes, expected.InstalledPackagesNodes)) == 0 && len(utils.CompareStringLists(expected.InstalledPackagesNodes, actual.InstalledPackagesNodes)) == 0
|
||||
return eq
|
||||
}
|
137
database/lock.go
Normal file
@ -0,0 +1,137 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/barakmich/glog"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/google/cayley/graph/path"
|
||||
)
|
||||
|
||||
// Lock tries to set a temporary lock in the database.
|
||||
// If a lock already exists with the given name/owner, then the lock is renewed
|
||||
//
|
||||
// Lock does not block; instead, it returns true and its expiration time
|
||||
// if the lock has been successfully acquired, or false otherwise.
|
||||
func Lock(name string, duration time.Duration, owner string) (bool, time.Time) {
|
||||
pruneLocks()
|
||||
|
||||
until := time.Now().Add(duration)
|
||||
untilString := strconv.FormatInt(until.Unix(), 10)
|
||||
|
||||
// Try to get the expiration time of a lock with the same name/owner
|
||||
currentExpiration, err := toValue(cayley.StartPath(store, name).Has("locked_by", owner).Out("locked_until"))
|
||||
if err == nil && currentExpiration != "" {
|
||||
// Renew our lock
|
||||
if currentExpiration == untilString {
|
||||
return true, until
|
||||
}
|
||||
|
||||
t := cayley.NewTransaction()
|
||||
t.RemoveQuad(cayley.Quad(name, "locked_until", currentExpiration, ""))
|
||||
t.AddQuad(cayley.Quad(name, "locked_until", untilString, ""))
|
||||
// It is not necessary to verify if the lock is ours again in the transaction
|
||||
// because if someone took it, the lock's current expiration probably changed and the transaction will fail
|
||||
return store.ApplyTransaction(t) == nil, until
|
||||
}
|
||||
|
||||
t := cayley.NewTransaction()
|
||||
t.AddQuad(cayley.Quad(name, "locked", "locked", "")) // Necessary to make the transaction fails if the lock already exists (and has not been pruned)
|
||||
t.AddQuad(cayley.Quad(name, "locked_until", untilString, ""))
|
||||
t.AddQuad(cayley.Quad(name, "locked_by", owner, ""))
|
||||
|
||||
glog.SetStderrThreshold("FATAL")
|
||||
success := store.ApplyTransaction(t) == nil
|
||||
glog.SetStderrThreshold("ERROR")
|
||||
|
||||
return success, until
|
||||
}
|
||||
|
||||
// Unlock releases the lock with the given name if it is held by the given owner.
|
||||
func Unlock(name, owner string) {
|
||||
pruneLocks()
|
||||
|
||||
t := cayley.NewTransaction()
|
||||
it, _ := cayley.StartPath(store, name).Has("locked", "locked").Has("locked_by", owner).Save("locked_until", "locked_until").BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
t.RemoveQuad(cayley.Quad(name, "locked", "locked", ""))
|
||||
t.RemoveQuad(cayley.Quad(name, "locked_until", store.NameOf(tags["locked_until"]), ""))
|
||||
t.RemoveQuad(cayley.Quad(name, "locked_by", owner, ""))
|
||||
}
|
||||
|
||||
store.ApplyTransaction(t)
|
||||
}
|
||||
|
||||
// LockInfo returns the owner of a lock specified by its name and its
|
||||
// expiration time
|
||||
func LockInfo(name string) (string, time.Time, error) {
|
||||
it, _ := cayley.StartPath(store, name).Has("locked", "locked").Save("locked_until", "locked_until").Save("locked_by", "locked_by").BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
tt, _ := strconv.ParseInt(store.NameOf(tags["locked_until"]), 10, 64)
|
||||
return store.NameOf(tags["locked_by"]), time.Unix(tt, 0), nil
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in LockInfo: %s", it.Err())
|
||||
return "", time.Time{}, ErrBackendException
|
||||
}
|
||||
|
||||
return "", time.Time{}, cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
// pruneLocks removes every expired lock from the database
|
||||
func pruneLocks() {
|
||||
now := time.Now()
|
||||
|
||||
// Delete every expired lock
|
||||
tr := cayley.NewTransaction()
|
||||
it, _ := cayley.StartPath(store, "locked").In("locked").Save("locked_until", "locked_until").Save("locked_by", "locked_by").BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
n := store.NameOf(it.Result())
|
||||
t := store.NameOf(tags["locked_until"])
|
||||
o := store.NameOf(tags["locked_by"])
|
||||
tt, _ := strconv.ParseInt(t, 10, 64)
|
||||
|
||||
if now.Unix() > tt {
|
||||
log.Debugf("Lock %s owned by %s has expired.", n, o)
|
||||
tr.RemoveQuad(cayley.Quad(n, "locked", "locked", ""))
|
||||
tr.RemoveQuad(cayley.Quad(n, "locked_until", t, ""))
|
||||
tr.RemoveQuad(cayley.Quad(n, "locked_by", o, ""))
|
||||
}
|
||||
}
|
||||
store.ApplyTransaction(tr)
|
||||
}
|
||||
|
||||
// getLockedNodes returns every node that is currently locked
|
||||
func getLockedNodes() *path.Path {
|
||||
return cayley.StartPath(store, "locked").In("locked")
|
||||
}
|
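A minimal usage sketch of the lock helpers above. The import path github.com/coreos/clair/database, the lock name "updater" and the owner "worker-1" are assumptions for illustration; lock_test.go below exercises the same API in more detail.

```go
package main

import (
	"fmt"
	"time"

	"github.com/coreos/clair/database" // assumed import path for this package
)

func main() {
	database.Open("memstore", "")
	defer database.Close()

	// Try to take a ten-minute lease on the "updater" lock.
	acquired, until := database.Lock("updater", 10*time.Minute, "worker-1")
	if !acquired {
		// Someone else holds the lock; report who and until when.
		owner, expiration, _ := database.LockInfo("updater")
		fmt.Printf("lock held by %s until %v\n", owner, expiration)
		return
	}
	defer database.Unlock("updater", "worker-1")

	fmt.Printf("lock acquired until %v\n", until)
	// ... perform the exclusive work here ...
}
```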
56
database/lock_test.go
Normal file
@ -0,0 +1,56 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLock(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
var l bool
|
||||
var et time.Time
|
||||
|
||||
// Create a first lock
|
||||
l, _ = Lock("test1", time.Minute, "owner1")
|
||||
assert.True(t, l)
|
||||
// Try to lock the same lock with another owner
|
||||
l, _ = Lock("test1", time.Minute, "owner2")
|
||||
assert.False(t, l)
|
||||
// Renew the lock
|
||||
l, _ = Lock("test1", time.Minute, "owner1")
|
||||
assert.True(t, l)
|
||||
// Unlock and then relock by someone else
|
||||
Unlock("test1", "owner1")
|
||||
l, et = Lock("test1", time.Minute, "owner2")
|
||||
assert.True(t, l)
|
||||
// LockInfo
|
||||
o, et2, err := LockInfo("test1")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "owner2", o)
|
||||
assert.Equal(t, et.Second(), et2.Second())
|
||||
|
||||
// Create a second lock which is actually already expired ...
|
||||
l, _ = Lock("test2", -time.Minute, "owner1")
|
||||
assert.True(t, l)
|
||||
// Take over the lock
|
||||
l, _ = Lock("test2", time.Minute, "owner2")
|
||||
assert.True(t, l)
|
||||
}
|
402
database/notification.go
Normal file
@ -0,0 +1,402 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/pborman/uuid"
|
||||
)
|
||||
|
||||
// maxNotifications is the number of notifications that InsertNotifications
|
||||
// will accept at the same time. Above this number, notifications are ignored.
|
||||
const maxNotifications = 100
|
||||
|
||||
// A Notification defines an interface to a message that can be sent by a
|
||||
// notifier.Notifier.
|
||||
// A NotificationWrapper has to be used to convert it into a NotificationWrap,
|
||||
// which can be stored in the database.
|
||||
type Notification interface {
|
||||
// GetName returns the explicit (human-readable) name of a notification.
|
||||
GetName() string
|
||||
// GetType returns the type of a notification, which is used by a
|
||||
// NotificationWrapper to determine the concrete type of a Notification.
|
||||
GetType() string
|
||||
// GetContent returns the content of the notification.
|
||||
GetContent() (interface{}, error)
|
||||
}
|
||||
|
||||
// NotificationWrapper is an interface defining how to convert a Notification to
|
||||
// a NotificationWrap object and vice-versa.
|
||||
type NotificationWrapper interface {
|
||||
// Wrap packs a Notification instance into a new NotificationWrap.
|
||||
Wrap(n Notification) (*NotificationWrap, error)
|
||||
// Unwrap unpacks an instance of NotificationWrap into a new Notification.
|
||||
Unwrap(nw *NotificationWrap) (Notification, error)
|
||||
}
|
||||
|
||||
// A NotificationWrap wraps a Notification into something that can be stored in
|
||||
// the database. A NotificationWrapper has to be used to convert it into a
|
||||
// Notification.
|
||||
type NotificationWrap struct {
|
||||
Type string
|
||||
Data string
|
||||
}
|
||||
|
||||
// DefaultWrapper is an implementation of NotificationWrapper that supports the
|
||||
// NewVulnerabilityNotification, VulnerabilityPriorityIncreasedNotification and
// VulnerabilityPackageChangedNotification types.
|
||||
type DefaultWrapper struct{}
|
||||
|
||||
func (w *DefaultWrapper) Wrap(n Notification) (*NotificationWrap, error) {
|
||||
data, err := json.Marshal(n)
|
||||
if err != nil {
|
||||
log.Warningf("could not marshal notification [ID: %s, Type: %s]: %s", n.GetName(), n.GetType(), err)
|
||||
return nil, cerrors.NewBadRequestError("could not marshal notification with DefaultWrapper")
|
||||
}
|
||||
|
||||
return &NotificationWrap{Type: n.GetType(), Data: string(data)}, nil
|
||||
}
|
||||
|
||||
func (w *DefaultWrapper) Unwrap(nw *NotificationWrap) (Notification, error) {
|
||||
var v Notification
|
||||
|
||||
// Create struct depending on the type
|
||||
switch nw.Type {
|
||||
case "NewVulnerabilityNotification":
|
||||
v = &NewVulnerabilityNotification{}
|
||||
case "VulnerabilityPriorityIncreasedNotification":
|
||||
v = &VulnerabilityPriorityIncreasedNotification{}
|
||||
case "VulnerabilityPackageChangedNotification":
|
||||
v = &VulnerabilityPackageChangedNotification{}
|
||||
default:
|
||||
log.Warningf("could not unwrap notification [Type: %s]: unknown type for DefaultWrapper", nw.Type)
|
||||
return nil, cerrors.NewBadRequestError("could not unwrap notification")
|
||||
}
|
||||
|
||||
// Unmarshal notification
|
||||
err := json.Unmarshal([]byte(nw.Data), v)
|
||||
if err != nil {
|
||||
log.Warningf("could not unmarshal notification with DefaultWrapper [Type: %s]: %s", nw.Type, err)
|
||||
return nil, cerrors.NewBadRequestError("could not unmarshal notification")
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// GetDefaultNotificationWrapper returns the default wrapper
|
||||
func GetDefaultNotificationWrapper() NotificationWrapper {
|
||||
return &DefaultWrapper{}
|
||||
}
|
||||
|
||||
// A NewVulnerabilityNotification is a notification that informs about a new
|
||||
// vulnerability and contains all the layers that introduce that vulnerability
|
||||
type NewVulnerabilityNotification struct {
|
||||
VulnerabilityID string
|
||||
}
|
||||
|
||||
func (n *NewVulnerabilityNotification) GetName() string {
|
||||
return n.VulnerabilityID
|
||||
}
|
||||
|
||||
func (n *NewVulnerabilityNotification) GetType() string {
|
||||
return "NewVulnerabilityNotification"
|
||||
}
|
||||
|
||||
func (n *NewVulnerabilityNotification) GetContent() (interface{}, error) {
|
||||
// This notification is about a new vulnerability
|
||||
// Returns the list of layers that introduce this vulnerability
|
||||
|
||||
// Find vulnerability.
|
||||
vulnerability, err := FindOneVulnerability(n.VulnerabilityID, []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
layers, err := FindAllLayersIntroducingVulnerability(n.VulnerabilityID, []string{FieldLayerID})
|
||||
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
layersIDs := []string{} // empty slice, not null
|
||||
for _, l := range layers {
|
||||
layersIDs = append(layersIDs, l.ID)
|
||||
}
|
||||
|
||||
return struct {
|
||||
Vulnerability *AbstractVulnerability
|
||||
IntroducingLayersIDs []string
|
||||
}{
|
||||
Vulnerability: abstractVulnerability,
|
||||
IntroducingLayersIDs: layersIDs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// A VulnerabilityPriorityIncreasedNotification is a notification that informs
|
||||
// about the fact that the priority of a vulnerability has increased
|
||||
// and contains all the layers that introduce that vulnerability.
|
||||
type VulnerabilityPriorityIncreasedNotification struct {
|
||||
VulnerabilityID string
|
||||
OldPriority, NewPriority types.Priority
|
||||
}
|
||||
|
||||
func (n *VulnerabilityPriorityIncreasedNotification) GetName() string {
|
||||
return n.VulnerabilityID
|
||||
}
|
||||
|
||||
func (n *VulnerabilityPriorityIncreasedNotification) GetType() string {
|
||||
return "VulnerabilityPriorityIncreasedNotification"
|
||||
}
|
||||
|
||||
func (n *VulnerabilityPriorityIncreasedNotification) GetContent() (interface{}, error) {
|
||||
// Returns the list of layers that introduce this vulnerability
|
||||
// And both the old and new priorities
|
||||
|
||||
// Find vulnerability.
|
||||
vulnerability, err := FindOneVulnerability(n.VulnerabilityID, []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
layers, err := FindAllLayersIntroducingVulnerability(n.VulnerabilityID, []string{FieldLayerID})
|
||||
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
layersIDs := []string{} // empty slice, not null
|
||||
for _, l := range layers {
|
||||
layersIDs = append(layersIDs, l.ID)
|
||||
}
|
||||
|
||||
return struct {
|
||||
Vulnerability *AbstractVulnerability
|
||||
OldPriority, NewPriority types.Priority
|
||||
IntroducingLayersIDs []string
|
||||
}{
|
||||
Vulnerability: abstractVulnerability,
|
||||
OldPriority: n.OldPriority,
|
||||
NewPriority: n.NewPriority,
|
||||
IntroducingLayersIDs: layersIDs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// A VulnerabilityPackageChangedNotification is a notification that informs that
|
||||
// an existing vulnerability's fixed package list has been updated and may not
|
||||
// affect some layers anymore or may affect new layers.
|
||||
type VulnerabilityPackageChangedNotification struct {
|
||||
VulnerabilityID string
|
||||
AddedFixedInNodes, RemovedFixedInNodes []string
|
||||
}
|
||||
|
||||
func (n *VulnerabilityPackageChangedNotification) GetName() string {
|
||||
return n.VulnerabilityID
|
||||
}
|
||||
|
||||
func (n *VulnerabilityPackageChangedNotification) GetType() string {
|
||||
return "VulnerabilityPackageChangedNotification"
|
||||
}
|
||||
|
||||
func (n *VulnerabilityPackageChangedNotification) GetContent() (interface{}, error) {
|
||||
// Returns the removed and added packages as well as the layers that
|
||||
// introduced the vulnerability in the past but don't anymore because of the
|
||||
// removed packages and the layers that now introduce the vulnerability
|
||||
// because of the added packages
|
||||
|
||||
// Find vulnerability.
|
||||
vulnerability, err := FindOneVulnerability(n.VulnerabilityID, []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
abstractVulnerability, err := vulnerability.ToAbstractVulnerability()
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
// First part of the answer: added/removed packages
|
||||
addedPackages, err := FindAllPackagesByNodes(n.AddedFixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackagePreviousVersion})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
removedPackages, err := FindAllPackagesByNodes(n.RemovedFixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackagePreviousVersion})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
// Second part of the answer
|
||||
var addedPackagesPreviousVersions []string
|
||||
for _, pkg := range addedPackages {
|
||||
previousVersions, err := pkg.PreviousVersions([]string{})
|
||||
if err != nil {
|
||||
return []*Layer{}, err
|
||||
}
|
||||
for _, version := range previousVersions {
|
||||
addedPackagesPreviousVersions = append(addedPackagesPreviousVersions, version.Node)
|
||||
}
|
||||
}
|
||||
var removedPackagesPreviousVersions []string
|
||||
for _, pkg := range removedPackages {
|
||||
previousVersions, err := pkg.PreviousVersions([]string{})
|
||||
if err != nil {
|
||||
return []*Layer{}, err
|
||||
}
|
||||
for _, version := range previousVersions {
|
||||
removedPackagesPreviousVersions = append(removedPackagesPreviousVersions, version.Node)
|
||||
}
|
||||
}
|
||||
|
||||
newIntroducingLayers, err := FindAllLayersByAddedPackageNodes(addedPackagesPreviousVersions, []string{FieldLayerID})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
formerIntroducingLayers, err := FindAllLayersByAddedPackageNodes(removedPackagesPreviousVersions, []string{FieldLayerID})
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
newIntroducingLayersIDs := []string{} // empty slice, not null
|
||||
for _, l := range newIntroducingLayers {
|
||||
newIntroducingLayersIDs = append(newIntroducingLayersIDs, l.ID)
|
||||
}
|
||||
formerIntroducingLayersIDs := []string{} // empty slice, not null
|
||||
for _, l := range formerIntroducingLayers {
|
||||
formerIntroducingLayersIDs = append(formerIntroducingLayersIDs, l.ID)
|
||||
}
|
||||
|
||||
// Remove layers that appear in both the new and former lists (e.g. packages that were updated but are still vulnerable)
|
||||
filteredNewIntroducingLayersIDs := utils.CompareStringLists(newIntroducingLayersIDs, formerIntroducingLayersIDs)
|
||||
filteredFormerIntroducingLayersIDs := utils.CompareStringLists(formerIntroducingLayersIDs, newIntroducingLayersIDs)
|
||||
|
||||
return struct {
|
||||
Vulnerability *AbstractVulnerability
|
||||
AddedAffectedPackages, RemovedAffectedPackages []*AbstractPackage
|
||||
NewIntroducingLayersIDs, FormerIntroducingLayerIDs []string
|
||||
}{
|
||||
Vulnerability: abstractVulnerability,
|
||||
AddedAffectedPackages: PackagesToAbstractPackages(addedPackages),
|
||||
RemovedAffectedPackages: PackagesToAbstractPackages(removedPackages),
|
||||
NewIntroducingLayersIDs: filteredNewIntroducingLayersIDs,
|
||||
FormerIntroducingLayerIDs: filteredFormerIntroducingLayersIDs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InsertNotifications stores multiple Notifications in the database
|
||||
// It uses the given NotificationWrapper to convert these notifications to
|
||||
// something that can be stored in the database.
|
||||
func InsertNotifications(notifications []Notification, wrapper NotificationWrapper) error {
|
||||
if len(notifications) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do not send notifications if there are too many of them (first update for example)
|
||||
if len(notifications) > maxNotifications {
|
||||
log.Noticef("Ignoring %d notifications", len(notifications))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Initialize transaction
|
||||
t := cayley.NewTransaction()
|
||||
|
||||
// Iterate over all the vulnerabilities we need to insert
|
||||
for _, notification := range notifications {
|
||||
// Wrap notification
|
||||
wrappedNotification, err := wrapper.Wrap(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
node := "notification:" + uuid.New()
|
||||
t.AddQuad(cayley.Quad(node, FieldIs, "notification", ""))
|
||||
t.AddQuad(cayley.Quad(node, "type", wrappedNotification.Type, ""))
|
||||
t.AddQuad(cayley.Quad(node, "data", wrappedNotification.Data, ""))
|
||||
t.AddQuad(cayley.Quad(node, "isSent", strconv.FormatBool(false), ""))
|
||||
}
|
||||
|
||||
// Apply transaction
|
||||
if err := store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (InsertNotifications): %s", err)
|
||||
return ErrTransaction
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FindOneNotificationToSend finds and returns a notification that is not sent
|
||||
// yet and not locked. Returns nil if there is none.
|
||||
func FindOneNotificationToSend(wrapper NotificationWrapper) (string, Notification, error) {
|
||||
it, _ := cayley.StartPath(store, "notification").In(FieldIs).Has("isSent", strconv.FormatBool(false)).Except(getLockedNodes()).Save("type", "type").Save("data", "data").BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
notification, err := wrapper.Unwrap(&NotificationWrap{Type: store.NameOf(tags["type"]), Data: store.NameOf(tags["data"])})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
return store.NameOf(it.Result()), notification, nil
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in FindOneNotificationToSend: %s", it.Err())
|
||||
return "", nil, ErrBackendException
|
||||
}
|
||||
|
||||
return "", nil, nil
|
||||
}
|
||||
|
||||
// CountNotificationsToSend returns the number of pending notifications
|
||||
// Note that it also counts the locked notifications.
|
||||
func CountNotificationsToSend() (int, error) {
|
||||
c := 0
|
||||
|
||||
it, _ := cayley.StartPath(store, "notification").In(FieldIs).Has("isSent", strconv.FormatBool(false)).BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
c = c + 1
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in CountNotificationsToSend: %s", it.Err())
|
||||
return 0, ErrBackendException
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// MarkNotificationAsSent marks a notification as sent.
|
||||
func MarkNotificationAsSent(node string) {
|
||||
// Initialize transaction
|
||||
t := cayley.NewTransaction()
|
||||
|
||||
t.RemoveQuad(cayley.Quad(node, "isSent", strconv.FormatBool(false), ""))
|
||||
t.AddQuad(cayley.Quad(node, "isSent", strconv.FormatBool(true), ""))
|
||||
|
||||
// Apply transaction
|
||||
store.ApplyTransaction(t)
|
||||
}
|
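A hedged sketch of how the helpers above are meant to be combined with the lock API from lock.go into a send loop. The import path, the owner name "notifier-1", the lease duration and the send callback are illustrative assumptions, not part of this commit.

```go
package main

import (
	"fmt"
	"time"

	"github.com/coreos/clair/database" // assumed import path for this package
)

// sendPending drains unsent notifications one by one, locking each node so
// that concurrent notifier instances skip it while it is being sent.
func sendPending(owner string, send func(database.Notification) error) {
	wrapper := database.GetDefaultNotificationWrapper()
	for {
		node, notification, err := database.FindOneNotificationToSend(wrapper)
		if err != nil || notification == nil {
			return // backend error, or nothing left to send
		}
		// Take a short lease on the notification node; locked nodes are
		// excluded from FindOneNotificationToSend for other instances.
		if locked, _ := database.Lock(node, 5*time.Minute, owner); !locked {
			continue // another instance grabbed it first
		}
		if send(notification) == nil {
			database.MarkNotificationAsSent(node)
		}
		database.Unlock(node, owner)
	}
}

func main() {
	database.Open("memstore", "")
	defer database.Close()

	sendPending("notifier-1", func(n database.Notification) error {
		fmt.Println("sending", n.GetName())
		return nil
	})
}
```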
144
database/notification_test.go
Normal file
@ -0,0 +1,144 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type TestWrapper struct{}
|
||||
|
||||
func (w *TestWrapper) Wrap(n Notification) (*NotificationWrap, error) {
|
||||
data, err := json.Marshal(n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &NotificationWrap{Type: n.GetType(), Data: string(data)}, nil
|
||||
}
|
||||
|
||||
func (w *TestWrapper) Unwrap(nw *NotificationWrap) (Notification, error) {
|
||||
var v Notification
|
||||
|
||||
switch nw.Type {
|
||||
case "ntest1":
|
||||
v = &NotificationTest1{}
|
||||
case "ntest2":
|
||||
v = &NotificationTest2{}
|
||||
default:
|
||||
return nil, fmt.Errorf("Could not Unwrap NotificationWrapper [Type: %s, Data: %s]: Unknown notification type.", nw.Type, nw.Data)
|
||||
}
|
||||
|
||||
err := json.Unmarshal([]byte(nw.Data), v)
|
||||
return v, err
|
||||
}
|
||||
|
||||
type NotificationTest1 struct {
|
||||
Test1 string
|
||||
}
|
||||
|
||||
func (n NotificationTest1) GetName() string {
|
||||
return n.Test1
|
||||
}
|
||||
|
||||
func (n NotificationTest1) GetType() string {
|
||||
return "ntest1"
|
||||
}
|
||||
|
||||
func (n NotificationTest1) GetContent() (interface{}, error) {
|
||||
return struct{ Test1 string }{Test1: n.Test1}, nil
|
||||
}
|
||||
|
||||
type NotificationTest2 struct {
|
||||
Test2 string
|
||||
}
|
||||
|
||||
func (n NotificationTest2) GetName() string {
|
||||
return n.Test2
|
||||
}
|
||||
|
||||
func (n NotificationTest2) GetType() string {
|
||||
return "ntest2"
|
||||
}
|
||||
|
||||
func (n NotificationTest2) GetContent() (interface{}, error) {
|
||||
return struct{ Test2 string }{Test2: n.Test2}, nil
|
||||
}
|
||||
|
||||
func TestNotification(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
wrapper := &TestWrapper{}
|
||||
|
||||
// Insert two notifications of different types
|
||||
n1 := &NotificationTest1{Test1: "test1"}
|
||||
n2 := &NotificationTest2{Test2: "test2"}
|
||||
err := InsertNotifications([]Notification{n1, n2}, &TestWrapper{})
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Count notifications to send
|
||||
c, err := CountNotificationsToSend()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, c)
|
||||
|
||||
foundN1 := false
|
||||
foundN2 := false
|
||||
|
||||
// Select the first one
|
||||
node, n, err := FindOneNotificationToSend(wrapper)
|
||||
assert.Nil(t, err)
|
||||
if assert.NotNil(t, n) {
|
||||
if reflect.DeepEqual(n1, n) {
|
||||
foundN1 = true
|
||||
} else if reflect.DeepEqual(n2, n) {
|
||||
foundN2 = true
|
||||
} else {
|
||||
assert.Fail(t, "did not find any expected notification")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Mark the first one as sent
|
||||
MarkNotificationAsSent(node)
|
||||
|
||||
// Count notifications to send
|
||||
c, err = CountNotificationsToSend()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1, c)
|
||||
|
||||
// Select again
|
||||
node, n, err = FindOneNotificationToSend(wrapper)
|
||||
assert.Nil(t, err)
|
||||
if foundN1 {
|
||||
assert.Equal(t, n2, n)
|
||||
} else if foundN2 {
|
||||
assert.Equal(t, n1, n)
|
||||
}
|
||||
|
||||
// Lock the second one
|
||||
Lock(node, time.Minute, "TestNotification")
|
||||
|
||||
// Select again
|
||||
_, n, err = FindOneNotificationToSend(wrapper)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, nil, n)
|
||||
}
|
44
database/os_mapping.go
Normal file
@ -0,0 +1,44 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
// DebianReleasesMapping translates Debian code names and class names to version numbers
|
||||
// TODO That should probably be stored in the database or in a file
|
||||
var DebianReleasesMapping = map[string]string{
|
||||
// Code names
|
||||
"squeeze": "6",
|
||||
"wheezy": "7",
|
||||
"jessie": "8",
|
||||
"stretch": "9",
|
||||
"sid": "unstable",
|
||||
|
||||
// Class names
|
||||
"oldstable": "7",
|
||||
"stable": "8",
|
||||
"testing": "9",
|
||||
"unstable": "unstable",
|
||||
}
|
||||
|
||||
// UbuntuReleasesMapping translates Ubuntu code names to version numbers
|
||||
// TODO That should probably be stored in the database or in a file
|
||||
var UbuntuReleasesMapping = map[string]string{
|
||||
"precise": "12.04",
|
||||
"quantal": "12.10",
|
||||
"raring": "13.04",
|
||||
"trusty": "14.04",
|
||||
"utopic": "14.10",
|
||||
"vivid": "15.04",
|
||||
"wily": "15.10",
|
||||
}
|
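A small usage sketch of the mappings above, assuming the package is importable as github.com/coreos/clair/database; names that are not known code names simply fall through unchanged.

```go
package main

import (
	"fmt"

	"github.com/coreos/clair/database" // assumed import path for this package
)

// normalizeRelease translates a Debian or Ubuntu code name to the version
// number used elsewhere in the database; unknown names are returned as-is.
func normalizeRelease(name string) string {
	if v, ok := database.DebianReleasesMapping[name]; ok {
		return v
	}
	if v, ok := database.UbuntuReleasesMapping[name]; ok {
		return v
	}
	return name
}

func main() {
	fmt.Println(normalizeRelease("jessie")) // "8"
	fmt.Println(normalizeRelease("trusty")) // "14.04"
	fmt.Println(normalizeRelease("7"))      // already a version number
}
```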
485
database/package.go
Normal file
@ -0,0 +1,485 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/google/cayley/graph/path"
|
||||
)
|
||||
|
||||
const (
|
||||
FieldPackageIsValue = "package"
|
||||
FieldPackageOS = "os"
|
||||
FieldPackageName = "name"
|
||||
FieldPackageVersion = "version"
|
||||
FieldPackageNextVersion = "nextVersion"
|
||||
FieldPackagePreviousVersion = "previousVersion"
|
||||
|
||||
insertPackagesBatchSize = 5
|
||||
)
|
||||
|
||||
var FieldPackageAll = []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackageNextVersion, FieldPackagePreviousVersion}
|
||||
|
||||
// Package represents a package
|
||||
type Package struct {
|
||||
Node string `json:"-"`
|
||||
OS string
|
||||
Name string
|
||||
Version types.Version
|
||||
NextVersionNode string `json:"-"`
|
||||
PreviousVersionNode string `json:"-"`
|
||||
}
|
||||
|
||||
// GetNode returns a unique identifier for the graph node
|
||||
// Requires the key fields: OS, Name, Version
|
||||
func (p *Package) GetNode() string {
|
||||
return FieldPackageIsValue + ":" + utils.Hash(p.Key())
|
||||
}
|
||||
|
||||
// Key returns a unique string defining p
|
||||
// Requires the key fields: OS, Name, Version
|
||||
func (p *Package) Key() string {
|
||||
return p.OS + ":" + p.Name + ":" + p.Version.String()
|
||||
}
|
||||
|
||||
// Branch returns a unique string defining the Branch of p (os, name)
|
||||
// Requires the key fields: OS, Name
|
||||
func (p *Package) Branch() string {
|
||||
return p.OS + ":" + p.Name
|
||||
}
|
||||
|
||||
// AbstractPackage is a package that abstracts types.MaxVersion by using
|
||||
// an AllVersions boolean field and renaming Version to BeforeVersion,
|
||||
// which makes more sense when used with a Vulnerability
|
||||
type AbstractPackage struct {
|
||||
OS string
|
||||
Name string
|
||||
|
||||
AllVersions bool
|
||||
BeforeVersion types.Version
|
||||
}
|
||||
|
||||
// PackagesToAbstractPackages converts several Packages to AbstractPackages
|
||||
func PackagesToAbstractPackages(packages []*Package) (abstractPackages []*AbstractPackage) {
|
||||
for _, p := range packages {
|
||||
ap := &AbstractPackage{OS: p.OS, Name: p.Name}
|
||||
if p.Version != types.MaxVersion {
|
||||
ap.BeforeVersion = p.Version
|
||||
} else {
|
||||
ap.AllVersions = true
|
||||
}
|
||||
abstractPackages = append(abstractPackages, ap)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AbstractPackagesToPackages converts several AbstractPackages to Packages
|
||||
func AbstractPackagesToPackages(abstractPackages []*AbstractPackage) (packages []*Package) {
|
||||
for _, ap := range abstractPackages {
|
||||
p := &Package{OS: ap.OS, Name: ap.Name}
|
||||
if ap.AllVersions {
|
||||
p.Version = types.MaxVersion
|
||||
} else {
|
||||
p.Version = ap.BeforeVersion
|
||||
}
|
||||
packages = append(packages, p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// InsertPackages inserts several packages in the database in one transaction
|
||||
// Packages are stored in linked lists, one per Branch. Each linked list has a start package and an end package defined with types.MinVersion/types.MaxVersion versions
|
||||
//
|
||||
// OS, Name and Version fields have to be specified.
|
||||
// If the insertion is successful, the Node field is filled and represents the graph node identifier.
|
||||
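// For example, inserting a single package (version "1.0") on an empty branch
// produces three linked nodes, MinVersion -> 1.0 -> MaxVersion, chained
// together through the FieldPackageNextVersion predicate.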
func InsertPackages(packageParameters []*Package) error {
|
||||
if len(packageParameters) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify parameters
|
||||
for _, pkg := range packageParameters {
|
||||
if pkg.OS == "" || pkg.Name == "" || pkg.Version.String() == "" {
|
||||
log.Warningf("could not insert an incomplete package [OS: %s, Name: %s, Version: %s]", pkg.OS, pkg.Name, pkg.Version)
|
||||
return cerrors.NewBadRequestError("could not insert an incomplete package")
|
||||
}
|
||||
}
|
||||
|
||||
// Create required data structures
|
||||
t := cayley.NewTransaction()
|
||||
packagesInTransaction := 0
|
||||
cachedPackagesByBranch := make(map[string]map[string]*Package)
|
||||
|
||||
// Iterate over all the packages we need to insert
|
||||
for _, packageParameter := range packageParameters {
|
||||
branch := packageParameter.Branch()
|
||||
|
||||
// Does the package already exist?
|
||||
if _, branchExistsLocally := cachedPackagesByBranch[branch]; branchExistsLocally {
|
||||
if pkg, _ := cachedPackagesByBranch[branch][packageParameter.Key()]; pkg != nil {
|
||||
packageParameter.Node = pkg.Node
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
cachedPackagesByBranch[branch] = make(map[string]*Package)
|
||||
}
|
||||
pkg, err := FindOnePackage(packageParameter.OS, packageParameter.Name, packageParameter.Version, []string{})
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
if pkg != nil {
|
||||
packageParameter.Node = pkg.Node
|
||||
continue
|
||||
}
|
||||
|
||||
// Get all packages of the same branch (both from local cache and database)
|
||||
branchPackages, err := FindAllPackagesByBranch(packageParameter.OS, packageParameter.Name, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion, FieldPackageNextVersion})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, p := range cachedPackagesByBranch[branch] {
|
||||
branchPackages = append(branchPackages, p)
|
||||
}
|
||||
|
||||
if len(branchPackages) == 0 {
|
||||
// The branch does not exist yet
|
||||
insertingStartPackage := packageParameter.Version == types.MinVersion
|
||||
insertingEndPackage := packageParameter.Version == types.MaxVersion
|
||||
|
||||
// Create and insert an end package
|
||||
endPackage := &Package{
|
||||
OS: packageParameter.OS,
|
||||
Name: packageParameter.Name,
|
||||
Version: types.MaxVersion,
|
||||
}
|
||||
endPackage.Node = endPackage.GetNode()
|
||||
cachedPackagesByBranch[branch][endPackage.Key()] = endPackage
|
||||
|
||||
t.AddQuad(cayley.Quad(endPackage.Node, FieldIs, FieldPackageIsValue, ""))
|
||||
t.AddQuad(cayley.Quad(endPackage.Node, FieldPackageOS, endPackage.OS, ""))
|
||||
t.AddQuad(cayley.Quad(endPackage.Node, FieldPackageName, endPackage.Name, ""))
|
||||
t.AddQuad(cayley.Quad(endPackage.Node, FieldPackageVersion, endPackage.Version.String(), ""))
|
||||
t.AddQuad(cayley.Quad(endPackage.Node, FieldPackageNextVersion, "", ""))
|
||||
|
||||
// Create the inserted package if it is different from a start/end package
|
||||
var newPackage *Package
|
||||
if !insertingStartPackage && !insertingEndPackage {
|
||||
newPackage = &Package{
|
||||
OS: packageParameter.OS,
|
||||
Name: packageParameter.Name,
|
||||
Version: packageParameter.Version,
|
||||
}
|
||||
newPackage.Node = newPackage.GetNode()
|
||||
cachedPackagesByBranch[branch][newPackage.Key()] = newPackage
|
||||
|
||||
t.AddQuad(cayley.Quad(newPackage.Node, FieldIs, FieldPackageIsValue, ""))
|
||||
t.AddQuad(cayley.Quad(newPackage.Node, FieldPackageOS, newPackage.OS, ""))
|
||||
t.AddQuad(cayley.Quad(newPackage.Node, FieldPackageName, newPackage.Name, ""))
|
||||
t.AddQuad(cayley.Quad(newPackage.Node, FieldPackageVersion, newPackage.Version.String(), ""))
|
||||
t.AddQuad(cayley.Quad(newPackage.Node, FieldPackageNextVersion, endPackage.Node, ""))
|
||||
|
||||
packageParameter.Node = newPackage.Node
|
||||
}
|
||||
|
||||
// Create and insert a start package
|
||||
startPackage := &Package{
|
||||
OS: packageParameter.OS,
|
||||
Name: packageParameter.Name,
|
||||
Version: types.MinVersion,
|
||||
}
|
||||
startPackage.Node = startPackage.GetNode()
|
||||
cachedPackagesByBranch[branch][startPackage.Key()] = startPackage
|
||||
|
||||
t.AddQuad(cayley.Quad(startPackage.Node, FieldIs, FieldPackageIsValue, ""))
|
||||
t.AddQuad(cayley.Quad(startPackage.Node, FieldPackageOS, startPackage.OS, ""))
|
||||
t.AddQuad(cayley.Quad(startPackage.Node, FieldPackageName, startPackage.Name, ""))
|
||||
t.AddQuad(cayley.Quad(startPackage.Node, FieldPackageVersion, startPackage.Version.String(), ""))
|
||||
if !insertingStartPackage && !insertingEndPackage {
|
||||
t.AddQuad(cayley.Quad(startPackage.Node, FieldPackageNextVersion, newPackage.Node, ""))
|
||||
} else {
|
||||
t.AddQuad(cayley.Quad(startPackage.Node, FieldPackageNextVersion, endPackage.Node, ""))
|
||||
}
|
||||
|
||||
// Set package node
|
||||
if insertingEndPackage {
|
||||
packageParameter.Node = endPackage.Node
|
||||
} else if insertingStartPackage {
|
||||
packageParameter.Node = startPackage.Node
|
||||
}
|
||||
} else {
|
||||
// The branch already exists
|
||||
|
||||
// Create the package
|
||||
newPackage := &Package{OS: packageParameter.OS, Name: packageParameter.Name, Version: packageParameter.Version}
|
||||
newPackage.Node = "package:" + utils.Hash(newPackage.Key())
|
||||
cachedPackagesByBranch[branch][newPackage.Key()] = newPackage
|
||||
packageParameter.Node = newPackage.Node
|
||||
|
||||
t.AddQuad(cayley.Quad(newPackage.Node, FieldIs, FieldPackageIsValue, ""))
|
||||
t.AddQuad(cayley.Quad(newPackage.Node, FieldPackageOS, newPackage.OS, ""))
|
||||
t.AddQuad(cayley.Quad(newPackage.Node, FieldPackageName, newPackage.Name, ""))
|
||||
t.AddQuad(cayley.Quad(newPackage.Node, FieldPackageVersion, newPackage.Version.String(), ""))
|
||||
|
||||
// Sort branchPackages by version (including the new package)
|
||||
branchPackages = append(branchPackages, newPackage)
|
||||
sort.Sort(ByVersion(branchPackages))
|
||||
|
||||
// Find the predecessor/successor nodes of the new package in the sorted slice
|
||||
newPackageKey := newPackage.Key()
|
||||
var pred, succ *Package
|
||||
var found bool
|
||||
for _, p := range branchPackages {
|
||||
equal := p.Key() == newPackageKey
|
||||
if !equal && !found {
|
||||
pred = p
|
||||
} else if found {
|
||||
succ = p
|
||||
break
|
||||
} else if equal {
|
||||
found = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
if pred == nil || succ == nil {
|
||||
log.Warningf("could not find any package predecessor/successor of: [OS: %s, Name: %s, Version: %s].", packageParameter.OS, packageParameter.Name, packageParameter.Version)
|
||||
return cerrors.NewBadRequestError("could not find package predecessor/successor")
|
||||
}
|
||||
|
||||
// Link the new package into the branch
|
||||
t.RemoveQuad(cayley.Quad(pred.Node, FieldPackageNextVersion, succ.Node, ""))
|
||||
|
||||
pred.NextVersionNode = newPackage.Node
|
||||
t.AddQuad(cayley.Quad(pred.Node, FieldPackageNextVersion, newPackage.Node, ""))
|
||||
|
||||
newPackage.NextVersionNode = succ.Node
|
||||
t.AddQuad(cayley.Quad(newPackage.Node, FieldPackageNextVersion, succ.Node, ""))
|
||||
}
|
||||
|
||||
packagesInTransaction = packagesInTransaction + 1
|
||||
|
||||
// Apply transaction
|
||||
if packagesInTransaction >= insertPackagesBatchSize {
|
||||
if err := store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (InsertPackages): %s", err)
|
||||
return ErrTransaction
|
||||
}
|
||||
|
||||
t = cayley.NewTransaction()
|
||||
cachedPackagesByBranch = make(map[string]map[string]*Package)
|
||||
packagesInTransaction = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Apply transaction
|
||||
if packagesInTransaction > 0 {
|
||||
if err := store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (InsertPackages): %s", err)
|
||||
return ErrTransaction
|
||||
}
|
||||
}
|
||||
|
||||
// Return
|
||||
return nil
|
||||
}
|
||||
|
||||
// FindOnePackage finds and returns a single package having the given OS, name and version, selecting the specified fields
|
||||
func FindOnePackage(OS, name string, version types.Version, selectedFields []string) (*Package, error) {
|
||||
packageParameter := Package{OS: OS, Name: name, Version: version}
|
||||
p, err := toPackages(cayley.StartPath(store, packageParameter.GetNode()).Has(FieldIs, FieldPackageIsValue), selectedFields)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(p) == 1 {
|
||||
return p[0], nil
|
||||
}
|
||||
if len(p) > 1 {
|
||||
log.Errorf("found multiple packages with identical data [OS: %s, Name: %s, Version: %s]", OS, name, version)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
return nil, cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
// FindAllPackagesByNodes finds and returns all packages given by their nodes, selecting the specified fields
|
||||
func FindAllPackagesByNodes(nodes []string, selectedFields []string) ([]*Package, error) {
|
||||
if len(nodes) == 0 {
|
||||
log.Warning("could not FindAllPackagesByNodes with an empty nodes array.")
|
||||
return []*Package{}, nil
|
||||
}
|
||||
|
||||
return toPackages(cayley.StartPath(store, nodes...).Has(FieldIs, FieldPackageIsValue), selectedFields)
|
||||
}
|
||||
|
||||
// FindAllPackagesByBranch finds and returns all packages that belong to the given Branch, selecting the specified fields
|
||||
func FindAllPackagesByBranch(OS, name string, selectedFields []string) ([]*Package, error) {
|
||||
return toPackages(cayley.StartPath(store, name).In(FieldPackageName).Has(FieldPackageOS, OS), selectedFields)
|
||||
}
|
||||
|
||||
// toPackages converts a path leading to one or multiple packages to Package structs, selecting the specified fields
|
||||
func toPackages(path *path.Path, selectedFields []string) ([]*Package, error) {
|
||||
var packages []*Package
|
||||
var err error
|
||||
|
||||
saveFields(path, selectedFields, []string{FieldPackagePreviousVersion})
|
||||
it, _ := path.BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
pkg := Package{Node: store.NameOf(it.Result())}
|
||||
for _, selectedField := range selectedFields {
|
||||
switch selectedField {
|
||||
case FieldPackageOS:
|
||||
pkg.OS = store.NameOf(tags[FieldPackageOS])
|
||||
case FieldPackageName:
|
||||
pkg.Name = store.NameOf(tags[FieldPackageName])
|
||||
case FieldPackageVersion:
|
||||
pkg.Version, err = types.NewVersion(store.NameOf(tags[FieldPackageVersion]))
|
||||
if err != nil {
|
||||
log.Warningf("could not parse version of package %s: %s", pkg.Node, err.Error())
|
||||
}
|
||||
case FieldPackageNextVersion:
|
||||
pkg.NextVersionNode = store.NameOf(tags[FieldPackageNextVersion])
|
||||
case FieldPackagePreviousVersion:
|
||||
pkg.PreviousVersionNode, err = toValue(cayley.StartPath(store, pkg.Node).In(FieldPackageNextVersion))
|
||||
if err != nil {
|
||||
log.Warningf("could not get previousVersion on package %s: %s.", pkg.Node, err.Error())
|
||||
return []*Package{}, ErrInconsistent
|
||||
}
|
||||
default:
|
||||
panic("unknown selectedField")
|
||||
}
|
||||
}
|
||||
packages = append(packages, &pkg)
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in toPackages: %s", it.Err())
|
||||
return []*Package{}, ErrBackendException
|
||||
}
|
||||
|
||||
return packages, nil
|
||||
}
|
||||
|
||||
// NextVersion finds and returns the package of the same branch that has a higher version number, selecting the specified fields
|
||||
// It requires that FieldPackageNextVersion field has been selected on p
|
||||
func (p *Package) NextVersion(selectedFields []string) (*Package, error) {
|
||||
if p.NextVersionNode == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
v, err := FindAllPackagesByNodes([]string{p.NextVersionNode}, selectedFields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(v) != 1 {
|
||||
log.Errorf("found multiple packages when getting next version of package %s", p.Node)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
return v[0], nil
|
||||
}
|
||||
|
||||
// NextVersions finds and returns all the packages of the same branch that have
|
||||
// a higher version number, selecting the specified fields
|
||||
// It requires that FieldPackageNextVersion field has been selected on p
|
||||
// The immediately higher version is listed first, the special end-of-Branch package is last, and p itself is not listed
|
||||
func (p *Package) NextVersions(selectedFields []string) ([]*Package, error) {
|
||||
var nextVersions []*Package
|
||||
|
||||
if !utils.Contains(FieldPackageNextVersion, selectedFields) {
|
||||
selectedFields = append(selectedFields, FieldPackageNextVersion)
|
||||
}
|
||||
|
||||
nextVersion, err := p.NextVersion(selectedFields)
|
||||
if err != nil {
|
||||
return []*Package{}, err
|
||||
}
|
||||
if nextVersion != nil {
|
||||
nextVersions = append(nextVersions, nextVersion)
|
||||
|
||||
nextNextVersions, err := nextVersion.NextVersions(selectedFields)
|
||||
if err != nil {
|
||||
return []*Package{}, err
|
||||
}
|
||||
nextVersions = append(nextVersions, nextNextVersions...)
|
||||
}
|
||||
|
||||
return nextVersions, nil
|
||||
}
|
||||
|
||||
// PreviousVersion finds and returns the package of the same branch that has an
|
||||
// immediate lower version number, selecting the specified fields
|
||||
// It requires that FieldPackagePreviousVersion field has been selected on p
|
||||
func (p *Package) PreviousVersion(selectedFields []string) (*Package, error) {
|
||||
if p.PreviousVersionNode == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
v, err := FindAllPackagesByNodes([]string{p.PreviousVersionNode}, selectedFields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(v) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(v) != 1 {
|
||||
log.Errorf("found multiple packages when getting previous version of package %s", p.Node)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
return v[0], nil
|
||||
}
|
||||
|
||||
// PreviousVersions finds and returns all the packages of the same branch that
|
||||
// have a lower version number, selecting the specified fields
|
||||
// It requires that the FieldPackagePreviousVersion field has been selected on p
|
||||
// The immediately lower version is listed first, the special start-of-Branch
|
||||
// package is last, and p itself is not listed
|
||||
func (p *Package) PreviousVersions(selectedFields []string) ([]*Package, error) {
|
||||
var previousVersions []*Package
|
||||
|
||||
if !utils.Contains(FieldPackagePreviousVersion, selectedFields) {
|
||||
selectedFields = append(selectedFields, FieldPackagePreviousVersion)
|
||||
}
|
||||
|
||||
previousVersion, err := p.PreviousVersion(selectedFields)
|
||||
if err != nil {
|
||||
return []*Package{}, err
|
||||
}
|
||||
if previousVersion != nil {
|
||||
previousVersions = append(previousVersions, previousVersion)
|
||||
|
||||
previousPreviousVersions, err := previousVersion.PreviousVersions(selectedFields)
|
||||
if err != nil {
|
||||
return []*Package{}, err
|
||||
}
|
||||
previousVersions = append(previousVersions, previousPreviousVersions...)
|
||||
}
|
||||
|
||||
return previousVersions, nil
|
||||
}
|
||||
|
||||
// ByVersion implements sort.Interface for []*Package based on the Version field
|
||||
// It uses github.com/quentin-m/dpkgcomp internally and makes use of types.MinVersion/types.MaxVersion
|
||||
type ByVersion []*Package
|
||||
|
||||
func (p ByVersion) Len() int { return len(p) }
|
||||
func (p ByVersion) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
func (p ByVersion) Less(i, j int) bool { return p[i].Version.Compare(p[j].Version) < 0 }
|
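A short, hedged usage sketch of the package API above. The import paths and the OS/name/version values are assumptions; package_test.go below exercises the same behaviour more thoroughly.

```go
package main

import (
	"fmt"
	"sort"

	"github.com/coreos/clair/database"    // assumed import path for this package
	"github.com/coreos/clair/utils/types" // version type used by Package
)

func main() {
	database.Open("memstore", "")
	defer database.Close()

	// Insert one concrete version; the branch sentinels are created implicitly.
	pkg := &database.Package{OS: "debian:8", Name: "openssl", Version: types.NewVersionUnsafe("1.0.1k")}
	if err := database.InsertPackages([]*database.Package{pkg}); err != nil {
		panic(err)
	}

	// The branch now holds MinVersion, 1.0.1k and MaxVersion, linked in order.
	branch, err := database.FindAllPackagesByBranch("debian:8", "openssl", database.FieldPackageAll)
	if err != nil {
		panic(err)
	}
	sort.Sort(database.ByVersion(branch))
	for _, p := range branch {
		fmt.Println(p.Version.String())
	}
}
```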
193
database/package_test.go
Normal file
@ -0,0 +1,193 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPackage(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
// Try to insert invalid packages
|
||||
for _, invalidPkg := range []*Package{
|
||||
&Package{OS: "", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")},
|
||||
&Package{OS: "testOS", Name: "", Version: types.NewVersionUnsafe("1.0")},
|
||||
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("")},
|
||||
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("bad version")},
|
||||
&Package{OS: "", Name: "", Version: types.NewVersionUnsafe("")},
|
||||
} {
|
||||
err := InsertPackages([]*Package{invalidPkg})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// Insert a package
|
||||
pkg1 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
|
||||
err := InsertPackages([]*Package{pkg1})
|
||||
if assert.Nil(t, err) {
|
||||
// Find the inserted package and verify its content
|
||||
pkg1b, err := FindOnePackage(pkg1.OS, pkg1.Name, pkg1.Version, FieldPackageAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, pkg1b) {
|
||||
assert.Equal(t, pkg1.Node, pkg1b.Node)
|
||||
assert.Equal(t, pkg1.OS, pkg1b.OS)
|
||||
assert.Equal(t, pkg1.Name, pkg1b.Name)
|
||||
assert.Equal(t, pkg1.Version, pkg1b.Version)
|
||||
}
|
||||
|
||||
// Find packages from the inserted branch and verify their content
|
||||
// (the first one should be a start package, the second one the inserted one and the third one the end package)
|
||||
pkgs1c, err := FindAllPackagesByBranch(pkg1.OS, pkg1.Name, FieldPackageAll)
|
||||
if assert.Nil(t, err) && assert.Equal(t, 3, len(pkgs1c)) {
|
||||
sort.Sort(ByVersion(pkgs1c))
|
||||
|
||||
assert.Equal(t, pkg1.OS, pkgs1c[0].OS)
|
||||
assert.Equal(t, pkg1.Name, pkgs1c[0].Name)
|
||||
assert.Equal(t, types.MinVersion, pkgs1c[0].Version)
|
||||
|
||||
assert.Equal(t, pkg1.OS, pkgs1c[1].OS)
|
||||
assert.Equal(t, pkg1.Name, pkgs1c[1].Name)
|
||||
assert.Equal(t, pkg1.Version, pkgs1c[1].Version)
|
||||
|
||||
assert.Equal(t, pkg1.OS, pkgs1c[2].OS)
|
||||
assert.Equal(t, pkg1.Name, pkgs1c[2].Name)
|
||||
assert.Equal(t, types.MaxVersion, pkgs1c[2].Version)
|
||||
}
|
||||
}
|
||||
|
||||
// Insert multiple packages in the same branch, one in another branch, insert local duplicates and database duplicates as well
|
||||
pkg2 := []*Package{
|
||||
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("0.8")},
|
||||
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("0.9")},
|
||||
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}, // Already present in the database
|
||||
&Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.1")},
|
||||
&Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}, // Another branch
|
||||
&Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}, // Local duplicates
|
||||
}
|
||||
nbInSameBranch := 4 + 2 // (start/end packages)
|
||||
|
||||
err = InsertPackages(shuffle(pkg2))
|
||||
if assert.Nil(t, err) {
|
||||
// Find packages from the inserted branch, verify their order and NextVersion / PreviousVersion
|
||||
pkgs2b, err := FindAllPackagesByBranch("testOS", "testpkg1", FieldPackageAll)
|
||||
if assert.Nil(t, err) && assert.Equal(t, nbInSameBranch, len(pkgs2b)) {
|
||||
sort.Sort(ByVersion(pkgs2b))
|
||||
|
||||
for i := 0; i < nbInSameBranch; i = i + 1 {
|
||||
if i == 0 {
|
||||
assert.Equal(t, types.MinVersion, pkgs2b[0].Version)
|
||||
} else if i < nbInSameBranch-2 {
|
||||
assert.Equal(t, pkg2[i].Version, pkgs2b[i+1].Version)
|
||||
|
||||
nv, err := pkgs2b[i+1].NextVersion(FieldPackageAll)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, pkgs2b[i+2], nv)
|
||||
|
||||
if i > 0 {
|
||||
pv, err := pkgs2b[i].PreviousVersion(FieldPackageAll)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, pkgs2b[i-1], pv)
|
||||
} else {
|
||||
pv, err := pkgs2b[i].PreviousVersion(FieldPackageAll)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, pv)
|
||||
}
|
||||
} else {
|
||||
assert.Equal(t, types.MaxVersion, pkgs2b[nbInSameBranch-1].Version)
|
||||
|
||||
nv, err := pkgs2b[nbInSameBranch-1].NextVersion(FieldPackageAll)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, nv)
|
||||
|
||||
pv, err := pkgs2b[i].PreviousVersion(FieldPackageAll)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, pkgs2b[i-1], pv)
|
||||
}
|
||||
}
|
||||
|
||||
// NextVersions
|
||||
nv, err := pkgs2b[0].NextVersions(FieldPackageAll)
|
||||
if assert.Nil(t, err) && assert.Len(t, nv, nbInSameBranch-1) {
|
||||
for i := 0; i < nbInSameBranch-1; i = i + 1 {
|
||||
if i < nbInSameBranch-2 {
|
||||
assert.Equal(t, pkg2[i].Version, nv[i].Version)
|
||||
} else {
|
||||
assert.Equal(t, types.MaxVersion, nv[i].Version)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PreviousVersions
|
||||
pv, err := pkgs2b[nbInSameBranch-1].PreviousVersions(FieldPackageAll)
|
||||
if assert.Nil(t, err) && assert.Len(t, pv, nbInSameBranch-1) {
|
||||
for i := 0; i < len(pv); i = i + 1 {
|
||||
assert.Equal(t, pkgs2b[len(pkgs2b)-i-2], pv[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that the one we added which was already present in the database has the same node value (meaning that we just fetched it actually)
|
||||
assert.Contains(t, pkg2, pkg1)
|
||||
}
|
||||
|
||||
// Insert duplicated latest packages directly, ensure only one is actually inserted. Then insert another package in the branch and ensure that its next version is the latest one
|
||||
pkg3a := &Package{OS: "testOS", Name: "testpkg3", Version: types.MaxVersion}
|
||||
pkg3b := &Package{OS: "testOS", Name: "testpkg3", Version: types.MaxVersion}
|
||||
pkg3c := &Package{OS: "testOS", Name: "testpkg3", Version: types.MaxVersion}
|
||||
err1 := InsertPackages([]*Package{pkg3a, pkg3b})
|
||||
err2 := InsertPackages([]*Package{pkg3c})
|
||||
if assert.Nil(t, err1) && assert.Nil(t, err2) {
|
||||
assert.Equal(t, pkg3a, pkg3b)
|
||||
assert.Equal(t, pkg3b, pkg3c)
|
||||
}
|
||||
pkg4 := Package{OS: "testOS", Name: "testpkg3", Version: types.NewVersionUnsafe("1.0")}
|
||||
InsertPackages([]*Package{&pkg4})
|
||||
pkgs34, _ := FindAllPackagesByBranch("testOS", "testpkg3", FieldPackageAll)
|
||||
if assert.Len(t, pkgs34, 3) {
|
||||
sort.Sort(ByVersion(pkgs34))
|
||||
assert.Equal(t, pkg4.Node, pkgs34[1].Node)
|
||||
assert.Equal(t, pkg3a.Node, pkgs34[2].Node)
|
||||
assert.Equal(t, pkg3a.Node, pkgs34[1].NextVersionNode)
|
||||
}
|
||||
|
||||
// Insert two identical packages but with "different" versions
|
||||
// The second version should be simplified to the first one
|
||||
// Therefore, we should just have three packages (the inserted one and the start/end packages of the branch)
|
||||
InsertPackages([]*Package{&Package{OS: "testOS", Name: "testdirtypkg", Version: types.NewVersionUnsafe("0.1")}})
|
||||
InsertPackages([]*Package{&Package{OS: "testOS", Name: "testdirtypkg", Version: types.NewVersionUnsafe("0:0.1")}})
|
||||
dirtypkgs, err := FindAllPackagesByBranch("testOS", "testdirtypkg", FieldPackageAll)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, dirtypkgs, 3)
|
||||
}
|
||||
|
||||
func shuffle(packageParameters []*Package) []*Package {
|
||||
rand.Seed(int64(time.Now().Nanosecond()))
|
||||
|
||||
sPackage := make([]*Package, len(packageParameters))
|
||||
copy(sPackage, packageParameters)
|
||||
|
||||
for i := len(sPackage) - 1; i > 0; i-- {
|
||||
j := rand.Intn(i)
|
||||
sPackage[i], sPackage[j] = sPackage[j], sPackage[i]
|
||||
}
|
||||
|
||||
return sPackage
|
||||
}
|
51
database/requests.go
Normal file
@ -0,0 +1,51 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import cerrors "github.com/coreos/clair/utils/errors"
|
||||
|
||||
// FindAllLayersIntroducingVulnerability finds and returns the list of layers
|
||||
// that introduce the given vulnerability (by its ID), selecting the specified fields
|
||||
func FindAllLayersIntroducingVulnerability(vulnerabilityID string, selectedFields []string) ([]*Layer, error) {
|
||||
// Find vulnerability
|
||||
vulnerability, err := FindOneVulnerability(vulnerabilityID, []string{FieldVulnerabilityFixedIn})
|
||||
if err != nil {
|
||||
return []*Layer{}, err
|
||||
}
|
||||
if vulnerability == nil {
|
||||
return []*Layer{}, cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
// Find FixedIn packages
|
||||
fixedInPackages, err := FindAllPackagesByNodes(vulnerability.FixedInNodes, []string{FieldPackagePreviousVersion})
|
||||
if err != nil {
|
||||
return []*Layer{}, err
|
||||
}
|
||||
|
||||
// Find all ancestor packages of the FixedIn packages (these are therefore vulnerable to the vulnerability)
|
||||
var vulnerablePackagesNodes []string
|
||||
for _, pkg := range fixedInPackages {
|
||||
previousVersions, err := pkg.PreviousVersions([]string{})
|
||||
if err != nil {
|
||||
return []*Layer{}, err
|
||||
}
|
||||
for _, version := range previousVersions {
|
||||
vulnerablePackagesNodes = append(vulnerablePackagesNodes, version.Node)
|
||||
}
|
||||
}
|
||||
|
||||
// Return all the layers that add these packages
|
||||
return FindAllLayersByAddedPackageNodes(vulnerablePackagesNodes, selectedFields)
|
||||
}
|
387
database/vulnerability.go
Normal file
@ -0,0 +1,387 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/google/cayley"
|
||||
"github.com/google/cayley/graph"
|
||||
"github.com/google/cayley/graph/path"
|
||||
)
|
||||
|
||||
const (
|
||||
FieldVulnerabilityIsValue = "vulnerability"
|
||||
FieldVulnerabilityID = "id"
|
||||
FieldVulnerabilityLink = "link"
|
||||
FieldVulnerabilityPriority = "priority"
|
||||
FieldVulnerabilityDescription = "description"
|
||||
FieldVulnerabilityFixedIn = "fixedIn"
|
||||
)
|
||||
|
||||
var FieldVulnerabilityAll = []string{FieldVulnerabilityID, FieldVulnerabilityLink, FieldVulnerabilityPriority, FieldVulnerabilityDescription, FieldVulnerabilityFixedIn}
|
||||
|
||||
// Vulnerability represents a vulnerability that is fixed in some Packages
|
||||
type Vulnerability struct {
|
||||
Node string `json:"-"`
|
||||
ID string
|
||||
Link string
|
||||
Priority types.Priority
|
||||
Description string `json:",omitempty"`
|
||||
FixedInNodes []string `json:"-"`
|
||||
}
|
||||
|
||||
// GetNode returns a unique identifier for the graph node
|
||||
// Requires the key field: ID
|
||||
func (v *Vulnerability) GetNode() string {
|
||||
return FieldVulnerabilityIsValue + ":" + utils.Hash(v.ID)
|
||||
}
|
||||
|
||||
// ToAbstractVulnerability converts a Vulnerability into an
|
||||
// AbstractVulnerability.
|
||||
func (v *Vulnerability) ToAbstractVulnerability() (*AbstractVulnerability, error) {
|
||||
// Find FixedIn packages.
|
||||
fixedInPackages, err := FindAllPackagesByNodes(v.FixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &AbstractVulnerability{
|
||||
ID: v.ID,
|
||||
Link: v.Link,
|
||||
Priority: v.Priority,
|
||||
Description: v.Description,
|
||||
AffectedPackages: PackagesToAbstractPackages(fixedInPackages),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AbstractVulnerability represents a Vulnerability as it is defined in the database
|
||||
// package, but directly exposes a list of AbstractPackages instead of
|
||||
// nodes pointing to packages.
|
||||
type AbstractVulnerability struct {
|
||||
ID string
|
||||
Link string
|
||||
Priority types.Priority
|
||||
Description string
|
||||
AffectedPackages []*AbstractPackage
|
||||
}
|
||||
|
||||
// ToVulnerability converts an abstractVulnerability into
|
||||
// a Vulnerability
|
||||
func (av *AbstractVulnerability) ToVulnerability(fixedInNodes []string) *Vulnerability {
|
||||
return &Vulnerability{
|
||||
ID: av.ID,
|
||||
Link: av.Link,
|
||||
Priority: av.Priority,
|
||||
Description: av.Description,
|
||||
FixedInNodes: fixedInNodes,
|
||||
}
|
||||
}
|
||||
|
||||
// InsertVulnerabilities inserts or updates several vulnerabilities in the database in one transaction
|
||||
// It ensures that a vulnerability can't be fixed by two packages belonging to the same Branch.
|
||||
// During an update, if the vulnerability was previously fixed by a version in a branch and a new package of that branch is specified, the previous one is deleted
|
||||
// Otherwise, it simply adds the defined packages; there is currently no way to remove affected packages.
|
||||
//
|
||||
// ID, Link, Priority and FixedInNodes fields have to be specified. Description is optional.
|
||||
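// A minimal usage sketch (all values are illustrative; the package must have been
// inserted beforehand so that its Node field is populated):
//
//	pkgs := []*Package{{OS: "debian:8", Name: "openssl", Version: types.NewVersionUnsafe("1.0.1e-2")}}
//	InsertPackages(pkgs)
//	notifications, err := InsertVulnerabilities([]*Vulnerability{{
//		ID:           "CVE-2015-XXXX",
//		Link:         "https://example.com/CVE-2015-XXXX",
//		Priority:     types.High,
//		FixedInNodes: []string{pkgs[0].Node},
//	}})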
func InsertVulnerabilities(vulnerabilities []*Vulnerability) ([]Notification, error) {
|
||||
if len(vulnerabilities) == 0 {
|
||||
return []Notification{}, nil
|
||||
}
|
||||
|
||||
// Create required data structure
|
||||
var err error
|
||||
t := cayley.NewTransaction()
|
||||
cachedVulnerabilities := make(map[string]*Vulnerability)
|
||||
newVulnerabilityNotifications := make(map[string]*NewVulnerabilityNotification)
|
||||
vulnerabilityPriorityIncreasedNotifications := make(map[string]*VulnerabilityPriorityIncreasedNotification)
|
||||
vulnerabilityPackageChangedNotifications := make(map[string]*VulnerabilityPackageChangedNotification)
|
||||
|
||||
// Iterate over all the vulnerabilities we need to insert/update
|
||||
for _, vulnerability := range vulnerabilities {
|
||||
// Does the vulnerability already exist?
|
||||
existingVulnerability, _ := cachedVulnerabilities[vulnerability.ID]
|
||||
if existingVulnerability == nil {
|
||||
existingVulnerability, err = FindOneVulnerability(vulnerability.ID, FieldVulnerabilityAll)
|
||||
if err != nil && err != cerrors.ErrNotFound {
|
||||
return []Notification{}, err
|
||||
}
|
||||
if existingVulnerability != nil {
|
||||
cachedVulnerabilities[vulnerability.ID] = existingVulnerability
|
||||
}
|
||||
}
|
||||
|
||||
// Don't allow inserting/updating a vulnerability which is fixed in two packages of the same branch
|
||||
if len(vulnerability.FixedInNodes) > 0 {
|
||||
fixedInPackages, err := FindAllPackagesByNodes(vulnerability.FixedInNodes, []string{FieldPackageOS, FieldPackageName})
|
||||
if err != nil {
|
||||
return []Notification{}, err
|
||||
}
|
||||
fixedInBranches := make(map[string]struct{})
|
||||
for _, fixedInPackage := range fixedInPackages {
|
||||
branch := fixedInPackage.Branch()
|
||||
if _, branchExists := fixedInBranches[branch]; branchExists {
|
||||
log.Warningf("could not insert vulnerability %s because it is fixed in two packages of the same branch", vulnerability.ID)
|
||||
return []Notification{}, cerrors.NewBadRequestError("could not insert a vulnerability which is fixed in two packages of the same branch")
|
||||
}
|
||||
fixedInBranches[branch] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Insert/Update vulnerability
|
||||
if existingVulnerability == nil {
|
||||
// The vulnerability does not exist, create it
|
||||
|
||||
// Verify parameters
|
||||
if vulnerability.ID == "" || vulnerability.Link == "" || vulnerability.Priority == "" {
|
||||
log.Warningf("could not insert an incomplete vulnerability [ID: %s, Link: %s, Priority: %s]", vulnerability.ID, vulnerability.Link, vulnerability.Priority)
|
||||
return []Notification{}, cerrors.NewBadRequestError("Could not insert an incomplete vulnerability")
|
||||
}
|
||||
if !vulnerability.Priority.IsValid() {
|
||||
log.Warningf("could not insert a vulnerability which has an invalid priority [ID: %s, Link: %s, Priority: %s]. Valid priorities are: %v.", vulnerability.ID, vulnerability.Link, vulnerability.Priority, types.Priorities)
|
||||
return []Notification{}, cerrors.NewBadRequestError("Could not insert a vulnerability which has an invalid priority")
|
||||
}
|
||||
if len(vulnerability.FixedInNodes) == 0 {
|
||||
log.Warningf("could not insert a vulnerability which doesn't affect any package [ID: %s].", vulnerability.ID)
|
||||
return []Notification{}, cerrors.NewBadRequestError("could not insert a vulnerability which doesn't affect any package")
|
||||
}
|
||||
|
||||
// Insert it
|
||||
vulnerability.Node = vulnerability.GetNode()
|
||||
cachedVulnerabilities[vulnerability.ID] = vulnerability
|
||||
|
||||
t.AddQuad(cayley.Quad(vulnerability.Node, FieldIs, FieldVulnerabilityIsValue, ""))
|
||||
t.AddQuad(cayley.Quad(vulnerability.Node, FieldVulnerabilityID, vulnerability.ID, ""))
|
||||
t.AddQuad(cayley.Quad(vulnerability.Node, FieldVulnerabilityLink, vulnerability.Link, ""))
|
||||
t.AddQuad(cayley.Quad(vulnerability.Node, FieldVulnerabilityPriority, string(vulnerability.Priority), ""))
|
||||
t.AddQuad(cayley.Quad(vulnerability.Node, FieldVulnerabilityDescription, vulnerability.Description, ""))
|
||||
for _, p := range vulnerability.FixedInNodes {
|
||||
t.AddQuad(cayley.Quad(vulnerability.Node, FieldVulnerabilityFixedIn, p, ""))
|
||||
}
|
||||
|
||||
// Add a notification
|
||||
newVulnerabilityNotifications[vulnerability.ID] = &NewVulnerabilityNotification{VulnerabilityID: vulnerability.ID}
|
||||
} else {
|
||||
// The vulnerability already exists, update it
|
||||
if vulnerability.Link != "" && existingVulnerability.Link != vulnerability.Link {
|
||||
t.RemoveQuad(cayley.Quad(existingVulnerability.Node, FieldVulnerabilityLink, existingVulnerability.Link, ""))
|
||||
t.AddQuad(cayley.Quad(existingVulnerability.Node, FieldVulnerabilityLink, vulnerability.Link, ""))
|
||||
existingVulnerability.Link = vulnerability.Link
|
||||
}
|
||||
if vulnerability.Priority != "" && vulnerability.Priority != types.Unknown && existingVulnerability.Priority != vulnerability.Priority {
|
||||
if !vulnerability.Priority.IsValid() {
|
||||
log.Warningf("could not update a vulnerability which has an invalid priority [ID: %s, Link: %s, Priority: %s]. Valid priorities are: %v.", vulnerability.ID, vulnerability.Link, vulnerability.Priority, types.Priorities)
|
||||
return []Notification{}, cerrors.NewBadRequestError("Could not update a vulnerability which has an invalid priority")
|
||||
}
|
||||
|
||||
// Add a notification about the priority change if the new priority is higher and the vulnerability is not new
|
||||
if vulnerability.Priority.Compare(existingVulnerability.Priority) > 0 {
|
||||
if _, newVulnerabilityNotificationExists := newVulnerabilityNotifications[vulnerability.ID]; !newVulnerabilityNotificationExists {
|
||||
// Is there already a priority change notification?
|
||||
if existingPriorityNotification, _ := vulnerabilityPriorityIncreasedNotifications[vulnerability.ID]; existingPriorityNotification != nil {
|
||||
// There is already a priority change notification; replace it but keep its old priority
|
||||
vulnerabilityPriorityIncreasedNotifications[vulnerability.ID] = &VulnerabilityPriorityIncreasedNotification{OldPriority: existingPriorityNotification.OldPriority, NewPriority: vulnerability.Priority, VulnerabilityID: existingVulnerability.ID}
|
||||
} else {
|
||||
// No previous notification, just add a new one
|
||||
vulnerabilityPriorityIncreasedNotifications[vulnerability.ID] = &VulnerabilityPriorityIncreasedNotification{OldPriority: existingVulnerability.Priority, NewPriority: vulnerability.Priority, VulnerabilityID: existingVulnerability.ID}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.RemoveQuad(cayley.Quad(existingVulnerability.Node, FieldVulnerabilityPriority, string(existingVulnerability.Priority), ""))
|
||||
t.AddQuad(cayley.Quad(existingVulnerability.Node, FieldVulnerabilityPriority, string(vulnerability.Priority), ""))
|
||||
existingVulnerability.Priority = vulnerability.Priority
|
||||
}
|
||||
if vulnerability.Description != "" && existingVulnerability.Description != vulnerability.Description {
|
||||
t.RemoveQuad(cayley.Quad(existingVulnerability.Node, FieldVulnerabilityDescription, existingVulnerability.Description, ""))
|
||||
t.AddQuad(cayley.Quad(existingVulnerability.Node, FieldVulnerabilityDescription, vulnerability.Description, ""))
|
||||
existingVulnerability.Description = vulnerability.Description
|
||||
}
|
||||
if len(vulnerability.FixedInNodes) > 0 && len(utils.CompareStringLists(vulnerability.FixedInNodes, existingVulnerability.FixedInNodes)) != 0 {
|
||||
var removedNodes []string
|
||||
var addedNodes []string
|
||||
|
||||
existingVulnerabilityFixedInPackages, err := FindAllPackagesByNodes(existingVulnerability.FixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion})
|
||||
if err != nil {
|
||||
return []Notification{}, err
|
||||
}
|
||||
vulnerabilityFixedInPackages, err := FindAllPackagesByNodes(vulnerability.FixedInNodes, []string{FieldPackageOS, FieldPackageName, FieldPackageVersion})
|
||||
if err != nil {
|
||||
return []Notification{}, err
|
||||
}
|
||||
|
||||
for _, p := range vulnerabilityFixedInPackages {
|
||||
// Any already existing link ?
|
||||
fixedInLinkAlreadyExists := false
|
||||
for _, ep := range existingVulnerabilityFixedInPackages {
|
||||
if *p == *ep {
|
||||
// This exact link already exists, we won't insert it again
|
||||
fixedInLinkAlreadyExists = true
|
||||
} else if p.Branch() == ep.Branch() {
|
||||
// A link to this package's branch already exists with a different version; we will delete it
|
||||
t.RemoveQuad(cayley.Quad(existingVulnerability.Node, FieldVulnerabilityFixedIn, ep.Node, ""))
|
||||
|
||||
var index int
|
||||
for i, n := range existingVulnerability.FixedInNodes {
|
||||
if n == ep.Node {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
existingVulnerability.FixedInNodes = append(existingVulnerability.FixedInNodes[:index], existingVulnerability.FixedInNodes[index+1:]...)
|
||||
removedNodes = append(removedNodes, ep.Node)
|
||||
}
|
||||
}
|
||||
|
||||
if !fixedInLinkAlreadyExists {
|
||||
t.AddQuad(cayley.Quad(existingVulnerability.Node, FieldVulnerabilityFixedIn, p.Node, ""))
|
||||
existingVulnerability.FixedInNodes = append(existingVulnerability.FixedInNodes, p.Node)
|
||||
addedNodes = append(addedNodes, p.Node)
|
||||
}
|
||||
}
|
||||
|
||||
// Add notification about the FixedIn modification if the vulnerability is not new
|
||||
if len(removedNodes) > 0 || len(addedNodes) > 0 {
|
||||
if _, newVulnerabilityNotificationExists := newVulnerabilityNotifications[vulnerability.ID]; !newVulnerabilityNotificationExists {
|
||||
// Is there already a VulnerabilityPackageChangedNotification?
|
||||
if existingPackageNotification, _ := vulnerabilityPackageChangedNotifications[vulnerability.ID]; existingPackageNotification != nil {
|
||||
// There is already a package change notification; add the package modifications to it
|
||||
existingPackageNotification.AddedFixedInNodes = append(existingPackageNotification.AddedFixedInNodes, addedNodes...)
|
||||
existingPackageNotification.RemovedFixedInNodes = append(existingPackageNotification.RemovedFixedInNodes, removedNodes...)
|
||||
} else {
|
||||
// No previous notification, just add a new one
|
||||
vulnerabilityPackageChangedNotifications[vulnerability.ID] = &VulnerabilityPackageChangedNotification{VulnerabilityID: vulnerability.ID, AddedFixedInNodes: addedNodes, RemovedFixedInNodes: removedNodes}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply transaction
|
||||
if err = store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (InsertVulnerabilities): %s", err)
|
||||
return []Notification{}, ErrTransaction
|
||||
}
|
||||
|
||||
// Group all notifications
|
||||
var allNotifications []Notification
|
||||
for _, notification := range newVulnerabilityNotifications {
|
||||
allNotifications = append(allNotifications, notification)
|
||||
}
|
||||
for _, notification := range vulnerabilityPriorityIncreasedNotifications {
|
||||
allNotifications = append(allNotifications, notification)
|
||||
}
|
||||
for _, notification := range vulnerabilityPackageChangedNotifications {
|
||||
allNotifications = append(allNotifications, notification)
|
||||
}
|
||||
|
||||
return allNotifications, nil
|
||||
}
|
||||
|
||||
// DeleteVulnerability deletes the vulnerability having the given ID
|
||||
func DeleteVulnerability(id string) error {
|
||||
vulnerability, err := FindOneVulnerability(id, FieldVulnerabilityAll)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t := cayley.NewTransaction()
|
||||
t.RemoveQuad(cayley.Quad(vulnerability.Node, FieldVulnerabilityID, vulnerability.ID, ""))
|
||||
t.RemoveQuad(cayley.Quad(vulnerability.Node, FieldVulnerabilityLink, vulnerability.Link, ""))
|
||||
t.RemoveQuad(cayley.Quad(vulnerability.Node, FieldVulnerabilityPriority, string(vulnerability.Priority), ""))
|
||||
t.RemoveQuad(cayley.Quad(vulnerability.Node, FieldVulnerabilityDescription, vulnerability.Description, ""))
|
||||
for _, p := range vulnerability.FixedInNodes {
|
||||
t.RemoveQuad(cayley.Quad(vulnerability.Node, FieldVulnerabilityFixedIn, p, ""))
|
||||
}
|
||||
|
||||
if err := store.ApplyTransaction(t); err != nil {
|
||||
log.Errorf("failed transaction (DeleteVulnerability): %s", err)
|
||||
return ErrTransaction
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FindOneVulnerability finds and returns a single vulnerability having the given ID selecting the specified fields
|
||||
func FindOneVulnerability(id string, selectedFields []string) (*Vulnerability, error) {
|
||||
t := &Vulnerability{ID: id}
|
||||
v, err := toVulnerabilities(cayley.StartPath(store, t.GetNode()).Has(FieldIs, FieldVulnerabilityIsValue), selectedFields)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(v) == 1 {
|
||||
return v[0], nil
|
||||
}
|
||||
if len(v) > 1 {
|
||||
log.Errorf("found multiple vulnerabilities with identical ID [ID: %s]", id)
|
||||
return nil, ErrInconsistent
|
||||
}
|
||||
return nil, cerrors.ErrNotFound
|
||||
}
|
||||
|
||||
// FindAllVulnerabilitiesByFixedIn finds and returns all vulnerabilities that are fixed in the given packages (specified by their nodes), selecting the specified fields
|
||||
func FindAllVulnerabilitiesByFixedIn(nodes []string, selectedFields []string) ([]*Vulnerability, error) {
|
||||
if len(nodes) == 0 {
|
||||
log.Warning("Could not FindAllVulnerabilitiesByFixedIn with an empty nodes array.")
|
||||
return []*Vulnerability{}, nil
|
||||
}
|
||||
return toVulnerabilities(cayley.StartPath(store, nodes...).In(FieldVulnerabilityFixedIn), selectedFields)
|
||||
}
|
||||
|
||||
// toVulnerabilities converts a path leading to one or multiple vulnerabilities to Vulnerability structs, selecting the specified fields
|
||||
func toVulnerabilities(path *path.Path, selectedFields []string) ([]*Vulnerability, error) {
|
||||
var vulnerabilities []*Vulnerability
|
||||
|
||||
saveFields(path, selectedFields, []string{FieldVulnerabilityFixedIn})
|
||||
it, _ := path.BuildIterator().Optimize()
|
||||
defer it.Close()
|
||||
for cayley.RawNext(it) {
|
||||
tags := make(map[string]graph.Value)
|
||||
it.TagResults(tags)
|
||||
|
||||
vulnerability := Vulnerability{Node: store.NameOf(it.Result())}
|
||||
for _, selectedField := range selectedFields {
|
||||
switch selectedField {
|
||||
case FieldVulnerabilityID:
|
||||
vulnerability.ID = store.NameOf(tags[FieldVulnerabilityID])
|
||||
case FieldVulnerabilityLink:
|
||||
vulnerability.Link = store.NameOf(tags[FieldVulnerabilityLink])
|
||||
case FieldVulnerabilityPriority:
|
||||
vulnerability.Priority = types.Priority(store.NameOf(tags[FieldVulnerabilityPriority]))
|
||||
case FieldVulnerabilityDescription:
|
||||
vulnerability.Description = store.NameOf(tags[FieldVulnerabilityDescription])
|
||||
case FieldVulnerabilityFixedIn:
|
||||
var err error
|
||||
vulnerability.FixedInNodes, err = toValues(cayley.StartPath(store, vulnerability.Node).Out(FieldVulnerabilityFixedIn))
|
||||
if err != nil {
|
||||
log.Errorf("could not get fixedIn on vulnerability %s: %s.", vulnerability.Node, err.Error())
|
||||
return []*Vulnerability{}, err
|
||||
}
|
||||
default:
|
||||
panic("unknown selectedField")
|
||||
}
|
||||
}
|
||||
vulnerabilities = append(vulnerabilities, &vulnerability)
|
||||
}
|
||||
if it.Err() != nil {
|
||||
log.Errorf("failed query in toVulnerabilities: %s", it.Err())
|
||||
return []*Vulnerability{}, ErrBackendException
|
||||
}
|
||||
|
||||
return vulnerabilities, nil
|
||||
}
|
243
database/vulnerability_test.go
Normal file
@ -0,0 +1,243 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestVulnerability(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
// Insert invalid vulnerabilities
|
||||
for _, vulnerability := range []Vulnerability{
|
||||
Vulnerability{ID: "", Link: "link1", Priority: types.Medium, FixedInNodes: []string{"pkg1"}},
|
||||
Vulnerability{ID: "test1", Link: "", Priority: types.Medium, FixedInNodes: []string{"pkg1"}},
|
||||
Vulnerability{ID: "test1", Link: "link1", Priority: "InvalidPriority", FixedInNodes: []string{"pkg1"}},
|
||||
Vulnerability{ID: "test1", Link: "link1", Priority: types.Medium, FixedInNodes: []string{}},
|
||||
} {
|
||||
_, err := InsertVulnerabilities([]*Vulnerability{&vulnerability})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// Some data
|
||||
vuln1 := &Vulnerability{ID: "test1", Link: "link1", Priority: types.Medium, Description: "testDescription1", FixedInNodes: []string{"pkg1"}}
|
||||
vuln2 := &Vulnerability{ID: "test2", Link: "link2", Priority: types.High, Description: "testDescription2", FixedInNodes: []string{"pkg1", "pkg2"}}
|
||||
vuln3 := &Vulnerability{ID: "test3", Link: "link3", Priority: types.High, FixedInNodes: []string{"pkg3"}} // Empty description
|
||||
|
||||
// Insert some vulnerabilities
|
||||
_, err := InsertVulnerabilities([]*Vulnerability{vuln1, vuln2, vuln3})
|
||||
if assert.Nil(t, err) {
|
||||
// Find one of the vulnerabilities we just inserted and verify its content
|
||||
v1, err := FindOneVulnerability(vuln1.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v1) {
|
||||
assert.Equal(t, vuln1.ID, v1.ID)
|
||||
assert.Equal(t, vuln1.Link, v1.Link)
|
||||
assert.Equal(t, vuln1.Priority, v1.Priority)
|
||||
assert.Equal(t, vuln1.Description, v1.Description)
|
||||
if assert.Len(t, v1.FixedInNodes, 1) {
|
||||
assert.Equal(t, vuln1.FixedInNodes[0], v1.FixedInNodes[0])
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that vulnerabilities with empty descriptions work as well
|
||||
v3, err := FindOneVulnerability(vuln3.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v3) {
|
||||
assert.Equal(t, vuln3.Description, v3.Description)
|
||||
}
|
||||
|
||||
// Find vulnerabilities by fixed packages
|
||||
vulnsFixedInPkg2AndPkg3, err := FindAllVulnerabilitiesByFixedIn([]string{"pkg2", "pkg3"}, FieldVulnerabilityAll)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, vulnsFixedInPkg2AndPkg3, 2)
|
||||
|
||||
// Delete vulnerability
|
||||
if assert.Nil(t, DeleteVulnerability(vuln1.ID)) {
|
||||
v1, err := FindOneVulnerability(vuln1.ID, FieldVulnerabilityAll)
|
||||
assert.Equal(t, cerrors.ErrNotFound, err)
|
||||
assert.Nil(t, v1)
|
||||
}
|
||||
}
|
||||
|
||||
// Update a vulnerability and verify its new content
|
||||
pkg1 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
|
||||
InsertPackages([]*Package{pkg1})
|
||||
vuln5 := &Vulnerability{ID: "test5", Link: "link5", Priority: types.Medium, Description: "testDescription5", FixedInNodes: []string{pkg1.Node}}
|
||||
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{vuln5})
|
||||
if assert.Nil(t, err) {
|
||||
// Partial updates
|
||||
// # Just a field update
|
||||
vuln5b := &Vulnerability{ID: "test5", Priority: types.High}
|
||||
_, err := InsertVulnerabilities([]*Vulnerability{vuln5b})
|
||||
if assert.Nil(t, err) {
|
||||
v5b, err := FindOneVulnerability(vuln5b.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v5b) {
|
||||
assert.Equal(t, vuln5b.ID, v5b.ID)
|
||||
assert.Equal(t, vuln5b.Priority, v5b.Priority)
|
||||
|
||||
if assert.Len(t, v5b.FixedInNodes, 1) {
|
||||
assert.Contains(t, v5b.FixedInNodes, pkg1.Node)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// # Just a field update, twice in the same transaction
|
||||
vuln5b1 := &Vulnerability{ID: "test5", Link: "http://foo.bar"}
|
||||
vuln5b2 := &Vulnerability{ID: "test5", Link: "http://bar.foo"}
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{vuln5b1, vuln5b2})
|
||||
if assert.Nil(t, err) {
|
||||
v5b2, err := FindOneVulnerability(vuln5b2.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v5b2) {
|
||||
assert.Equal(t, vuln5b2.Link, v5b2.Link)
|
||||
}
|
||||
}
|
||||
|
||||
// # All fields except fixedIn update
|
||||
vuln5c := &Vulnerability{ID: "test5", Link: "link5c", Priority: types.Critical, Description: "testDescription5c"}
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{vuln5c})
|
||||
if assert.Nil(t, err) {
|
||||
v5c, err := FindOneVulnerability(vuln5c.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v5c) {
|
||||
assert.Equal(t, vuln5c.ID, v5c.ID)
|
||||
assert.Equal(t, vuln5c.Link, v5c.Link)
|
||||
assert.Equal(t, vuln5c.Priority, v5c.Priority)
|
||||
assert.Equal(t, vuln5c.Description, v5c.Description)
|
||||
|
||||
if assert.Len(t, v5c.FixedInNodes, 1) {
|
||||
assert.Contains(t, v5c.FixedInNodes, pkg1.Node)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Complete update
|
||||
pkg2 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.1")}
|
||||
pkg3 := &Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}
|
||||
InsertPackages([]*Package{pkg2, pkg3})
|
||||
vuln5d := &Vulnerability{ID: "test5", Link: "link5d", Priority: types.Low, Description: "testDescription5d", FixedInNodes: []string{pkg2.Node, pkg3.Node}}
|
||||
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{vuln5d})
|
||||
if assert.Nil(t, err) {
|
||||
v5d, err := FindOneVulnerability(vuln5d.ID, FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.NotNil(t, v5d) {
|
||||
assert.Equal(t, vuln5d.ID, v5d.ID)
|
||||
assert.Equal(t, vuln5d.Link, v5d.Link)
|
||||
assert.Equal(t, vuln5d.Priority, v5d.Priority)
|
||||
assert.Equal(t, vuln5d.Description, v5d.Description)
|
||||
|
||||
// Here, we ensure that a vulnerability can only be fixed by one package of a given branch at a given time
|
||||
// And that we can add new fixed packages as well
|
||||
if assert.Len(t, v5d.FixedInNodes, 2) {
|
||||
assert.NotContains(t, v5d.FixedInNodes, pkg1.Node)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create and update a vulnerability whose fixed packages belong to the same branch, in the same batch
|
||||
pkg1 = &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
|
||||
pkg1b := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.1")}
|
||||
InsertPackages([]*Package{pkg1, pkg1b})
|
||||
// # A vulnerability can't be inserted if fixed by two packages of the same branch
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{&Vulnerability{ID: "test6", Link: "link6", Priority: types.Medium, Description: "testDescription6", FixedInNodes: []string{pkg1.Node, pkg1b.Node}}})
|
||||
assert.Error(t, err)
|
||||
// # Two updates of the same vulnerability in the same batch with packages of the same branch
|
||||
pkg0 := &Package{OS: "testOS", Name: "testpkg0", Version: types.NewVersionUnsafe("1.0")}
|
||||
InsertPackages([]*Package{pkg0})
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{&Vulnerability{ID: "test7", Link: "link7", Priority: types.Medium, Description: "testDescription7", FixedInNodes: []string{pkg0.Node}}})
|
||||
if assert.Nil(t, err) {
|
||||
vuln7b := &Vulnerability{ID: "test7", FixedInNodes: []string{pkg1.Node}}
|
||||
vuln7c := &Vulnerability{ID: "test7", FixedInNodes: []string{pkg1b.Node}}
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{vuln7b, vuln7c})
|
||||
if assert.Nil(t, err) {
|
||||
v7, err := FindOneVulnerability("test7", FieldVulnerabilityAll)
|
||||
if assert.Nil(t, err) && assert.Len(t, v7.FixedInNodes, 2) {
|
||||
assert.Contains(t, v7.FixedInNodes, pkg0.Node)
|
||||
assert.NotContains(t, v7.FixedInNodes, pkg1.Node)
|
||||
assert.Contains(t, v7.FixedInNodes, pkg1b.Node)
|
||||
}
|
||||
|
||||
// # A vulnerability can't be updated if fixed by two packages of the same branch
|
||||
_, err = InsertVulnerabilities([]*Vulnerability{&Vulnerability{ID: "test7", FixedInNodes: []string{pkg1.Node, pkg1b.Node}}})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertVulnerabilityNotifications(t *testing.T) {
|
||||
Open("memstore", "")
|
||||
defer Close()
|
||||
|
||||
pkg1 := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.0")}
|
||||
pkg1b := &Package{OS: "testOS", Name: "testpkg1", Version: types.NewVersionUnsafe("1.2")}
|
||||
pkg2 := &Package{OS: "testOS", Name: "testpkg2", Version: types.NewVersionUnsafe("1.0")}
|
||||
InsertPackages([]*Package{pkg1, pkg1b, pkg2})
|
||||
|
||||
// NewVulnerabilityNotification
|
||||
vuln1 := &Vulnerability{ID: "test1", Link: "link1", Priority: types.Medium, Description: "testDescription1", FixedInNodes: []string{pkg1.Node}}
|
||||
vuln2 := &Vulnerability{ID: "test2", Link: "link2", Priority: types.High, Description: "testDescription2", FixedInNodes: []string{pkg1.Node, pkg2.Node}}
|
||||
vuln1b := &Vulnerability{ID: "test1", Priority: types.High, FixedInNodes: []string{"pkg3"}}
|
||||
notifications, err := InsertVulnerabilities([]*Vulnerability{vuln1, vuln2, vuln1b})
|
||||
if assert.Nil(t, err) {
|
||||
// We should only have two NewVulnerabilityNotification notifications: one for test1 and one for test2
|
||||
// We should not have a VulnerabilityPriorityIncreasedNotification or a VulnerabilityPackageChangedNotification
|
||||
// for test1 because it is in the same batch
|
||||
if assert.Len(t, notifications, 2) {
|
||||
for _, n := range notifications {
|
||||
_, ok := n.(*NewVulnerabilityNotification)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// VulnerabilityPriorityIncreasedNotification
|
||||
vuln1c := &Vulnerability{ID: "test1", Priority: types.Critical}
|
||||
notifications, err = InsertVulnerabilities([]*Vulnerability{vuln1c})
|
||||
if assert.Nil(t, err) {
|
||||
if assert.Len(t, notifications, 1) {
|
||||
if nn, ok := notifications[0].(*VulnerabilityPriorityIncreasedNotification); assert.True(t, ok) {
|
||||
assert.Equal(t, vuln1b.Priority, nn.OldPriority)
|
||||
assert.Equal(t, vuln1c.Priority, nn.NewPriority)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
notifications, err = InsertVulnerabilities([]*Vulnerability{&Vulnerability{ID: "test1", Priority: types.Low}})
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, notifications, 0)
|
||||
|
||||
// VulnerabilityPackageChangedNotification
|
||||
vuln1e := &Vulnerability{ID: "test1", FixedInNodes: []string{pkg1b.Node}}
|
||||
vuln1f := &Vulnerability{ID: "test1", FixedInNodes: []string{pkg2.Node}}
|
||||
notifications, err = InsertVulnerabilities([]*Vulnerability{vuln1e, vuln1f})
|
||||
if assert.Nil(t, err) {
|
||||
if assert.Len(t, notifications, 1) {
|
||||
if nn, ok := notifications[0].(*VulnerabilityPackageChangedNotification); assert.True(t, ok) {
|
||||
// Here, we say that pkg1b fixes the vulnerability, but as pkg1b is in
|
||||
// the same branch as pkg1, pkg1 should be removed and pkg1b added
|
||||
// We also add pkg2 as fixed
|
||||
assert.Contains(t, nn.AddedFixedInNodes, pkg1b.Node)
|
||||
assert.Contains(t, nn.RemovedFixedInNodes, pkg1.Node)
|
||||
|
||||
assert.Contains(t, nn.AddedFixedInNodes, pkg2.Node)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
760
docs/API.md
Normal file
@ -0,0 +1,760 @@
|
||||
# General
|
||||
|
||||
## Fetch API Version
|
||||
|
||||
It returns the versions of the API and the layer processing engine.
|
||||
|
||||
GET /v1/versions
|
||||
|
||||
* The versions are integers.
|
||||
* The API version number is raised each time there is a structural change.
|
||||
* The Engine version is increased when a new layer analysis could find new
|
||||
relevant data.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s 127.0.0.1:6060/v1/versions | python -m json.tool
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"APIVersion": "1",
|
||||
"EngineVersion": "1"
|
||||
}
|
||||
```
|
||||
|
||||
## Fetch Health status
|
||||
|
||||
GET /v1/health
|
||||
|
||||
Returns 200 if essential services are healthy (i.e. the database) and 503 otherwise.
|
||||
|
||||
This call is also available on the API port + 1, without any security, allowing
|
||||
external monitoring systems to easily access it.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s 127.0.0.1:6060/v1/health | python -m json.tool
|
||||
```
|
||||
|
||||
```
|
||||
curl -s 127.0.0.1:6061/ | python -m json.tool
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"database":{
|
||||
"IsHealthy":true
|
||||
},
|
||||
"notifier":{
|
||||
"IsHealthy":true,
|
||||
"Details":{
|
||||
"QueueSize":0
|
||||
}
|
||||
},
|
||||
"updater":{
|
||||
"IsHealthy":true,
|
||||
"Details":{
|
||||
"HealthIdentifier":"cf65a8f6-425c-4a9c-87fe-f59ddf75fc87",
|
||||
"HealthLockOwner":"1e7fce65-ee67-4ca5-b2e9-61e9f5e0d3ed",
|
||||
"LatestSuccessfulUpdate":"2015-09-30T14:47:47Z",
|
||||
"ConsecutiveLocalFailures":0
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 503 Service unavailable
|
||||
{
|
||||
"database":{
|
||||
"IsHealthy":false
|
||||
},
|
||||
"notifier":{
|
||||
"IsHealthy":true,
|
||||
"Details":{
|
||||
"QueueSize":0
|
||||
}
|
||||
},
|
||||
"updater":{
|
||||
"IsHealthy":true,
|
||||
"Details":{
|
||||
"HealthIdentifier":"cf65a8f6-425c-4a9c-87fe-f59ddf75fc87",
|
||||
"HealthLockOwner":"1e7fce65-ee67-4ca5-b2e9-61e9f5e0d3ed",
|
||||
"LatestSuccessfulUpdate":"2015-09-30T14:47:47Z",
|
||||
"ConsecutiveLocalFailures":0
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
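Because the unauthenticated copy of this endpoint reports health solely through its status code, an external probe can stay very small. A possible Go sketch (the address comes from the examples above; the timeout and exit codes are assumptions):

```
package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	// 6061 is the security-less health port used in the example above (API port + 1).
	resp, err := client.Get("http://127.0.0.1:6061/")
	if err != nil {
		fmt.Println("unhealthy:", err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		fmt.Println("unhealthy: status", resp.StatusCode)
		os.Exit(1)
	}
	fmt.Println("healthy")
}
```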
|
||||
# Layers
|
||||
|
||||
## Insert a new Layer
|
||||
|
||||
It processes and inserts a new Layer in the database.
|
||||
|
||||
POST /v1/layers
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|Path|String|Absolute path or HTTP link pointing to the Layer's tar file|
|
||||
|ParentID|String|(Optional) Unique ID of the Layer's parent|
|
||||
|
||||
If the Layer has no parent, the ParentID field should be omitted or empty.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s -H "Content-Type: application/json" -X POST -d \
|
||||
'{
|
||||
"ID": "39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8",
|
||||
"Path": "https://layers_storage/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8.tar",
|
||||
"ParentID": "df2a0347c9d081fa05ecb83669dcae5830c67b0676a6d6358218e55d8a45969c"
|
||||
}' \
|
||||
127.0.0.1:6060/v1/layers
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
If the layer has been successfully processed, the version of the engine which processed it is returned.
|
||||
|
||||
```
|
||||
HTTP/1.1 201 Created
|
||||
{
|
||||
"Version": "1"
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 400 Bad Request
|
||||
{
|
||||
"Message": "Layer 39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8's parent (df2a0347c9d081fa05ecb83669dcae5830c67b0676a6d6358218e55d8a45969c) is unknown."
|
||||
}
|
||||
```
|
||||
|
||||
It could also return a `415 Unsupported Media Type` response with a `Message` if the request content is not valid JSON.
|
||||
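The same request can also be issued programmatically. A Go sketch (the layer IDs and storage URL are the illustrative values from the example above; error handling is minimal):

```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// layerRequest mirrors the parameters documented above.
type layerRequest struct {
	ID       string
	Path     string
	ParentID string `json:",omitempty"`
}

func main() {
	body, err := json.Marshal(layerRequest{
		ID:       "39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8",
		Path:     "https://layers_storage/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8.tar",
		ParentID: "df2a0347c9d081fa05ecb83669dcae5830c67b0676a6d6358218e55d8a45969c",
	})
	if err != nil {
		panic(err)
	}
	resp, err := http.Post("http://127.0.0.1:6060/v1/layers", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // "201 Created" on success
}
```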
|
||||
## Get a Layer's operating system
|
||||
|
||||
It returns the operating system of a given Layer.
|
||||
|
||||
GET /v1/layers/{ID}/os
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s 127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/os | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"OS": "debian:8",
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get a Layer's parent
|
||||
|
||||
It returns the parent's ID of a given Layer.
|
||||
It returns an empty ID string when the layer has no parent.
|
||||
|
||||
GET /v1/layers/{ID}/parent
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s 127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/parent | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"ID": "df2a0347c9d081fa05ecb83669dcae5830c67b0676a6d6358218e55d8a45969c",
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get a Layer's package list
|
||||
|
||||
It returns the package list of a given Layer.
|
||||
|
||||
GET /v1/layers/{ID}/packages
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s 127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/packages | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"Packages": [
|
||||
{
|
||||
"Name": "gcc-4.9",
|
||||
"OS": "debian:8",
|
||||
"Version": "4.9.2-10"
|
||||
},
|
||||
[...]
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get a Layer's package diff
|
||||
|
||||
It returns the lists of packages a given Layer installs and removes.
|
||||
|
||||
GET /v1/layers/{ID}/packages/diff
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s 127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/packages/diff | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"InstalledPackages": [
|
||||
{
|
||||
"Name": "gcc-4.9",
|
||||
"OS": "debian:8",
|
||||
"Version": "4.9.2-10"
|
||||
},
|
||||
[...]
|
||||
],
|
||||
"RemovedPackages": null
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get a Layer's vulnerabilities
|
||||
|
||||
It returns the lists of vulnerabilities which affect a given Layer.
|
||||
|
||||
GET /v1/layers/{ID}/vulnerabilities(?minimumPriority=Low)
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|minimumPriority|Priority|(Optional) The minimum priority of the returned vulnerabilities. Defaults to High|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s "127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/vulnerabilities?minimumPriority=Negligible" | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"Vulnerabilities": [
|
||||
{
|
||||
"ID": "CVE-2014-2583",
|
||||
"Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-2583",
|
||||
"Priority": "Low",
|
||||
"Description": "Multiple directory traversal vulnerabilities in pam_timestamp.c in the pam_timestamp module for Linux-PAM (aka pam) 1.1.8 allow local users to create aribitrary files or possibly bypass authentication via a .. (dot dot) in the (1) PAM_RUSER value to the get_ruser function or (2) PAM_TTY value to the check_tty funtion, which is used by the format_timestamp_name function."
|
||||
},
|
||||
[...]
|
||||
]
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get vulnerabilities that a layer introduces and removes
|
||||
|
||||
It returns the lists of vulnerabilities which are introduced and removed by the given Layer.
|
||||
|
||||
GET /v1/layers/{ID}/vulnerabilities/diff(?minimumPriority=Low)
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Layer|
|
||||
|minimumPriority|Priority|(Optional) The minimum priority of the returned vulnerabilities|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s "127.0.0.1:6060/v1/layers/39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8/vulnerabilities?minimumPriority=Negligible" | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"Adds": [
|
||||
{
|
||||
"ID": "CVE-2014-2583",
|
||||
"Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-2583",
|
||||
"Priority": "Low",
|
||||
"Description": "Multiple directory traversal vulnerabilities in pam_timestamp.c in the pam_timestamp module for Linux-PAM (aka pam) 1.1.8 allow local users to create aribitrary files or possibly bypass authentication via a .. (dot dot) in the (1) PAM_RUSER value to the get_ruser function or (2) PAM_TTY value to the check_tty funtion, which is used by the format_timestamp_name function."
|
||||
},
|
||||
[...]
|
||||
],
|
||||
"Removes": null
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get Layers' vulnerabilities (Batch)
|
||||
|
||||
It returns the lists of vulnerabilities which affect the given Layers.
|
||||
|
||||
POST /v1/batch/layers/vulnerabilities(?minimumPriority=Low)
|
||||
|
||||
Counterintuitively, this request is a POST so that a large number of Layer IDs can be passed in the body.
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|LayersIDs|Array of strings|Unique IDs of Layers|
|
||||
|minimumPriority|Priority|(Optional) The minimum priority of the returned vulnerabilities. Defaults to High|
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s -H "Content-Type: application/json" -X POST -d \
|
||||
'{
|
||||
"LayersIDs": [
|
||||
"a005304e4e74c1541988d3d1abb170e338c1d45daee7151f8e82f8460634d329",
|
||||
"f1b10cd842498c23d206ee0cbeaa9de8d2ae09ff3c7af2723a9e337a6965d639"
|
||||
]
|
||||
}' \
|
||||
127.0.0.1:6060/v1/batch/layers/vulnerabilities
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"a005304e4e74c1541988d3d1abb170e338c1d45daee7151f8e82f8460634d329": {
|
||||
"Vulnerabilities": [
|
||||
{
|
||||
"ID": "CVE-2014-2583",
|
||||
"Link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-2583",
|
||||
"Priority": "Low",
|
||||
"Description": "Multiple directory traversal vulnerabilities in pam_timestamp.c in the pam_timestamp module for Linux-PAM (aka pam) 1.1.8 allow local users to create aribitrary files or possibly bypass authentication via a .. (dot dot) in the (1) PAM_RUSER value to the get_ruser function or (2) PAM_TTY value to the check_tty funtion, which is used by the format_timestamp_name function."
|
||||
},
|
||||
[...]
|
||||
]
|
||||
},
|
||||
[...]
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
# Vulnerabilities
|
||||
|
||||
## Get a vulnerability's information
|
||||
|
||||
It returns all known information about a Vulnerability and its fixes.
|
||||
|
||||
GET /v1/vulnerabilities/{ID}
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Vulnerability|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s 127.0.0.1:6060/v1/vulnerabilities/CVE-2015-0235 | python -m json.tool
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"ID": "CVE-2015-0235",
|
||||
"Link": "https://security-tracker.debian.org/tracker/CVE-2015-0235",
|
||||
"Priority": "High",
|
||||
"Description": "Heap-based buffer overflow in the __nss_hostname_digits_dots function in glibc 2.2, and other 2.x versions before 2.18, allows context-dependent attackers to execute arbitrary code via vectors related to the (1) gethostbyname or (2) gethostbyname2 function, aka \"GHOST.\"",
|
||||
"AffectedPackages": [
|
||||
{
|
||||
"Name": "eglibc",
|
||||
"OS": "debian:7",
|
||||
"AllVersions": false,
|
||||
"BeforeVersion": "2.13-38+deb7u7"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:8",
|
||||
"AllVersions": false,
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:9",
|
||||
"AllVersions": false,
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:unstable",
|
||||
"AllVersions": false,
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "eglibc",
|
||||
"OS": "debian:6",
|
||||
"AllVersions": true,
|
||||
"BeforeVersion": "",
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The `AffectedPackages` array lists the affected packages and provides the first known version in which the Vulnerability has been fixed; every previous version may be vulnerable. If `AllVersions` is `true`, no fix exists, thus all versions may be vulnerable.
|
||||
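A sketch of how a client might interpret one entry of this array (`versionLess` stands in for a real Debian/RPM version comparison, which this document does not define; the values in `main` are illustrative):

```
package main

import "fmt"

// affected reports whether an installed version is affected, given one
// AffectedPackages entry.
func affected(allVersions bool, beforeVersion, installedVersion string,
	versionLess func(a, b string) bool) bool {
	if allVersions {
		return true // no fix exists: every version may be vulnerable
	}
	return versionLess(installedVersion, beforeVersion)
}

func main() {
	// Naive lexicographic comparison, for illustration only.
	naiveLess := func(a, b string) bool { return a < b }
	fmt.Println(affected(false, "2.18-1", "2.17-5", naiveLess)) // true: older than the first fixed version
}
```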
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message":"the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Insert a new Vulnerability
|
||||
|
||||
It manually inserts a new Vulnerability.
|
||||
|
||||
POST /v1/vulnerabilities
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Vulnerability|
|
||||
|Link|String|Link to the Vulnerability tracker|
|
||||
|Priority|Priority|Priority of the Vulnerability|
|
||||
|AffectedPackages|Array of Package|Affected packages (Name, OS) and fixed version (or all versions)|
|
||||
|
||||
If no fix exists for a package, `AllVersions` should be set to `true`.
|
||||
|
||||
Valid Priorities are based on [Ubuntu CVE Tracker/README](http://bazaar.launchpad.net/~ubuntu-security/ubuntu-cve-tracker/master/view/head:/README)
|
||||
|
||||
* **Unknown** is either a security problem that has not been assigned a priority yet or a priority that our system did not recognize
|
||||
* **Negligible** is technically a security problem, but is only theoretical in nature, requires a very special situation, has almost no install base, or does no real damage. These tend not to get backported from upstream, and will likely not be included in security updates unless there is an easy fix and some other issue causes an update.
|
||||
* **Low** is a security problem, but is hard to exploit due to environment, requires a user-assisted attack, a small install base, or does very little damage. These tend to be included in security updates only when higher priority issues require an update, or if many low priority issues have built up.
|
||||
* **Medium** is a real security problem, and is exploitable for many people. Includes network daemon denial of service attacks, cross-site scripting, and gaining user privileges. Updates should be made soon for this priority of issue.
|
||||
* **High** is a real problem, exploitable for many people in a default installation. Includes serious remote denial of services, local root privilege escalations, or data loss.
|
||||
* **Critical** is a world-burning problem, exploitable for nearly all people in a default installation of Ubuntu. Includes remote root privilege escalations, or massive data loss.
|
||||
* **Defcon1** is a **Critical** problem which has been manually highlighted by the team. It requires immediate attention.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s -H "Content-Type: application/json" -X POST -d \
|
||||
'{
|
||||
"ID": "CVE-2015-0235",
|
||||
"Link": "https:security-tracker.debian.org/tracker/CVE-2015-0235",
|
||||
"Priority": "High",
|
||||
"Description": "Heap-based buffer overflow in the __nss_hostname_digits_dots function in glibc 2.2, and other 2.x versions before 2.18, allows context-dependent attackers to execute arbitrary code via vectors related to the (1) gethostbyname or (2) gethostbyname2 function, aka \"GHOST.\"",
|
||||
"AffectedPackages": [
|
||||
{
|
||||
"Name": "eglibc",
|
||||
"OS": "debian:7",
|
||||
"BeforeVersion": "2.13-38+deb7u7"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:8",
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:9",
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "glibc",
|
||||
"OS": "debian:unstable",
|
||||
"BeforeVersion": "2.18-1"
|
||||
},
|
||||
{
|
||||
"Name": "eglibc",
|
||||
"OS": "debian:6",
|
||||
"AllVersions": true,
|
||||
"BeforeVersion": ""
|
||||
}
|
||||
]
|
||||
}' \
|
||||
127.0.0.1:6060/v1/vulnerabilities
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
HTTP/1.1 201 Created
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 400 Bad Request
|
||||
{
|
||||
"Message":"Could not insert a vulnerability which has an invalid priority"
|
||||
}
|
||||
```
|
||||
|
||||
It could also return a `415 Unsupported Media Type` response with a `Message` if the request content is not valid JSON.
|
||||
|
||||
## Update a Vulnerability
|
||||
|
||||
It updates an existing Vulnerability.
|
||||
|
||||
PUT /v1/vulnerabilities/{ID}
|
||||
|
||||
The Link, Priority and Description fields can be updated. FixedIn packages are added to the vulnerability. However, as a vulnerability can be fixed by only one package on a given branch (OS, Name), old FixedIn packages that belong to the same branch as a newly added one will be removed.
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|Link|String|Link to the Vulnerability tracker|
|
||||
|Priority|Priority|Priority of the Vulnerability|
|
||||
|FixedIn|Array of Package|Affected packages (Name, OS) and fixed version (or all versions)|
|
||||
|
||||
If no fix exists for a package, `AllVersions` should be set to `true`.
|
||||
|
||||
### Example
|
||||
|
||||
curl -s -H "Content-Type: application/json" -X PUT -d '{"Priority": "Critical" }' 127.0.0.1:6060/v1/vulnerabilities/CVE-2015-0235
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 204 No content
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message":"the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
It could also return a `415 Unsupported Media Type` response with a `Message` if the request content is not valid JSON.
|
||||
|
||||
## Delete a Vulnerability
|
||||
|
||||
It deletes an existing Vulnerability.
|
||||
|
||||
DELETE /v1/vulnerabilities/{ID}
|
||||
|
||||
Be aware that it does not prevent fetchers from re-creating it. Therefore, it is only useful for removing manually inserted vulnerabilities.
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Vulnerability|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s -X DELETE 127.0.0.1:6060/v1/vulnerabilities/CVE-2015-0235
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 204 No content
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message":"the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get layers introducing a vulnerability
|
||||
|
||||
It gets all the layers (their IDs) that introduce the given vulnerability.
|
||||
|
||||
GET /v1/vulnerabilities/{ID}/introducing-layers
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Vulnerability|
|
||||
|
||||
### Example
|
||||
|
||||
curl -s -X GET 127.0.0.1:6060/v1/vulnerabilities/CVE-2015-0235/introducing-layers
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200
|
||||
{
|
||||
"IntroducingLayers":[
|
||||
"fb9cc58bde0c0a8fe53e6fdd23898e45041783f2d7869d939d7364f5777fde6f"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message":"the resource cannot be found"
|
||||
}
|
||||
```
|
||||
|
||||
## Get layers affected by a vulnerability
|
||||
|
||||
It returns whether the specified Layers are vulnerable to the given Vulnerability or not.
|
||||
|
||||
POST /v1/vulnerabilities/{ID}/affected-layers
|
||||
|
||||
Counterintuitively, this request is a POST so that a large number of Layer IDs can be passed in the body.
|
||||
|
||||
### Parameters
|
||||
|
||||
|Name|Type|Description|
|
||||
|------|-----|-------------|
|
||||
|ID|String|Unique ID of the Vulnerability|
|
||||
|LayersIDs|Array of strings|Unique IDs of Layers|
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
curl -s -H "Content-Type: application/json" -X POST -d \
|
||||
'{
|
||||
"LayersIDs": [
|
||||
"a005304e4e74c1541988d3d1abb170e338c1d45daee7151f8e82f8460634d329",
|
||||
"f1b10cd842498c23d206ee0cbeaa9de8d2ae09ff3c7af2723a9e337a6965d639"
|
||||
]
|
||||
}' \
|
||||
127.0.0.1:6060/v1/vulnerabilities/CVE-2015-0235/affected-layers
|
||||
```
|
||||
|
||||
### Success Response
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
{
|
||||
"f1b10cd842498c23d206ee0cbeaa9de8d2ae09ff3c7af2723a9e337a6965d639": {
|
||||
"Vulnerable": false
|
||||
},
|
||||
"fb9cc58bde0c0a8fe53e6fdd23898e45041783f2d7869d939d7364f5777fde6f": {
|
||||
"Vulnerable": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
Returned when the Layer or the Vulnerability does not exist.
|
||||
|
||||
```
|
||||
HTTP/1.1 404 Not Found
|
||||
{
|
||||
"Message": "the resource cannot be found"
|
||||
}
|
||||
```
|
BIN
docs/Model.graffle
Normal file
Binary file not shown.
70
docs/Model.md
Normal file
70
docs/Model.md
Normal file
@ -0,0 +1,70 @@
|
||||
# Legend
|
||||
-> outbound edges
|
||||
<- inbound edges
|
||||
|
||||
# Layer
|
||||
|
||||
Key: "layer:" + Hash(id)
|
||||
|
||||
-> is = "layer"
|
||||
-> id
|
||||
-> parent (my ancestor is)
|
||||
|
||||
-> os
|
||||
-> adds*
|
||||
-> removes*
|
||||
-> engineVersion
|
||||
|
||||
<- parent* (is ancestor of)
|
||||
|
||||
# Package
|
||||
|
||||
Key: "package:" + Hash(os + ":" + name + ":" + version)
|
||||
|
||||
-> is = "package"
|
||||
-> os
|
||||
-> name
|
||||
-> version
|
||||
-> nextVersion
|
||||
|
||||
<- nextVersion
|
||||
<- adds*
|
||||
<- removes*
|
||||
<- fixed_in*
|
||||
|
||||
Packages are organized in linked lists: there is one linked list per os/name couple. Each linked list has a head and a tail with special versions.
|
||||
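For example, the keys above can be reproduced with the `utils.Hash` helper used by the database package (a sketch; the concrete id, os, name and version values are illustrative):

```
package main

import (
	"fmt"

	"github.com/coreos/clair/utils"
)

func main() {
	// Key: "layer:" + Hash(id)
	layerKey := "layer:" + utils.Hash("39bb80489af75406073b5364c9c326134015140e1f7976a370a8bd446889e6f8")
	// Key: "package:" + Hash(os + ":" + name + ":" + version)
	packageKey := "package:" + utils.Hash("debian:8" + ":" + "openssl" + ":" + "1.0.1e-2")
	fmt.Println(layerKey, packageKey)
}
```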
|
||||
# Vulnerability
|
||||
|
||||
Key: "vulnerability:" + Hash(name)
|
||||
|
||||
-> is = "vulnerability"
|
||||
-> name
|
||||
-> priority
|
||||
-> link
|
||||
-> fixed_in*
|
||||
|
||||
# Notification
|
||||
|
||||
Key: "notification:" + random uuid
|
||||
|
||||
-> is = "notification"
|
||||
-> type
|
||||
-> data
|
||||
-> isSent
|
||||
|
||||
# Flag
|
||||
|
||||
Key: "flag:" + name
|
||||
|
||||
-> value
|
||||
|
||||
# Lock
|
||||
|
||||
Key: name
|
||||
|
||||
-> locked = "locked"
|
||||
-> locked_until (timestamp)
|
||||
-> locked_by
|
||||
|
||||
A lock can be used to lock a specific graph node by using the node Key as the lock name.
|
BIN
docs/Model.png
Normal file
Binary file not shown.
131
docs/Notifications.md
Normal file
@ -0,0 +1,131 @@
|
||||
# Notifications
|
||||
|
||||
This tool can send notifications to external services when specific events happen, such as vulnerability updates.
|
||||
|
||||
For now, it only supports transmitting them to an HTTP endpoint using POST requests, but it may be extended quite easily.
|
||||
To enable the notification system, specify the following command-line arguments:
|
||||
|
||||
--notifier-type=http --notifier-http-url="http://your-notification-endpoint"
|
||||
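A notification endpoint therefore only needs to accept POST requests carrying JSON documents like the ones shown below. A minimal receiver sketch in Go (the envelope field names come from the examples; the listen address, and the assumption that a 2xx response acknowledges the notification, are not specified by this document):

```
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// notificationEnvelope mirrors the common fields of the examples below;
// Content is kept raw because its schema depends on Type.
type notificationEnvelope struct {
	Name    string
	Type    string
	Content json.RawMessage
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		var n notificationEnvelope
		if err := json.NewDecoder(r.Body).Decode(&n); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("received %s for %s", n.Type, n.Name)
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```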
|
||||
# Types of notifications
|
||||
|
||||
## A new vulnerability has been released
|
||||
|
||||
A notification of this kind is sent as soon as a new vulnerability is added to the system, via the updater or the API.
|
||||
|
||||
### Example
|
||||
|
||||
```
|
||||
{
|
||||
"Name":"CVE-2016-0001",
|
||||
"Type":"NewVulnerabilityNotification",
|
||||
"Content":{
|
||||
"Vulnerability":{
|
||||
"ID":"CVE-2016-0001",
|
||||
"Link":"https:security-tracker.debian.org/tracker/CVE-2016-0001",
|
||||
"Priority":"Medium",
|
||||
"Description":"A futurist vulnerability",
|
||||
"AffectedPackages":[
|
||||
{
|
||||
"OS":"centos:6",
|
||||
"Name":"bash",
|
||||
"AllVersions":true,
|
||||
"BeforeVersion":""
|
||||
}
|
||||
]
|
||||
},
|
||||
"IntroducingLayersIDs":[
|
||||
"fb9cc58bde0c0a8fe53e6fdd23898e45041783f2d7869d939d7364f5777fde6f"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The `IntroducingLayersIDs` array contains every layers that install at least one affected package.
|
||||
|
||||

## A vulnerability's priority has increased

This notification is sent when a vulnerability's priority has increased.

### Example

```
{
  "Name":"CVE-2016-0001",
  "Type":"VulnerabilityPriorityIncreasedNotification",
  "Content":{
    "Vulnerability":{
      "ID":"CVE-2016-0001",
      "Link":"https://security-tracker.debian.org/tracker/CVE-2016-0001",
      "Priority":"Critical",
      "Description":"A futurist vulnerability",
      "AffectedPackages":[
        {
          "OS":"centos:6",
          "Name":"bash",
          "AllVersions":true,
          "BeforeVersion":""
        }
      ]
    },
    "OldPriority":"Medium",
    "NewPriority":"Critical",
    "IntroducingLayersIDs":[
      "fb9cc58bde0c0a8fe53e6fdd23898e45041783f2d7869d939d7364f5777fde6f"
    ]
  }
}
```

The `IntroducingLayersIDs` array contains every layer that installs at least one affected package.

## A vulnerability's affected package list changed

This notification is sent when the affected packages of a vulnerability change.

### Example

```
{
  "Name":"CVE-2016-0001",
  "Type":"VulnerabilityPackageChangedNotification",
  "Content":{
    "Vulnerability":{
      "ID":"CVE-2016-0001",
      "Link":"https://security-tracker.debian.org/tracker/CVE-2016-0001",
      "Priority":"Critical",
      "Description":"A futurist vulnerability",
      "AffectedPackages":[
        {
          "OS":"centos:6",
          "Name":"bash",
          "AllVersions":false,
          "BeforeVersion":"4.0"
        }
      ]
    },
    "AddedAffectedPackages":[
      {
        "OS":"centos:6",
        "Name":"bash",
        "AllVersions":false,
        "BeforeVersion":"4.0"
      }
    ],
    "RemovedAffectedPackages":[
      {
        "OS":"centos:6",
        "Name":"bash",
        "AllVersions":true,
        "BeforeVersion":""
      }
    ],
    "NewIntroducingLayersIDs": [],
    "FormerIntroducingLayerIDs":[
      "fb9cc58bde0c0a8fe53e6fdd23898e45041783f2d7869d939d7364f5777fde6f"
    ]
  }
}
```

The `NewIntroducingLayersIDs` array contains the layers that install at least one of the newly affected packages, and which are therefore now vulnerable because of this change. On the other hand, the `FormerIntroducingLayerIDs` array contains the layers that no longer introduce the vulnerability.
21
docs/Run.md
Normal file
@@ -0,0 +1,21 @@
# Build and Run with Docker

The easiest way to run this tool is to deploy it using Docker.
If you prefer to run it locally, reading the Dockerfile will tell you how.

To deploy it from the latest sources, follow this procedure:
* Clone the repository and change your current directory
* Build the container: `docker build -t <TAG> .`
* Run it like this to see the available commands: `docker run -it <TAG>`. To get help about a specific command, use `docker run -it <TAG> help <COMMAND>`

## Command-Line examples

When running multiple instances is not desired, using the BoltDB backend is the best choice as it is lightning fast:

    docker run -it <TAG> --db-type=bolt --db-path=/db/database

Using PostgreSQL enables running multiple instances concurrently. Here is a command-line example:

    docker run -it <TAG> --db-type=sql --db-path='host=awesome-database.us-east-1.rds.amazonaws.com port=5432 user=SuperSheep password=SuperSecret' --update-interval=2h --notifier-type=http --notifier-http-url="http://your-notification-endpoint"

The default API port is 6060; read the [API Documentation](API.md) to learn more.
54
docs/Security.md
Normal file
@@ -0,0 +1,54 @@
# Security

# Enabling HTTPS
HTTPS provides clients the ability to verify the server identity and provides transport security.

For this you need your CA certificate (ca.crt) and a signed key pair (server.crt, server.key) ready.
To enable it, provide the signed key pair using the `--api-cert-file` and `--api-key-file` arguments.

To test it, use curl like this:

    curl --cacert ca.crt -L https://127.0.0.1:6060/v1/versions

You should be able to see the handshake succeed. Because we use self-signed certificates with our own certificate authority, you need to provide the CA to curl using the --cacert option. Another possibility would be to add your CA certificate to the trusted certificates on your system (usually in /etc/ssl/certs).

**OSX 10.9+ Users**: curl 7.30.0 on OSX 10.9+ doesn't understand certificates passed in on the command line. Instead, you must import the dummy ca.crt directly into the keychain or add the -k flag to curl to ignore errors. If you want to test without the -k flag, run `open ca.crt` and follow the prompts. Please remove this certificate after you are done testing!

# Enabling Client Certificate Auth

We can also use client certificates to prevent unauthorized access to the API.

The clients will provide their certificates to the server, and the server will check whether the cert is signed by the supplied CA and decide whether to serve the request.

You need the same files mentioned in the HTTPS section, as well as a key pair for the client (client.crt, client.key) signed by the same certificate authority. To enable it, use the same arguments as above for HTTPS and the additional `--api-ca-file` parameter with the CA certificate.

The test command from the HTTPS section should now be rejected; instead, we need to provide the client key pair:

    curl --cacert ca.crt --cert client.crt --key client.key -L https://127.0.0.1:6060/v1/versions

**OSX 10.10+ Users**: A bundle in P12 (PKCS#12) format must be used. To convert your key pair, use the following command, in which the password is mandatory. Then, `--cert client.p12` along with `--password pass` replace `--cert client.crt --key client.key`. You may also import the P12 certificate into your Keychain and specify its name as it appears in the Keychain instead of the path to the file.

    openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12 -password pass:pass
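
For programmatic access with client certificate auth, a Go client could be configured along these lines. This is a sketch that reuses the ca.crt, client.crt and client.key files from the curl examples above:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Trust the CA that signed the server certificate.
	caCert, err := ioutil.ReadFile("ca.crt")
	if err != nil {
		panic(err)
	}
	caPool := x509.NewCertPool()
	caPool.AppendCertsFromPEM(caCert)

	// Load the client key pair used for client certificate auth.
	clientCert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		panic(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				RootCAs:      caPool,
				Certificates: []tls.Certificate{clientCert},
			},
		},
	}

	resp, err := client.Get("https://127.0.0.1:6060/v1/versions")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```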

# Generating self-signed certificates
[etcd-ca](https://github.com/coreos/etcd-ca) is a great tool for easily generating certificates. Below is an example that generates a new CA as well as server and client key pairs, inspired by their example.

```
git clone https://github.com/coreos/etcd-ca
cd etcd-ca
./build

# Create CA
./bin/etcd-ca init
./bin/etcd-ca export | tar xvf -

# Create certificate for server
./bin/etcd-ca new-cert --passphrase $passphrase --ip $server1ip --domain $server1hostname server1
./bin/etcd-ca sign --passphrase $passphrase server1
./bin/etcd-ca export --insecure --passphrase $passphrase server1 | tar xvf -

# Create certificate for client
./bin/etcd-ca new-cert --passphrase $passphrase client1
./bin/etcd-ca sign --passphrase $passphrase client1
./bin/etcd-ca export --insecure --passphrase $passphrase client1 | tar xvf -
```
80
health/health.go
Normal file
@@ -0,0 +1,80 @@
// Copyright 2015 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package health defines a standard healthcheck response format and exposes
// a function that summarizes registered healthchecks.
package health

import (
	"fmt"
	"sync"
)

// Status defines a way to know the health status of a service
type Status struct {
	// IsEssential determines if the service is essential to the app, which can't
	// run in case of a failure
	IsEssential bool
	// IsHealthy defines whether the service is working or not
	IsHealthy bool
	// Details gives information specific to the service
	Details interface{}
}

// A Healthchecker function is a method returning the Status of the tested service
type Healthchecker func() Status

var (
	healthcheckersLock sync.Mutex
	healthcheckers     = make(map[string]Healthchecker)
)

// RegisterHealthchecker registers a Healthchecker function which will be part of Healthchecks
func RegisterHealthchecker(name string, f Healthchecker) {
	if name == "" {
		panic("Could not register a Healthchecker with an empty name")
	}
	if f == nil {
		panic("Could not register a nil Healthchecker")
	}

	healthcheckersLock.Lock()
	defer healthcheckersLock.Unlock()

	if _, alreadyExists := healthcheckers[name]; alreadyExists {
		panic(fmt.Sprintf("Healthchecker '%s' is already registered", name))
	}
	healthcheckers[name] = f
}

// Healthcheck calls every registered Healthchecker and summarizes their output
func Healthcheck() (bool, map[string]interface{}) {
	globalHealth := true

	statuses := make(map[string]interface{})
	for serviceName, serviceChecker := range healthcheckers {
		status := serviceChecker()

		globalHealth = globalHealth && (!status.IsEssential || status.IsHealthy)
		statuses[serviceName] = struct {
			IsHealthy bool
			Details   interface{} `json:",omitempty"`
		}{
			IsHealthy: status.IsHealthy,
			Details:   status.Details,
		}
	}

	return globalHealth, statuses
}
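
As an aside, a minimal usage sketch for this package (illustrative, not part of the commit; the service name and check are made up):

```go
package main

import (
	"fmt"

	"github.com/coreos/clair/health"
)

func main() {
	// Register a non-essential check; the app stays globally healthy even if it fails.
	health.RegisterHealthchecker("example", func() health.Status {
		return health.Status{IsEssential: false, IsHealthy: true, Details: "ok"}
	})

	healthy, statuses := health.Healthcheck()
	fmt.Println(healthy, statuses)
}
```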
148
main.go
Normal file
148
main.go
Normal file
@ -0,0 +1,148 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/pprof"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/clair/api"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/notifier"
|
||||
"github.com/coreos/clair/updater"
|
||||
"github.com/coreos/clair/utils"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
|
||||
// Register components
|
||||
_ "github.com/coreos/clair/updater/fetchers"
|
||||
_ "github.com/coreos/clair/worker/detectors/os"
|
||||
_ "github.com/coreos/clair/worker/detectors/packages"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "main")
|
||||
|
||||
// Database configuration
|
||||
cfgDbType = kingpin.Flag("db-type", "Type of the database to use").Default("bolt").Enum("bolt", "leveldb", "memstore", "mongo", "sql")
|
||||
cfgDbPath = kingpin.Flag("db-path", "Path to the database to use").String()
|
||||
|
||||
// Notifier configuration
|
||||
cfgNotifierType = kingpin.Flag("notifier-type", "Type of the notifier to use").Default("none").Enum("none", "http")
|
||||
cfgNotifierHTTPURL = kingpin.Flag("notifier-http-url", "URL that will receive POST notifications").String()
|
||||
|
||||
// Updater configuration
|
||||
cfgUpdateInterval = kingpin.Flag("update-interval", "Frequency at which the vulnerability updater will run. Use 0 to disable the updater entirely.").Default("1h").Duration()
|
||||
|
||||
// API configuration
|
||||
cfgAPIPort = kingpin.Flag("api-port", "Port on which the API will listen").Default("6060").Int()
|
||||
cfgAPITimeout = kingpin.Flag("api-timeout", "Timeout of API calls").Default("900s").Duration()
|
||||
cfgAPICertFile = kingpin.Flag("api-cert-file", "Path to TLS Cert file").ExistingFile()
|
||||
cfgAPIKeyFile = kingpin.Flag("api-key-file", "Path to TLS Key file").ExistingFile()
|
||||
cfgAPICAFile = kingpin.Flag("api-ca-file", "Path to CA for verifying TLS client certs").ExistingFile()
|
||||
|
||||
// Other flags
|
||||
	cfgCPUProfilePath = kingpin.Flag("cpu-profile-path", "Path to which CPU profiling data will be written").String()
|
||||
cfgLogLevel = kingpin.Flag("log-level", "How much console-spam do you want globally").Default("info").Enum("trace", "debug", "info", "notice", "warning", "error", "critical")
|
||||
)
|
||||
|
||||
func main() {
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
|
||||
var err error
|
||||
st := utils.NewStopper()
|
||||
|
||||
// Parse command-line arguments
|
||||
kingpin.Parse()
|
||||
if *cfgDbType != "memstore" && *cfgDbPath == "" {
|
||||
kingpin.Errorf("required flag --db-path not provided, try --help")
|
||||
os.Exit(1)
|
||||
}
|
||||
if *cfgNotifierType == "http" && *cfgNotifierHTTPURL == "" {
|
||||
kingpin.Errorf("required flag --notifier-http-url not provided, try --help")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Initialize error/logging system
|
||||
	logLevel, err := capnslog.ParseLevel(strings.ToUpper(*cfgLogLevel))
	if err != nil {
		kingpin.Errorf("invalid --log-level value, try --help")
		os.Exit(1)
	}
|
||||
capnslog.SetGlobalLogLevel(logLevel)
|
||||
capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false))
|
||||
|
||||
// Enable CPU Profiling if specified
|
||||
if *cfgCPUProfilePath != "" {
|
||||
f, err := os.Create(*cfgCPUProfilePath)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create profile file: %s", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
pprof.StartCPUProfile(f)
|
||||
log.Info("started profiling")
|
||||
|
||||
defer func() {
|
||||
pprof.StopCPUProfile()
|
||||
log.Info("stopped profiling")
|
||||
}()
|
||||
}
|
||||
|
||||
// Open database
|
||||
err = database.Open(*cfgDbType, *cfgDbPath)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer database.Close()
|
||||
|
||||
// Start notifier
|
||||
var notifierService notifier.Notifier
|
||||
switch *cfgNotifierType {
|
||||
case "http":
|
||||
notifierService, err = notifier.NewHTTPNotifier(*cfgNotifierHTTPURL)
|
||||
if err != nil {
|
||||
log.Fatalf("could not initialize HTTP notifier: %s", err)
|
||||
}
|
||||
}
|
||||
if notifierService != nil {
|
||||
st.Begin()
|
||||
go notifierService.Run(st)
|
||||
}
|
||||
|
||||
// Start Main API and Health API
|
||||
st.Begin()
|
||||
go api.RunMain(&api.Config{
|
||||
Port: *cfgAPIPort,
|
||||
TimeOut: *cfgAPITimeout,
|
||||
CertFile: *cfgAPICertFile,
|
||||
KeyFile: *cfgAPIKeyFile,
|
||||
CAFile: *cfgAPICAFile,
|
||||
}, st)
|
||||
st.Begin()
|
||||
go api.RunHealth(*cfgAPIPort+1, st)
|
||||
|
||||
// Start updater
|
||||
st.Begin()
|
||||
go updater.Run(*cfgUpdateInterval, st)
|
||||
|
||||
// This blocks the main goroutine which is required to keep all the other goroutines running
|
||||
interrupts := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupts, os.Interrupt)
|
||||
<-interrupts
|
||||
log.Info("Received interruption, gracefully stopping ...")
|
||||
st.Stop()
|
||||
}
|
173
notifier/notifier.go
Normal file
173
notifier/notifier.go
Normal file
@ -0,0 +1,173 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package notifier fetches notifications from the database and sends them
|
||||
// to the specified remote handler.
|
||||
package notifier
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/coreos/pkg/timeutil"
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/health"
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/pborman/uuid"
|
||||
)
|
||||
|
||||
// A Notifier dispatches notifications
|
||||
type Notifier interface {
|
||||
Run(*utils.Stopper)
|
||||
}
|
||||
|
||||
var log = capnslog.NewPackageLogger("github.com/coreos/clair", "notifier")
|
||||
|
||||
const (
|
||||
maxBackOff = 5 * time.Minute
|
||||
checkInterval = 5 * time.Second
|
||||
|
||||
refreshLockAnticipation = time.Minute * 2
|
||||
lockDuration = time.Minute*8 + refreshLockAnticipation
|
||||
)
|
||||
|
||||
// An HTTPNotifier dispatches notifications to an HTTP endpoint with POST requests
|
||||
type HTTPNotifier struct {
|
||||
url string
|
||||
}
|
||||
|
||||
// NewHTTPNotifier initializes a new HTTPNotifier
|
||||
func NewHTTPNotifier(URL string) (*HTTPNotifier, error) {
|
||||
if _, err := url.Parse(URL); err != nil {
|
||||
return nil, cerrors.NewBadRequestError("could not create a notifier with an invalid URL")
|
||||
}
|
||||
|
||||
notifier := &HTTPNotifier{url: URL}
|
||||
health.RegisterHealthchecker("notifier", notifier.Healthcheck)
|
||||
|
||||
return notifier, nil
|
||||
}
|
||||
|
||||
// Run pops notifications from the database, locks them, sends them, marks them
// as sent and unlocks them
|
||||
//
|
||||
// It uses an exponential backoff when POST requests fail
|
||||
func (notifier *HTTPNotifier) Run(st *utils.Stopper) {
|
||||
defer st.End()
|
||||
|
||||
whoAmI := uuid.New()
|
||||
log.Infof("HTTP notifier started. URL: %s. Lock Identifier: %s", notifier.url, whoAmI)
|
||||
|
||||
for {
|
||||
node, notification, err := database.FindOneNotificationToSend(database.GetDefaultNotificationWrapper())
|
||||
if notification == nil || err != nil {
|
||||
if err != nil {
|
||||
log.Warningf("could not get notification to send: %s.", err)
|
||||
}
|
||||
|
||||
if !st.Sleep(checkInterval) {
|
||||
break
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to lock the notification
|
||||
hasLock, hasLockUntil := database.Lock(node, lockDuration, whoAmI)
|
||||
if !hasLock {
|
||||
continue
|
||||
}
|
||||
|
||||
for backOff := time.Duration(0); ; backOff = timeutil.ExpBackoff(backOff, maxBackOff) {
|
||||
// Backoff, it happens when an error occurs during the communication
|
||||
// with the notification endpoint
|
||||
if backOff > 0 {
|
||||
// Renew lock before going to sleep if necessary
|
||||
if time.Now().Add(backOff).After(hasLockUntil.Add(-refreshLockAnticipation)) {
|
||||
hasLock, hasLockUntil = database.Lock(node, lockDuration, whoAmI)
|
||||
if !hasLock {
|
||||
log.Warning("lost lock ownership, aborting")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Sleep
|
||||
if !st.Sleep(backOff) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Get notification content
|
||||
content, err := notification.GetContent()
|
||||
if err != nil {
|
||||
log.Warningf("could not get content of notification '%s': %s", notification.GetName(), err.Error())
|
||||
break
|
||||
}
|
||||
|
||||
// Marshal the notification content
|
||||
jsonContent, err := json.Marshal(struct {
|
||||
Name, Type string
|
||||
Content interface{}
|
||||
}{
|
||||
Name: notification.GetName(),
|
||||
Type: notification.GetType(),
|
||||
Content: content,
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("could not marshal content of notification '%s': %s", notification.GetName(), err.Error())
|
||||
break
|
||||
}
|
||||
|
||||
// Post notification
|
||||
req, _ := http.NewRequest("POST", notifier.url, bytes.NewBuffer(jsonContent))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
log.Warningf("could not post notification '%s': %s", notification.GetName(), err.Error())
|
||||
continue
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
if res.StatusCode != 200 && res.StatusCode != 201 {
|
||||
log.Warningf("could not post notification '%s': got status code %d", notification.GetName(), res.StatusCode)
|
||||
continue
|
||||
}
|
||||
|
||||
// Mark the notification as sent
|
||||
database.MarkNotificationAsSent(node)
|
||||
|
||||
log.Infof("sent notification '%s' successfully", notification.GetName())
|
||||
break
|
||||
}
|
||||
|
||||
if hasLock {
|
||||
database.Unlock(node, whoAmI)
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("HTTP notifier stopped")
|
||||
}
|
||||
|
||||
// Healthcheck returns the health of the notifier service
|
||||
func (notifier *HTTPNotifier) Healthcheck() health.Status {
|
||||
queueSize, err := database.CountNotificationsToSend()
|
||||
return health.Status{IsEssential: false, IsHealthy: err == nil, Details: struct{ QueueSize int }{QueueSize: queueSize}}
|
||||
}
|
64
updater/fetchers.go
Normal file
64
updater/fetchers.go
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package updater
|
||||
|
||||
import (
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
)
|
||||
|
||||
var fetchers = make(map[string]Fetcher)
|
||||
|
||||
// Fetcher represents anything that can fetch vulnerabilities.
|
||||
type Fetcher interface {
|
||||
FetchUpdate() (FetcherResponse, error)
|
||||
}
|
||||
|
||||
// FetcherResponse represents the sum of results of an update.
|
||||
type FetcherResponse struct {
|
||||
FlagName string
|
||||
FlagValue string
|
||||
Notes []string
|
||||
Vulnerabilities []FetcherVulnerability
|
||||
}
|
||||
|
||||
// FetcherVulnerability represents an individual vulnerability processed from
|
||||
// an update.
|
||||
type FetcherVulnerability struct {
|
||||
ID string
|
||||
Link string
|
||||
Description string
|
||||
Priority types.Priority
|
||||
FixedIn []*database.Package
|
||||
}
|
||||
|
||||
// RegisterFetcher makes a Fetcher available by the provided name.
|
||||
// If RegisterFetcher is called twice with the same name or if the Fetcher is nil,
// it panics.
|
||||
func RegisterFetcher(name string, f Fetcher) {
|
||||
if name == "" {
|
||||
panic("updater: could not register a Fetcher with an empty name")
|
||||
}
|
||||
|
||||
if f == nil {
|
||||
panic("updater: could not register a nil Fetcher")
|
||||
}
|
||||
|
||||
if _, dup := fetchers[name]; dup {
|
||||
panic("updater: RegisterFetcher called twice for " + name)
|
||||
}
|
||||
|
||||
fetchers[name] = f
|
||||
}
|
240
updater/fetchers/debian.go
Normal file
240
updater/fetchers/debian.go
Normal file
@ -0,0 +1,240 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/updater"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
)
|
||||
|
||||
const (
|
||||
url = "https://security-tracker.debian.org/tracker/data/json"
|
||||
cveURLPrefix = "https://security-tracker.debian.org/tracker"
|
||||
debianUpdaterFlag = "debianUpdater"
|
||||
)
|
||||
|
||||
type jsonData map[string]map[string]jsonVuln
|
||||
|
||||
type jsonVuln struct {
|
||||
Description string `json:"description"`
|
||||
Releases map[string]jsonRel `json:"releases"`
|
||||
}
|
||||
|
||||
type jsonRel struct {
|
||||
FixedVersion string `json:"fixed_version"`
|
||||
Status string `json:"status"`
|
||||
Urgency string `json:"urgency"`
|
||||
}
|
||||
|
||||
// DebianFetcher implements updater.Fetcher for the Debian Security Tracker
|
||||
// (https://security-tracker.debian.org).
|
||||
type DebianFetcher struct{}
|
||||
|
||||
func init() {
|
||||
updater.RegisterFetcher("debian", &DebianFetcher{})
|
||||
}
|
||||
|
||||
// FetchUpdate fetches vulnerability updates from the Debian Security Tracker.
|
||||
func (fetcher *DebianFetcher) FetchUpdate() (resp updater.FetcherResponse, err error) {
|
||||
	log.Info("fetching Debian vulnerabilities")
|
||||
|
||||
// Download JSON.
|
||||
r, err := http.Get(url)
|
||||
if err != nil {
|
||||
log.Errorf("could not download Debian's update: %s", err)
|
||||
return resp, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
// Get the SHA-1 of the latest update's JSON data
|
||||
latestHash, err := database.GetFlagValue(debianUpdaterFlag)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Parse the JSON.
|
||||
resp, err = buildResponse(r.Body, latestHash)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func buildResponse(jsonReader io.Reader, latestKnownHash string) (resp updater.FetcherResponse, err error) {
|
||||
hash := latestKnownHash
|
||||
|
||||
// Defer the addition of flag information to the response.
|
||||
defer func() {
|
||||
if err == nil {
|
||||
resp.FlagName = debianUpdaterFlag
|
||||
resp.FlagValue = hash
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a TeeReader so that we can unmarshal into JSON and write to a SHA-1
|
||||
// digest at the same time.
|
||||
jsonSHA := sha1.New()
|
||||
teedJSONReader := io.TeeReader(jsonReader, jsonSHA)
|
||||
|
||||
// Unmarshal JSON.
|
||||
var data jsonData
|
||||
err = json.NewDecoder(teedJSONReader).Decode(&data)
|
||||
if err != nil {
|
||||
log.Errorf("could not unmarshal Debian's JSON: %s", err)
|
||||
return resp, ErrCouldNotParse
|
||||
}
|
||||
|
||||
// Calculate the hash and skip updating if the hash has been seen before.
|
||||
hash = hex.EncodeToString(jsonSHA.Sum(nil))
|
||||
if latestKnownHash == hash {
|
||||
log.Debug("no Debian update")
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Extract vulnerability data from Debian's JSON schema.
|
||||
vulnerabilities, unknownReleases := parseDebianJSON(&data)
|
||||
|
||||
// Log unknown releases
|
||||
for k := range unknownReleases {
|
||||
		note := fmt.Sprintf("Debian %s is not mapped to any version number (e.g. Jessie->8). Please update me.", k)
|
||||
resp.Notes = append(resp.Notes, note)
|
||||
log.Warning(note)
|
||||
}
|
||||
|
||||
// Convert the vulnerabilities map to a slice in the response
|
||||
for _, v := range vulnerabilities {
|
||||
resp.Vulnerabilities = append(resp.Vulnerabilities, v)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func parseDebianJSON(data *jsonData) (vulnerabilities map[string]updater.FetcherVulnerability, unknownReleases map[string]struct{}) {
|
||||
vulnerabilities = make(map[string]updater.FetcherVulnerability)
|
||||
unknownReleases = make(map[string]struct{})
|
||||
|
||||
for pkgName, pkgNode := range *data {
|
||||
for vulnName, vulnNode := range pkgNode {
|
||||
for releaseName, releaseNode := range vulnNode.Releases {
|
||||
// Attempt to detect the release number.
|
||||
if _, isReleaseKnown := database.DebianReleasesMapping[releaseName]; !isReleaseKnown {
|
||||
unknownReleases[releaseName] = struct{}{}
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip if the release is not affected.
|
||||
if releaseNode.FixedVersion == "0" || releaseNode.Status == "undetermined" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get or create the vulnerability.
|
||||
vulnerability, vulnerabilityAlreadyExists := vulnerabilities[vulnName]
|
||||
if !vulnerabilityAlreadyExists {
|
||||
vulnerability = updater.FetcherVulnerability{
|
||||
ID: vulnName,
|
||||
Link: strings.Join([]string{cveURLPrefix, "/", vulnName}, ""),
|
||||
Priority: types.Unknown,
|
||||
Description: vulnNode.Description,
|
||||
}
|
||||
}
|
||||
|
||||
// Set the priority of the vulnerability.
|
||||
// In the JSON, a vulnerability has one urgency per package it affects.
|
||||
// The highest urgency should be the one set.
|
||||
urgency := urgencyToPriority(releaseNode.Urgency)
|
||||
if urgency.Compare(vulnerability.Priority) > 0 {
|
||||
vulnerability.Priority = urgency
|
||||
}
|
||||
|
||||
// Determine the version of the package the vulnerability affects.
|
||||
var version types.Version
|
||||
var err error
|
||||
if releaseNode.Status == "open" {
|
||||
// Open means that the package is currently vulnerable in the latest
|
||||
// version of this Debian release.
|
||||
version = types.MaxVersion
|
||||
} else if releaseNode.Status == "resolved" {
|
||||
// Resolved means that the vulnerability has been fixed in
|
||||
// "fixed_version" (if affected).
|
||||
version, err = types.NewVersion(releaseNode.FixedVersion)
|
||||
if err != nil {
|
||||
log.Warningf("could not parse package version '%s': %s. skipping", releaseNode.FixedVersion, err.Error())
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Create and add the package.
|
||||
pkg := &database.Package{
|
||||
OS: "debian:" + database.DebianReleasesMapping[releaseName],
|
||||
Name: pkgName,
|
||||
Version: version,
|
||||
}
|
||||
vulnerability.FixedIn = append(vulnerability.FixedIn, pkg)
|
||||
|
||||
// Store the vulnerability.
|
||||
vulnerabilities[vulnName] = vulnerability
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func urgencyToPriority(urgency string) types.Priority {
|
||||
switch urgency {
|
||||
case "not yet assigned":
|
||||
return types.Unknown
|
||||
|
||||
case "end-of-life":
|
||||
fallthrough
|
||||
case "unimportant":
|
||||
return types.Negligible
|
||||
|
||||
case "low":
|
||||
fallthrough
|
||||
case "low*":
|
||||
fallthrough
|
||||
case "low**":
|
||||
return types.Low
|
||||
|
||||
case "medium":
|
||||
fallthrough
|
||||
case "medium*":
|
||||
fallthrough
|
||||
case "medium**":
|
||||
return types.Medium
|
||||
|
||||
case "high":
|
||||
fallthrough
|
||||
case "high*":
|
||||
fallthrough
|
||||
case "high**":
|
||||
return types.High
|
||||
|
||||
default:
|
||||
log.Warningf("could not determine vulnerability priority from: %s", urgency)
|
||||
return types.Unknown
|
||||
}
|
||||
}
|
80
updater/fetchers/debian_test.go
Normal file
80
updater/fetchers/debian_test.go
Normal file
@ -0,0 +1,80 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDebianParser(t *testing.T) {
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
|
||||
// Test parsing testdata/fetcher_debian_test.json
|
||||
testFile, _ := os.Open(path.Join(path.Dir(filename)) + "/testdata/fetcher_debian_test.json")
|
||||
response, err := buildResponse(testFile, "")
|
||||
if assert.Nil(t, err) && assert.Len(t, response.Vulnerabilities, 2) {
|
||||
for _, vulnerability := range response.Vulnerabilities {
|
||||
if vulnerability.ID == "CVE-2015-1323" {
|
||||
assert.Equal(t, "https://security-tracker.debian.org/tracker/CVE-2015-1323", vulnerability.Link)
|
||||
assert.Equal(t, types.Low, vulnerability.Priority)
|
||||
assert.Equal(t, "This vulnerability is not very dangerous.", vulnerability.Description)
|
||||
|
||||
if assert.Len(t, vulnerability.FixedIn, 2) {
|
||||
assert.Contains(t, vulnerability.FixedIn, &database.Package{
|
||||
OS: "debian:8",
|
||||
Name: "aptdaemon",
|
||||
Version: types.MaxVersion,
|
||||
})
|
||||
assert.Contains(t, vulnerability.FixedIn, &database.Package{
|
||||
OS: "debian:unstable",
|
||||
Name: "aptdaemon",
|
||||
Version: types.NewVersionUnsafe("1.1.1+bzr982-1"),
|
||||
})
|
||||
}
|
||||
} else if vulnerability.ID == "CVE-2003-0779" {
|
||||
assert.Equal(t, "https://security-tracker.debian.org/tracker/CVE-2003-0779", vulnerability.Link)
|
||||
assert.Equal(t, types.High, vulnerability.Priority)
|
||||
assert.Equal(t, "But this one is very dangerous.", vulnerability.Description)
|
||||
|
||||
if assert.Len(t, vulnerability.FixedIn, 3) {
|
||||
assert.Contains(t, vulnerability.FixedIn, &database.Package{
|
||||
OS: "debian:8",
|
||||
Name: "aptdaemon",
|
||||
Version: types.NewVersionUnsafe("0.7.0"),
|
||||
})
|
||||
assert.Contains(t, vulnerability.FixedIn, &database.Package{
|
||||
OS: "debian:unstable",
|
||||
Name: "aptdaemon",
|
||||
Version: types.NewVersionUnsafe("0.7.0"),
|
||||
})
|
||||
assert.Contains(t, vulnerability.FixedIn, &database.Package{
|
||||
OS: "debian:8",
|
||||
Name: "asterisk",
|
||||
Version: types.NewVersionUnsafe("0.5.56"),
|
||||
})
|
||||
}
|
||||
} else {
|
||||
assert.Fail(t, "Wrong vulnerability name: ", vulnerability.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
32
updater/fetchers/fetchers.go
Normal file
32
updater/fetchers/fetchers.go
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package fetchers implements vulnerability fetchers for several sources.
|
||||
package fetchers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater/fetchers")
|
||||
|
||||
// ErrCouldNotParse is returned when a fetcher fails to parse the update data.
|
||||
ErrCouldNotParse = errors.New("updater/fetchers: could not parse")
|
||||
|
||||
// ErrFilesystem is returned when a fetcher fails to interact with the local filesystem.
|
||||
ErrFilesystem = errors.New("updater/fetchers: something went wrong when interacting with the fs")
|
||||
)
|
353
updater/fetchers/rhel.go
Normal file
353
updater/fetchers/rhel.go
Normal file
@ -0,0 +1,353 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/updater"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
)
|
||||
|
||||
const (
|
||||
	// RHSAs before this one only deal with RHEL <= 4.
|
||||
firstRHEL5RHSA = 20070044
|
||||
firstConsideredRHEL = 5
|
||||
|
||||
ovalURI = "https://www.redhat.com/security/data/oval/"
|
||||
rhsaFilePrefix = "com.redhat.rhsa-"
|
||||
rhelUpdaterFlag = "rhelUpdater"
|
||||
)
|
||||
|
||||
var (
|
||||
ignoredCriterions = []string{
|
||||
" is signed with Red Hat ",
|
||||
" Client is installed",
|
||||
" Workstation is installed",
|
||||
" ComputeNode is installed",
|
||||
}
|
||||
|
||||
rhsaRegexp = regexp.MustCompile(`com.redhat.rhsa-(\d+).xml`)
|
||||
)
|
||||
|
||||
type oval struct {
|
||||
Definitions []definition `xml:"definitions>definition"`
|
||||
}
|
||||
|
||||
type definition struct {
|
||||
Title string `xml:"metadata>title"`
|
||||
Description string `xml:"metadata>description"`
|
||||
References []reference `xml:"metadata>reference"`
|
||||
Criteria criteria `xml:"criteria"`
|
||||
}
|
||||
|
||||
type reference struct {
|
||||
Source string `xml:"source,attr"`
|
||||
URI string `xml:"ref_url,attr"`
|
||||
}
|
||||
|
||||
type criteria struct {
|
||||
Operator string `xml:"operator,attr"`
|
||||
Criterias []*criteria `xml:"criteria"`
|
||||
Criterions []criterion `xml:"criterion"`
|
||||
}
|
||||
|
||||
type criterion struct {
|
||||
Comment string `xml:"comment,attr"`
|
||||
}
|
||||
|
||||
// RHELFetcher implements updater.Fetcher and gets vulnerability updates from
|
||||
// the Red Hat OVAL definitions.
|
||||
type RHELFetcher struct{}
|
||||
|
||||
func init() {
|
||||
updater.RegisterFetcher("Red Hat", &RHELFetcher{})
|
||||
}
|
||||
|
||||
// FetchUpdate gets vulnerability updates from the Red Hat OVAL definitions.
|
||||
func (f *RHELFetcher) FetchUpdate() (resp updater.FetcherResponse, err error) {
|
||||
	log.Info("fetching Red Hat vulnerabilities")
|
||||
|
||||
// Get the first RHSA we have to manage.
|
||||
flagValue, err := database.GetFlagValue(rhelUpdaterFlag)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
firstRHSA, err := strconv.Atoi(flagValue)
|
||||
if firstRHSA == 0 || err != nil {
|
||||
firstRHSA = firstRHEL5RHSA
|
||||
}
|
||||
|
||||
// Fetch the update list.
|
||||
r, err := http.Get(ovalURI)
|
||||
if err != nil {
|
||||
log.Errorf("could not download RHEL's update list: %s", err)
|
||||
return resp, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
// Get the list of RHSAs that we have to process.
|
||||
var rhsaList []int
|
||||
scanner := bufio.NewScanner(r.Body)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
r := rhsaRegexp.FindStringSubmatch(line)
|
||||
if len(r) == 2 {
|
||||
rhsaNo, _ := strconv.Atoi(r[1])
|
||||
if rhsaNo > firstRHSA {
|
||||
rhsaList = append(rhsaList, rhsaNo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, rhsa := range rhsaList {
|
||||
// Download the RHSA's XML file.
|
||||
r, err := http.Get(ovalURI + rhsaFilePrefix + strconv.Itoa(rhsa) + ".xml")
|
||||
if err != nil {
|
||||
log.Errorf("could not download RHEL's update file: %s", err)
|
||||
return resp, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
// Parse the XML.
|
||||
vs, err := parseRHSA(r.Body)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Collect vulnerabilities.
|
||||
for _, v := range vs {
|
||||
if len(v.FixedIn) > 0 {
|
||||
resp.Vulnerabilities = append(resp.Vulnerabilities, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set the flag if we found anything.
|
||||
if len(rhsaList) > 0 {
|
||||
resp.FlagName = rhelUpdaterFlag
|
||||
resp.FlagValue = strconv.Itoa(rhsaList[len(rhsaList)-1])
|
||||
} else {
|
||||
log.Debug("no Red Hat update.")
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func parseRHSA(ovalReader io.Reader) (vulnerabilities []updater.FetcherVulnerability, err error) {
|
||||
// Decode the XML.
|
||||
var ov oval
|
||||
err = xml.NewDecoder(ovalReader).Decode(&ov)
|
||||
if err != nil {
|
||||
log.Errorf("could not decode RHEL's XML: %s.", err)
|
||||
err = ErrCouldNotParse
|
||||
return
|
||||
}
|
||||
|
||||
// Iterate over the definitions and collect any vulnerabilities that affect
|
||||
// more than one package.
|
||||
for _, definition := range ov.Definitions {
|
||||
packages := toPackages(definition.Criteria)
|
||||
if len(packages) > 0 {
|
||||
vuln := updater.FetcherVulnerability{
|
||||
ID: name(definition),
|
||||
Link: link(definition),
|
||||
Priority: priority(definition),
|
||||
Description: description(definition),
|
||||
FixedIn: packages,
|
||||
}
|
||||
vulnerabilities = append(vulnerabilities, vuln)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func getCriterions(node criteria) [][]criterion {
|
||||
// Filter useless criterions.
|
||||
var criterions []criterion
|
||||
for _, c := range node.Criterions {
|
||||
ignored := false
|
||||
|
||||
for _, ignoredItem := range ignoredCriterions {
|
||||
if strings.Contains(c.Comment, ignoredItem) {
|
||||
ignored = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !ignored {
|
||||
criterions = append(criterions, c)
|
||||
}
|
||||
}
|
||||
|
||||
if node.Operator == "AND" {
|
||||
return [][]criterion{criterions}
|
||||
} else if node.Operator == "OR" {
|
||||
var possibilities [][]criterion
|
||||
for _, c := range criterions {
|
||||
possibilities = append(possibilities, []criterion{c})
|
||||
}
|
||||
return possibilities
|
||||
}
|
||||
|
||||
return [][]criterion{}
|
||||
}
|
||||
|
||||
func getPossibilities(node criteria) [][]criterion {
|
||||
if len(node.Criterias) == 0 {
|
||||
return getCriterions(node)
|
||||
}
|
||||
|
||||
var possibilitiesToCompose [][][]criterion
|
||||
for _, criteria := range node.Criterias {
|
||||
possibilitiesToCompose = append(possibilitiesToCompose, getPossibilities(*criteria))
|
||||
}
|
||||
if len(node.Criterions) > 0 {
|
||||
possibilitiesToCompose = append(possibilitiesToCompose, getCriterions(node))
|
||||
}
|
||||
|
||||
var possibilities [][]criterion
|
||||
if node.Operator == "AND" {
|
||||
for _, possibility := range possibilitiesToCompose[0] {
|
||||
possibilities = append(possibilities, possibility)
|
||||
}
|
||||
|
||||
for _, possibilityGroup := range possibilitiesToCompose[1:] {
|
||||
var newPossibilities [][]criterion
|
||||
|
||||
for _, possibility := range possibilities {
|
||||
for _, possibilityInGroup := range possibilityGroup {
|
||||
var p []criterion
|
||||
p = append(p, possibility...)
|
||||
p = append(p, possibilityInGroup...)
|
||||
newPossibilities = append(newPossibilities, p)
|
||||
}
|
||||
}
|
||||
|
||||
possibilities = newPossibilities
|
||||
}
|
||||
} else if node.Operator == "OR" {
|
||||
for _, possibilityGroup := range possibilitiesToCompose {
|
||||
for _, possibility := range possibilityGroup {
|
||||
possibilities = append(possibilities, possibility)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return possibilities
|
||||
}
|
||||
|
||||
func toPackages(criteria criteria) []*database.Package {
|
||||
// There are duplicates in Red Hat .xml files.
|
||||
// This map is for deduplication.
|
||||
packagesParameters := make(map[string]*database.Package)
|
||||
|
||||
possibilities := getPossibilities(criteria)
|
||||
for _, criterions := range possibilities {
|
||||
var (
|
||||
pkg database.Package
|
||||
osVersion int
|
||||
err error
|
||||
)
|
||||
|
||||
// Attempt to parse package data from trees of criterions.
|
||||
for _, c := range criterions {
|
||||
if strings.Contains(c.Comment, " is installed") {
|
||||
const prefixLen = len("Red Hat Enterprise Linux ")
|
||||
osVersion, err = strconv.Atoi(strings.TrimSpace(c.Comment[prefixLen : prefixLen+strings.Index(c.Comment[prefixLen:], " ")]))
|
||||
if err != nil {
|
||||
log.Warningf("could not parse Red Hat release version from: '%s'.", c.Comment)
|
||||
}
|
||||
} else if strings.Contains(c.Comment, " is earlier than ") {
|
||||
const prefixLen = len(" is earlier than ")
|
||||
pkg.Name = strings.TrimSpace(c.Comment[:strings.Index(c.Comment, " is earlier than ")])
|
||||
pkg.Version, err = types.NewVersion(c.Comment[strings.Index(c.Comment, " is earlier than ")+prefixLen:])
|
||||
if err != nil {
|
||||
log.Warningf("could not parse package version '%s': %s. skipping", c.Comment[strings.Index(c.Comment, " is earlier than ")+prefixLen:], err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if osVersion > firstConsideredRHEL {
|
||||
pkg.OS = "centos" + ":" + strconv.Itoa(osVersion)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
|
||||
if pkg.OS != "" && pkg.Name != "" && pkg.Version.String() != "" {
|
||||
packagesParameters[pkg.Key()] = &pkg
|
||||
} else {
|
||||
log.Warningf("could not determine a valid package from criterions: %v", criterions)
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the map to slice.
|
||||
var packagesParametersArray []*database.Package
|
||||
for _, p := range packagesParameters {
|
||||
packagesParametersArray = append(packagesParametersArray, p)
|
||||
}
|
||||
|
||||
return packagesParametersArray
|
||||
}
|
||||
|
||||
func description(def definition) (desc string) {
|
||||
	// It is much faster to proceed like this than to use a Replacer.
|
||||
desc = strings.Replace(def.Description, "\n\n\n", " ", -1)
|
||||
desc = strings.Replace(desc, "\n\n", " ", -1)
|
||||
desc = strings.Replace(desc, "\n", " ", -1)
|
||||
return
|
||||
}
|
||||
|
||||
func name(def definition) string {
|
||||
return strings.TrimSpace(def.Title[:strings.Index(def.Title, ": ")])
|
||||
}
|
||||
|
||||
func link(def definition) (link string) {
|
||||
for _, reference := range def.References {
|
||||
if reference.Source == "RHSA" {
|
||||
link = reference.URI
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func priority(def definition) types.Priority {
|
||||
// Parse the priority.
|
||||
priority := strings.TrimSpace(def.Title[strings.LastIndex(def.Title, "(")+1 : len(def.Title)-1])
|
||||
|
||||
// Normalize the priority.
|
||||
switch priority {
|
||||
case "Low":
|
||||
return types.Low
|
||||
case "Moderate":
|
||||
return types.Medium
|
||||
case "Important":
|
||||
return types.High
|
||||
case "Critical":
|
||||
return types.Critical
|
||||
default:
|
||||
		log.Warningf("could not determine vulnerability priority from: %s.", priority)
|
||||
return types.Unknown
|
||||
}
|
||||
}
|
82
updater/fetchers/rhel_test.go
Normal file
82
updater/fetchers/rhel_test.go
Normal file
@ -0,0 +1,82 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRHELParser(t *testing.T) {
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
path := path.Join(path.Dir(filename))
|
||||
|
||||
// Test parsing testdata/fetcher_rhel_test.1.xml
|
||||
testFile, _ := os.Open(path + "/testdata/fetcher_rhel_test.1.xml")
|
||||
vulnerabilities, err := parseRHSA(testFile)
|
||||
if assert.Nil(t, err) && assert.Len(t, vulnerabilities, 1) {
|
||||
assert.Equal(t, "RHSA-2015:1193", vulnerabilities[0].ID)
|
||||
assert.Equal(t, "https://rhn.redhat.com/errata/RHSA-2015-1193.html", vulnerabilities[0].Link)
|
||||
assert.Equal(t, types.Medium, vulnerabilities[0].Priority)
|
||||
assert.Equal(t, `Xerces-C is a validating XML parser written in a portable subset of C++. A flaw was found in the way the Xerces-C XML parser processed certain XML documents. A remote attacker could provide specially crafted XML input that, when parsed by an application using Xerces-C, would cause that application to crash.`, vulnerabilities[0].Description)
|
||||
|
||||
if assert.Len(t, vulnerabilities[0].FixedIn, 3) {
|
||||
assert.Contains(t, vulnerabilities[0].FixedIn, &database.Package{
|
||||
OS: "centos:7",
|
||||
Name: "xerces-c",
|
||||
Version: types.NewVersionUnsafe("3.1.1-7.el7_1"),
|
||||
})
|
||||
assert.Contains(t, vulnerabilities[0].FixedIn, &database.Package{
|
||||
OS: "centos:7",
|
||||
Name: "xerces-c-devel",
|
||||
Version: types.NewVersionUnsafe("3.1.1-7.el7_1"),
|
||||
})
|
||||
assert.Contains(t, vulnerabilities[0].FixedIn, &database.Package{
|
||||
OS: "centos:7",
|
||||
Name: "xerces-c-doc",
|
||||
Version: types.NewVersionUnsafe("3.1.1-7.el7_1"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test parsing testdata/fetcher_rhel_test.2.xml
|
||||
testFile, _ = os.Open(path + "/testdata/fetcher_rhel_test.2.xml")
|
||||
vulnerabilities, err = parseRHSA(testFile)
|
||||
if assert.Nil(t, err) && assert.Len(t, vulnerabilities, 1) {
|
||||
assert.Equal(t, "RHSA-2015:1207", vulnerabilities[0].ID)
|
||||
assert.Equal(t, "https://rhn.redhat.com/errata/RHSA-2015-1207.html", vulnerabilities[0].Link)
|
||||
assert.Equal(t, types.Critical, vulnerabilities[0].Priority)
|
||||
assert.Equal(t, `Mozilla Firefox is an open source web browser. XULRunner provides the XUL Runtime environment for Mozilla Firefox. Several flaws were found in the processing of malformed web content. A web page containing malicious content could cause Firefox to crash or, potentially, execute arbitrary code with the privileges of the user running Firefox.`, vulnerabilities[0].Description)
|
||||
|
||||
if assert.Len(t, vulnerabilities[0].FixedIn, 2) {
|
||||
assert.Contains(t, vulnerabilities[0].FixedIn, &database.Package{
|
||||
OS: "centos:6",
|
||||
Name: "firefox",
|
||||
Version: types.NewVersionUnsafe("38.1.0-1.el6_6"),
|
||||
})
|
||||
assert.Contains(t, vulnerabilities[0].FixedIn, &database.Package{
|
||||
OS: "centos:7",
|
||||
Name: "firefox",
|
||||
Version: types.NewVersionUnsafe("38.1.0-1.el7_1"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
99
updater/fetchers/testdata/fetcher_debian_test.json
vendored
Normal file
99
updater/fetchers/testdata/fetcher_debian_test.json
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
{
|
||||
"aptdaemon": {
|
||||
"CVE-2015-1323": {
|
||||
"_comment": "Two standard cases with a non-fixed package and a fixed one.",
|
||||
"description": "This vulnerability is not very dangerous.",
|
||||
"debianbug": 789162,
|
||||
"releases": {
|
||||
"wheezy": {
|
||||
"repositories": {
|
||||
"jessie": "bad version"
|
||||
},
|
||||
"status": "resolved",
|
||||
"urgency": "low**"
|
||||
},
|
||||
"jessie": {
|
||||
"repositories": {
|
||||
"jessie": "1.1.1-4"
|
||||
},
|
||||
"status": "open",
|
||||
"urgency": "low**"
|
||||
},
|
||||
"sid": {
|
||||
"fixed_version": "1.1.1+bzr982-1",
|
||||
"repositories": {
|
||||
"sid": "1.1.1+bzr982-1"
|
||||
},
|
||||
"status": "resolved",
|
||||
"urgency": "not yet assigned"
|
||||
}
|
||||
}
|
||||
},
|
||||
"CVE-2003-0779": {
|
||||
"_comment": "Just another CVE affecting the same package.",
|
||||
"description": "But this one is very dangerous.",
|
||||
"releases": {
|
||||
"jessie": {
|
||||
"fixed_version": "0.7.0",
|
||||
"repositories": {
|
||||
"jessie": "1:11.13.1~dfsg-2"
|
||||
},
|
||||
"status": "resolved",
|
||||
"urgency": "high**"
|
||||
},
|
||||
"sid": {
|
||||
"fixed_version": "0.7.0",
|
||||
"repositories": {
|
||||
"sid": "1:13.1.0~dfsg-1.1"
|
||||
},
|
||||
"status": "resolved",
|
||||
"urgency": "high**"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"asterisk": {
|
||||
"CVE-2013-2685": {
|
||||
"description": "Un-affected packages",
|
||||
"releases": {
|
||||
"jessie": {
|
||||
"fixed_version": "0",
|
||||
"repositories": {
|
||||
"jessie": "1:11.13.1~dfsg-2"
|
||||
},
|
||||
"status": "resolved",
|
||||
"urgency": "unimportant"
|
||||
},
|
||||
"wheezy": {
|
||||
"repositories": {
|
||||
"sid": "1:13.1.0~dfsg-1.1"
|
||||
},
|
||||
"status": "undetermined",
|
||||
"urgency": "unimportant"
|
||||
},
|
||||
"sid": {
|
||||
"fixed_version": "0",
|
||||
"repositories": {
|
||||
"sid": "1:13.1.0~dfsg-1.1"
|
||||
},
|
||||
"status": "resolved",
|
||||
"urgency": "unimportant"
|
||||
}
|
||||
}
|
||||
},
|
||||
"CVE-2003-0779": {
|
||||
"_comment": "A CVE which affect aptdaemon, and which also affects asterisk",
|
||||
"description": "But this one is very dangerous.",
|
||||
"releases": {
|
||||
"jessie": {
|
||||
"fixed_version": "0.5.56",
|
||||
"repositories": {
|
||||
"jessie": "1:1.17.2"
|
||||
},
|
||||
"status": "resolved",
|
||||
"urgency": "high"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
154
updater/fetchers/testdata/fetcher_rhel_test.1.xml
vendored
Normal file
154
updater/fetchers/testdata/fetcher_rhel_test.1.xml
vendored
Normal file
@ -0,0 +1,154 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<oval_definitions xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5" xmlns:oval="http://oval.mitre.org/XMLSchema/oval-common-5" xmlns:oval-def="http://oval.mitre.org/XMLSchema/oval-definitions-5" xmlns:unix-def="http://oval.mitre.org/XMLSchema/oval-definitions-5#unix" xmlns:red-def="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://oval.mitre.org/XMLSchema/oval-common-5 oval-common-schema.xsd http://oval.mitre.org/XMLSchema/oval-definitions-5 oval-definitions-schema.xsd http://oval.mitre.org/XMLSchema/oval-definitions-5#unix unix-definitions-schema.xsd http://oval.mitre.org/XMLSchema/oval-definitions-5#linux linux-definitions-schema.xsd">
|
||||
<generator>
|
||||
<oval:product_name>Red Hat Errata System</oval:product_name>
|
||||
<oval:schema_version>5.10.1</oval:schema_version>
|
||||
<oval:timestamp>2015-06-29T12:11:23</oval:timestamp>
|
||||
</generator>
|
||||
|
||||
<definitions>
|
||||
<definition id="oval:com.redhat.rhsa:def:20151193" version="601" class="patch">
|
||||
<metadata>
|
||||
<title>RHSA-2015:1193: xerces-c security update (Moderate)</title>
|
||||
<affected family="unix">
|
||||
<platform>Red Hat Enterprise Linux 7</platform>
|
||||
</affected>
|
||||
<reference source="RHSA" ref_id="RHSA-2015:1193-00" ref_url="https://rhn.redhat.com/errata/RHSA-2015-1193.html"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-0252" ref_url="https://access.redhat.com/security/cve/CVE-2015-0252"/>
|
||||
<description>Xerces-C is a validating XML parser written in a portable subset of C++.
|
||||
|
||||
A flaw was found in the way the Xerces-C XML parser processed certain XML
|
||||
documents. A remote attacker could provide specially crafted XML input
|
||||
that, when parsed by an application using Xerces-C, would cause that
|
||||
application to crash.</description>
|
||||
|
||||
<!-- ~~~~~~~~~~~~~~~~~~~~ advisory details ~~~~~~~~~~~~~~~~~~~ -->
|
||||
|
||||
<advisory from="secalert@redhat.com">
|
||||
<severity>Moderate</severity>
|
||||
<rights>Copyright 2015 Red Hat, Inc.</rights>
|
||||
<issued date="2015-06-29"/>
|
||||
<updated date="2015-06-29"/>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-0252">CVE-2015-0252</cve>
|
||||
<bugzilla href="https://bugzilla.redhat.com/1199103" id="1199103">CVE-2015-0252 xerces-c: crashes on malformed input</bugzilla>
|
||||
<affected_cpe_list>
|
||||
<cpe>cpe:/o:redhat:enterprise_linux:7</cpe>
|
||||
</affected_cpe_list>
|
||||
</advisory>
|
||||
</metadata>
|
||||
<criteria operator="AND">
|
||||
|
||||
<criteria operator="OR">
|
||||
<criterion test_ref="oval:com.redhat.rhsa:tst:20151193001" comment="Red Hat Enterprise Linux 7 Client is installed" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151193002" comment="Red Hat Enterprise Linux 7 Server is installed" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151193003" comment="Red Hat Enterprise Linux 7 Workstation is installed" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151193004" comment="Red Hat Enterprise Linux 7 ComputeNode is installed" />
|
||||
|
||||
</criteria>
|
||||
<criteria operator="OR">
|
||||
|
||||
<criteria operator="AND">
|
||||
<criterion test_ref="oval:com.redhat.rhsa:tst:20151193005" comment="xerces-c is earlier than 0:3.1.1-7.el7_1" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151193006" comment="xerces-c is signed with Red Hat redhatrelease2 key" />
|
||||
|
||||
</criteria>
|
||||
<criteria operator="AND">
|
||||
<criterion test_ref="oval:com.redhat.rhsa:tst:20151193007" comment="xerces-c-devel is earlier than 0:3.1.1-7.el7_1" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151193008" comment="xerces-c-devel is signed with Red Hat redhatrelease2 key" />
|
||||
|
||||
</criteria>
|
||||
<criteria operator="AND">
|
||||
<criterion test_ref="oval:com.redhat.rhsa:tst:20151193009" comment="xerces-c-doc is earlier than 0:3.1.1-7.el7_1" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151193010" comment="xerces-c-doc is signed with Red Hat redhatrelease2 key" />
|
||||
|
||||
</criteria>
|
||||
<criteria operator="AND">
|
||||
<criterion test_ref="oval:com.redhat.rhsa:tst:20151193009" comment="xerces-c-x is earlier than invalid version" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151193010" comment="xerces-c-doc is signed with Red Hat redhatrelease2 key" />
|
||||
|
||||
</criteria>
|
||||
|
||||
</criteria>
|
||||
|
||||
</criteria>
|
||||
|
||||
</definition>
|
||||
</definitions>
|
||||
<tests>
|
||||
<!-- ~~~~~~~~~~~~~~~~~~~~~ rpminfo tests ~~~~~~~~~~~~~~~~~~~~~ -->
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151193001" version="601" comment="Red Hat Enterprise Linux 7 Client is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151193001" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151193002" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151193002" version="601" comment="Red Hat Enterprise Linux 7 Server is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151193002" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151193002" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151193003" version="601" comment="Red Hat Enterprise Linux 7 Workstation is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151193003" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151193002" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151193004" version="601" comment="Red Hat Enterprise Linux 7 ComputeNode is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151193004" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151193002" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151193005" version="601" comment="xerces-c is earlier than 0:3.1.1-7.el7_1" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151193005" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151193003" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151193006" version="601" comment="xerces-c is signed with Red Hat redhatrelease2 key" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151193005" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151193001" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151193007" version="601" comment="xerces-c-devel is earlier than 0:3.1.1-7.el7_1" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151193006" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151193003" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151193008" version="601" comment="xerces-c-devel is signed with Red Hat redhatrelease2 key" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151193006" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151193001" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151193009" version="601" comment="xerces-c-doc is earlier than 0:3.1.1-7.el7_1" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151193007" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151193003" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151193010" version="601" comment="xerces-c-doc is signed with Red Hat redhatrelease2 key" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151193007" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151193001" />
|
||||
</rpminfo_test>
|
||||
|
||||
</tests>
|
||||
|
||||
<objects>
|
||||
<!-- ~~~~~~~~~~~~~~~~~~~~ rpminfo objects ~~~~~~~~~~~~~~~~~~~~ -->
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151193001" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>redhat-release-client</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151193004" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>redhat-release-computenode</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151193002" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>redhat-release-server</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151193003" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>redhat-release-workstation</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151193005" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>xerces-c</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151193006" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>xerces-c-devel</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151193007" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>xerces-c-doc</name>
|
||||
</rpminfo_object>
|
||||
|
||||
</objects>
|
||||
<states>
|
||||
<!-- ~~~~~~~~~~~~~~~~~~~~ rpminfo states ~~~~~~~~~~~~~~~~~~~~~ -->
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151193001" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<signature_keyid operation="equals">199e2f91fd431d51</signature_keyid>
|
||||
</rpminfo_state>
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151193002" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<version operation="pattern match">^7[^\d]</version>
|
||||
</rpminfo_state>
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151193003" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<evr datatype="evr_string" operation="less than">0:3.1.1-7.el7_1</evr>
|
||||
</rpminfo_state>
|
||||
|
||||
</states>
|
||||
</oval_definitions>
|
224
updater/fetchers/testdata/fetcher_rhel_test.2.xml
vendored
Normal file
@ -0,0 +1,224 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<oval_definitions xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5" xmlns:oval="http://oval.mitre.org/XMLSchema/oval-common-5" xmlns:oval-def="http://oval.mitre.org/XMLSchema/oval-definitions-5" xmlns:unix-def="http://oval.mitre.org/XMLSchema/oval-definitions-5#unix" xmlns:red-def="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://oval.mitre.org/XMLSchema/oval-common-5 oval-common-schema.xsd http://oval.mitre.org/XMLSchema/oval-definitions-5 oval-definitions-schema.xsd http://oval.mitre.org/XMLSchema/oval-definitions-5#unix unix-definitions-schema.xsd http://oval.mitre.org/XMLSchema/oval-definitions-5#linux linux-definitions-schema.xsd">
|
||||
<generator>
|
||||
<oval:product_name>Red Hat Errata System</oval:product_name>
|
||||
<oval:schema_version>5.10.1</oval:schema_version>
|
||||
<oval:timestamp>2015-07-03T01:12:29</oval:timestamp>
|
||||
</generator>
|
||||
|
||||
<definitions>
|
||||
<definition id="oval:com.redhat.rhsa:def:20151207" version="601" class="patch">
|
||||
<metadata>
|
||||
<title>RHSA-2015:1207: firefox security update (Critical)</title>
|
||||
<affected family="unix">
|
||||
<platform>Red Hat Enterprise Linux 7</platform>
|
||||
<platform>Red Hat Enterprise Linux 6</platform>
|
||||
<platform>Red Hat Enterprise Linux 5</platform>
|
||||
</affected>
|
||||
<reference source="RHSA" ref_id="RHSA-2015:1207-00" ref_url="https://rhn.redhat.com/errata/RHSA-2015-1207.html"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2722" ref_url="https://access.redhat.com/security/cve/CVE-2015-2722"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2724" ref_url="https://access.redhat.com/security/cve/CVE-2015-2724"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2725" ref_url="https://access.redhat.com/security/cve/CVE-2015-2725"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2727" ref_url="https://access.redhat.com/security/cve/CVE-2015-2727"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2728" ref_url="https://access.redhat.com/security/cve/CVE-2015-2728"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2729" ref_url="https://access.redhat.com/security/cve/CVE-2015-2729"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2731" ref_url="https://access.redhat.com/security/cve/CVE-2015-2731"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2733" ref_url="https://access.redhat.com/security/cve/CVE-2015-2733"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2734" ref_url="https://access.redhat.com/security/cve/CVE-2015-2734"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2735" ref_url="https://access.redhat.com/security/cve/CVE-2015-2735"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2736" ref_url="https://access.redhat.com/security/cve/CVE-2015-2736"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2737" ref_url="https://access.redhat.com/security/cve/CVE-2015-2737"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2738" ref_url="https://access.redhat.com/security/cve/CVE-2015-2738"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2739" ref_url="https://access.redhat.com/security/cve/CVE-2015-2739"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2740" ref_url="https://access.redhat.com/security/cve/CVE-2015-2740"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2741" ref_url="https://access.redhat.com/security/cve/CVE-2015-2741"/>
|
||||
<reference source="CVE" ref_id="CVE-2015-2743" ref_url="https://access.redhat.com/security/cve/CVE-2015-2743"/>
|
||||
<description>Mozilla Firefox is an open source web browser. XULRunner provides the XUL
|
||||
Runtime environment for Mozilla Firefox.
|
||||
|
||||
|
||||
Several flaws were found in the processing of malformed web content. A web
|
||||
page containing malicious content could cause Firefox to crash or,
|
||||
potentially, execute arbitrary code with the privileges of the user running
|
||||
Firefox.</description>
|
||||
|
||||
<!-- ~~~~~~~~~~~~~~~~~~~~ advisory details ~~~~~~~~~~~~~~~~~~~ -->
|
||||
|
||||
<advisory from="secalert@redhat.com">
|
||||
<severity>Critical</severity>
|
||||
<rights>Copyright 2015 Red Hat, Inc.</rights>
|
||||
<issued date="2015-07-02"/>
|
||||
<updated date="2015-07-02"/>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2722">CVE-2015-2722</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2724">CVE-2015-2724</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2725">CVE-2015-2725</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2727">CVE-2015-2727</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2728">CVE-2015-2728</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2729">CVE-2015-2729</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2731">CVE-2015-2731</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2733">CVE-2015-2733</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2734">CVE-2015-2734</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2735">CVE-2015-2735</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2736">CVE-2015-2736</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2737">CVE-2015-2737</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2738">CVE-2015-2738</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2739">CVE-2015-2739</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2740">CVE-2015-2740</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2741">CVE-2015-2741</cve>
|
||||
<cve href="https://access.redhat.com/security/cve/CVE-2015-2743">CVE-2015-2743</cve>
|
||||
<bugzilla href="https://bugzilla.redhat.com/1236947" id="1236947">CVE-2015-2724 CVE-2015-2725 Mozilla: Miscellaneous memory safety hazards (rv:31.8 / rv:38.1) (MFSA 2015-59)</bugzilla>
|
||||
<bugzilla href="https://bugzilla.redhat.com/1236950" id="1236950">CVE-2015-2727 Mozilla: Local files or privileged URLs in pages can be opened into new tabs (MFSA 2015-60)</bugzilla>
|
||||
<bugzilla href="https://bugzilla.redhat.com/1236951" id="1236951">CVE-2015-2728 Mozilla: Type confusion in Indexed Database Manager (MFSA 2015-61)</bugzilla>
|
||||
<bugzilla href="https://bugzilla.redhat.com/1236952" id="1236952">CVE-2015-2729 Mozilla: Out-of-bound read while computing an oscillator rendering range in Web Audio (MFSA 2015-62)</bugzilla>
|
||||
<bugzilla href="https://bugzilla.redhat.com/1236953" id="1236953">CVE-2015-2731 Mozilla: Use-after-free in Content Policy due to microtask execution error (MFSA 2015-63)</bugzilla>
|
||||
<bugzilla href="https://bugzilla.redhat.com/1236955" id="1236955">CVE-2015-2722 CVE-2015-2733 Mozilla: Use-after-free in workers while using XMLHttpRequest (MFSA 2015-65)</bugzilla>
|
||||
<bugzilla href="https://bugzilla.redhat.com/1236956" id="1236956">CVE-2015-2734 CVE-2015-2735 CVE-2015-2736 CVE-2015-2737 CVE-2015-2738 CVE-2015-2739 CVE-2015-2740 Mozilla: Vulnerabilities found through code inspection (MFSA 2015-66)</bugzilla>
|
||||
<bugzilla href="https://bugzilla.redhat.com/1236963" id="1236963">CVE-2015-2741 Mozilla: Key pinning is ignored when overridable errors are encountered (MFSA 2015-67)</bugzilla>
|
||||
<bugzilla href="https://bugzilla.redhat.com/1236964" id="1236964">CVE-2015-2743 Mozilla: Privilege escalation in PDF.js (MFSA 2015-69)</bugzilla>
|
||||
<affected_cpe_list>
|
||||
<cpe>cpe:/o:redhat:enterprise_linux:5</cpe>
|
||||
<cpe>cpe:/o:redhat:enterprise_linux:6</cpe>
|
||||
<cpe>cpe:/o:redhat:enterprise_linux:7</cpe>
|
||||
</affected_cpe_list>
|
||||
</advisory>
|
||||
</metadata>
|
||||
<criteria operator="OR">
|
||||
|
||||
<criteria operator="AND">
|
||||
<criterion test_ref="oval:com.redhat.rhsa:tst:20151207001" comment="Red Hat Enterprise Linux 5 is installed" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151207002" comment="firefox is earlier than 0:38.1.0-1.el5_11" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151207003" comment="firefox is signed with Red Hat redhatrelease key" />
|
||||
|
||||
</criteria>
|
||||
<criteria operator="AND">
|
||||
<criterion test_ref="oval:com.redhat.rhsa:tst:20151207008" comment="firefox is earlier than 0:38.1.0-1.el6_6" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151207009" comment="firefox is signed with Red Hat redhatrelease2 key" />
|
||||
<criteria operator="OR">
|
||||
<criterion test_ref="oval:com.redhat.rhsa:tst:20151207004" comment="Red Hat Enterprise Linux 6 Client is installed" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151207005" comment="Red Hat Enterprise Linux 6 Server is installed" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151207006" comment="Red Hat Enterprise Linux 6 Workstation is installed" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151207007" comment="Red Hat Enterprise Linux 6 ComputeNode is installed" />
|
||||
|
||||
</criteria>
|
||||
|
||||
</criteria>
|
||||
<criteria operator="AND">
|
||||
<criterion test_ref="oval:com.redhat.rhsa:tst:20151207014" comment="firefox is earlier than 0:38.1.0-1.el7_1" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151207009" comment="firefox is signed with Red Hat redhatrelease2 key" />
|
||||
<criteria operator="OR">
|
||||
<criterion test_ref="oval:com.redhat.rhsa:tst:20151207010" comment="Red Hat Enterprise Linux 7 Client is installed" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151207011" comment="Red Hat Enterprise Linux 7 Server is installed" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151207012" comment="Red Hat Enterprise Linux 7 Workstation is installed" /><criterion test_ref="oval:com.redhat.rhsa:tst:20151207013" comment="Red Hat Enterprise Linux 7 ComputeNode is installed" />
|
||||
|
||||
</criteria>
|
||||
|
||||
</criteria>
|
||||
|
||||
</criteria>
|
||||
|
||||
</definition>
|
||||
</definitions>
|
||||
<tests>
|
||||
<!-- ~~~~~~~~~~~~~~~~~~~~~ rpminfo tests ~~~~~~~~~~~~~~~~~~~~~ -->
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207001" version="601" comment="Red Hat Enterprise Linux 5 is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207001" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207003" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207002" version="601" comment="firefox is earlier than 0:38.1.0-1.el5_11" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207002" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207004" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207003" version="601" comment="firefox is signed with Red Hat redhatrelease key" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207002" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207002" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207004" version="601" comment="Red Hat Enterprise Linux 6 Client is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207003" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207005" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207005" version="601" comment="Red Hat Enterprise Linux 6 Server is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207004" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207005" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207006" version="601" comment="Red Hat Enterprise Linux 6 Workstation is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207005" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207005" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207007" version="601" comment="Red Hat Enterprise Linux 6 ComputeNode is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207006" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207005" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207008" version="601" comment="firefox is earlier than 0:38.1.0-1.el6_6" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207002" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207006" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207009" version="601" comment="firefox is signed with Red Hat redhatrelease2 key" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207002" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207001" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207010" version="601" comment="Red Hat Enterprise Linux 7 Client is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207003" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207007" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207011" version="601" comment="Red Hat Enterprise Linux 7 Server is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207004" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207007" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207012" version="601" comment="Red Hat Enterprise Linux 7 Workstation is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207005" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207007" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207013" version="601" comment="Red Hat Enterprise Linux 7 ComputeNode is installed" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207006" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207007" />
|
||||
</rpminfo_test>
|
||||
<rpminfo_test id="oval:com.redhat.rhsa:tst:20151207014" version="601" comment="firefox is earlier than 0:38.1.0-1.el7_1" check="at least one" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<object object_ref="oval:com.redhat.rhsa:obj:20151207002" />
|
||||
<state state_ref="oval:com.redhat.rhsa:ste:20151207008" />
|
||||
</rpminfo_test>
|
||||
|
||||
</tests>
|
||||
|
||||
<objects>
|
||||
<!-- ~~~~~~~~~~~~~~~~~~~~ rpminfo objects ~~~~~~~~~~~~~~~~~~~~ -->
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151207002" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>firefox</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151207001" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>redhat-release</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151207003" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>redhat-release-client</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151207006" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>redhat-release-computenode</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151207004" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>redhat-release-server</name>
|
||||
</rpminfo_object>
|
||||
<rpminfo_object id="oval:com.redhat.rhsa:obj:20151207005" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<name>redhat-release-workstation</name>
|
||||
</rpminfo_object>
|
||||
|
||||
</objects>
|
||||
<states>
|
||||
<!-- ~~~~~~~~~~~~~~~~~~~~ rpminfo states ~~~~~~~~~~~~~~~~~~~~~ -->
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151207001" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<signature_keyid operation="equals">199e2f91fd431d51</signature_keyid>
|
||||
</rpminfo_state>
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151207002" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<signature_keyid operation="equals">5326810137017186</signature_keyid>
|
||||
</rpminfo_state>
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151207003" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<version operation="pattern match">^5[^\d]</version>
|
||||
</rpminfo_state>
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151207004" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<evr datatype="evr_string" operation="less than">0:38.1.0-1.el5_11</evr>
|
||||
</rpminfo_state>
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151207005" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<version operation="pattern match">^6[^\d]</version>
|
||||
</rpminfo_state>
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151207006" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<evr datatype="evr_string" operation="less than">0:38.1.0-1.el6_6</evr>
|
||||
</rpminfo_state>
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151207007" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<version operation="pattern match">^7[^\d]</version>
|
||||
</rpminfo_state>
|
||||
<rpminfo_state id="oval:com.redhat.rhsa:ste:20151207008" version="601" xmlns="http://oval.mitre.org/XMLSchema/oval-definitions-5#linux">
|
||||
<evr datatype="evr_string" operation="less than">0:38.1.0-1.el7_1</evr>
|
||||
</rpminfo_state>
|
||||
|
||||
</states>
|
||||
</oval_definitions>
|
35
updater/fetchers/testdata/fetcher_ubuntu_test.txt
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
Candidate: CVE-2015-4471
|
||||
PublicDate: 2015-06-11
|
||||
References:
|
||||
http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-4471
|
||||
http://www.openwall.com/lists/oss-security/2015/02/03/11
|
||||
https://github.com/kyz/libmspack/commit/18b6a2cc0b87536015bedd4f7763e6b02d5aa4f3
|
||||
https://bugs.debian.org/775499
|
||||
http://openwall.com/lists/oss-security/2015/02/03/11
|
||||
Description:
|
||||
Off-by-one error in the lzxd_decompress function in lzxd.c in libmspack
|
||||
before 0.5 allows remote attackers to cause a denial of service (buffer
|
||||
under-read and application crash) via a crafted CAB archive.
|
||||
Ubuntu-Description:
|
||||
Notes:
|
||||
Bugs:
|
||||
http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=775499
|
||||
Priority: medium (wrong-syntax)
|
||||
Discovered-by:
|
||||
Assigned-to:
|
||||
|
||||
Patches_libmspack:
|
||||
upstream_libmspack: not-affected (0.5-1)
|
||||
precise_libmspack: DNE
|
||||
trusty_libmspack: needed
|
||||
utopic_libmspack: ignored (reached end-of-life)
|
||||
vivid_libmspack : released ( 0.4-3 )
|
||||
devel_libmspack: not-affected
|
||||
unknown_libmspack: needed
|
||||
|
||||
Patches_libmspack-anotherpkg: wrong-syntax
|
||||
wily_libmspack-anotherpkg: released ((0.1)
|
||||
utopic_libmspack-anotherpkg: not-affected
|
||||
trusty_libmspack-anotherpkg: needs-triage
|
||||
precise_libmspack-anotherpkg: released
|
||||
saucy_libmspack-anotherpkg: needed
|
414
updater/fetchers/ubuntu.go
Normal file
@ -0,0 +1,414 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/updater"
|
||||
"github.com/coreos/clair/utils"
|
||||
cerrors "github.com/coreos/clair/utils/errors"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
)
|
||||
|
||||
const (
|
||||
ubuntuTrackerURI = "https://launchpad.net/ubuntu-cve-tracker"
|
||||
ubuntuTracker = "lp:ubuntu-cve-tracker"
|
||||
ubuntuUpdaterFlag = "ubuntuUpdater"
|
||||
)
|
||||
|
||||
var (
|
||||
repositoryLocalPath string
|
||||
|
||||
ubuntuIgnoredReleases = map[string]struct{}{
|
||||
"upstream": struct{}{},
|
||||
"devel": struct{}{},
|
||||
|
||||
"dapper": struct{}{},
|
||||
"edgy": struct{}{},
|
||||
"feisty": struct{}{},
|
||||
"gutsy": struct{}{},
|
||||
"hardy": struct{}{},
|
||||
"intrepid": struct{}{},
|
||||
"jaunty": struct{}{},
|
||||
"karmic": struct{}{},
|
||||
"lucid": struct{}{},
|
||||
"maverick": struct{}{},
|
||||
"natty": struct{}{},
|
||||
"oneiric": struct{}{},
|
||||
"saucy": struct{}{},
|
||||
|
||||
// Syntax error: "Patches" shows up as a pseudo-release when a "Patches_<package>:" header line matches the affects regexp.
|
||||
"Patches": struct{}{},
|
||||
// Product
|
||||
"product": struct{}{},
|
||||
}
|
||||
|
||||
branchedRegexp = regexp.MustCompile(`Branched (\d+) revisions.`)
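// Added note (illustrative, not in the original source): branchedRegexp and revisionRegexp
// parse the bzr output handled below, e.g. "Branched 33507 revisions." printed by `bzr branch`
// in createRepository and "Now on revision 33620." printed by `bzr pull` in updateRepository;
// the revision numbers shown here are made up.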
|
||||
revisionRegexp = regexp.MustCompile(`Now on revision (\d+).`)
|
||||
affectsCaptureRegexp = regexp.MustCompile(`(?P<release>.*)_(?P<package>.*): (?P<status>[^\s]*)( \(+(?P<note>[^()]*)\)+)?`)
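// Added note (illustrative): affectsCaptureRegexp matches tracker status lines such as
// "trusty_libmspack: needed" or "vivid_libmspack : released ( 0.4-3 )" (see
// testdata/fetcher_ubuntu_test.txt), capturing the release, package, status and the
// optional note, which for "released" entries holds the fixed version.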
|
||||
affectsCaptureRegexpNames = affectsCaptureRegexp.SubexpNames()
|
||||
)
|
||||
|
||||
// UbuntuFetcher implements updater.Fetcher and gets vulnerability updates from
|
||||
// the Ubuntu CVE Tracker.
|
||||
type UbuntuFetcher struct{}
|
||||
|
||||
func init() {
|
||||
updater.RegisterFetcher("Ubuntu", &UbuntuFetcher{})
|
||||
}
|
||||
|
||||
// FetchUpdate gets vulnerability updates from the Ubuntu CVE Tracker.
|
||||
func (fetcher *UbuntuFetcher) FetchUpdate() (resp updater.FetcherResponse, err error) {
|
||||
log.Info("fetching Ubuntu vulneratibilities")
|
||||
|
||||
// Check whether the repository already exists locally; create it if not, otherwise update it.
|
||||
var revisionNumber int
|
||||
if _, pathExists := os.Stat(repositoryLocalPath); repositoryLocalPath == "" || os.IsNotExist(pathExists) {
|
||||
// Create a temporary folder and download the repository.
|
||||
p, err := ioutil.TempDir(os.TempDir(), "ubuntu-cve-tracker")
|
||||
if err != nil {
|
||||
return resp, ErrFilesystem
|
||||
}
|
||||
|
||||
// bzr wants an empty target directory.
|
||||
repositoryLocalPath = p + "/repository"
|
||||
|
||||
// Create the new repository.
|
||||
revisionNumber, err = createRepository(repositoryLocalPath)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
} else {
|
||||
// Update the repository that's already on disk.
|
||||
revisionNumber, err = updateRepository(repositoryLocalPath)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
}
|
||||
|
||||
// Get the latest revision number we successfully applied in the database.
|
||||
dbRevisionNumber, err := database.GetFlagValue(ubuntuUpdaterFlag)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Get the list of vulnerabilities that we have to update.
|
||||
modifiedCVE, err := collectModifiedVulnerabilities(revisionNumber, dbRevisionNumber, repositoryLocalPath)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Parse and add the vulnerabilities.
|
||||
for cvePath := range modifiedCVE {
|
||||
file, err := os.Open(repositoryLocalPath + "/" + cvePath)
|
||||
if err != nil {
|
||||
// This can happen when a file is modified and then moved in another
|
||||
// commit.
|
||||
continue
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
v, unknownReleases, err := parseUbuntuCVE(file)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
if len(v.FixedIn) > 0 {
|
||||
resp.Vulnerabilities = append(resp.Vulnerabilities, v)
|
||||
}
|
||||
|
||||
// Log any unknown releases.
|
||||
for k := range unknownReleases {
|
||||
note := fmt.Sprintf("Ubuntu %s is not mapped to any version number (eg. trusty->14.04). Please update me.", k)
|
||||
resp.Notes = append(resp.Notes, note)
|
||||
log.Warning(note)
|
||||
|
||||
// If we encountered an unknown Ubuntu release, we don't want the revision
|
||||
// number to be considered managed, so these files are re-parsed on the next run.
|
||||
dbRevisionNumberInt, _ := strconv.Atoi(dbRevisionNumber)
|
||||
revisionNumber = dbRevisionNumberInt
|
||||
}
|
||||
}
|
||||
|
||||
// Add flag information
|
||||
resp.FlagName = ubuntuUpdaterFlag
|
||||
resp.FlagValue = strconv.Itoa(revisionNumber)
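// Added note: storing the applied bzr revision lets the next run, via
// collectModifiedVulnerabilities, ask bzr only for the log of newer revisions instead of
// re-parsing every CVE file.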
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func collectModifiedVulnerabilities(revision int, dbRevision, repositoryLocalPath string) (map[string]struct{}, error) {
|
||||
modifiedCVE := make(map[string]struct{})
|
||||
|
||||
// Handle a brand new database.
|
||||
if dbRevision == "" {
|
||||
for _, folder := range []string{"active", "retired"} {
|
||||
d, err := os.Open(repositoryLocalPath + "/" + folder)
|
||||
if err != nil {
|
||||
log.Errorf("could not open Ubuntu vulnerabilities repository's folder: %s", err)
|
||||
return nil, ErrFilesystem
|
||||
}
|
||||
defer d.Close()
|
||||
|
||||
// Get the FileInfo of all the files in the directory.
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
log.Errorf("could not read Ubuntu vulnerabilities repository's folder:: %s.", err)
|
||||
return nil, ErrFilesystem
|
||||
}
|
||||
|
||||
// Add the vulnerabilities to the list.
|
||||
for _, name := range names {
|
||||
if strings.HasPrefix(name, "CVE-") {
|
||||
modifiedCVE[folder+"/"+name] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return modifiedCVE, nil
|
||||
}
|
||||
|
||||
// Handle an up-to-date database.
|
||||
dbRevisionInt, _ := strconv.Atoi(dbRevision)
|
||||
if revision == dbRevisionInt {
|
||||
log.Debug("no Ubuntu update")
|
||||
return modifiedCVE, nil
|
||||
}
|
||||
|
||||
// Handle a database that needs upgrading.
|
||||
out, err := utils.Exec(repositoryLocalPath, "bzr", "log", "--verbose", "-r"+strconv.Itoa(dbRevisionInt+1)+"..", "-n0")
|
||||
if err != nil {
|
||||
log.Errorf("could not get Ubuntu vulnerabilities repository logs: %s. output: %s", err, string(out))
|
||||
return nil, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(bytes.NewReader(out))
|
||||
for scanner.Scan() {
|
||||
text := strings.TrimSpace(scanner.Text())
|
||||
if strings.Contains(text, "CVE-") && (strings.HasPrefix(text, "active/") || strings.HasPrefix(text, "retired/")) {
|
||||
if strings.Contains(text, " => ") {
|
||||
text = text[strings.Index(text, " => ")+4:]
|
||||
}
|
||||
modifiedCVE[text] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return modifiedCVE, nil
|
||||
}
|
||||
|
||||
func createRepository(pathToRepo string) (int, error) {
|
||||
// Branch repository
|
||||
out, err := utils.Exec("/tmp/", "bzr", "branch", ubuntuTracker, pathToRepo)
|
||||
if err != nil {
|
||||
log.Errorf("could not branch Ubuntu repository: %s. output: %s", err, string(out))
|
||||
return 0, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
// Get revision number
|
||||
regexpMatches := branchedRegexp.FindStringSubmatch(string(out))
|
||||
if len(regexpMatches) != 2 {
|
||||
log.Error("could not parse bzr branch output to get the revision number")
|
||||
return 0, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
revision, err := strconv.Atoi(regexpMatches[1])
|
||||
if err != nil {
|
||||
log.Error("could not parse bzr branch output to get the revision number")
|
||||
return 0, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
return revision, err
|
||||
}
|
||||
|
||||
func updateRepository(pathToRepo string) (int, error) {
|
||||
// Pull repository
|
||||
out, err := utils.Exec(pathToRepo, "bzr", "pull", "--overwrite")
|
||||
if err != nil {
|
||||
log.Errorf("could not pull Ubuntu repository: %s. output: %s", err, string(out))
|
||||
return 0, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
// Get revision number
|
||||
if strings.Contains(string(out), "No revisions or tags to pull") {
|
||||
out, _ = utils.Exec(pathToRepo, "bzr", "revno")
|
||||
revno, err := strconv.Atoi(string(out[:len(out)-1]))
|
||||
if err != nil {
|
||||
log.Errorf("could not parse Ubuntu repository revision number: %s. output: %s", err, string(out))
|
||||
return 0, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
return revno, nil
|
||||
}
|
||||
|
||||
regexpMatches := revisionRegexp.FindStringSubmatch(string(out))
|
||||
if len(regexpMatches) != 2 {
|
||||
log.Error("could not parse bzr pull output to get the revision number")
|
||||
return 0, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
revno, err := strconv.Atoi(regexpMatches[1])
|
||||
if err != nil {
|
||||
log.Error("could not parse bzr pull output to get the revision number")
|
||||
return 0, cerrors.ErrCouldNotDownload
|
||||
}
|
||||
|
||||
return revno, nil
|
||||
}
|
||||
|
||||
func parseUbuntuCVE(fileContent io.Reader) (vulnerability updater.FetcherVulnerability, unknownReleases map[string]struct{}, err error) {
|
||||
unknownReleases = make(map[string]struct{})
|
||||
readingDescription := false
|
||||
scanner := bufio.NewScanner(fileContent)
|
||||
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
|
||||
// Skip any comments.
|
||||
if strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse the name.
|
||||
if strings.HasPrefix(line, "Candidate:") {
|
||||
vulnerability.ID = strings.TrimSpace(strings.TrimPrefix(line, "Candidate:"))
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse the link.
|
||||
if vulnerability.Link == "" && strings.HasPrefix(line, "http") {
|
||||
vulnerability.Link = strings.TrimSpace(line)
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse the priority.
|
||||
if strings.HasPrefix(line, "Priority:") {
|
||||
priority := strings.TrimSpace(strings.TrimPrefix(line, "Priority:"))
|
||||
|
||||
// Handle syntax error: Priority: medium (heap-protector)
|
||||
if strings.Contains(priority, " ") {
|
||||
priority = priority[:strings.Index(priority, " ")]
|
||||
}
|
||||
|
||||
vulnerability.Priority = ubuntuPriorityToPriority(priority)
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse the description.
|
||||
if strings.HasPrefix(line, "Description:") {
|
||||
readingDescription = true
|
||||
vulnerability.Description = strings.TrimSpace(strings.TrimPrefix(line, "Description:")) // In case there is a formatting error and the description starts on the same line
|
||||
continue
|
||||
}
|
||||
if readingDescription {
|
||||
if strings.HasPrefix(line, "Ubuntu-Description:") || strings.HasPrefix(line, "Notes:") || strings.HasPrefix(line, "Bugs:") || strings.HasPrefix(line, "Priority:") || strings.HasPrefix(line, "Discovered-by:") || strings.HasPrefix(line, "Assigned-to:") {
|
||||
readingDescription = false
|
||||
} else {
|
||||
vulnerability.Description = vulnerability.Description + " " + line
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Try to parse the package that the vulnerability affects.
|
||||
affectsCaptureArr := affectsCaptureRegexp.FindAllStringSubmatch(line, -1)
|
||||
if len(affectsCaptureArr) > 0 {
|
||||
affectsCapture := affectsCaptureArr[0]
|
||||
|
||||
md := map[string]string{}
|
||||
for i, n := range affectsCapture {
|
||||
md[affectsCaptureRegexpNames[i]] = strings.TrimSpace(n)
|
||||
}
|
||||
|
||||
// Ignore Linux kernels.
|
||||
if strings.HasPrefix(md["package"], "linux") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Only consider the package if its status is needed, active, deferred
|
||||
// or released. Ignore DNE, needs-triage, not-affected, ignored, pending.
|
||||
if md["status"] == "needed" || md["status"] == "active" || md["status"] == "deferred" || md["status"] == "released" {
|
||||
if _, isReleaseIgnored := ubuntuIgnoredReleases[md["release"]]; isReleaseIgnored {
|
||||
continue
|
||||
}
|
||||
if _, isReleaseKnown := database.UbuntuReleasesMapping[md["release"]]; !isReleaseKnown {
|
||||
unknownReleases[md["release"]] = struct{}{}
|
||||
continue
|
||||
}
|
||||
|
||||
var version types.Version
|
||||
if md["status"] == "released" {
|
||||
if md["note"] != "" {
|
||||
var err error
|
||||
version, err = types.NewVersion(md["note"])
|
||||
if err != nil {
|
||||
log.Warningf("could not parse package version '%s': %s. skipping", md["note"], err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
version = types.MaxVersion
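// Added note (interpretation): packages whose status is needed, active or deferred have
// no fixed version yet, so types.MaxVersion is used to flag every installed version as
// affected.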
|
||||
}
|
||||
if version.String() == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Create and add the new package.
|
||||
vulnerability.FixedIn = append(vulnerability.FixedIn, &database.Package{OS: "ubuntu:" + database.UbuntuReleasesMapping[md["release"]], Name: md["package"], Version: version})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Trim extra spaces in the description
|
||||
vulnerability.Description = strings.TrimSpace(vulnerability.Description)
|
||||
|
||||
// If no link has been provided (CVE-2006-NNN0 for instance), add the link to the tracker
|
||||
if vulnerability.Link == "" {
|
||||
vulnerability.Link = ubuntuTrackerURI
|
||||
}
|
||||
|
||||
// If no priority has been provided (CVE-2007-0667 for instance), set the priority to Unknown
|
||||
if vulnerability.Priority == "" {
|
||||
vulnerability.Priority = types.Unknown
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func ubuntuPriorityToPriority(priority string) types.Priority {
|
||||
switch priority {
|
||||
case "untriaged":
|
||||
return types.Unknown
|
||||
case "negligible":
|
||||
return types.Negligible
|
||||
case "low":
|
||||
return types.Low
|
||||
case "medium":
|
||||
return types.Medium
|
||||
case "high":
|
||||
return types.High
|
||||
case "critical":
|
||||
return types.Critical
|
||||
}
|
||||
|
||||
log.Warning("Could not determine a vulnerability priority from: %s", priority)
|
||||
return types.Unknown
|
||||
}
|
63
updater/fetchers/ubuntu_test.go
Normal file
@ -0,0 +1,63 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fetchers
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/utils/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestUbuntuParser(t *testing.T) {
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
dir := path.Dir(filename)
|
||||
|
||||
// Test parsing testdata/fetcher_ubuntu_test.txt.
|
||||
testData, _ := os.Open(dir + "/testdata/fetcher_ubuntu_test.txt")
|
||||
defer testData.Close()
|
||||
vulnerability, unknownReleases, err := parseUbuntuCVE(testData)
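// Added note: the fixture deliberately contains malformed entries ("wrong-syntax",
// "released ((0.1)", an unknown release) to exercise the parser's error handling.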
|
||||
if assert.Nil(t, err) {
|
||||
assert.Equal(t, "CVE-2015-4471", vulnerability.ID)
|
||||
assert.Equal(t, types.Medium, vulnerability.Priority)
|
||||
assert.Equal(t, "Off-by-one error in the lzxd_decompress function in lzxd.c in libmspack before 0.5 allows remote attackers to cause a denial of service (buffer under-read and application crash) via a crafted CAB archive.", vulnerability.Description)
|
||||
|
||||
// Unknown release (line 28)
|
||||
_, hasUnknownRelease := unknownReleases["unknown"]
|
||||
assert.True(t, hasUnknownRelease)
|
||||
|
||||
if assert.Len(t, vulnerability.FixedIn, 3) {
|
||||
assert.Contains(t, vulnerability.FixedIn, &database.Package{
|
||||
OS: "ubuntu:14.04",
|
||||
Name: "libmspack",
|
||||
Version: types.MaxVersion,
|
||||
})
|
||||
assert.Contains(t, vulnerability.FixedIn, &database.Package{
|
||||
OS: "ubuntu:15.04",
|
||||
Name: "libmspack",
|
||||
Version: types.NewVersionUnsafe("0.4-3"),
|
||||
})
|
||||
assert.Contains(t, vulnerability.FixedIn, &database.Package{
|
||||
OS: "ubuntu:15.10",
|
||||
Name: "libmspack-anotherpkg",
|
||||
Version: types.NewVersionUnsafe("0.1"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
286
updater/updater.go
Normal file
@ -0,0 +1,286 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package updater updates the vulnerability database periodically using
|
||||
// the registered vulnerability fetchers.
|
||||
package updater
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/coreos/clair/database"
|
||||
"github.com/coreos/clair/health"
|
||||
"github.com/coreos/clair/utils"
|
||||
"github.com/pborman/uuid"
|
||||
)
|
||||
|
||||
const (
|
||||
flagName = "updater"
|
||||
refreshLockDuration = time.Minute * 8
|
||||
lockDuration = refreshLockDuration + time.Minute*2
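// Added note: the lock is refreshed every refreshLockDuration (8 minutes) but held for
// lockDuration (10 minutes), so a live updater always renews it before it can expire.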
|
||||
|
||||
// healthMaxConsecutiveLocalFailures defines the number of times the updater
|
||||
// can fail before we should tag it as unhealthy
|
||||
healthMaxConsecutiveLocalFailures = 5
|
||||
)
|
||||
|
||||
var (
|
||||
log = capnslog.NewPackageLogger("github.com/coreos/clair", "updater")
|
||||
|
||||
healthLatestSuccessfulUpdate time.Time
|
||||
healthLockOwner string
|
||||
healthIdentifier string
|
||||
healthConsecutiveLocalFailures int
|
||||
healthNotes []string
|
||||
)
|
||||
|
||||
func init() {
|
||||
health.RegisterHealthchecker("updater", Healthcheck)
|
||||
}
|
||||
|
||||
// Run updates the vulnerability database at regular intervals.
|
||||
func Run(interval time.Duration, st *utils.Stopper) {
|
||||
defer st.End()
|
||||
|
||||
// Do not run the updater if the interval is 0
|
||||
if interval == 0 {
|
||||
log.Infof("updater service is disabled.")
|
||||
return
|
||||
}
|
||||
|
||||
whoAmI := uuid.New()
|
||||
healthIdentifier = whoAmI
|
||||
log.Infof("updater service started. lock identifier: %s", whoAmI)
|
||||
|
||||
for {
|
||||
// Set the next update time to (last update time + interval) or now if there
|
||||
// is no last update time stored in database (first update) or if an error
|
||||
// occurs
|
||||
nextUpdate := time.Now().UTC()
|
||||
if lastUpdateTSS, err := database.GetFlagValue(flagName); err == nil && lastUpdateTSS != "" {
|
||||
if lastUpdateTS, err := strconv.ParseInt(lastUpdateTSS, 10, 64); err == nil {
|
||||
healthLatestSuccessfulUpdate = time.Unix(lastUpdateTS, 0)
|
||||
nextUpdate = time.Unix(lastUpdateTS, 0).Add(interval)
|
||||
}
|
||||
}
|
||||
|
||||
// If the next update timer is in the past, then try to update.
|
||||
if nextUpdate.Before(time.Now().UTC()) {
|
||||
// Attempt to get a lock on the update.
|
||||
log.Debug("attempting to obtain update lock")
|
||||
hasLock, hasLockUntil := database.Lock(flagName, lockDuration, whoAmI)
|
||||
if hasLock {
|
||||
healthLockOwner = healthIdentifier
|
||||
|
||||
// Launch the update in a new goroutine.
|
||||
doneC := make(chan bool, 1)
|
||||
go func() {
|
||||
Update()
|
||||
doneC <- true
|
||||
}()
|
||||
|
||||
// Refresh the lock until the update is done.
|
||||
for done := false; !done; {
|
||||
select {
|
||||
case <-doneC:
|
||||
done = true
|
||||
case <-time.After(refreshLockDuration):
|
||||
database.Lock(flagName, lockDuration, whoAmI)
|
||||
}
|
||||
}
|
||||
|
||||
// Write the last update time to the database and set the next update
|
||||
// time.
|
||||
now := time.Now().UTC()
|
||||
database.UpdateFlag(flagName, strconv.FormatInt(now.Unix(), 10))
|
||||
healthLatestSuccessfulUpdate = now
|
||||
nextUpdate = now.Add(interval)
|
||||
|
||||
// Unlock the update.
|
||||
database.Unlock(flagName, whoAmI)
|
||||
} else {
|
||||
lockOwner, lockExpiration, err := database.LockInfo(flagName)
|
||||
if err != nil {
|
||||
log.Debug("update lock is already taken")
|
||||
nextUpdate = hasLockUntil
|
||||
} else {
|
||||
log.Debugf("update lock is already taken by %s until %v", lockOwner, lockExpiration)
|
||||
nextUpdate = lockExpiration
|
||||
healthLockOwner = lockOwner
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sleep, but remain stoppable until approximately the next update time.
|
||||
now := time.Now().UTC()
|
||||
waitUntil := nextUpdate.Add(time.Duration(rand.ExpFloat64()/0.5) * time.Second)
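// Added note: rand.ExpFloat64()/0.5 is an exponentially distributed jitter with a mean of
// two seconds, so concurrent instances do not all wake up and contend for the lock at once.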
|
||||
log.Debugf("next update attempt scheduled for %v.", waitUntil)
|
||||
if !waitUntil.Before(now) {
|
||||
if !st.Sleep(waitUntil.Sub(time.Now())) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("updater service stopped")
|
||||
}
|
||||
|
||||
// Update fetches all the vulnerabilities from the registered fetchers, upserts
|
||||
// them into the database and then sends notifications.
|
||||
func Update() {
|
||||
log.Info("updating vulnerabilities")
|
||||
|
||||
// Fetch updates in parallel.
|
||||
var status = true
|
||||
var responseC = make(chan *FetcherResponse)
|
||||
for n, f := range fetchers {
|
||||
go func(name string, fetcher Fetcher) {
|
||||
response, err := fetcher.FetchUpdate()
|
||||
if err != nil {
|
||||
log.Errorf("an error occured when fetching update '%s': %s.", name, err)
|
||||
status = false
|
||||
responseC <- nil
|
||||
return
|
||||
}
|
||||
|
||||
responseC <- &response
|
||||
}(n, f)
|
||||
}
|
||||
|
||||
// Collect results of updates.
|
||||
var responses []*FetcherResponse
|
||||
var notes []string
|
||||
for i := 0; i < len(fetchers); {
|
||||
select {
|
||||
case resp := <-responseC:
|
||||
if resp != nil {
|
||||
responses = append(responses, resp)
|
||||
notes = append(notes, resp.Notes...)
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
close(responseC)
|
||||
|
||||
// TODO(Quentin-M): Merge responses together
|
||||
// TODO(Quentin-M): Complete information using NVD
|
||||
|
||||
// Store flags out of the response struct.
|
||||
flags := make(map[string]string)
|
||||
for _, response := range responses {
|
||||
if response.FlagName != "" && response.FlagValue != "" {
|
||||
flags[response.FlagName] = response.FlagValue
|
||||
}
|
||||
}
|
||||
|
||||
// Update health notes.
|
||||
healthNotes = notes
|
||||
|
||||
// Build list of packages.
|
||||
var packages []*database.Package
|
||||
for _, response := range responses {
|
||||
for _, v := range response.Vulnerabilities {
|
||||
packages = append(packages, v.FixedIn...)
|
||||
}
|
||||
}
|
||||
|
||||
// Insert packages into the database.
|
||||
log.Tracef("beginning insertion of %d packages for update", len(packages))
|
||||
t := time.Now()
|
||||
err := database.InsertPackages(packages)
|
||||
log.Tracef("inserting %d packages took %v", len(packages), time.Since(t))
|
||||
if err != nil {
|
||||
log.Errorf("an error occured when inserting packages for update: %s", err)
|
||||
updateHealth(false)
|
||||
return
|
||||
}
|
||||
packages = nil
|
||||
|
||||
// Build a list of vulnerabilities.
|
||||
var vulnerabilities []*database.Vulnerability
|
||||
for _, response := range responses {
|
||||
for _, v := range response.Vulnerabilities {
|
||||
var packageNodes []string
|
||||
for _, pkg := range v.FixedIn {
|
||||
packageNodes = append(packageNodes, pkg.Node)
|
||||
}
|
||||
vulnerabilities = append(vulnerabilities, &database.Vulnerability{ID: v.ID, Link: v.Link, Priority: v.Priority, Description: v.Description, FixedInNodes: packageNodes})
|
||||
}
|
||||
}
|
||||
responses = nil
|
||||
|
||||
// Insert vulnerabilities into the database.
|
||||
log.Tracef("beginning insertion of %d vulnerabilities for update", len(vulnerabilities))
|
||||
t = time.Now()
|
||||
notifications, err := database.InsertVulnerabilities(vulnerabilities)
|
||||
log.Tracef("inserting %d vulnerabilities took %v", len(vulnerabilities), time.Since(t))
|
||||
if err != nil {
|
||||
log.Errorf("an error occured when inserting vulnerabilities for update: %s", err)
|
||||
updateHealth(false)
|
||||
return
|
||||
}
|
||||
vulnerabilities = nil
|
||||
|
||||
// Insert notifications into the database.
|
||||
err = database.InsertNotifications(notifications, database.GetDefaultNotificationWrapper())
|
||||
if err != nil {
|
||||
log.Errorf("an error occured when inserting notifications for update: %s", err)
|
||||
updateHealth(false)
|
||||
return
|
||||
}
|
||||
notifications = nil
|
||||
|
||||
// Update flags in the database.
|
||||
for flagName, flagValue := range flags {
|
||||
database.UpdateFlag(flagName, flagValue)
|
||||
}
|
||||
|
||||
// Update health depending on the status of the fetchers.
|
||||
updateHealth(status)
|
||||
|
||||
log.Info("update finished")
|
||||
}
|
||||
|
||||
func updateHealth(s bool) {
|
||||
if !s {
|
||||
healthConsecutiveLocalFailures++
|
||||
} else {
|
||||
healthConsecutiveLocalFailures = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Healthcheck returns the health of the updater service.
|
||||
func Healthcheck() health.Status {
|
||||
return health.Status{
|
||||
IsEssential: false,
|
||||
IsHealthy: healthConsecutiveLocalFailures < healthMaxConsecutiveLocalFailures,
|
||||
Details: struct {
|
||||
HealthIdentifier string
|
||||
HealthLockOwner string
|
||||
LatestSuccessfulUpdate time.Time
|
||||
ConsecutiveLocalFailures int
|
||||
Notes []string `json:",omitempty"`
|
||||
}{
|
||||
HealthIdentifier: healthIdentifier,
|
||||
HealthLockOwner: healthLockOwner,
|
||||
LatestSuccessfulUpdate: healthLatestSuccessfulUpdate,
|
||||
ConsecutiveLocalFailures: healthConsecutiveLocalFailures,
|
||||
Notes: healthNotes,
|
||||
},
|
||||
}
|
||||
}
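Update above fans each fetcher out into its own goroutine and fans the responses back in over a single channel, so the overall status only needs to be written by the collecting side. Below is a minimal, standalone sketch of that fan-out/fan-in pattern, using hypothetical names (fetchFns, result) rather than the project's own Fetcher types.

```
package main

import (
    "fmt"
    "time"
)

// result is a hypothetical stand-in for the project's FetcherResponse.
type result struct {
    name string
    err  error
}

func main() {
    fetchFns := map[string]func() error{
        "fast": func() error { return nil },
        "slow": func() error { time.Sleep(10 * time.Millisecond); return fmt.Errorf("boom") },
    }

    // Fan out: one goroutine per fetcher, all reporting on the same channel.
    resultC := make(chan result)
    for name, fn := range fetchFns {
        go func(name string, fn func() error) {
            resultC <- result{name: name, err: fn()}
        }(name, fn)
    }

    // Fan in: collect exactly len(fetchFns) results; only this goroutine
    // touches the status flag, so no extra synchronization is needed.
    status := true
    for i := 0; i < len(fetchFns); i++ {
        r := <-resultC
        if r.err != nil {
            status = false
            fmt.Printf("fetcher %s failed: %v\n", r.name, r.err)
        }
    }
    fmt.Println("overall status:", status)
}
```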
|
41
utils/errors/errors.go
Normal file
41
utils/errors/errors.go
Normal file
@ -0,0 +1,41 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package errors defines error types that are used in several modules
|
||||
package errors
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
// ErrFilesystem occurs when a filesystem interaction fails.
|
||||
ErrFilesystem = errors.New("something went wrong when interacting with the fs")
|
||||
// ErrCouldNotDownload occurs when a download fails.
|
||||
ErrCouldNotDownload = errors.New("could not download requested ressource")
|
||||
// ErrNotFound occurs when a resource could not be found.
|
||||
ErrNotFound = errors.New("the resource cannot be found")
|
||||
)
|
||||
|
||||
// ErrBadRequest occurs when a method has been passed an inappropriate argument.
|
||||
type ErrBadRequest struct {
|
||||
s string
|
||||
}
|
||||
|
||||
// NewBadRequestError instantiates an ErrBadRequest with the specified message.
|
||||
func NewBadRequestError(message string) error {
|
||||
return &ErrBadRequest{s: message}
|
||||
}
|
||||
|
||||
func (e *ErrBadRequest) Error() string {
|
||||
return e.s
|
||||
}
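A short, hedged sketch of how callers might be expected to distinguish these errors: sentinel comparison for the package-level values and a type assertion for *ErrBadRequest. The httpStatus helper is hypothetical and not part of the package.

```
package errors

// httpStatus is a hypothetical helper mapping the package's errors to HTTP
// status codes; it only illustrates the two matching styles.
func httpStatus(err error) int {
    if err == ErrNotFound {
        return 404
    }
    if _, ok := err.(*ErrBadRequest); ok {
        return 400
    }
    return 500
}
```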
|
39
utils/exec.go
Normal file
39
utils/exec.go
Normal file
@ -0,0 +1,39 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package utils simply defines utility functions and types.
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// Exec runs the given binary with the given arguments in the directory dir
// and returns its combined standard output and standard error.
|
||||
func Exec(dir string, bin string, args ...string) ([]byte, error) {
|
||||
_, err := exec.LookPath(bin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmd := exec.Command(bin, args...)
|
||||
cmd.Dir = dir
|
||||
|
||||
var buf bytes.Buffer
|
||||
cmd.Stdout = &buf
|
||||
cmd.Stderr = &buf
|
||||
|
||||
err = cmd.Run()
|
||||
return buf.Bytes(), err
|
||||
}
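A hedged usage sketch for Exec; the directory, the command, and the clair import path are assumptions made for illustration only.

```
package main

import (
    "fmt"
    "strings"

    "github.com/coreos/clair/utils" // import path assumed
)

func main() {
    // Run `git rev-parse HEAD` in /tmp; stdout and stderr come back combined.
    out, err := utils.Exec("/tmp", "git", "rev-parse", "HEAD")
    if err != nil {
        // The combined output is still returned, which helps diagnostics.
        fmt.Printf("command failed: %v\noutput: %s\n", err, out)
        return
    }
    fmt.Println(strings.TrimSpace(string(out)))
}
```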
|
65
utils/stopper.go
Normal file
65
utils/stopper.go
Normal file
@ -0,0 +1,65 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Stopper eases the graceful termination of a group of goroutines
|
||||
type Stopper struct {
|
||||
wg sync.WaitGroup
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
// NewStopper initializes a new Stopper instance
|
||||
func NewStopper() *Stopper {
|
||||
return &Stopper{stop: make(chan struct{})}
|
||||
}
|
||||
|
||||
// Begin indicates that a new goroutine has started.
|
||||
func (s *Stopper) Begin() {
|
||||
s.wg.Add(1)
|
||||
}
|
||||
|
||||
// End indicates that a goroutine has stopped.
|
||||
func (s *Stopper) End() {
|
||||
s.wg.Done()
|
||||
}
|
||||
|
||||
// Chan returns the channel on which goroutines can listen to determine if
|
||||
// they should stop. The channel is closed when Stop() is called.
|
||||
func (s *Stopper) Chan() chan struct{} {
|
||||
return s.stop
|
||||
}
|
||||
|
||||
// Sleep puts the current goroutine to sleep for the duration d.
|
||||
// Sleep is interrupted if the Stopper is stopped before d elapses,
|
||||
// in which case Sleep returns false.
|
||||
func (s *Stopper) Sleep(d time.Duration) bool {
|
||||
select {
|
||||
case <-time.After(d):
|
||||
return true
|
||||
case <-s.stop:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Stop asks every goroutine to end.
|
||||
func (s *Stopper) Stop() {
|
||||
close(s.stop)
|
||||
s.wg.Wait()
|
||||
}
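A hedged sketch of how a periodic worker might use Stopper; the clair import path and the timings are assumptions.

```
package main

import (
    "fmt"
    "time"

    "github.com/coreos/clair/utils" // import path assumed
)

func main() {
    st := utils.NewStopper()

    st.Begin()
    go func() {
        defer st.End()
        for {
            fmt.Println("tick")
            // Sleep returns false as soon as Stop() is called.
            if !st.Sleep(500 * time.Millisecond) {
                return
            }
        }
    }()

    time.Sleep(1200 * time.Millisecond)
    st.Stop() // closes the stop channel and waits for End()
    fmt.Println("worker stopped")
}
```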
|
68
utils/string.go
Normal file
68
utils/string.go
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var urlParametersRegexp = regexp.MustCompile(`(\?|\&)([^=]+)\=([^ &]+)`)
|
||||
|
||||
// Hash returns the hex-encoded SHA-1 digest of the given string.
|
||||
func Hash(str string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(str))
|
||||
bs := h.Sum(nil)
|
||||
return hex.EncodeToString(bs)
|
||||
}
|
||||
|
||||
// CleanURL removes all query parameters from a URL.
|
||||
func CleanURL(str string) string {
|
||||
return urlParametersRegexp.ReplaceAllString(str, "")
|
||||
}
|
||||
|
||||
// Contains looks for a string in a slice of strings and returns whether
|
||||
// the string is present.
|
||||
func Contains(needle string, haystack []string) bool {
|
||||
for _, h := range haystack {
|
||||
if h == needle {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CompareStringLists returns the strings which are present in X but not in Y
|
||||
func CompareStringLists(X, Y []string) []string {
|
||||
m := make(map[string]int)
|
||||
|
||||
for _, y := range Y {
|
||||
m[y] = 1
|
||||
}
|
||||
|
||||
diff := []string{}
|
||||
for _, x := range X {
|
||||
if m[x] > 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
diff = append(diff, x)
|
||||
m[x] = 1
|
||||
}
|
||||
|
||||
return diff
|
||||
}
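A hedged usage sketch for the string helpers above; the clair import path is assumed and the example values are arbitrary.

```
package main

import (
    "fmt"

    "github.com/coreos/clair/utils" // import path assumed
)

func main() {
    // Hash returns the hex-encoded SHA-1 digest, so equal inputs hash equally.
    fmt.Println(utils.Hash("debian:8"))

    // CleanURL strips query parameters.
    fmt.Println(utils.CleanURL("http://example.com/path?token=secret&x=1"))
    // -> http://example.com/path

    // Contains and CompareStringLists work on plain string slices.
    fmt.Println(utils.Contains("b", []string{"a", "b"}))                     // true
    fmt.Println(utils.CompareStringLists([]string{"a", "b"}, []string{"a"})) // [b]
}
```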
|
107
utils/tar.go
Normal file
107
utils/tar.go
Normal file
@ -0,0 +1,107 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCouldNotExtract occurs when an extraction fails.
|
||||
ErrCouldNotExtract = errors.New("utils: could not extract the archive")
|
||||
|
||||
// ErrExtractedFileTooBig occurs when a file to extract is too big.
|
||||
ErrExtractedFileTooBig = errors.New("utils: could not extract one or more files from the archive: file too big")
|
||||
|
||||
gzipHeader = []byte{0x1f, 0x8b}
|
||||
)
|
||||
|
||||
// SelectivelyExtractArchive extracts the specified files and folders
|
||||
// from tar or tar-gzip data read from the given reader and stores them in a map indexed by file path.
|
||||
func SelectivelyExtractArchive(r io.Reader, toExtract []string, maxFileSize int64) (map[string][]byte, error) {
|
||||
data := make(map[string][]byte)
|
||||
|
||||
// Create a tar or tar/tar-gzip reader
|
||||
tr, err := getTarReader(r)
|
||||
if err != nil {
|
||||
return data, ErrCouldNotExtract
|
||||
}
|
||||
|
||||
// For each element in the archive
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return data, ErrCouldNotExtract
|
||||
}
|
||||
|
||||
// Get element filename
|
||||
filename := hdr.Name
|
||||
filename = strings.TrimPrefix(filename, "./")
|
||||
|
||||
// Determine if we should extract the element
|
||||
toBeExtracted := false
|
||||
for _, s := range toExtract {
|
||||
if strings.HasPrefix(filename, s) {
|
||||
toBeExtracted = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if toBeExtracted {
|
||||
// File size limit
|
||||
if maxFileSize > 0 && hdr.Size > maxFileSize {
|
||||
return data, ErrExtractedFileTooBig
|
||||
}
|
||||
|
||||
// Extract the element
|
||||
if hdr.Typeflag == tar.TypeSymlink || hdr.Typeflag == tar.TypeLink || hdr.Typeflag == tar.TypeReg {
|
||||
d, err := ioutil.ReadAll(tr)
if err != nil {
return data, ErrCouldNotExtract
}
|
||||
data[filename] = d
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// getTarReader returns a tar.Reader associated with the specified io.Reader,
|
||||
// optionally backed by a gzip.Reader if gzip compression is detected.
|
||||
//
|
||||
// Gzip detection is done using the magic numbers defined in RFC 1952:
|
||||
// the first two bytes should be 0x1f and 0x8b.
|
||||
func getTarReader(r io.Reader) (*tar.Reader, error) {
|
||||
br := bufio.NewReader(r)
|
||||
header, err := br.Peek(2)
|
||||
|
||||
if err == nil && bytes.Equal(header, gzipHeader) {
|
||||
gr, err := gzip.NewReader(br)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return tar.NewReader(gr), nil
|
||||
}
|
||||
|
||||
return tar.NewReader(br), nil
|
||||
}
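A hedged sketch of extracting a few paths from a layer tarball with SelectivelyExtractArchive; the file path, the prefix, and the size limit are arbitrary, and the clair import path is assumed.

```
package main

import (
    "fmt"
    "log"
    "os"

    "github.com/coreos/clair/utils" // import path assumed
)

func main() {
    f, err := os.Open("/tmp/layer.tar.gz") // hypothetical archive
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // Keep only entries under etc/, refusing any single file above 32 MiB.
    files, err := utils.SelectivelyExtractArchive(f, []string{"etc/"}, 32*1024*1024)
    if err != nil {
        log.Fatal(err)
    }
    for name, content := range files {
        fmt.Printf("%s: %d bytes\n", name, len(content))
    }
}
```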
|
BIN
utils/testdata/utils_test.tar
vendored
Normal file
BIN
utils/testdata/utils_test.tar
vendored
Normal file
Binary file not shown.
BIN
utils/testdata/utils_test.tar.gz
vendored
Normal file
BIN
utils/testdata/utils_test.tar.gz
vendored
Normal file
Binary file not shown.
88
utils/types/priority.go
Normal file
88
utils/types/priority.go
Normal file
@ -0,0 +1,88 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package types defines useful types that are used in database models.
|
||||
package types
|
||||
|
||||
// Priority defines a vulnerability priority
|
||||
type Priority string
|
||||
|
||||
const (
|
||||
// Unknown is either a security problem that has not been
|
||||
// assigned to a priority yet or a priority that our system
|
||||
// did not recognize
|
||||
Unknown Priority = "Unknown"
|
||||
// Negligible is technically a security problem, but is
|
||||
// only theoretical in nature, requires a very special
|
||||
// situation, has almost no install base, or does no real
|
||||
// damage. These tend not to get backports from upstream,
|
||||
// and will likely not be included in security updates unless
|
||||
// there is an easy fix and some other issue causes an update.
|
||||
Negligible Priority = "Negligible"
|
||||
// Low is a security problem, but is hard to
|
||||
// exploit due to environment, requires a user-assisted
|
||||
// attack, a small install base, or does very little damage.
|
||||
// These tend to be included in security updates only when
|
||||
// higher priority issues require an update, or if many
|
||||
// low priority issues have built up.
|
||||
Low Priority = "Low"
|
||||
// Medium is a real security problem, and is exploitable
|
||||
// for many people. Includes network daemon denial of service
|
||||
// attacks, cross-site scripting, and gaining user privileges.
|
||||
// Updates should be made soon for this priority of issue.
|
||||
Medium Priority = "Medium"
|
||||
// High is a real problem, exploitable for many people in a default
|
||||
// installation. Includes serious remote denial of services,
|
||||
// local root privilege escalations, or data loss.
|
||||
High Priority = "High"
|
||||
// Critical is a world-burning problem, exploitable for nearly all people
|
||||
// in a default installation of Linux. Includes remote root
|
||||
// privilege escalations, or massive data loss.
|
||||
Critical Priority = "Critical"
|
||||
// Defcon1 is a Critical problem which has been manually highlighted by
|
||||
// the team. It requires immediate attention.
|
||||
Defcon1 Priority = "Defcon1"
|
||||
)
|
||||
|
||||
// Priorities lists all known priorities, ordered from lowest to highest
|
||||
var Priorities = []Priority{Unknown, Negligible, Low, Medium, High, Critical, Defcon1}
|
||||
|
||||
// IsValid determines if the priority is a valid one
|
||||
func (p Priority) IsValid() bool {
|
||||
for _, pp := range Priorities {
|
||||
if p == pp {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Compare compares two priorities
|
||||
func (p Priority) Compare(p2 Priority) int {
|
||||
var i1, i2 int
|
||||
|
||||
for i1 = 0; i1 < len(Priorities); i1 = i1 + 1 {
|
||||
if p == Priorities[i1] {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i2 = 0; i2 < len(Priorities); i2 = i2 + 1 {
|
||||
if p2 == Priorities[i2] {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return i1 - i2
|
||||
}
|
32
utils/types/priority_test.go
Normal file
32
utils/types/priority_test.go
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestComparePriority(t *testing.T) {
|
||||
assert.Equal(t, Medium.Compare(Medium), 0, "Priority comparison failed")
|
||||
assert.True(t, Medium.Compare(High) < 0, "Priority comparison failed")
|
||||
assert.True(t, Critical.Compare(Low) > 0, "Priority comparison failed")
|
||||
}
|
||||
|
||||
func TestIsValid(t *testing.T) {
|
||||
assert.False(t, Priority("Test").IsValid())
|
||||
assert.True(t, Unknown.IsValid())
|
||||
}
|
282
utils/types/version.go
Normal file
282
utils/types/version.go
Normal file
@ -0,0 +1,282 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// Version represents a package version
|
||||
type Version struct {
|
||||
epoch int
|
||||
version string
|
||||
revision string
|
||||
}
|
||||
|
||||
var (
|
||||
// MinVersion is a special package version which is always sorted first
|
||||
MinVersion = Version{version: "#MINV#"}
|
||||
// MaxVersion is a special package version which is always sorted last
|
||||
MaxVersion = Version{version: "#MAXV#"}
|
||||
|
||||
versionAllowedSymbols = []rune{'.', '-', '+', '~', ':', '_'}
|
||||
revisionAllowedSymbols = []rune{'.', '+', '~', '_'}
|
||||
)
|
||||
|
||||
// NewVersion function parses a string into a Version struct which can be compared
|
||||
//
|
||||
// The implementation is based on http://man.he.net/man5/deb-version
|
||||
// and on https://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version
|
||||
//
|
||||
// It uses the dpkg-1.17.25's algorithm (lib/parsehelp.c)
|
||||
func NewVersion(str string) (Version, error) {
|
||||
var version Version
|
||||
|
||||
// Trim leading and trailing space
|
||||
str = strings.TrimSpace(str)
|
||||
|
||||
if len(str) == 0 {
|
||||
return Version{}, errors.New("Version string is empty")
|
||||
}
|
||||
|
||||
// Max/Min versions
|
||||
if str == MaxVersion.String() {
|
||||
return MaxVersion, nil
|
||||
}
|
||||
if str == MinVersion.String() {
|
||||
return MinVersion, nil
|
||||
}
|
||||
|
||||
// Find epoch
|
||||
sepepoch := strings.Index(str, ":")
|
||||
if sepepoch > -1 {
|
||||
intepoch, err := strconv.Atoi(str[:sepepoch])
|
||||
if err == nil {
|
||||
version.epoch = intepoch
|
||||
} else {
|
||||
return Version{}, errors.New("epoch in version is not a number")
|
||||
}
|
||||
if intepoch < 0 {
|
||||
return Version{}, errors.New("epoch in version is negative")
|
||||
}
|
||||
} else {
|
||||
version.epoch = 0
|
||||
}
|
||||
|
||||
// Find version / revision
|
||||
seprevision := strings.LastIndex(str, "-")
|
||||
if seprevision > -1 {
|
||||
version.version = str[sepepoch+1 : seprevision]
|
||||
version.revision = str[seprevision+1:]
|
||||
} else {
|
||||
version.version = str[sepepoch+1:]
|
||||
version.revision = ""
|
||||
}
|
||||
// Verify format
|
||||
if len(version.version) == 0 {
|
||||
return Version{}, errors.New("No version")
|
||||
}
|
||||
|
||||
if !unicode.IsDigit(rune(version.version[0])) {
|
||||
return Version{}, errors.New("version does not start with digit")
|
||||
}
|
||||
|
||||
for i := 0; i < len(version.version); i = i + 1 {
|
||||
r := rune(version.version[i])
|
||||
if !unicode.IsDigit(r) && !unicode.IsLetter(r) && !containsRune(versionAllowedSymbols, r) {
|
||||
return Version{}, errors.New("invalid character in version")
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(version.revision); i = i + 1 {
|
||||
r := rune(version.revision[i])
|
||||
if !unicode.IsDigit(r) && !unicode.IsLetter(r) && !containsRune(revisionAllowedSymbols, r) {
|
||||
return Version{}, errors.New("invalid character in revision")
|
||||
}
|
||||
}
|
||||
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// NewVersionUnsafe is a wrapper around NewVersion that ignores any potential
|
||||
// parsing error. It is useful for test purposes.
|
||||
func NewVersionUnsafe(str string) Version {
|
||||
v, _ := NewVersion(str)
|
||||
return v
|
||||
}
|
||||
|
||||
// Compare compares two Debian-like package versions
|
||||
//
|
||||
// The implementation is based on http://man.he.net/man5/deb-version
|
||||
// and on https://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version
|
||||
//
|
||||
// It uses the dpkg-1.17.25's algorithm (lib/version.c)
|
||||
func (a Version) Compare(b Version) int {
|
||||
// Quick check
|
||||
if a == b {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Max/Min comparison
|
||||
if a == MinVersion || b == MaxVersion {
|
||||
return -1
|
||||
}
|
||||
if b == MinVersion || a == MaxVersion {
|
||||
return 1
|
||||
}
|
||||
|
||||
// Compare epochs
|
||||
if a.epoch > b.epoch {
|
||||
return 1
|
||||
}
|
||||
if a.epoch < b.epoch {
|
||||
return -1
|
||||
}
|
||||
|
||||
// Compare version
|
||||
rc := verrevcmp(a.version, b.version)
|
||||
if rc != 0 {
|
||||
return signum(rc)
|
||||
}
|
||||
|
||||
// Compare revision
|
||||
return signum(verrevcmp(a.revision, b.revision))
|
||||
}
|
||||
|
||||
// String returns the string representation of a Version
|
||||
func (v Version) String() (s string) {
|
||||
if v.epoch != 0 {
|
||||
s = strconv.Itoa(v.epoch) + ":"
|
||||
}
|
||||
s += v.version
|
||||
if v.revision != "" {
|
||||
s += "-" + v.revision
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler by encoding the Version as its string representation.
func (v Version) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(v.String())
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler by parsing a JSON string into a Version.
func (v *Version) UnmarshalJSON(b []byte) (err error) {
|
||||
var str string
|
||||
if err = json.Unmarshal(b, &str); err != nil {
return
}
|
||||
vp, err := NewVersion(str)
|
||||
*v = vp
|
||||
return
|
||||
}
|
||||
|
||||
func verrevcmp(t1, t2 string) int {
|
||||
t1, rt1 := nextRune(t1)
|
||||
t2, rt2 := nextRune(t2)
|
||||
|
||||
for rt1 != nil || rt2 != nil {
|
||||
firstDiff := 0
|
||||
|
||||
for (rt1 != nil && !unicode.IsDigit(*rt1)) || (rt2 != nil && !unicode.IsDigit(*rt2)) {
|
||||
ac := 0
|
||||
bc := 0
|
||||
if rt1 != nil {
|
||||
ac = order(*rt1)
|
||||
}
|
||||
if rt2 != nil {
|
||||
bc = order(*rt2)
|
||||
}
|
||||
|
||||
if ac != bc {
|
||||
return ac - bc
|
||||
}
|
||||
|
||||
t1, rt1 = nextRune(t1)
|
||||
t2, rt2 = nextRune(t2)
|
||||
}
|
||||
for rt1 != nil && *rt1 == '0' {
|
||||
t1, rt1 = nextRune(t1)
|
||||
}
|
||||
for rt2 != nil && *rt2 == '0' {
|
||||
t2, rt2 = nextRune(t2)
|
||||
}
|
||||
for rt1 != nil && unicode.IsDigit(*rt1) && rt2 != nil && unicode.IsDigit(*rt2) {
|
||||
if firstDiff == 0 {
|
||||
firstDiff = int(*rt1) - int(*rt2)
|
||||
}
|
||||
t1, rt1 = nextRune(t1)
|
||||
t2, rt2 = nextRune(t2)
|
||||
}
|
||||
if rt1 != nil && unicode.IsDigit(*rt1) {
|
||||
return 1
|
||||
}
|
||||
if rt2 != nil && unicode.IsDigit(*rt2) {
|
||||
return -1
|
||||
}
|
||||
if firstDiff != 0 {
|
||||
return firstDiff
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// order compares runes using a modified ASCII table
|
||||
// so that letters are sorted earlier than non-letters
|
||||
// and so that a tilde sorts before anything else
|
||||
func order(r rune) int {
|
||||
if unicode.IsDigit(r) {
|
||||
return 0
|
||||
}
|
||||
|
||||
if unicode.IsLetter(r) {
|
||||
return int(r)
|
||||
}
|
||||
|
||||
if r == '~' {
|
||||
return -1
|
||||
}
|
||||
|
||||
return int(r) + 256
|
||||
}
|
||||
|
||||
func nextRune(str string) (string, *rune) {
|
||||
if len(str) >= 1 {
|
||||
r := rune(str[0])
|
||||
return str[1:], &r
|
||||
}
|
||||
return str, nil
|
||||
}
|
||||
|
||||
func containsRune(s []rune, e rune) bool {
|
||||
for _, a := range s {
|
||||
if a == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func signum(a int) int {
|
||||
switch {
|
||||
case a < 0:
|
||||
return -1
|
||||
case a > 0:
|
||||
return +1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
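A hedged sketch of parsing and comparing Debian-style versions with the API above; the clair import path is assumed and the version strings are arbitrary examples.

```
package main

import (
    "fmt"
    "log"

    "github.com/coreos/clair/utils/types" // import path assumed
)

func main() {
    // "2:7.4.052-1ubuntu3" parses as epoch 2, version "7.4.052", revision "1ubuntu3".
    a, err := types.NewVersion("2:7.4.052-1ubuntu3")
    if err != nil {
        log.Fatal(err)
    }
    b := types.NewVersionUnsafe("2:7.4.052-1ubuntu3.1")

    // dpkg ordering: epoch first, then version, then revision.
    fmt.Println(a.Compare(b)) // -1, a is older than b

    // The sentinel versions always sort first and last.
    fmt.Println(types.MinVersion.Compare(a), a.Compare(types.MaxVersion)) // -1 -1
}
```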
|
243
utils/types/version_test.go
Normal file
243
utils/types/version_test.go
Normal file
@ -0,0 +1,243 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
LESS = -1
|
||||
EQUAL = 0
|
||||
GREATER = 1
|
||||
)
|
||||
|
||||
func TestCompareSimpleVersion(t *testing.T) {
|
||||
cases := []struct {
|
||||
v1 Version
|
||||
expected int
|
||||
v2 Version
|
||||
}{
|
||||
{Version{}, EQUAL, Version{}},
|
||||
{Version{epoch: 1}, LESS, Version{epoch: 2}},
|
||||
{Version{epoch: 0, version: "1", revision: "1"}, LESS, Version{epoch: 0, version: "2", revision: "1"}},
|
||||
{Version{epoch: 0, version: "a", revision: "0"}, LESS, Version{epoch: 0, version: "b", revision: "0"}},
|
||||
{Version{epoch: 0, version: "1", revision: "1"}, LESS, Version{epoch: 0, version: "1", revision: "2"}},
|
||||
{Version{epoch: 0, version: "0", revision: "0"}, EQUAL, Version{epoch: 0, version: "0", revision: "0"}},
|
||||
{Version{epoch: 0, version: "0", revision: "00"}, EQUAL, Version{epoch: 0, version: "00", revision: "0"}},
|
||||
{Version{epoch: 1, version: "2", revision: "3"}, EQUAL, Version{epoch: 1, version: "2", revision: "3"}},
|
||||
{Version{epoch: 0, version: "0", revision: "a"}, LESS, Version{epoch: 0, version: "0", revision: "b"}},
|
||||
{MinVersion, LESS, MaxVersion},
|
||||
{MinVersion, LESS, Version{}},
|
||||
{MinVersion, LESS, Version{version: "0"}},
|
||||
{MaxVersion, GREATER, Version{}},
|
||||
{MaxVersion, GREATER, Version{epoch: 9999999, version: "9999999", revision: "9999999"}},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
cmp := c.v1.Compare(c.v2)
|
||||
assert.Equal(t, c.expected, cmp, "%s vs. %s, = %d, expected %d", c.v1, c.v2, cmp, c.expected)
|
||||
|
||||
cmp = c.v2.Compare(c.v1)
|
||||
assert.Equal(t, -c.expected, cmp, "%s vs. %s, = %d, expected %d", c.v2, c.v1, cmp, -c.expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
cases := []struct {
|
||||
str string
|
||||
ver Version
|
||||
err bool
|
||||
}{
|
||||
// Test 0
|
||||
{"0", Version{epoch: 0, version: "0", revision: ""}, false},
|
||||
{"0:0", Version{epoch: 0, version: "0", revision: ""}, false},
|
||||
{"0:0-", Version{epoch: 0, version: "0", revision: ""}, false},
|
||||
{"0:0-0", Version{epoch: 0, version: "0", revision: "0"}, false},
|
||||
{"0:0.0-0.0", Version{epoch: 0, version: "0.0", revision: "0.0"}, false},
|
||||
// Test epoched
|
||||
{"1:0", Version{epoch: 1, version: "0", revision: ""}, false},
|
||||
{"5:1", Version{epoch: 5, version: "1", revision: ""}, false},
|
||||
// Test multiple hyphens
|
||||
{"0:0-0-0", Version{epoch: 0, version: "0-0", revision: "0"}, false},
|
||||
{"0:0-0-0-0", Version{epoch: 0, version: "0-0-0", revision: "0"}, false},
|
||||
// Test multiple colons
|
||||
{"0:0:0-0", Version{epoch: 0, version: "0:0", revision: "0"}, false},
|
||||
{"0:0:0:0-0", Version{epoch: 0, version: "0:0:0", revision: "0"}, false},
|
||||
// Test multiple hyphens and colons
|
||||
{"0:0:0-0-0", Version{epoch: 0, version: "0:0-0", revision: "0"}, false},
|
||||
{"0:0-0:0-0", Version{epoch: 0, version: "0-0:0", revision: "0"}, false},
|
||||
// Test valid characters in version
|
||||
{"0:09azAZ.-+~:_-0", Version{epoch: 0, version: "09azAZ.-+~:_", revision: "0"}, false},
|
||||
// Test valid characters in debian revision
|
||||
{"0:0-azAZ09.+~_", Version{epoch: 0, version: "0", revision: "azAZ09.+~_"}, false},
|
||||
// Test version with leading and trailing spaces
|
||||
{" 0:0-1", Version{epoch: 0, version: "0", revision: "1"}, false},
|
||||
{"0:0-1 ", Version{epoch: 0, version: "0", revision: "1"}, false},
|
||||
{" 0:0-1 ", Version{epoch: 0, version: "0", revision: "1"}, false},
|
||||
// Test empty version
|
||||
{"", Version{}, true},
|
||||
{" ", Version{}, true},
|
||||
{"0:", Version{}, true},
|
||||
// Test version with embedded spaces
|
||||
{"0:0 0-1", Version{}, true},
|
||||
// Test version with negative epoch
|
||||
{"-1:0-1", Version{}, true},
|
||||
// Test invalid characters in epoch
|
||||
{"a:0-0", Version{}, true},
|
||||
{"A:0-0", Version{}, true},
|
||||
// Test version not starting with a digit
|
||||
{"0:abc3-0", Version{}, true},
|
||||
}
|
||||
for _, c := range cases {
|
||||
v, err := NewVersion(c.str)
|
||||
|
||||
if c.err {
|
||||
assert.Error(t, err, "When parsing '%s'", c.str)
|
||||
} else {
|
||||
assert.Nil(t, err, "When parsing '%s'", c.str)
|
||||
}
|
||||
assert.Equal(t, c.ver, v, "When parsing '%s'", c.str)
|
||||
}
|
||||
|
||||
// Test invalid characters in version
|
||||
versym := []rune{'!', '#', '@', '$', '%', '&', '/', '|', '\\', '<', '>', '(', ')', '[', ']', '{', '}', ';', ',', '=', '*', '^', '\''}
|
||||
for _, r := range versym {
|
||||
_, err := NewVersion(strings.Join([]string{"0:0", string(r), "-0"}, ""))
|
||||
assert.Error(t, err, "Parsing with invalid character '%s' in version should have failed", string(r))
|
||||
}
|
||||
|
||||
// Test invalid characters in revision
|
||||
versym = []rune{'!', '#', '@', '$', '%', '&', '/', '|', '\\', '<', '>', '(', ')', '[', ']', '{', '}', ':', ';', ',', '=', '*', '^', '\''}
|
||||
for _, r := range versym {
|
||||
_, err := NewVersion(strings.Join([]string{"0:0-", string(r)}, ""))
|
||||
assert.Error(t, err, "Parsing with invalid character '%s' in revision should have failed", string(r))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseAndCompare(t *testing.T) {
|
||||
const LESS = -1
|
||||
const EQUAL = 0
|
||||
const GREATER = 1
|
||||
|
||||
cases := []struct {
|
||||
v1 string
|
||||
expected int
|
||||
v2 string
|
||||
}{
|
||||
{"7.6p2-4", GREATER, "7.6-0"},
|
||||
{"1.0.3-3", GREATER, "1.0-1"},
|
||||
{"1.3", GREATER, "1.2.2-2"},
|
||||
{"1.3", GREATER, "1.2.2"},
|
||||
// Some properties of text strings
|
||||
{"0-pre", EQUAL, "0-pre"},
|
||||
{"0-pre", LESS, "0-pree"},
|
||||
{"1.1.6r2-2", GREATER, "1.1.6r-1"},
|
||||
{"2.6b2-1", GREATER, "2.6b-2"},
|
||||
{"98.1p5-1", LESS, "98.1-pre2-b6-2"},
|
||||
{"0.4a6-2", GREATER, "0.4-1"},
|
||||
{"1:3.0.5-2", LESS, "1:3.0.5.1"},
|
||||
// epochs
|
||||
{"1:0.4", GREATER, "10.3"},
|
||||
{"1:1.25-4", LESS, "1:1.25-8"},
|
||||
{"0:1.18.36", EQUAL, "1.18.36"},
|
||||
{"1.18.36", GREATER, "1.18.35"},
|
||||
{"0:1.18.36", GREATER, "1.18.35"},
|
||||
// Funky, but allowed, characters in upstream version
|
||||
{"9:1.18.36:5.4-20", LESS, "10:0.5.1-22"},
|
||||
{"9:1.18.36:5.4-20", LESS, "9:1.18.36:5.5-1"},
|
||||
{"9:1.18.36:5.4-20", LESS, " 9:1.18.37:4.3-22"},
|
||||
{"1.18.36-0.17.35-18", GREATER, "1.18.36-19"},
|
||||
// Junk
|
||||
{"1:1.2.13-3", LESS, "1:1.2.13-3.1"},
|
||||
{"2.0.7pre1-4", LESS, "2.0.7r-1"},
|
||||
// if a version includes a dash, it should be the debrev dash - policy says so
|
||||
{"0:0-0-0", GREATER, "0-0"},
|
||||
// do we like strange versions? Yes we like strange versions…
|
||||
{"0", EQUAL, "0"},
|
||||
{"0", EQUAL, "00"},
|
||||
// #205960
|
||||
{"3.0~rc1-1", LESS, "3.0-1"},
|
||||
// #573592 - debian policy 5.6.12
|
||||
{"1.0", EQUAL, "1.0-0"},
|
||||
{"0.2", LESS, "1.0-0"},
|
||||
{"1.0", LESS, "1.0-0+b1"},
|
||||
{"1.0", GREATER, "1.0-0~"},
|
||||
// "steal" the testcases from (old perl) cupt
|
||||
{"1.2.3", EQUAL, "1.2.3"}, // identical
|
||||
{"4.4.3-2", EQUAL, "4.4.3-2"}, // identical
|
||||
{"1:2ab:5", EQUAL, "1:2ab:5"}, // this is correct...
|
||||
{"7:1-a:b-5", EQUAL, "7:1-a:b-5"}, // and this
|
||||
{"57:1.2.3abYZ+~-4-5", EQUAL, "57:1.2.3abYZ+~-4-5"}, // and those too
|
||||
{"1.2.3", EQUAL, "0:1.2.3"}, // zero epoch
|
||||
{"1.2.3", EQUAL, "1.2.3-0"}, // zero revision
|
||||
{"009", EQUAL, "9"}, // zeroes…
|
||||
{"009ab5", EQUAL, "9ab5"}, // there as well
|
||||
{"1.2.3", LESS, "1.2.3-1"}, // added non-zero revision
|
||||
{"1.2.3", LESS, "1.2.4"}, // just bigger
|
||||
{"1.2.4", GREATER, "1.2.3"}, // order doesn't matter
|
||||
{"1.2.24", GREATER, "1.2.3"}, // bigger, eh?
|
||||
{"0.10.0", GREATER, "0.8.7"}, // bigger, eh?
|
||||
{"3.2", GREATER, "2.3"}, // major number rocks
|
||||
{"1.3.2a", GREATER, "1.3.2"}, // letters rock
|
||||
{"0.5.0~git", LESS, "0.5.0~git2"}, // numbers rock
|
||||
{"2a", LESS, "21"}, // but not in all places
|
||||
{"1.3.2a", LESS, "1.3.2b"}, // but there is another letter
|
||||
{"1:1.2.3", GREATER, "1.2.4"}, // epoch rocks
|
||||
{"1:1.2.3", LESS, "1:1.2.4"}, // bigger anyway
|
||||
{"1.2a+~bCd3", LESS, "1.2a++"}, // tilde doesn't rock
|
||||
{"1.2a+~bCd3", GREATER, "1.2a+~"}, // but first is longer!
|
||||
{"5:2", GREATER, "304-2"}, // epoch rocks
|
||||
{"5:2", LESS, "304:2"}, // so big epoch?
|
||||
{"25:2", GREATER, "3:2"}, // 25 > 3, obviously
|
||||
{"1:2:123", LESS, "1:12:3"}, // 12 > 2
|
||||
{"1.2-5", LESS, "1.2-3-5"}, // 1.2 < 1.2-3
|
||||
{"5.10.0", GREATER, "5.005"}, // preceding zeroes don't matters
|
||||
{"3a9.8", LESS, "3.10.2"}, // letters are before all letter symbols
|
||||
{"3a9.8", GREATER, "3~10"}, // but after the tilde
|
||||
{"1.4+OOo3.0.0~", LESS, "1.4+OOo3.0.0-4"}, // another tilde check
|
||||
{"2.4.7-1", LESS, "2.4.7-z"}, // revision comparing
|
||||
{"1.002-1+b2", GREATER, "1.00"}, // whatever...
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
v1, err1 := NewVersion(c.v1)
|
||||
v2, err2 := NewVersion(c.v2)
|
||||
if assert.Nil(t, err1) && assert.Nil(t, err2) {
|
||||
cmp := v1.Compare(v2)
|
||||
assert.Equal(t, c.expected, cmp, "%s vs. %s, = %d, expected %d", c.v1, c.v2, cmp, c.expected)
|
||||
|
||||
cmp = v2.Compare(v1)
|
||||
assert.Equal(t, -c.expected, cmp, "%s vs. %s, = %d, expected %d", c.v2, c.v1, cmp, -c.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestVersionJson(t *testing.T) {
|
||||
v, _ := NewVersion("57:1.2.3abYZ+~-4-5")
|
||||
|
||||
// Marshal
|
||||
json, err := v.MarshalJSON()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "\""+v.String()+"\"", string(json))
|
||||
|
||||
// Unmarshal
|
||||
var v2 Version
|
||||
v2.UnmarshalJSON(json)
|
||||
assert.Equal(t, v, v2)
|
||||
}
|
96
utils/utils_test.go
Normal file
96
utils/utils_test.go
Normal file
@ -0,0 +1,96 @@
|
||||
// Copyright 2015 clair authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/pborman/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const fileToDownload = "http://www.google.com/robots.txt"
|
||||
|
||||
// TestDiff tests CompareStringLists (string.go)
|
||||
func TestDiff(t *testing.T) {
|
||||
assert.NotContains(t, CompareStringLists([]string{"a", "b", "a"}, []string{"a", "c"}), "a")
|
||||
}
|
||||
|
||||
// TestExec tests the exec.go source file
|
||||
func TestExec(t *testing.T) {
|
||||
_, err := Exec(uuid.New(), "touch", uuid.New())
|
||||
assert.Error(t, err, "Exec should not be able to run in a not existing directory")
|
||||
|
||||
o, err := Exec("/tmp", "echo", "test")
|
||||
assert.Nil(t, err, "Could not exec echo")
|
||||
assert.Equal(t, "test\n", string(o), "Could not exec echo")
|
||||
|
||||
_, err = Exec("/tmp", uuid.New())
|
||||
assert.Error(t, err, "An invalid command should return an error")
|
||||
}
|
||||
|
||||
// TestString tests the string.go file
|
||||
func TestString(t *testing.T) {
|
||||
assert.Equal(t, Hash("abc123"), Hash("abc123"))
|
||||
assert.NotEqual(t, Hash("abc123."), Hash("abc123"))
|
||||
|
||||
assert.False(t, Contains("", []string{}))
|
||||
assert.True(t, Contains("a", []string{"a", "b"}))
|
||||
assert.False(t, Contains("c", []string{"a", "b"}))
|
||||
}
|
||||
|
||||
// TestTar tests the tar.go file
|
||||
func TestTar(t *testing.T) {
|
||||
var err error
|
||||
var data map[string][]byte
|
||||
_, filepath, _, _ := runtime.Caller(0)
|
||||
|
||||
for _, filename := range []string{"/testdata/utils_test.tar.gz", "/testdata/utils_test.tar"} {
|
||||
testArchivePath := path.Join(path.Dir(filepath), filename)
|
||||
|
||||
// Extract non compressed data
|
||||
data, err = SelectivelyExtractArchive(bytes.NewReader([]byte("that string does not represent a tar or tar-gzip file")), []string{}, 0)
|
||||
assert.Error(t, err, "Extracting non compressed data should return an error")
|
||||
|
||||
// Extract an archive
|
||||
f, _ := os.Open(testArchivePath)
|
||||
defer f.Close()
|
||||
data, err = SelectivelyExtractArchive(f, []string{"test/"}, 0)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if c, n := data["test/test.txt"]; !n {
|
||||
assert.Fail(t, "test/test.txt should have been extracted")
|
||||
} else {
|
||||
assert.True(t, len(c) > 0, "test/test.txt file is empty")
|
||||
}
|
||||
if _, n := data["test.txt"]; n {
|
||||
assert.Fail(t, "test.txt should not be extracted")
|
||||
}
|
||||
|
||||
// File size limit
|
||||
f, _ = os.Open(testArchivePath)
|
||||
defer f.Close()
|
||||
data, err = SelectivelyExtractArchive(f, []string{"test"}, 50)
|
||||
assert.Equal(t, ErrExtractedFileTooBig, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanURL(t *testing.T) {
|
||||
assert.Equal(t, "Test http://test.cn/test Test", CleanURL("Test http://test.cn/test?foo=bar&bar=foo Test"))
|
||||
}
|
25
vendor/github.com/alecthomas/template/README.md
generated
vendored
Normal file
25
vendor/github.com/alecthomas/template/README.md
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
# Go's `text/template` package with newline elision
|
||||
|
||||
This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.
|
||||
|
||||
eg.
|
||||
|
||||
```
|
||||
{{if true}}\
|
||||
hello
|
||||
{{end}}\
|
||||
```
|
||||
|
||||
Will result in:
|
||||
|
||||
```
|
||||
hello\n
|
||||
```
|
||||
|
||||
Rather than:
|
||||
|
||||
```
|
||||
\n
|
||||
hello\n
|
||||
\n
|
||||
```
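For reference, a small Go sketch of the elision described above, assuming the fork keeps the standard text/template API (New, Parse, Must, Execute).

```
package main

import (
    "os"

    "github.com/alecthomas/template"
)

func main() {
    // The "\" right after each closing "}}" swallows the following newlines,
    // so this prints just "hello\n" rather than "\nhello\n\n".
    const src = "{{if true}}\\\nhello\n{{end}}\\\n"

    t := template.Must(template.New("demo").Parse(src))
    if err := t.Execute(os.Stdout, nil); err != nil {
        panic(err)
    }
}
```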
|
406
vendor/github.com/alecthomas/template/doc.go
generated
vendored
Normal file
406
vendor/github.com/alecthomas/template/doc.go
generated
vendored
Normal file
@ -0,0 +1,406 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package template implements data-driven templates for generating textual output.
|
||||
|
||||
To generate HTML output, see package html/template, which has the same interface
|
||||
as this package but automatically secures HTML output against certain attacks.
|
||||
|
||||
Templates are executed by applying them to a data structure. Annotations in the
|
||||
template refer to elements of the data structure (typically a field of a struct
|
||||
or a key in a map) to control execution and derive values to be displayed.
|
||||
Execution of the template walks the structure and sets the cursor, represented
|
||||
by a period '.' and called "dot", to the value at the current location in the
|
||||
structure as execution proceeds.
|
||||
|
||||
The input text for a template is UTF-8-encoded text in any format.
|
||||
"Actions"--data evaluations or control structures--are delimited by
|
||||
"{{" and "}}"; all text outside actions is copied to the output unchanged.
|
||||
Actions may not span newlines, although comments can.
|
||||
|
||||
Once parsed, a template may be executed safely in parallel.
|
||||
|
||||
Here is a trivial example that prints "17 items are made of wool".
|
||||
|
||||
type Inventory struct {
|
||||
Material string
|
||||
Count uint
|
||||
}
|
||||
sweaters := Inventory{"wool", 17}
|
||||
tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
|
||||
if err != nil { panic(err) }
|
||||
err = tmpl.Execute(os.Stdout, sweaters)
|
||||
if err != nil { panic(err) }
|
||||
|
||||
More intricate examples appear below.
|
||||
|
||||
Actions
|
||||
|
||||
Here is the list of actions. "Arguments" and "pipelines" are evaluations of
|
||||
data, defined in detail below.
|
||||
|
||||
*/
|
||||
// {{/* a comment */}}
|
||||
// A comment; discarded. May contain newlines.
|
||||
// Comments do not nest and must start and end at the
|
||||
// delimiters, as shown here.
|
||||
/*
|
||||
|
||||
{{pipeline}}
|
||||
The default textual representation of the value of the pipeline
|
||||
is copied to the output.
|
||||
|
||||
{{if pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, T1 is executed. The empty values are false, 0, any
|
||||
nil pointer or interface value, and any array, slice, map, or
|
||||
string of length zero.
|
||||
Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, T0 is executed;
|
||||
otherwise, T1 is executed. Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
|
||||
To simplify the appearance of if-else chains, the else action
|
||||
of an if may include another if directly; the effect is exactly
|
||||
the same as writing
|
||||
{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
|
||||
|
||||
{{range pipeline}} T1 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, nothing is output;
|
||||
otherwise, dot is set to the successive elements of the array,
|
||||
slice, or map and T1 is executed. If the value is a map and the
|
||||
keys are of basic type with a defined order ("comparable"), the
|
||||
elements will be visited in sorted key order.
|
||||
|
||||
{{range pipeline}} T1 {{else}} T0 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, dot is unaffected and
|
||||
T0 is executed; otherwise, dot is set to the successive elements
|
||||
of the array, slice, or map and T1 is executed.
|
||||
|
||||
{{template "name"}}
|
||||
The template with the specified name is executed with nil data.
|
||||
|
||||
{{template "name" pipeline}}
|
||||
The template with the specified name is executed with dot set
|
||||
to the value of the pipeline.
|
||||
|
||||
{{with pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, dot is set to the value of the pipeline and T1 is
|
||||
executed.
|
||||
|
||||
{{with pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, dot is unaffected and T0
|
||||
is executed; otherwise, dot is set to the value of the pipeline
|
||||
and T1 is executed.
|
||||
|
||||
Arguments
|
||||
|
||||
An argument is a simple value, denoted by one of the following.
|
||||
|
||||
- A boolean, string, character, integer, floating-point, imaginary
|
||||
or complex constant in Go syntax. These behave like Go's untyped
|
||||
constants, although raw strings may not span newlines.
|
||||
- The keyword nil, representing an untyped Go nil.
|
||||
- The character '.' (period):
|
||||
.
|
||||
The result is the value of dot.
|
||||
- A variable name, which is a (possibly empty) alphanumeric string
|
||||
preceded by a dollar sign, such as
|
||||
$piOver2
|
||||
or
|
||||
$
|
||||
The result is the value of the variable.
|
||||
Variables are described below.
|
||||
- The name of a field of the data, which must be a struct, preceded
|
||||
by a period, such as
|
||||
.Field
|
||||
The result is the value of the field. Field invocations may be
|
||||
chained:
|
||||
.Field1.Field2
|
||||
Fields can also be evaluated on variables, including chaining:
|
||||
$x.Field1.Field2
|
||||
- The name of a key of the data, which must be a map, preceded
|
||||
by a period, such as
|
||||
.Key
|
||||
The result is the map element value indexed by the key.
|
||||
Key invocations may be chained and combined with fields to any
|
||||
depth:
|
||||
.Field1.Key1.Field2.Key2
|
||||
Although the key must be an alphanumeric identifier, unlike with
|
||||
field names they do not need to start with an upper case letter.
|
||||
Keys can also be evaluated on variables, including chaining:
|
||||
$x.key1.key2
|
||||
- The name of a niladic method of the data, preceded by a period,
|
||||
such as
|
||||
.Method
|
||||
The result is the value of invoking the method with dot as the
|
||||
receiver, dot.Method(). Such a method must have one return value (of
|
||||
any type) or two return values, the second of which is an error.
|
||||
If it has two and the returned error is non-nil, execution terminates
|
||||
and an error is returned to the caller as the value of Execute.
|
||||
Method invocations may be chained and combined with fields and keys
|
||||
to any depth:
|
||||
.Field1.Key1.Method1.Field2.Key2.Method2
|
||||
Methods can also be evaluated on variables, including chaining:
|
||||
$x.Method1.Field
|
||||
- The name of a niladic function, such as
|
||||
fun
|
||||
The result is the value of invoking the function, fun(). The return
|
||||
types and values behave as in methods. Functions and function
|
||||
names are described below.
|
||||
- A parenthesized instance of one the above, for grouping. The result
|
||||
may be accessed by a field or map key invocation.
|
||||
print (.F1 arg1) (.F2 arg2)
|
||||
(.StructValuedMethod "arg").Field
|
||||
|
||||
Arguments may evaluate to any type; if they are pointers the implementation
|
||||
automatically indirects to the base type when required.
|
||||
If an evaluation yields a function value, such as a function-valued
|
||||
field of a struct, the function is not invoked automatically, but it
|
||||
can be used as a truth value for an if action and the like. To invoke
|
||||
it, use the call function, defined below.
|
||||
|
||||
A pipeline is a possibly chained sequence of "commands". A command is a simple
|
||||
value (argument) or a function or method call, possibly with multiple arguments:
|
||||
|
||||
Argument
|
||||
The result is the value of evaluating the argument.
|
||||
.Method [Argument...]
|
||||
The method can be alone or the last element of a chain but,
|
||||
unlike methods in the middle of a chain, it can take arguments.
|
||||
The result is the value of calling the method with the
|
||||
arguments:
|
||||
dot.Method(Argument1, etc.)
|
||||
functionName [Argument...]
|
||||
The result is the value of calling the function associated
|
||||
with the name:
|
||||
function(Argument1, etc.)
|
||||
Functions and function names are described below.
|
||||
|
||||
Pipelines
|
||||
|
||||
A pipeline may be "chained" by separating a sequence of commands with pipeline
|
||||
characters '|'. In a chained pipeline, the result of the each command is
|
||||
passed as the last argument of the following command. The output of the final
|
||||
command in the pipeline is the value of the pipeline.
|
||||
|
||||
The output of a command will be either one value or two values, the second of
|
||||
which has type error. If that second value is present and evaluates to
|
||||
non-nil, execution terminates and the error is returned to the caller of
|
||||
Execute.
|
||||
|
||||
Variables
|
||||
|
||||
A pipeline inside an action may initialize a variable to capture the result.
|
||||
The initialization has syntax
|
||||
|
||||
$variable := pipeline
|
||||
|
||||
where $variable is the name of the variable. An action that declares a
|
||||
variable produces no output.
|
||||
|
||||
If a "range" action initializes a variable, the variable is set to the
|
||||
successive elements of the iteration. Also, a "range" may declare two
|
||||
variables, separated by a comma:
|
||||
|
||||
range $index, $element := pipeline
|
||||
|
||||
in which case $index and $element are set to the successive values of the
|
||||
array/slice index or map key and element, respectively. Note that if there is
|
||||
only one variable, it is assigned the element; this is opposite to the
|
||||
convention in Go range clauses.
|
||||
|
||||
A variable's scope extends to the "end" action of the control structure ("if",
|
||||
"with", or "range") in which it is declared, or to the end of the template if
|
||||
there is no such control structure. A template invocation does not inherit
|
||||
variables from the point of its invocation.
|
||||
|
||||
When execution begins, $ is set to the data argument passed to Execute, that is,
|
||||
to the starting value of dot.
|
||||
|
||||
Examples
|
||||
|
||||
Here are some example one-line templates demonstrating pipelines and variables.
|
||||
All produce the quoted word "output":
|
||||
|
||||
{{"\"output\""}}
|
||||
A string constant.
|
||||
{{`"output"`}}
|
||||
A raw string constant.
|
||||
{{printf "%q" "output"}}
|
||||
A function call.
|
||||
{{"output" | printf "%q"}}
|
||||
A function call whose final argument comes from the previous
|
||||
command.
|
||||
{{printf "%q" (print "out" "put")}}
|
||||
A parenthesized argument.
|
||||
{{"put" | printf "%s%s" "out" | printf "%q"}}
|
||||
A more elaborate call.
|
||||
{{"output" | printf "%s" | printf "%q"}}
|
||||
A longer chain.
|
||||
{{with "output"}}{{printf "%q" .}}{{end}}
|
||||
A with action using dot.
|
||||
{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
|
||||
A with action that creates and uses a variable.
|
||||
{{with $x := "output"}}{{printf "%q" $x}}{{end}}
|
||||
A with action that uses the variable in another action.
|
||||
{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
|
||||
The same, but pipelined.
|
||||
|
||||
Functions
|
||||
|
||||
During execution functions are found in two function maps: first in the
|
||||
template, then in the global function map. By default, no functions are defined
|
||||
in the template but the Funcs method can be used to add them.
|
||||
|
||||
Predefined global functions are named as follows.
|
||||
|
||||
and
|
||||
Returns the boolean AND of its arguments by returning the
|
||||
first empty argument or the last argument, that is,
|
||||
"and x y" behaves as "if x then y else x". All the
|
||||
arguments are evaluated.
|
||||
call
|
||||
Returns the result of calling the first argument, which
|
||||
must be a function, with the remaining arguments as parameters.
|
||||
Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
|
||||
Y is a func-valued field, map entry, or the like.
|
||||
The first argument must be the result of an evaluation
|
||||
that yields a value of function type (as distinct from
|
||||
a predefined function such as print). The function must
|
||||
return either one or two result values, the second of which
|
||||
is of type error. If the arguments don't match the function
|
||||
or the returned error value is non-nil, execution stops.
|
||||
html
|
||||
Returns the escaped HTML equivalent of the textual
|
||||
representation of its arguments.
|
||||
index
|
||||
Returns the result of indexing its first argument by the
|
||||
following arguments. Thus "index x 1 2 3" is, in Go syntax,
|
||||
x[1][2][3]. Each indexed item must be a map, slice, or array.
|
||||
js
|
||||
Returns the escaped JavaScript equivalent of the textual
|
||||
representation of its arguments.
|
||||
len
|
||||
Returns the integer length of its argument.
|
||||
not
|
||||
Returns the boolean negation of its single argument.
|
||||
or
|
||||
Returns the boolean OR of its arguments by returning the
|
||||
first non-empty argument or the last argument, that is,
|
||||
"or x y" behaves as "if x then x else y". All the
|
||||
arguments are evaluated.
|
||||
print
|
||||
An alias for fmt.Sprint
|
||||
printf
|
||||
An alias for fmt.Sprintf
|
||||
println
|
||||
An alias for fmt.Sprintln
|
||||
urlquery
|
||||
Returns the escaped value of the textual representation of
|
||||
its arguments in a form suitable for embedding in a URL query.
|
||||
|
||||
The boolean functions take any zero value to be false and a non-zero
|
||||
value to be true.
|
||||
|
||||
There is also a set of binary comparison operators defined as
|
||||
functions:
|
||||
|
||||
eq
|
||||
Returns the boolean truth of arg1 == arg2
|
||||
ne
|
||||
Returns the boolean truth of arg1 != arg2
|
||||
lt
|
||||
Returns the boolean truth of arg1 < arg2
|
||||
le
|
||||
Returns the boolean truth of arg1 <= arg2
|
||||
gt
|
||||
Returns the boolean truth of arg1 > arg2
|
||||
ge
|
||||
Returns the boolean truth of arg1 >= arg2
|
||||
|
||||
For simpler multi-way equality tests, eq (only) accepts two or more
|
||||
arguments and compares the second and subsequent to the first,
|
||||
returning in effect
|
||||
|
||||
arg1==arg2 || arg1==arg3 || arg1==arg4 ...
|
||||
|
||||
(Unlike with || in Go, however, eq is a function call and all the
|
||||
arguments will be evaluated.)
|
||||
|
||||
The comparison functions work on basic types only (or named basic
|
||||
types, such as "type Celsius float32"). They implement the Go rules
|
||||
for comparison of values, except that size and exact type are
|
||||
ignored, so any integer value, signed or unsigned, may be compared
|
||||
with any other integer value. (The arithmetic value is compared,
|
||||
not the bit pattern, so all negative integers are less than all
|
||||
unsigned integers.) However, as usual, one may not compare an int
|
||||
with a float32 and so on.
|
||||
|
||||
Associated templates
|
||||
|
||||
Each template is named by a string specified when it is created. Also, each
|
||||
template is associated with zero or more other templates that it may invoke by
|
||||
name; such associations are transitive and form a name space of templates.
|
||||
|
||||
A template may use a template invocation to instantiate another associated
|
||||
template; see the explanation of the "template" action above. The name must be
|
||||
that of a template associated with the template that contains the invocation.
|
||||
|
||||
Nested template definitions
|
||||
|
||||
When parsing a template, another template may be defined and associated with the
|
||||
template being parsed. Template definitions must appear at the top level of the
|
||||
template, much like global variables in a Go program.
|
||||
|
||||
The syntax of such definitions is to surround each template declaration with a
|
||||
"define" and "end" action.
|
||||
|
||||
The define action names the template being created by providing a string
|
||||
constant. Here is a simple example:
|
||||
|
||||
`{{define "T1"}}ONE{{end}}
|
||||
{{define "T2"}}TWO{{end}}
|
||||
{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
|
||||
{{template "T3"}}`
|
||||
|
||||
This defines two templates, T1 and T2, and a third T3 that invokes the other two
|
||||
when it is executed. Finally it invokes T3. If executed this template will
|
||||
produce the text
|
||||
|
||||
ONE TWO
|
||||
|
||||
By construction, a template may reside in only one association. If it's
|
||||
necessary to have a template addressable from multiple associations, the
|
||||
template definition must be parsed multiple times to create distinct *Template
|
||||
values, or must be copied with the Clone or AddParseTree method.
|
||||
|
||||
Parse may be called multiple times to assemble the various associated templates;
|
||||
see the ParseFiles and ParseGlob functions and methods for simple ways to parse
|
||||
related templates stored in files.
|
||||
|
||||
A template may be executed directly or through ExecuteTemplate, which executes
|
||||
an associated template identified by name. To invoke our example above, we
|
||||
might write,
|
||||
|
||||
err := tmpl.Execute(os.Stdout, "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
|
||||
or to invoke a particular template explicitly by name,
|
||||
|
||||
err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
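A hedged sketch of the T1/T2/T3 example above as a complete program, showing both Execute and ExecuteTemplate. The standard text/template import is assumed to behave like this vendored copy; the template name "root" is arbitrary.

```
package main

import (
	"log"
	"os"
	"text/template"
)

func main() {
	const text = `{{define "T1"}}ONE{{end}}
{{define "T2"}}TWO{{end}}
{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
{{template "T3"}}`
	tmpl := template.Must(template.New("root").Parse(text))
	// Execute runs the root template, which invokes T3 and prints "ONE TWO".
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		log.Fatalf("execution failed: %s", err)
	}
	// ExecuteTemplate picks an associated template by name; this prints "TWO".
	if err := tmpl.ExecuteTemplate(os.Stdout, "T2", nil); err != nil {
		log.Fatalf("execution failed: %s", err)
	}
}
```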
|
||||
*/
|
||||
package template
|
844
vendor/github.com/alecthomas/template/exec.go
generated
vendored
Normal file
@ -0,0 +1,844 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// state represents the state of an execution. It's not part of the
|
||||
// template so that multiple executions of the same template
|
||||
// can execute in parallel.
|
||||
type state struct {
|
||||
tmpl *Template
|
||||
wr io.Writer
|
||||
node parse.Node // current node, for errors
|
||||
vars []variable // push-down stack of variable values.
|
||||
}
|
||||
|
||||
// variable holds the dynamic value of a variable such as $, $x etc.
|
||||
type variable struct {
|
||||
name string
|
||||
value reflect.Value
|
||||
}
|
||||
|
||||
// push pushes a new variable on the stack.
|
||||
func (s *state) push(name string, value reflect.Value) {
|
||||
s.vars = append(s.vars, variable{name, value})
|
||||
}
|
||||
|
||||
// mark returns the length of the variable stack.
|
||||
func (s *state) mark() int {
|
||||
return len(s.vars)
|
||||
}
|
||||
|
||||
// pop pops the variable stack up to the mark.
|
||||
func (s *state) pop(mark int) {
|
||||
s.vars = s.vars[0:mark]
|
||||
}
|
||||
|
||||
// setVar overwrites the top-nth variable on the stack. Used by range iterations.
|
||||
func (s *state) setVar(n int, value reflect.Value) {
|
||||
s.vars[len(s.vars)-n].value = value
|
||||
}
|
||||
|
||||
// varValue returns the value of the named variable.
|
||||
func (s *state) varValue(name string) reflect.Value {
|
||||
for i := s.mark() - 1; i >= 0; i-- {
|
||||
if s.vars[i].name == name {
|
||||
return s.vars[i].value
|
||||
}
|
||||
}
|
||||
s.errorf("undefined variable: %s", name)
|
||||
return zero
|
||||
}
|
||||
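For orientation, a toy sketch of the same mark/pop discipline outside the template machinery. The scope type and its names are invented for illustration only; they mirror the push/mark/pop calls above.

```
package main

import "fmt"

// scope mimics the variable stack above: remember the depth on entry to a
// block, push freely inside it, truncate back to the mark on exit.
type scope struct{ vars []string }

func (s *scope) push(v string) { s.vars = append(s.vars, v) }
func (s *scope) mark() int     { return len(s.vars) }
func (s *scope) pop(mark int)  { s.vars = s.vars[:mark] }

func main() {
	var s scope
	s.push("$")         // root value, always present
	m := s.mark()       // entering an {{if}} or {{with}} block
	s.push("$x")        // variable declared inside the block
	fmt.Println(s.vars) // [$ $x]
	s.pop(m)            // leaving the block
	fmt.Println(s.vars) // [$]
}
```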
|
||||
var zero reflect.Value
|
||||
|
||||
// at marks the state to be on node n, for error reporting.
|
||||
func (s *state) at(node parse.Node) {
|
||||
s.node = node
|
||||
}
|
||||
|
||||
// doublePercent returns the string with %'s replaced by %%, if necessary,
|
||||
// so it can be used safely inside a Printf format string.
|
||||
func doublePercent(str string) string {
|
||||
if strings.Contains(str, "%") {
|
||||
str = strings.Replace(str, "%", "%%", -1)
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (s *state) errorf(format string, args ...interface{}) {
|
||||
name := doublePercent(s.tmpl.Name())
|
||||
if s.node == nil {
|
||||
format = fmt.Sprintf("template: %s: %s", name, format)
|
||||
} else {
|
||||
location, context := s.tmpl.ErrorContext(s.node)
|
||||
format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
|
||||
}
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// errRecover is the handler that turns panics into returns from the top
|
||||
// level of Parse.
|
||||
func errRecover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
switch err := e.(type) {
|
||||
case runtime.Error:
|
||||
panic(e)
|
||||
case error:
|
||||
*errp = err
|
||||
default:
|
||||
panic(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ExecuteTemplate applies the template associated with t that has the given name
|
||||
// to the specified data object and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
|
||||
tmpl := t.tmpl[name]
|
||||
if tmpl == nil {
|
||||
return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
|
||||
}
|
||||
return tmpl.Execute(wr, data)
|
||||
}
|
||||
|
||||
// Execute applies a parsed template to the specified data object,
|
||||
// and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
|
||||
defer errRecover(&err)
|
||||
value := reflect.ValueOf(data)
|
||||
state := &state{
|
||||
tmpl: t,
|
||||
wr: wr,
|
||||
vars: []variable{{"$", value}},
|
||||
}
|
||||
t.init()
|
||||
if t.Tree == nil || t.Root == nil {
|
||||
var b bytes.Buffer
|
||||
for name, tmpl := range t.tmpl {
|
||||
if tmpl.Tree == nil || tmpl.Root == nil {
|
||||
continue
|
||||
}
|
||||
if b.Len() > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
fmt.Fprintf(&b, "%q", name)
|
||||
}
|
||||
var s string
|
||||
if b.Len() > 0 {
|
||||
s = "; defined templates are: " + b.String()
|
||||
}
|
||||
state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
|
||||
}
|
||||
state.walk(value, t.Root)
|
||||
return
|
||||
}
|
||||
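A hedged usage sketch of Execute with a made-up data type (the mail struct and its fields are illustrative only; the standard text/template API is assumed to match this vendored copy).

```
package main

import (
	"log"
	"os"
	"text/template"
)

// mail is a hypothetical data value for the example.
type mail struct {
	Name  string
	Count int
}

func main() {
	const src = `Hello, {{.Name}}! You have {{.Count}} new messages.`
	t := template.Must(template.New("mail").Parse(src))
	// Dot starts out as the data value passed to Execute.
	if err := t.Execute(os.Stdout, mail{Name: "Ann", Count: 3}); err != nil {
		log.Fatalf("execution failed: %s", err)
	}
}
```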
|
||||
// Walk functions step through the major pieces of the template structure,
|
||||
// generating output as they go.
|
||||
func (s *state) walk(dot reflect.Value, node parse.Node) {
|
||||
s.at(node)
|
||||
switch node := node.(type) {
|
||||
case *parse.ActionNode:
|
||||
// Do not pop variables so they persist until next end.
|
||||
// Also, if the action declares variables, don't print the result.
|
||||
val := s.evalPipeline(dot, node.Pipe)
|
||||
if len(node.Pipe.Decl) == 0 {
|
||||
s.printValue(node, val)
|
||||
}
|
||||
case *parse.IfNode:
|
||||
s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
|
||||
case *parse.ListNode:
|
||||
for _, node := range node.Nodes {
|
||||
s.walk(dot, node)
|
||||
}
|
||||
case *parse.RangeNode:
|
||||
s.walkRange(dot, node)
|
||||
case *parse.TemplateNode:
|
||||
s.walkTemplate(dot, node)
|
||||
case *parse.TextNode:
|
||||
if _, err := s.wr.Write(node.Text); err != nil {
|
||||
s.errorf("%s", err)
|
||||
}
|
||||
case *parse.WithNode:
|
||||
s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
|
||||
default:
|
||||
s.errorf("unknown node: %s", node)
|
||||
}
|
||||
}
|
||||
|
||||
// walkIfOrWith walks an 'if' or 'with' node. The two control structures
|
||||
// are identical in behavior except that 'with' sets dot.
|
||||
func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
|
||||
defer s.pop(s.mark())
|
||||
val := s.evalPipeline(dot, pipe)
|
||||
truth, ok := isTrue(val)
|
||||
if !ok {
|
||||
s.errorf("if/with can't use %v", val)
|
||||
}
|
||||
if truth {
|
||||
if typ == parse.NodeWith {
|
||||
s.walk(val, list)
|
||||
} else {
|
||||
s.walk(dot, list)
|
||||
}
|
||||
} else if elseList != nil {
|
||||
s.walk(dot, elseList)
|
||||
}
|
||||
}
|
||||
|
||||
// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
|
||||
// and whether the value has a meaningful truth value.
|
||||
func isTrue(val reflect.Value) (truth, ok bool) {
|
||||
if !val.IsValid() {
|
||||
// Something like var x interface{}, never set. It's a form of nil.
|
||||
return false, true
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
truth = val.Len() > 0
|
||||
case reflect.Bool:
|
||||
truth = val.Bool()
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
truth = val.Complex() != 0
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
|
||||
truth = !val.IsNil()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
truth = val.Int() != 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
truth = val.Float() != 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
truth = val.Uint() != 0
|
||||
case reflect.Struct:
|
||||
truth = true // Struct values are always true.
|
||||
default:
|
||||
return
|
||||
}
|
||||
return truth, true
|
||||
}
|
||||
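A small sketch of how this truth rule surfaces in template text (the data values are arbitrary; standard text/template assumed equivalent).

```
package main

import (
	"os"
	"text/template"
)

func main() {
	// Zero values (empty slice/map/string, 0, false, nil pointer) are false
	// for {{if}}; anything else is true. Struct values are always true.
	const src = `{{if .}}non-empty{{else}}empty{{end}}` + "\n"
	t := template.Must(template.New("truth").Parse(src))
	_ = t.Execute(os.Stdout, []int{})     // writes "empty"
	_ = t.Execute(os.Stdout, []int{1, 2}) // writes "non-empty"
}
```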
|
||||
func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
|
||||
s.at(r)
|
||||
defer s.pop(s.mark())
|
||||
val, _ := indirect(s.evalPipeline(dot, r.Pipe))
|
||||
// mark top of stack before any variables in the body are pushed.
|
||||
mark := s.mark()
|
||||
oneIteration := func(index, elem reflect.Value) {
|
||||
// Set top var (lexically the second if there are two) to the element.
|
||||
if len(r.Pipe.Decl) > 0 {
|
||||
s.setVar(1, elem)
|
||||
}
|
||||
// Set next var (lexically the first if there are two) to the index.
|
||||
if len(r.Pipe.Decl) > 1 {
|
||||
s.setVar(2, index)
|
||||
}
|
||||
s.walk(elem, r.List)
|
||||
s.pop(mark)
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for i := 0; i < val.Len(); i++ {
|
||||
oneIteration(reflect.ValueOf(i), val.Index(i))
|
||||
}
|
||||
return
|
||||
case reflect.Map:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for _, key := range sortKeys(val.MapKeys()) {
|
||||
oneIteration(key, val.MapIndex(key))
|
||||
}
|
||||
return
|
||||
case reflect.Chan:
|
||||
if val.IsNil() {
|
||||
break
|
||||
}
|
||||
i := 0
|
||||
for ; ; i++ {
|
||||
elem, ok := val.Recv()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
oneIteration(reflect.ValueOf(i), elem)
|
||||
}
|
||||
if i == 0 {
|
||||
break
|
||||
}
|
||||
return
|
||||
case reflect.Invalid:
|
||||
break // An invalid value is likely a nil map, etc. and acts like an empty map.
|
||||
default:
|
||||
s.errorf("range can't iterate over %v", val)
|
||||
}
|
||||
if r.ElseList != nil {
|
||||
s.walk(dot, r.ElseList)
|
||||
}
|
||||
}
|
||||
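A sketch of the index/element variables that walkRange fills in, as seen from template text. The names $i and $v and the sample slice are illustrative; the {{else}} branch corresponds to the ElseList handled above.

```
package main

import (
	"os"
	"text/template"
)

func main() {
	// With two declared variables the first gets the index (or map key) and
	// the second the element; {{else}} runs when there is nothing to range over.
	const src = `{{range $i, $v := .}}{{$i}}={{$v}} {{else}}nothing{{end}}`
	t := template.Must(template.New("list").Parse(src))
	_ = t.Execute(os.Stdout, []string{"a", "b", "c"}) // writes "0=a 1=b 2=c "
}
```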
|
||||
func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
|
||||
s.at(t)
|
||||
tmpl := s.tmpl.tmpl[t.Name]
|
||||
if tmpl == nil {
|
||||
s.errorf("template %q not defined", t.Name)
|
||||
}
|
||||
// Variables declared by the pipeline persist.
|
||||
dot = s.evalPipeline(dot, t.Pipe)
|
||||
newState := *s
|
||||
newState.tmpl = tmpl
|
||||
// No dynamic scoping: template invocations inherit no variables.
|
||||
newState.vars = []variable{{"$", dot}}
|
||||
newState.walk(dot, tmpl.Root)
|
||||
}
|
||||
|
||||
// Eval functions evaluate pipelines, commands, and their elements and extract
|
||||
// values from the data structure by examining fields, calling methods, and so on.
|
||||
// The printing of those values happens only through walk functions.
|
||||
|
||||
// evalPipeline returns the value acquired by evaluating a pipeline. If the
|
||||
// pipeline has a variable declaration, the variable will be pushed on the
|
||||
// stack. Callers should therefore pop the stack after they are finished
|
||||
// executing commands depending on the pipeline value.
|
||||
func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
|
||||
if pipe == nil {
|
||||
return
|
||||
}
|
||||
s.at(pipe)
|
||||
for _, cmd := range pipe.Cmds {
|
||||
value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
|
||||
// If the object has type interface{}, dig down one level to the thing inside.
|
||||
if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
|
||||
value = reflect.ValueOf(value.Interface()) // lovely!
|
||||
}
|
||||
}
|
||||
for _, variable := range pipe.Decl {
|
||||
s.push(variable.Ident[0], value)
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
|
||||
if len(args) > 1 || final.IsValid() {
|
||||
s.errorf("can't give argument to non-function %s", args[0])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
|
||||
firstWord := cmd.Args[0]
|
||||
switch n := firstWord.(type) {
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, cmd.Args, final)
|
||||
case *parse.ChainNode:
|
||||
return s.evalChainNode(dot, n, cmd.Args, final)
|
||||
case *parse.IdentifierNode:
|
||||
// Must be a function.
|
||||
return s.evalFunction(dot, n, cmd, cmd.Args, final)
|
||||
case *parse.PipeNode:
|
||||
// Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
|
||||
return s.evalPipeline(dot, n)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, cmd.Args, final)
|
||||
}
|
||||
s.at(firstWord)
|
||||
s.notAFunction(cmd.Args, final)
|
||||
switch word := firstWord.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(word.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.NilNode:
|
||||
s.errorf("nil is not a command")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(word)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(word.Text)
|
||||
}
|
||||
s.errorf("can't evaluate command %q", firstWord)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// idealConstant is called to return the value of a number in a context where
|
||||
// we don't know the type. In that case, the syntax of the number tells us
|
||||
// its type, and we use Go rules to resolve. Note there is no such thing as
|
||||
// a uint ideal constant in this situation - the value must be of int type.
|
||||
func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
|
||||
// These are ideal constants but we don't know the type
|
||||
// and we have no context. (If it was a method argument,
|
||||
// we'd know what we need.) The syntax guides us to some extent.
|
||||
s.at(constant)
|
||||
switch {
|
||||
case constant.IsComplex:
|
||||
return reflect.ValueOf(constant.Complex128) // incontrovertible.
|
||||
case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
|
||||
return reflect.ValueOf(constant.Float64)
|
||||
case constant.IsInt:
|
||||
n := int(constant.Int64)
|
||||
if int64(n) != constant.Int64 {
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return reflect.ValueOf(n)
|
||||
case constant.IsUint:
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return zero
|
||||
}
|
||||
|
||||
func isHexConstant(s string) bool {
|
||||
return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
|
||||
}
|
||||
|
||||
func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(field)
|
||||
return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(chain)
|
||||
// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
|
||||
pipe := s.evalArg(dot, nil, chain.Node)
|
||||
if len(chain.Field) == 0 {
|
||||
s.errorf("internal error: no fields in evalChainNode")
|
||||
}
|
||||
return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
|
||||
s.at(variable)
|
||||
value := s.varValue(variable.Ident[0])
|
||||
if len(variable.Ident) == 1 {
|
||||
s.notAFunction(args, final)
|
||||
return value
|
||||
}
|
||||
return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
|
||||
}
|
||||
|
||||
// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
|
||||
// dot is the environment in which to evaluate arguments, while
|
||||
// receiver is the value being walked along the chain.
|
||||
func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
n := len(ident)
|
||||
for i := 0; i < n-1; i++ {
|
||||
receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
|
||||
}
|
||||
// Now if it's a method, it gets the arguments.
|
||||
return s.evalField(dot, ident[n-1], node, args, final, receiver)
|
||||
}
|
||||
|
||||
func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(node)
|
||||
name := node.Ident
|
||||
function, ok := findFunction(name, s.tmpl)
|
||||
if !ok {
|
||||
s.errorf("%q is not a defined function", name)
|
||||
}
|
||||
return s.evalCall(dot, function, cmd, name, args, final)
|
||||
}
|
||||
|
||||
// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
|
||||
// The 'final' argument represents the return value from the preceding
|
||||
// value of the pipeline, if any.
|
||||
func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
|
||||
if !receiver.IsValid() {
|
||||
return zero
|
||||
}
|
||||
typ := receiver.Type()
|
||||
receiver, _ = indirect(receiver)
|
||||
// Unless it's an interface, need to get to a value of type *T to guarantee
|
||||
// we see all methods of T and *T.
|
||||
ptr := receiver
|
||||
if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
|
||||
ptr = ptr.Addr()
|
||||
}
|
||||
if method := ptr.MethodByName(fieldName); method.IsValid() {
|
||||
return s.evalCall(dot, method, node, fieldName, args, final)
|
||||
}
|
||||
hasArgs := len(args) > 1 || final.IsValid()
|
||||
// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
|
||||
receiver, isNil := indirect(receiver)
|
||||
if isNil {
|
||||
s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
|
||||
}
|
||||
switch receiver.Kind() {
|
||||
case reflect.Struct:
|
||||
tField, ok := receiver.Type().FieldByName(fieldName)
|
||||
if ok {
|
||||
field := receiver.FieldByIndex(tField.Index)
|
||||
if tField.PkgPath != "" { // field is unexported
|
||||
s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
|
||||
}
|
||||
// If it's a function, we must call it.
|
||||
if hasArgs {
|
||||
s.errorf("%s has arguments but cannot be invoked as function", fieldName)
|
||||
}
|
||||
return field
|
||||
}
|
||||
s.errorf("%s is not a field of struct type %s", fieldName, typ)
|
||||
case reflect.Map:
|
||||
// If it's a map, attempt to use the field name as a key.
|
||||
nameVal := reflect.ValueOf(fieldName)
|
||||
if nameVal.Type().AssignableTo(receiver.Type().Key()) {
|
||||
if hasArgs {
|
||||
s.errorf("%s is not a method but has arguments", fieldName)
|
||||
}
|
||||
return receiver.MapIndex(nameVal)
|
||||
}
|
||||
}
|
||||
s.errorf("can't evaluate field %s in type %s", fieldName, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
var (
|
||||
errorType = reflect.TypeOf((*error)(nil)).Elem()
|
||||
fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
|
||||
)
|
||||
|
||||
// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
|
||||
// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
|
||||
// as the function itself.
|
||||
func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
if args != nil {
|
||||
args = args[1:] // Zeroth arg is function name/node; not passed to function.
|
||||
}
|
||||
typ := fun.Type()
|
||||
numIn := len(args)
|
||||
if final.IsValid() {
|
||||
numIn++
|
||||
}
|
||||
numFixed := len(args)
|
||||
if typ.IsVariadic() {
|
||||
numFixed = typ.NumIn() - 1 // last arg is the variadic one.
|
||||
if numIn < numFixed {
|
||||
s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
|
||||
}
|
||||
} else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
|
||||
s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
// TODO: This could still be a confusing error; maybe goodFunc should provide info.
|
||||
s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
|
||||
}
|
||||
// Build the arg list.
|
||||
argv := make([]reflect.Value, numIn)
|
||||
// Args must be evaluated. Fixed args first.
|
||||
i := 0
|
||||
for ; i < numFixed && i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, typ.In(i), args[i])
|
||||
}
|
||||
// Now the ... args.
|
||||
if typ.IsVariadic() {
|
||||
argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
|
||||
for ; i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, argType, args[i])
|
||||
}
|
||||
}
|
||||
// Add final value if necessary.
|
||||
if final.IsValid() {
|
||||
t := typ.In(typ.NumIn() - 1)
|
||||
if typ.IsVariadic() {
|
||||
t = t.Elem()
|
||||
}
|
||||
argv[i] = s.validateType(final, t)
|
||||
}
|
||||
result := fun.Call(argv)
|
||||
// If we have an error that is not nil, stop execution and return that error to the caller.
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
s.at(node)
|
||||
s.errorf("error calling %s: %s", name, result[1].Interface().(error))
|
||||
}
|
||||
return result[0]
|
||||
}
|
||||
|
||||
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
|
||||
func canBeNil(typ reflect.Type) bool {
|
||||
switch typ.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// validateType guarantees that the value is valid and assignable to the type.
|
||||
func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
|
||||
if !value.IsValid() {
|
||||
if typ == nil || canBeNil(typ) {
|
||||
// An untyped nil interface{}. Accept as a proper nil value.
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("invalid value; expected %s", typ)
|
||||
}
|
||||
if typ != nil && !value.Type().AssignableTo(typ) {
|
||||
if value.Kind() == reflect.Interface && !value.IsNil() {
|
||||
value = value.Elem()
|
||||
if value.Type().AssignableTo(typ) {
|
||||
return value
|
||||
}
|
||||
// fallthrough
|
||||
}
|
||||
// Does one dereference or indirection work? We could do more, as we
|
||||
// do with method receivers, but that gets messy and method receivers
|
||||
// are much more constrained, so it makes more sense there than here.
|
||||
// Besides, one is almost always all you need.
|
||||
switch {
|
||||
case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
|
||||
value = value.Elem()
|
||||
if !value.IsValid() {
|
||||
s.errorf("dereference of nil pointer of type %s", typ)
|
||||
}
|
||||
case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
|
||||
value = value.Addr()
|
||||
default:
|
||||
s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
|
||||
}
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch arg := n.(type) {
|
||||
case *parse.DotNode:
|
||||
return s.validateType(dot, typ)
|
||||
case *parse.NilNode:
|
||||
if canBeNil(typ) {
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("cannot assign nil to %s", typ)
|
||||
case *parse.FieldNode:
|
||||
return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
|
||||
case *parse.VariableNode:
|
||||
return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
|
||||
case *parse.PipeNode:
|
||||
return s.validateType(s.evalPipeline(dot, arg), typ)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, arg, arg, nil, zero)
|
||||
case *parse.ChainNode:
|
||||
return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
|
||||
}
|
||||
switch typ.Kind() {
|
||||
case reflect.Bool:
|
||||
return s.evalBool(typ, n)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return s.evalComplex(typ, n)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return s.evalFloat(typ, n)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return s.evalInteger(typ, n)
|
||||
case reflect.Interface:
|
||||
if typ.NumMethod() == 0 {
|
||||
return s.evalEmptyInterface(dot, n)
|
||||
}
|
||||
case reflect.String:
|
||||
return s.evalString(typ, n)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return s.evalUnsignedInteger(typ, n)
|
||||
}
|
||||
s.errorf("can't handle %s for arg of type %s", n, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.BoolNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetBool(n.True)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected bool; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.StringNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetString(n.Text)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected string; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetInt(n.Int64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetUint(n.Uint64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected unsigned integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetFloat(n.Float64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected float; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetComplex(n.Complex128)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected complex; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch n := n.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(n.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, nil, zero)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, n, n, nil, zero)
|
||||
case *parse.NilNode:
|
||||
// NilNode is handled in evalArg, the only place that calls here.
|
||||
s.errorf("evalEmptyInterface: nil (can't happen)")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(n)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(n.Text)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, nil, zero)
|
||||
case *parse.PipeNode:
|
||||
return s.evalPipeline(dot, n)
|
||||
}
|
||||
s.errorf("can't handle assignment of %s to empty interface argument", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
|
||||
// We indirect through pointers and empty interfaces (only) because
|
||||
// non-empty interfaces have methods we might need.
|
||||
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
|
||||
for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
|
||||
if v.IsNil() {
|
||||
return v, true
|
||||
}
|
||||
if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return v, false
|
||||
}
|
||||
|
||||
// printValue writes the textual representation of the value to the output of
|
||||
// the template.
|
||||
func (s *state) printValue(n parse.Node, v reflect.Value) {
|
||||
s.at(n)
|
||||
iface, ok := printableValue(v)
|
||||
if !ok {
|
||||
s.errorf("can't print %s of type %s", n, v.Type())
|
||||
}
|
||||
fmt.Fprint(s.wr, iface)
|
||||
}
|
||||
|
||||
// printableValue returns the, possibly indirected, interface value inside v that
|
||||
// is best for a call to a formatted printer.
|
||||
func printableValue(v reflect.Value) (interface{}, bool) {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v, _ = indirect(v) // fmt.Fprint handles nil.
|
||||
}
|
||||
if !v.IsValid() {
|
||||
return "<no value>", true
|
||||
}
|
||||
|
||||
if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
|
||||
if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
|
||||
v = v.Addr()
|
||||
} else {
|
||||
switch v.Kind() {
|
||||
case reflect.Chan, reflect.Func:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
return v.Interface(), true
|
||||
}
|
||||
|
||||
// Types to help sort the keys in a map for reproducible output.
|
||||
|
||||
type rvs []reflect.Value
|
||||
|
||||
func (x rvs) Len() int { return len(x) }
|
||||
func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
type rvInts struct{ rvs }
|
||||
|
||||
func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
|
||||
|
||||
type rvUints struct{ rvs }
|
||||
|
||||
func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
|
||||
|
||||
type rvFloats struct{ rvs }
|
||||
|
||||
func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
|
||||
|
||||
type rvStrings struct{ rvs }
|
||||
|
||||
func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
|
||||
|
||||
// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
|
||||
func sortKeys(v []reflect.Value) []reflect.Value {
|
||||
if len(v) <= 1 {
|
||||
return v
|
||||
}
|
||||
switch v[0].Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
sort.Sort(rvFloats{v})
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
sort.Sort(rvInts{v})
|
||||
case reflect.String:
|
||||
sort.Sort(rvStrings{v})
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
sort.Sort(rvUints{v})
|
||||
}
|
||||
return v
|
||||
}
|
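A sketch showing the practical effect of sortKeys: ranging over a map from a template produces deterministic output even though Go's own map iteration order is randomized. The map literal is arbitrary; standard text/template is assumed to behave like this vendored copy.

```
package main

import (
	"os"
	"text/template"
)

func main() {
	// Map keys of basic types are sorted before ranging, so this always
	// writes "a=1 b=2 c=3 " regardless of map iteration order.
	const src = `{{range $k, $v := .}}{{$k}}={{$v}} {{end}}`
	t := template.Must(template.New("m").Parse(src))
	_ = t.Execute(os.Stdout, map[string]int{"c": 3, "a": 1, "b": 2})
}
```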
598
vendor/github.com/alecthomas/template/funcs.go
generated
vendored
Normal file
@ -0,0 +1,598 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// FuncMap is the type of the map defining the mapping from names to functions.
|
||||
// Each function must have either a single return value, or two return values of
|
||||
// which the second has type error. In that case, if the second (error)
|
||||
// return value evaluates to non-nil during execution, execution terminates and
|
||||
// Execute returns that error.
|
||||
type FuncMap map[string]interface{}
|
||||
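A hedged sketch of installing a FuncMap with both allowed signatures: one function returning a single value, and one returning (value, error) so that a non-nil error stops execution. The function names upper and head are invented for the example.

```
package main

import (
	"fmt"
	"os"
	"strings"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// Single result: always allowed.
		"upper": strings.ToUpper,
		// Two results where the second is an error: a non-nil error stops
		// execution and is returned by Execute.
		"head": func(s string, n int) (string, error) {
			if n > len(s) {
				return "", fmt.Errorf("head: %d > len(%q)", n, s)
			}
			return s[:n], nil
		},
	}
	// Funcs must be called before Parse so the parser knows the names.
	t := template.Must(template.New("f").Funcs(funcs).Parse(`{{upper (head . 3)}}`))
	if err := t.Execute(os.Stdout, "golang"); err != nil { // writes "GOL"
		fmt.Fprintln(os.Stderr, err)
	}
}
```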
|
||||
var builtins = FuncMap{
|
||||
"and": and,
|
||||
"call": call,
|
||||
"html": HTMLEscaper,
|
||||
"index": index,
|
||||
"js": JSEscaper,
|
||||
"len": length,
|
||||
"not": not,
|
||||
"or": or,
|
||||
"print": fmt.Sprint,
|
||||
"printf": fmt.Sprintf,
|
||||
"println": fmt.Sprintln,
|
||||
"urlquery": URLQueryEscaper,
|
||||
|
||||
// Comparisons
|
||||
"eq": eq, // ==
|
||||
"ge": ge, // >=
|
||||
"gt": gt, // >
|
||||
"le": le, // <=
|
||||
"lt": lt, // <
|
||||
"ne": ne, // !=
|
||||
}
|
||||
|
||||
var builtinFuncs = createValueFuncs(builtins)
|
||||
|
||||
// createValueFuncs turns a FuncMap into a map[string]reflect.Value
|
||||
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
|
||||
m := make(map[string]reflect.Value)
|
||||
addValueFuncs(m, funcMap)
|
||||
return m
|
||||
}
|
||||
|
||||
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
|
||||
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
v := reflect.ValueOf(fn)
|
||||
if v.Kind() != reflect.Func {
|
||||
panic("value for " + name + " not a function")
|
||||
}
|
||||
if !goodFunc(v.Type()) {
|
||||
panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
|
||||
}
|
||||
out[name] = v
|
||||
}
|
||||
}
|
||||
|
||||
// addFuncs adds to values the functions in funcs. It does no checking of the input -
|
||||
// call addValueFuncs first.
|
||||
func addFuncs(out, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
out[name] = fn
|
||||
}
|
||||
}
|
||||
|
||||
// goodFunc checks that the function or method has the right result signature.
|
||||
func goodFunc(typ reflect.Type) bool {
|
||||
// We allow functions with 1 result or 2 results where the second is an error.
|
||||
switch {
|
||||
case typ.NumOut() == 1:
|
||||
return true
|
||||
case typ.NumOut() == 2 && typ.Out(1) == errorType:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// findFunction looks for a function in the template's function map and then in the global map.
|
||||
func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
|
||||
if tmpl != nil && tmpl.common != nil {
|
||||
if fn := tmpl.execFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
}
|
||||
if fn := builtinFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
|
||||
// Indexing.
|
||||
|
||||
// index returns the result of indexing its first argument by the following
|
||||
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
|
||||
// indexed item must be a map, slice, or array.
|
||||
func index(item interface{}, indices ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(item)
|
||||
for _, i := range indices {
|
||||
index := reflect.ValueOf(i)
|
||||
var isNil bool
|
||||
if v, isNil = indirect(v); isNil {
|
||||
return nil, fmt.Errorf("index of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.String:
|
||||
var x int64
|
||||
switch index.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
x = index.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
x = int64(index.Uint())
|
||||
default:
|
||||
return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
|
||||
}
|
||||
if x < 0 || x >= int64(v.Len()) {
|
||||
return nil, fmt.Errorf("index out of range: %d", x)
|
||||
}
|
||||
v = v.Index(int(x))
|
||||
case reflect.Map:
|
||||
if !index.IsValid() {
|
||||
index = reflect.Zero(v.Type().Key())
|
||||
}
|
||||
if !index.Type().AssignableTo(v.Type().Key()) {
|
||||
return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
|
||||
}
|
||||
if x := v.MapIndex(index); x.IsValid() {
|
||||
v = x
|
||||
} else {
|
||||
v = reflect.Zero(v.Type().Elem())
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("can't index item of type %s", v.Type())
|
||||
}
|
||||
}
|
||||
return v.Interface(), nil
|
||||
}
|
||||
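A sketch of the index builtin from template text; "index . \"servers\" 0" below is, in Go syntax, data["servers"][0]. The map literal is made up for the example.

```
package main

import (
	"os"
	"text/template"
)

func main() {
	// Maps, slices and arrays can all be indexed, and indices chain.
	const src = `{{index . "servers" 0}}`
	t := template.Must(template.New("idx").Parse(src))
	data := map[string][]string{"servers": {"alpha", "beta"}}
	_ = t.Execute(os.Stdout, data) // writes "alpha"
}
```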
|
||||
// Length
|
||||
|
||||
// length returns the length of the item, with an error if it has no defined length.
|
||||
func length(item interface{}) (int, error) {
|
||||
v, isNil := indirect(reflect.ValueOf(item))
|
||||
if isNil {
|
||||
return 0, fmt.Errorf("len of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len(), nil
|
||||
}
|
||||
return 0, fmt.Errorf("len of type %s", v.Type())
|
||||
}
|
||||
|
||||
// Function invocation
|
||||
|
||||
// call returns the result of evaluating the first argument as a function.
|
||||
// The function must return 1 result, or 2 results, the second of which is an error.
|
||||
func call(fn interface{}, args ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(fn)
|
||||
typ := v.Type()
|
||||
if typ.Kind() != reflect.Func {
|
||||
return nil, fmt.Errorf("non-function of type %s", typ)
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
|
||||
}
|
||||
numIn := typ.NumIn()
|
||||
var dddType reflect.Type
|
||||
if typ.IsVariadic() {
|
||||
if len(args) < numIn-1 {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
|
||||
}
|
||||
dddType = typ.In(numIn - 1).Elem()
|
||||
} else {
|
||||
if len(args) != numIn {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
|
||||
}
|
||||
}
|
||||
argv := make([]reflect.Value, len(args))
|
||||
for i, arg := range args {
|
||||
value := reflect.ValueOf(arg)
|
||||
// Compute the expected type. Clumsy because of variadics.
|
||||
var argType reflect.Type
|
||||
if !typ.IsVariadic() || i < numIn-1 {
|
||||
argType = typ.In(i)
|
||||
} else {
|
||||
argType = dddType
|
||||
}
|
||||
if !value.IsValid() && canBeNil(argType) {
|
||||
value = reflect.Zero(argType)
|
||||
}
|
||||
if !value.Type().AssignableTo(argType) {
|
||||
return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
|
||||
}
|
||||
argv[i] = value
|
||||
}
|
||||
result := v.Call(argv)
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
return result[0].Interface(), result[1].Interface().(error)
|
||||
}
|
||||
return result[0].Interface(), nil
|
||||
}
|
||||
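A sketch of the call builtin from template text. The Add function and the surrounding map are made up; the called function must return one value, or two where the second is an error, as required above.

```
package main

import (
	"os"
	"text/template"
)

func main() {
	// "call .Add 2 3" invokes the function value stored in the data.
	const src = `{{call .Add 2 3}}`
	t := template.Must(template.New("call").Parse(src))
	data := map[string]interface{}{
		"Add": func(a, b int) int { return a + b },
	}
	_ = t.Execute(os.Stdout, data) // writes "5"
}
```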
|
||||
// Boolean logic.
|
||||
|
||||
func truth(a interface{}) bool {
|
||||
t, _ := isTrue(reflect.ValueOf(a))
|
||||
return t
|
||||
}
|
||||
|
||||
// and computes the Boolean AND of its arguments, returning
|
||||
// the first false argument it encounters, or the last argument.
|
||||
func and(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if !truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if !truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// or computes the Boolean OR of its arguments, returning
|
||||
// the first true argument it encounters, or the last argument.
|
||||
func or(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// not returns the Boolean negation of its argument.
|
||||
func not(arg interface{}) (truth bool) {
|
||||
truth, _ = isTrue(reflect.ValueOf(arg))
|
||||
return !truth
|
||||
}
|
||||
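A sketch of these truth functions used from template text (the Ready and Draft flags are invented). Note that and/or return one of their arguments, not a bool, and evaluate every argument.

```
package main

import (
	"os"
	"text/template"
)

func main() {
	const src = `{{if and .Ready (not .Draft)}}publish{{else}}hold{{end}}`
	t := template.Must(template.New("flags").Parse(src))
	_ = t.Execute(os.Stdout, map[string]bool{"Ready": true, "Draft": false}) // writes "publish"
}
```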
|
||||
// Comparison.
|
||||
|
||||
// TODO: Perhaps allow comparison between signed and unsigned integers.
|
||||
|
||||
var (
|
||||
errBadComparisonType = errors.New("invalid type for comparison")
|
||||
errBadComparison = errors.New("incompatible types for comparison")
|
||||
errNoComparison = errors.New("missing argument for comparison")
|
||||
)
|
||||
|
||||
type kind int
|
||||
|
||||
const (
|
||||
invalidKind kind = iota
|
||||
boolKind
|
||||
complexKind
|
||||
intKind
|
||||
floatKind
|
||||
integerKind
|
||||
stringKind
|
||||
uintKind
|
||||
)
|
||||
|
||||
func basicKind(v reflect.Value) (kind, error) {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return boolKind, nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return intKind, nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return uintKind, nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return floatKind, nil
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return complexKind, nil
|
||||
case reflect.String:
|
||||
return stringKind, nil
|
||||
}
|
||||
return invalidKind, errBadComparisonType
|
||||
}
|
||||
|
||||
// eq evaluates the comparison a == b || a == c || ...
|
||||
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(arg2) == 0 {
|
||||
return false, errNoComparison
|
||||
}
|
||||
for _, arg := range arg2 {
|
||||
v2 := reflect.ValueOf(arg)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind:
|
||||
truth = v1.Bool() == v2.Bool()
|
||||
case complexKind:
|
||||
truth = v1.Complex() == v2.Complex()
|
||||
case floatKind:
|
||||
truth = v1.Float() == v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() == v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() == v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() == v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
if truth {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ne evaluates the comparison a != b.
|
||||
func ne(arg1, arg2 interface{}) (bool, error) {
|
||||
// != is the inverse of ==.
|
||||
equal, err := eq(arg1, arg2)
|
||||
return !equal, err
|
||||
}
|
||||
|
||||
// lt evaluates the comparison a < b.
|
||||
func lt(arg1, arg2 interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
v2 := reflect.ValueOf(arg2)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind, complexKind:
|
||||
return false, errBadComparisonType
|
||||
case floatKind:
|
||||
truth = v1.Float() < v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() < v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() < v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() < v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
return truth, nil
|
||||
}
|
||||
|
||||
// le evaluates the comparison a <= b.
|
||||
func le(arg1, arg2 interface{}) (bool, error) {
|
||||
// <= is < or ==.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if lessThan || err != nil {
|
||||
return lessThan, err
|
||||
}
|
||||
return eq(arg1, arg2)
|
||||
}
|
||||
|
||||
// gt evaluates the comparison a > b.
|
||||
func gt(arg1, arg2 interface{}) (bool, error) {
|
||||
// > is the inverse of <=.
|
||||
lessOrEqual, err := le(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessOrEqual, nil
|
||||
}
|
||||
|
||||
// ge evaluates the comparison a >= b.
|
||||
func ge(arg1, arg2 interface{}) (bool, error) {
|
||||
// >= is the inverse of <.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessThan, nil
|
||||
}
|
||||
|
||||
// HTML escaping.
|
||||
|
||||
var (
|
||||
htmlQuot = []byte("&#34;") // shorter than "&quot;"
|
||||
htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
|
||||
htmlAmp = []byte("&amp;")
|
||||
htmlLt = []byte("&lt;")
|
||||
htmlGt = []byte("&gt;")
|
||||
)
|
||||
|
||||
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
|
||||
func HTMLEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i, c := range b {
|
||||
var html []byte
|
||||
switch c {
|
||||
case '"':
|
||||
html = htmlQuot
|
||||
case '\'':
|
||||
html = htmlApos
|
||||
case '&':
|
||||
html = htmlAmp
|
||||
case '<':
|
||||
html = htmlLt
|
||||
case '>':
|
||||
html = htmlGt
|
||||
default:
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
w.Write(html)
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
|
||||
func HTMLEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexAny(s, `'"&<>`) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
HTMLEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// HTMLEscaper returns the escaped HTML equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func HTMLEscaper(args ...interface{}) string {
|
||||
return HTMLEscapeString(evalArgs(args))
|
||||
}
|
||||
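A quick sketch of the escaping behavior via HTMLEscapeString (the input string is arbitrary); only the five HTML-special characters are rewritten, and already-clean strings are returned unchanged without allocation.

```
package main

import (
	"fmt"
	"text/template"
)

func main() {
	fmt.Println(template.HTMLEscapeString(`<a href="x">Tom & Jerry</a>`))
	// writes: &lt;a href=&#34;x&#34;&gt;Tom &amp; Jerry&lt;/a&gt;
}
```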
|
||||
// JavaScript escaping.
|
||||
|
||||
var (
|
||||
jsLowUni = []byte(`\u00`)
|
||||
hex = []byte("0123456789ABCDEF")
|
||||
|
||||
jsBackslash = []byte(`\\`)
|
||||
jsApos = []byte(`\'`)
|
||||
jsQuot = []byte(`\"`)
|
||||
jsLt = []byte(`\x3C`)
|
||||
jsGt = []byte(`\x3E`)
|
||||
)
|
||||
|
||||
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
|
||||
func JSEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i := 0; i < len(b); i++ {
|
||||
c := b[i]
|
||||
|
||||
if !jsIsSpecial(rune(c)) {
|
||||
// fast path: nothing to do
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
|
||||
if c < utf8.RuneSelf {
|
||||
// Quotes, slashes and angle brackets get quoted.
|
||||
// Control characters get written as \u00XX.
|
||||
switch c {
|
||||
case '\\':
|
||||
w.Write(jsBackslash)
|
||||
case '\'':
|
||||
w.Write(jsApos)
|
||||
case '"':
|
||||
w.Write(jsQuot)
|
||||
case '<':
|
||||
w.Write(jsLt)
|
||||
case '>':
|
||||
w.Write(jsGt)
|
||||
default:
|
||||
w.Write(jsLowUni)
|
||||
t, b := c>>4, c&0x0f
|
||||
w.Write(hex[t : t+1])
|
||||
w.Write(hex[b : b+1])
|
||||
}
|
||||
} else {
|
||||
// Unicode rune.
|
||||
r, size := utf8.DecodeRune(b[i:])
|
||||
if unicode.IsPrint(r) {
|
||||
w.Write(b[i : i+size])
|
||||
} else {
|
||||
fmt.Fprintf(w, "\\u%04X", r)
|
||||
}
|
||||
i += size - 1
|
||||
}
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
|
||||
func JSEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexFunc(s, jsIsSpecial) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
JSEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func jsIsSpecial(r rune) bool {
|
||||
switch r {
|
||||
case '\\', '\'', '"', '<', '>':
|
||||
return true
|
||||
}
|
||||
return r < ' ' || utf8.RuneSelf <= r
|
||||
}
|
||||
|
||||
// JSEscaper returns the escaped JavaScript equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func JSEscaper(args ...interface{}) string {
|
||||
return JSEscapeString(evalArgs(args))
|
||||
}
|
||||
|
||||
// URLQueryEscaper returns the escaped value of the textual representation of
|
||||
// its arguments in a form suitable for embedding in a URL query.
|
||||
func URLQueryEscaper(args ...interface{}) string {
|
||||
return url.QueryEscape(evalArgs(args))
|
||||
}
|
||||
|
||||
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
|
||||
// fmt.Sprint(args...)
|
||||
// except that each argument is indirected (if a pointer), as required,
|
||||
// using the same rules as the default string evaluation during template
|
||||
// execution.
|
||||
func evalArgs(args []interface{}) string {
|
||||
ok := false
|
||||
var s string
|
||||
// Fast path for simple common case.
|
||||
if len(args) == 1 {
|
||||
s, ok = args[0].(string)
|
||||
}
|
||||
if !ok {
|
||||
for i, arg := range args {
|
||||
a, ok := printableValue(reflect.ValueOf(arg))
|
||||
if ok {
|
||||
args[i] = a
|
||||
} // else let fmt do its thing
|
||||
}
|
||||
s = fmt.Sprint(args...)
|
||||
}
|
||||
return s
|
||||
}
|
108
vendor/github.com/alecthomas/template/helper.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Helper functions to make constructing templates easier.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Functions and methods to parse templates.
|
||||
|
||||
// Must is a helper that wraps a call to a function returning (*Template, error)
|
||||
// and panics if the error is non-nil. It is intended for use in variable
|
||||
// initializations such as
|
||||
// var t = template.Must(template.New("name").Parse("text"))
|
||||
func Must(t *Template, err error) *Template {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ParseFiles creates a new Template and parses the template definitions from
|
||||
// the named files. The returned template's name will have the (base) name and
|
||||
// (parsed) contents of the first file. There must be at least one file.
|
||||
// If an error occurs, parsing stops and the returned *Template is nil.
|
||||
func ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(nil, filenames...)
|
||||
}
|
||||
|
||||
// ParseFiles parses the named files and associates the resulting templates with
|
||||
// t. If an error occurs, parsing stops and the returned template is nil;
|
||||
// otherwise it is t. There must be at least one file.
|
||||
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
||||
|
||||
// parseFiles is the helper for the method and function. If the argument
|
||||
// template is nil, it is created from the first file.
|
||||
func parseFiles(t *Template, filenames ...string) (*Template, error) {
|
||||
if len(filenames) == 0 {
|
||||
// Not really a problem, but be consistent.
|
||||
return nil, fmt.Errorf("template: no files named in call to ParseFiles")
|
||||
}
|
||||
for _, filename := range filenames {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := string(b)
|
||||
name := filepath.Base(filename)
|
||||
// First template becomes return value if not already defined,
|
||||
// and we use that one for subsequent New calls to associate
|
||||
// all the templates together. Also, if this file has the same name
|
||||
// as t, this file becomes the contents of t, so
|
||||
// t, err := New(name).Funcs(xxx).ParseFiles(name)
|
||||
// works. Otherwise we create a new template associated with t.
|
||||
var tmpl *Template
|
||||
if t == nil {
|
||||
t = New(name)
|
||||
}
|
||||
if name == t.Name() {
|
||||
tmpl = t
|
||||
} else {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
_, err = tmpl.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// ParseGlob creates a new Template and parses the template definitions from the
|
||||
// files identified by the pattern, which must match at least one file. The
|
||||
// returned template will have the (base) name and (parsed) contents of the
|
||||
// first file matched by the pattern. ParseGlob is equivalent to calling
|
||||
// ParseFiles with the list of files matched by the pattern.
|
||||
func ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(nil, pattern)
|
||||
}
|
||||
|
||||
// ParseGlob parses the template definitions in the files identified by the
|
||||
// pattern and associates the resulting templates with t. The pattern is
|
||||
// processed by filepath.Glob and must match at least one file. ParseGlob is
|
||||
// equivalent to calling t.ParseFiles with the list of files matched by the
|
||||
// pattern.
|
||||
func (t *Template) ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(t, pattern)
|
||||
}
|
||||
|
||||
// parseGlob is the implementation of the function and method ParseGlob.
|
||||
func parseGlob(t *Template, pattern string) (*Template, error) {
|
||||
filenames, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(filenames) == 0 {
|
||||
return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
|
||||
}
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
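A hedged sketch of loading templates with Must and ParseGlob. The templates/*.tmpl layout and the index.tmpl name are hypothetical; each parsed template is named after its base file name, as described above.

```
package main

import (
	"os"
	"text/template"
)

func main() {
	// Must panics on a parse error, the usual pattern at program start-up.
	// Hypothetical layout: every file under templates/ defines one template.
	tmpl := template.Must(template.ParseGlob("templates/*.tmpl"))
	_ = tmpl.ExecuteTemplate(os.Stdout, "index.tmpl", nil)
}
```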
556
vendor/github.com/alecthomas/template/parse/lex.go
generated
vendored
Normal file
@ -0,0 +1,556 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// item represents a token or text string returned from the scanner.
|
||||
type item struct {
|
||||
typ itemType // The type of this item.
|
||||
pos Pos // The starting position, in bytes, of this item in the input string.
|
||||
val string // The value of this item.
|
||||
}
|
||||
|
||||
func (i item) String() string {
|
||||
switch {
|
||||
case i.typ == itemEOF:
|
||||
return "EOF"
|
||||
case i.typ == itemError:
|
||||
return i.val
|
||||
case i.typ > itemKeyword:
|
||||
return fmt.Sprintf("<%s>", i.val)
|
||||
case len(i.val) > 10:
|
||||
return fmt.Sprintf("%.10q...", i.val)
|
||||
}
|
||||
return fmt.Sprintf("%q", i.val)
|
||||
}
|
||||
|
||||
// itemType identifies the type of lex items.
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota // error occurred; value is text of error
|
||||
itemBool // boolean constant
|
||||
itemChar // printable ASCII character; grab bag for comma etc.
|
||||
itemCharConstant // character constant
|
||||
itemComplex // complex constant (1+2i); imaginary is just a number
|
||||
itemColonEquals // colon-equals (':=') introducing a declaration
|
||||
itemEOF
|
||||
itemField // alphanumeric identifier starting with '.'
|
||||
itemIdentifier // alphanumeric identifier not starting with '.'
|
||||
itemLeftDelim // left action delimiter
|
||||
itemLeftParen // '(' inside action
|
||||
itemNumber // simple number, including imaginary
|
||||
itemPipe // pipe symbol
|
||||
itemRawString // raw quoted string (includes quotes)
|
||||
itemRightDelim // right action delimiter
|
||||
itemElideNewline // elide newline after right delim
|
||||
itemRightParen // ')' inside action
|
||||
itemSpace // run of spaces separating arguments
|
||||
itemString // quoted string (includes quotes)
|
||||
itemText // plain text
|
||||
itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
|
||||
// Keywords appear after all the rest.
|
||||
itemKeyword // used only to delimit the keywords
|
||||
itemDot // the cursor, spelled '.'
|
||||
itemDefine // define keyword
|
||||
itemElse // else keyword
|
||||
itemEnd // end keyword
|
||||
itemIf // if keyword
|
||||
itemNil // the untyped nil constant, easiest to treat as a keyword
|
||||
itemRange // range keyword
|
||||
itemTemplate // template keyword
|
||||
itemWith // with keyword
|
||||
)
|
||||
|
||||
var key = map[string]itemType{
|
||||
".": itemDot,
|
||||
"define": itemDefine,
|
||||
"else": itemElse,
|
||||
"end": itemEnd,
|
||||
"if": itemIf,
|
||||
"range": itemRange,
|
||||
"nil": itemNil,
|
||||
"template": itemTemplate,
|
||||
"with": itemWith,
|
||||
}
|
||||
|
||||
const eof = -1
|
||||
|
||||
// stateFn represents the state of the scanner as a function that returns the next state.
|
||||
type stateFn func(*lexer) stateFn
|
||||
|
||||
// lexer holds the state of the scanner.
|
||||
type lexer struct {
|
||||
name string // the name of the input; used only for error reports
|
||||
input string // the string being scanned
|
||||
leftDelim string // start of action
|
||||
rightDelim string // end of action
|
||||
state stateFn // the next lexing function to enter
|
||||
pos Pos // current position in the input
|
||||
start Pos // start position of this item
|
||||
width Pos // width of last rune read from input
|
||||
lastPos Pos // position of most recent item returned by nextItem
|
||||
items chan item // channel of scanned items
|
||||
parenDepth int // nesting depth of ( ) exprs
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
func (l *lexer) next() rune {
|
||||
if int(l.pos) >= len(l.input) {
|
||||
l.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.width = Pos(w)
|
||||
l.pos += l.width
|
||||
return r
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (l *lexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can only be called once per call of next.
|
||||
func (l *lexer) backup() {
|
||||
l.pos -= l.width
|
||||
}
|
||||
|
||||
// emit passes an item back to the client.
|
||||
func (l *lexer) emit(t itemType) {
|
||||
l.items <- item{t, l.start, l.input[l.start:l.pos]}
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (l *lexer) ignore() {
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's from the valid set.
|
||||
func (l *lexer) accept(valid string) bool {
|
||||
if strings.IndexRune(valid, l.next()) >= 0 {
|
||||
return true
|
||||
}
|
||||
l.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// acceptRun consumes a run of runes from the valid set.
|
||||
func (l *lexer) acceptRun(valid string) {
|
||||
for strings.IndexRune(valid, l.next()) >= 0 {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
|
||||
// lineNumber reports which line we're on, based on the position of
|
||||
// the previous item returned by nextItem. Doing it this way
|
||||
// means we don't have to worry about peek double counting.
|
||||
func (l *lexer) lineNumber() int {
|
||||
return 1 + strings.Count(l.input[:l.lastPos], "\n")
|
||||
}
|
||||
|
||||
// errorf returns an error token and terminates the scan by passing
|
||||
// back a nil pointer that will be the next state, terminating l.nextItem.
|
||||
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
|
||||
l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextItem returns the next item from the input.
|
||||
func (l *lexer) nextItem() item {
|
||||
item := <-l.items
|
||||
l.lastPos = item.pos
|
||||
return item
|
||||
}
|
||||
|
||||
// lex creates a new scanner for the input string.
|
||||
func lex(name, input, left, right string) *lexer {
|
||||
if left == "" {
|
||||
left = leftDelim
|
||||
}
|
||||
if right == "" {
|
||||
right = rightDelim
|
||||
}
|
||||
l := &lexer{
|
||||
name: name,
|
||||
input: input,
|
||||
leftDelim: left,
|
||||
rightDelim: right,
|
||||
items: make(chan item),
|
||||
}
|
||||
go l.run()
|
||||
return l
|
||||
}
|
||||
|
||||
// run runs the state machine for the lexer.
|
||||
func (l *lexer) run() {
|
||||
for l.state = lexText; l.state != nil; {
|
||||
l.state = l.state(l)
|
||||
}
|
||||
}
|
||||
|
||||
// state functions
|
||||
|
||||
const (
|
||||
leftDelim = "{{"
|
||||
rightDelim = "}}"
|
||||
leftComment = "/*"
|
||||
rightComment = "*/"
|
||||
)
|
||||
|
||||
// lexText scans until an opening action delimiter, "{{".
|
||||
func lexText(l *lexer) stateFn {
|
||||
for {
|
||||
if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
return lexLeftDelim
|
||||
}
|
||||
if l.next() == eof {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Correctly reached EOF.
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
l.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexLeftDelim scans the left delimiter, which is known to be present.
|
||||
func lexLeftDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.leftDelim))
|
||||
if strings.HasPrefix(l.input[l.pos:], leftComment) {
|
||||
return lexComment
|
||||
}
|
||||
l.emit(itemLeftDelim)
|
||||
l.parenDepth = 0
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexComment scans a comment. The left comment marker is known to be present.
|
||||
func lexComment(l *lexer) stateFn {
|
||||
l.pos += Pos(len(leftComment))
|
||||
i := strings.Index(l.input[l.pos:], rightComment)
|
||||
if i < 0 {
|
||||
return l.errorf("unclosed comment")
|
||||
}
|
||||
l.pos += Pos(i + len(rightComment))
|
||||
if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
return l.errorf("comment ends before closing delimiter")
|
||||
|
||||
}
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.ignore()
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexRightDelim scans the right delimiter, which is known to be present.
|
||||
func lexRightDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.emit(itemRightDelim)
|
||||
if l.peek() == '\\' {
|
||||
l.pos++
|
||||
l.emit(itemElideNewline)
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexInsideAction scans the elements inside action delimiters.
|
||||
func lexInsideAction(l *lexer) stateFn {
|
||||
// Either number, quoted string, or identifier.
|
||||
// Spaces separate arguments; runs of spaces turn into itemSpace.
|
||||
// Pipe symbols separate and are emitted.
|
||||
if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
if l.parenDepth == 0 {
|
||||
return lexRightDelim
|
||||
}
|
||||
return l.errorf("unclosed left paren")
|
||||
}
|
||||
switch r := l.next(); {
|
||||
case r == eof || isEndOfLine(r):
|
||||
return l.errorf("unclosed action")
|
||||
case isSpace(r):
|
||||
return lexSpace
|
||||
case r == ':':
|
||||
if l.next() != '=' {
|
||||
return l.errorf("expected :=")
|
||||
}
|
||||
l.emit(itemColonEquals)
|
||||
case r == '|':
|
||||
l.emit(itemPipe)
|
||||
case r == '"':
|
||||
return lexQuote
|
||||
case r == '`':
|
||||
return lexRawQuote
|
||||
case r == '$':
|
||||
return lexVariable
|
||||
case r == '\'':
|
||||
return lexChar
|
||||
case r == '.':
|
||||
// special look-ahead for ".field" so we don't break l.backup().
|
||||
if l.pos < Pos(len(l.input)) {
|
||||
r := l.input[l.pos]
|
||||
if r < '0' || '9' < r {
|
||||
return lexField
|
||||
}
|
||||
}
|
||||
fallthrough // '.' can start a number.
|
||||
case r == '+' || r == '-' || ('0' <= r && r <= '9'):
|
||||
l.backup()
|
||||
return lexNumber
|
||||
case isAlphaNumeric(r):
|
||||
l.backup()
|
||||
return lexIdentifier
|
||||
case r == '(':
|
||||
l.emit(itemLeftParen)
|
||||
l.parenDepth++
|
||||
return lexInsideAction
|
||||
case r == ')':
|
||||
l.emit(itemRightParen)
|
||||
l.parenDepth--
|
||||
if l.parenDepth < 0 {
|
||||
return l.errorf("unexpected right paren %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
case r <= unicode.MaxASCII && unicode.IsPrint(r):
|
||||
l.emit(itemChar)
|
||||
return lexInsideAction
|
||||
default:
|
||||
return l.errorf("unrecognized character in action: %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexSpace scans a run of space characters.
|
||||
// One space has already been seen.
|
||||
func lexSpace(l *lexer) stateFn {
|
||||
for isSpace(l.peek()) {
|
||||
l.next()
|
||||
}
|
||||
l.emit(itemSpace)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexIdentifier scans an alphanumeric.
|
||||
func lexIdentifier(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch r := l.next(); {
|
||||
case isAlphaNumeric(r):
|
||||
// absorb.
|
||||
default:
|
||||
l.backup()
|
||||
word := l.input[l.start:l.pos]
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
switch {
|
||||
case key[word] > itemKeyword:
|
||||
l.emit(key[word])
|
||||
case word[0] == '.':
|
||||
l.emit(itemField)
|
||||
case word == "true", word == "false":
|
||||
l.emit(itemBool)
|
||||
default:
|
||||
l.emit(itemIdentifier)
|
||||
}
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexField scans a field: .Alphanumeric.
|
||||
// The . has been scanned.
|
||||
func lexField(l *lexer) stateFn {
|
||||
return lexFieldOrVariable(l, itemField)
|
||||
}
|
||||
|
||||
// lexVariable scans a Variable: $Alphanumeric.
|
||||
// The $ has been scanned.
|
||||
func lexVariable(l *lexer) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "$".
|
||||
l.emit(itemVariable)
|
||||
return lexInsideAction
|
||||
}
|
||||
return lexFieldOrVariable(l, itemVariable)
|
||||
}
|
||||
|
||||
// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
|
||||
// The . or $ has been scanned.
|
||||
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "." or "$".
|
||||
if typ == itemVariable {
|
||||
l.emit(itemVariable)
|
||||
} else {
|
||||
l.emit(itemDot)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
var r rune
|
||||
for {
|
||||
r = l.next()
|
||||
if !isAlphaNumeric(r) {
|
||||
l.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
l.emit(typ)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// atTerminator reports whether the input is at a valid termination character to
|
||||
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
|
||||
// like "$x+2" not being acceptable without a space, in case we decide one
|
||||
// day to implement arithmetic.
|
||||
func (l *lexer) atTerminator() bool {
|
||||
r := l.peek()
|
||||
if isSpace(r) || isEndOfLine(r) {
|
||||
return true
|
||||
}
|
||||
switch r {
|
||||
case eof, '.', ',', '|', ':', ')', '(':
|
||||
return true
|
||||
}
|
||||
// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
|
||||
// succeed but should fail) but only in extremely rare cases caused by willfully
|
||||
// bad choice of delimiter.
|
||||
if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lexChar scans a character constant. The initial quote is already
|
||||
// scanned. Syntax checking is done by the parser.
|
||||
func lexChar(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated character constant")
|
||||
case '\'':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemCharConstant)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
|
||||
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
|
||||
// and "089" - but when it's wrong the input is invalid and the parser (via
|
||||
// strconv) will notice.
|
||||
func lexNumber(l *lexer) stateFn {
|
||||
if !l.scanNumber() {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
if sign := l.peek(); sign == '+' || sign == '-' {
|
||||
// Complex: 1+2i. No spaces, must end in 'i'.
|
||||
if !l.scanNumber() || l.input[l.pos-1] != 'i' {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
l.emit(itemComplex)
|
||||
} else {
|
||||
l.emit(itemNumber)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
func (l *lexer) scanNumber() bool {
|
||||
// Optional leading sign.
|
||||
l.accept("+-")
|
||||
// Is it hex?
|
||||
digits := "0123456789"
|
||||
if l.accept("0") && l.accept("xX") {
|
||||
digits = "0123456789abcdefABCDEF"
|
||||
}
|
||||
l.acceptRun(digits)
|
||||
if l.accept(".") {
|
||||
l.acceptRun(digits)
|
||||
}
|
||||
if l.accept("eE") {
|
||||
l.accept("+-")
|
||||
l.acceptRun("0123456789")
|
||||
}
|
||||
// Is it imaginary?
|
||||
l.accept("i")
|
||||
// Next thing mustn't be alphanumeric.
|
||||
if isAlphaNumeric(l.peek()) {
|
||||
l.next()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// lexQuote scans a quoted string.
|
||||
func lexQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated quoted string")
|
||||
case '"':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemString)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexRawQuote scans a raw quoted string.
|
||||
func lexRawQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated raw quoted string")
|
||||
case '`':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemRawString)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// isSpace reports whether r is a space character.
|
||||
func isSpace(r rune) bool {
|
||||
return r == ' ' || r == '\t'
|
||||
}
|
||||
|
||||
// isEndOfLine reports whether r is an end-of-line character.
|
||||
func isEndOfLine(r rune) bool {
|
||||
return r == '\r' || r == '\n'
|
||||
}
|
||||
|
||||
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
|
||||
func isAlphaNumeric(r rune) bool {
|
||||
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
|
||||
}
|
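The lexer above is unexported, so a usage sketch only makes sense from inside package parse (for example in a test file): lex starts the state machine in a goroutine, and nextItem drains the items channel until itemEOF or itemError. A minimal, hypothetical helper illustrating that flow:

```
package parse

import "fmt"

// lexDump is a debugging sketch (hypothetical; it would live in a _test.go
// file of this package). It prints every token scanned from the input.
func lexDump(input string) {
	l := lex("example", input, "", "") // empty delimiters fall back to "{{" and "}}"
	for {
		it := l.nextItem()
		fmt.Println(it) // item.String() shortens long values and marks EOF
		if it.typ == itemEOF || it.typ == itemError {
			return
		}
	}
}
```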
834
vendor/github.com/alecthomas/template/parse/node.go
generated
vendored
Normal file
@ -0,0 +1,834 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Parse nodes.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var textFormat = "%s" // Changed to "%q" in tests for better error messages.
|
||||
|
||||
// A Node is an element in the parse tree. The interface is trivial.
|
||||
// The interface contains an unexported method so that only
|
||||
// types local to this package can satisfy it.
|
||||
type Node interface {
|
||||
Type() NodeType
|
||||
String() string
|
||||
// Copy does a deep copy of the Node and all its components.
|
||||
// To avoid type assertions, some XxxNodes also have specialized
|
||||
// CopyXxx methods that return *XxxNode.
|
||||
Copy() Node
|
||||
Position() Pos // byte position of start of node in full original input string
|
||||
// tree returns the containing *Tree.
|
||||
// It is unexported so all implementations of Node are in this package.
|
||||
tree() *Tree
|
||||
}
|
||||
|
||||
// NodeType identifies the type of a parse tree node.
|
||||
type NodeType int
|
||||
|
||||
// Pos represents a byte position in the original input text from which
|
||||
// this template was parsed.
|
||||
type Pos int
|
||||
|
||||
func (p Pos) Position() Pos {
|
||||
return p
|
||||
}
|
||||
|
||||
// Type returns itself and provides an easy default implementation
|
||||
// for embedding in a Node. Embedded in all non-trivial Nodes.
|
||||
func (t NodeType) Type() NodeType {
|
||||
return t
|
||||
}
|
||||
|
||||
const (
|
||||
NodeText NodeType = iota // Plain text.
|
||||
NodeAction // A non-control action such as a field evaluation.
|
||||
NodeBool // A boolean constant.
|
||||
NodeChain // A sequence of field accesses.
|
||||
NodeCommand // An element of a pipeline.
|
||||
NodeDot // The cursor, dot.
|
||||
nodeElse // An else action. Not added to tree.
|
||||
nodeEnd // An end action. Not added to tree.
|
||||
NodeField // A field or method name.
|
||||
NodeIdentifier // An identifier; always a function name.
|
||||
NodeIf // An if action.
|
||||
NodeList // A list of Nodes.
|
||||
NodeNil // An untyped nil constant.
|
||||
NodeNumber // A numerical constant.
|
||||
NodePipe // A pipeline of commands.
|
||||
NodeRange // A range action.
|
||||
NodeString // A string constant.
|
||||
NodeTemplate // A template invocation action.
|
||||
NodeVariable // A $ variable.
|
||||
NodeWith // A with action.
|
||||
)
|
||||
|
||||
// Nodes.
|
||||
|
||||
// ListNode holds a sequence of nodes.
|
||||
type ListNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Nodes []Node // The element nodes in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newList(pos Pos) *ListNode {
|
||||
return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
|
||||
}
|
||||
|
||||
func (l *ListNode) append(n Node) {
|
||||
l.Nodes = append(l.Nodes, n)
|
||||
}
|
||||
|
||||
func (l *ListNode) tree() *Tree {
|
||||
return l.tr
|
||||
}
|
||||
|
||||
func (l *ListNode) String() string {
|
||||
b := new(bytes.Buffer)
|
||||
for _, n := range l.Nodes {
|
||||
fmt.Fprint(b, n)
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (l *ListNode) CopyList() *ListNode {
|
||||
if l == nil {
|
||||
return l
|
||||
}
|
||||
n := l.tr.newList(l.Pos)
|
||||
for _, elem := range l.Nodes {
|
||||
n.append(elem.Copy())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (l *ListNode) Copy() Node {
|
||||
return l.CopyList()
|
||||
}
|
||||
|
||||
// TextNode holds plain text.
|
||||
type TextNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Text []byte // The text; may span newlines.
|
||||
}
|
||||
|
||||
func (t *Tree) newText(pos Pos, text string) *TextNode {
|
||||
return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
|
||||
}
|
||||
|
||||
func (t *TextNode) String() string {
|
||||
return fmt.Sprintf(textFormat, t.Text)
|
||||
}
|
||||
|
||||
func (t *TextNode) tree() *Tree {
|
||||
return t.tr
|
||||
}
|
||||
|
||||
func (t *TextNode) Copy() Node {
|
||||
return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
|
||||
}
|
||||
|
||||
// PipeNode holds a pipeline with optional declaration
|
||||
type PipeNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Decl []*VariableNode // Variable declarations in lexical order.
|
||||
Cmds []*CommandNode // The commands in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
|
||||
return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
|
||||
}
|
||||
|
||||
func (p *PipeNode) append(command *CommandNode) {
|
||||
p.Cmds = append(p.Cmds, command)
|
||||
}
|
||||
|
||||
func (p *PipeNode) String() string {
|
||||
s := ""
|
||||
if len(p.Decl) > 0 {
|
||||
for i, v := range p.Decl {
|
||||
if i > 0 {
|
||||
s += ", "
|
||||
}
|
||||
s += v.String()
|
||||
}
|
||||
s += " := "
|
||||
}
|
||||
for i, c := range p.Cmds {
|
||||
if i > 0 {
|
||||
s += " | "
|
||||
}
|
||||
s += c.String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *PipeNode) tree() *Tree {
|
||||
return p.tr
|
||||
}
|
||||
|
||||
func (p *PipeNode) CopyPipe() *PipeNode {
|
||||
if p == nil {
|
||||
return p
|
||||
}
|
||||
var decl []*VariableNode
|
||||
for _, d := range p.Decl {
|
||||
decl = append(decl, d.Copy().(*VariableNode))
|
||||
}
|
||||
n := p.tr.newPipeline(p.Pos, p.Line, decl)
|
||||
for _, c := range p.Cmds {
|
||||
n.append(c.Copy().(*CommandNode))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (p *PipeNode) Copy() Node {
|
||||
return p.CopyPipe()
|
||||
}
|
||||
|
||||
// ActionNode holds an action (something bounded by delimiters).
|
||||
// Control actions have their own nodes; ActionNode represents simple
|
||||
// ones such as field evaluations and parenthesized pipelines.
|
||||
type ActionNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Pipe *PipeNode // The pipeline in the action.
|
||||
}
|
||||
|
||||
func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
|
||||
return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
|
||||
}
|
||||
|
||||
func (a *ActionNode) String() string {
|
||||
return fmt.Sprintf("{{%s}}", a.Pipe)
|
||||
|
||||
}
|
||||
|
||||
func (a *ActionNode) tree() *Tree {
|
||||
return a.tr
|
||||
}
|
||||
|
||||
func (a *ActionNode) Copy() Node {
|
||||
return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
|
||||
|
||||
}
|
||||
|
||||
// CommandNode holds a command (a pipeline inside an evaluating action).
|
||||
type CommandNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Args []Node // Arguments in lexical order: Identifier, field, or constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newCommand(pos Pos) *CommandNode {
|
||||
return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
|
||||
}
|
||||
|
||||
func (c *CommandNode) append(arg Node) {
|
||||
c.Args = append(c.Args, arg)
|
||||
}
|
||||
|
||||
func (c *CommandNode) String() string {
|
||||
s := ""
|
||||
for i, arg := range c.Args {
|
||||
if i > 0 {
|
||||
s += " "
|
||||
}
|
||||
if arg, ok := arg.(*PipeNode); ok {
|
||||
s += "(" + arg.String() + ")"
|
||||
continue
|
||||
}
|
||||
s += arg.String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *CommandNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *CommandNode) Copy() Node {
|
||||
if c == nil {
|
||||
return c
|
||||
}
|
||||
n := c.tr.newCommand(c.Pos)
|
||||
for _, c := range c.Args {
|
||||
n.append(c.Copy())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// IdentifierNode holds an identifier.
|
||||
type IdentifierNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident string // The identifier's name.
|
||||
}
|
||||
|
||||
// NewIdentifier returns a new IdentifierNode with the given identifier name.
|
||||
func NewIdentifier(ident string) *IdentifierNode {
|
||||
return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
|
||||
}
|
||||
|
||||
// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
|
||||
i.Pos = pos
|
||||
return i
|
||||
}
|
||||
|
||||
// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
|
||||
i.tr = t
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) String() string {
|
||||
return i.Ident
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) tree() *Tree {
|
||||
return i.tr
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) Copy() Node {
|
||||
return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
|
||||
}
|
||||
|
||||
// VariableNode holds a list of variable names, possibly with chained field
|
||||
// accesses. The dollar sign is part of the (first) name.
|
||||
type VariableNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // Variable name and fields in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
|
||||
return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
|
||||
}
|
||||
|
||||
func (v *VariableNode) String() string {
|
||||
s := ""
|
||||
for i, id := range v.Ident {
|
||||
if i > 0 {
|
||||
s += "."
|
||||
}
|
||||
s += id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (v *VariableNode) tree() *Tree {
|
||||
return v.tr
|
||||
}
|
||||
|
||||
func (v *VariableNode) Copy() Node {
|
||||
return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
|
||||
}
|
||||
|
||||
// DotNode holds the special identifier '.'.
|
||||
type DotNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newDot(pos Pos) *DotNode {
|
||||
return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
|
||||
}
|
||||
|
||||
func (d *DotNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeDot
|
||||
}
|
||||
|
||||
func (d *DotNode) String() string {
|
||||
return "."
|
||||
}
|
||||
|
||||
func (d *DotNode) tree() *Tree {
|
||||
return d.tr
|
||||
}
|
||||
|
||||
func (d *DotNode) Copy() Node {
|
||||
return d.tr.newDot(d.Pos)
|
||||
}
|
||||
|
||||
// NilNode holds the special identifier 'nil' representing an untyped nil constant.
|
||||
type NilNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newNil(pos Pos) *NilNode {
|
||||
return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
|
||||
}
|
||||
|
||||
func (n *NilNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeNil
|
||||
}
|
||||
|
||||
func (n *NilNode) String() string {
|
||||
return "nil"
|
||||
}
|
||||
|
||||
func (n *NilNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NilNode) Copy() Node {
|
||||
return n.tr.newNil(n.Pos)
|
||||
}
|
||||
|
||||
// FieldNode holds a field (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The period is dropped from each ident.
|
||||
type FieldNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newField(pos Pos, ident string) *FieldNode {
|
||||
return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
|
||||
}
|
||||
|
||||
func (f *FieldNode) String() string {
|
||||
s := ""
|
||||
for _, id := range f.Ident {
|
||||
s += "." + id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (f *FieldNode) tree() *Tree {
|
||||
return f.tr
|
||||
}
|
||||
|
||||
func (f *FieldNode) Copy() Node {
|
||||
return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
|
||||
}
|
||||
|
||||
// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The periods are dropped from each ident.
|
||||
type ChainNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Node Node
|
||||
Field []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
|
||||
return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
|
||||
}
|
||||
|
||||
// Add adds the named field (which should start with a period) to the end of the chain.
|
||||
func (c *ChainNode) Add(field string) {
|
||||
if len(field) == 0 || field[0] != '.' {
|
||||
panic("no dot in field")
|
||||
}
|
||||
field = field[1:] // Remove leading dot.
|
||||
if field == "" {
|
||||
panic("empty field")
|
||||
}
|
||||
c.Field = append(c.Field, field)
|
||||
}
|
||||
|
||||
func (c *ChainNode) String() string {
|
||||
s := c.Node.String()
|
||||
if _, ok := c.Node.(*PipeNode); ok {
|
||||
s = "(" + s + ")"
|
||||
}
|
||||
for _, field := range c.Field {
|
||||
s += "." + field
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *ChainNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *ChainNode) Copy() Node {
|
||||
return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
|
||||
}
|
||||
|
||||
// BoolNode holds a boolean constant.
|
||||
type BoolNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
True bool // The value of the boolean constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
|
||||
return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
|
||||
}
|
||||
|
||||
func (b *BoolNode) String() string {
|
||||
if b.True {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
|
||||
func (b *BoolNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BoolNode) Copy() Node {
|
||||
return b.tr.newBool(b.Pos, b.True)
|
||||
}
|
||||
|
||||
// NumberNode holds a number: signed or unsigned integer, float, or complex.
|
||||
// The value is parsed and stored under all the types that can represent the value.
|
||||
// This simulates in a small amount of code the behavior of Go's ideal constants.
|
||||
type NumberNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
IsInt bool // Number has an integral value.
|
||||
IsUint bool // Number has an unsigned integral value.
|
||||
IsFloat bool // Number has a floating-point value.
|
||||
IsComplex bool // Number is complex.
|
||||
Int64 int64 // The signed integer value.
|
||||
Uint64 uint64 // The unsigned integer value.
|
||||
Float64 float64 // The floating-point value.
|
||||
Complex128 complex128 // The complex value.
|
||||
Text string // The original textual representation from the input.
|
||||
}
|
||||
|
||||
func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
|
||||
n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
|
||||
switch typ {
|
||||
case itemCharConstant:
|
||||
rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tail != "'" {
|
||||
return nil, fmt.Errorf("malformed character constant: %s", text)
|
||||
}
|
||||
n.Int64 = int64(rune)
|
||||
n.IsInt = true
|
||||
n.Uint64 = uint64(rune)
|
||||
n.IsUint = true
|
||||
n.Float64 = float64(rune) // odd but those are the rules.
|
||||
n.IsFloat = true
|
||||
return n, nil
|
||||
case itemComplex:
|
||||
// fmt.Sscan can parse the pair, so let it do the work.
|
||||
if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n.IsComplex = true
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
// Imaginary constants can only be complex unless they are zero.
|
||||
if len(text) > 0 && text[len(text)-1] == 'i' {
|
||||
f, err := strconv.ParseFloat(text[:len(text)-1], 64)
|
||||
if err == nil {
|
||||
n.IsComplex = true
|
||||
n.Complex128 = complex(0, f)
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
// Do integer test first so we get 0x123 etc.
|
||||
u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
|
||||
if err == nil {
|
||||
n.IsUint = true
|
||||
n.Uint64 = u
|
||||
}
|
||||
i, err := strconv.ParseInt(text, 0, 64)
|
||||
if err == nil {
|
||||
n.IsInt = true
|
||||
n.Int64 = i
|
||||
if i == 0 {
|
||||
n.IsUint = true // in case of -0.
|
||||
n.Uint64 = u
|
||||
}
|
||||
}
|
||||
// If an integer extraction succeeded, promote the float.
|
||||
if n.IsInt {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Int64)
|
||||
} else if n.IsUint {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Uint64)
|
||||
} else {
|
||||
f, err := strconv.ParseFloat(text, 64)
|
||||
if err == nil {
|
||||
n.IsFloat = true
|
||||
n.Float64 = f
|
||||
// If a floating-point extraction succeeded, extract the int if needed.
|
||||
if !n.IsInt && float64(int64(f)) == f {
|
||||
n.IsInt = true
|
||||
n.Int64 = int64(f)
|
||||
}
|
||||
if !n.IsUint && float64(uint64(f)) == f {
|
||||
n.IsUint = true
|
||||
n.Uint64 = uint64(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !n.IsInt && !n.IsUint && !n.IsFloat {
|
||||
return nil, fmt.Errorf("illegal number syntax: %q", text)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// simplifyComplex pulls out any other types that are represented by the complex number.
|
||||
// These all require that the imaginary part be zero.
|
||||
func (n *NumberNode) simplifyComplex() {
|
||||
n.IsFloat = imag(n.Complex128) == 0
|
||||
if n.IsFloat {
|
||||
n.Float64 = real(n.Complex128)
|
||||
n.IsInt = float64(int64(n.Float64)) == n.Float64
|
||||
if n.IsInt {
|
||||
n.Int64 = int64(n.Float64)
|
||||
}
|
||||
n.IsUint = float64(uint64(n.Float64)) == n.Float64
|
||||
if n.IsUint {
|
||||
n.Uint64 = uint64(n.Float64)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NumberNode) String() string {
|
||||
return n.Text
|
||||
}
|
||||
|
||||
func (n *NumberNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NumberNode) Copy() Node {
|
||||
nn := new(NumberNode)
|
||||
*nn = *n // Easy, fast, correct.
|
||||
return nn
|
||||
}
|
||||
|
||||
// StringNode holds a string constant. The value has been "unquoted".
|
||||
type StringNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Quoted string // The original text of the string, with quotes.
|
||||
Text string // The string, after quote processing.
|
||||
}
|
||||
|
||||
func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
|
||||
return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
|
||||
}
|
||||
|
||||
func (s *StringNode) String() string {
|
||||
return s.Quoted
|
||||
}
|
||||
|
||||
func (s *StringNode) tree() *Tree {
|
||||
return s.tr
|
||||
}
|
||||
|
||||
func (s *StringNode) Copy() Node {
|
||||
return s.tr.newString(s.Pos, s.Quoted, s.Text)
|
||||
}
|
||||
|
||||
// endNode represents an {{end}} action.
|
||||
// It does not appear in the final parse tree.
|
||||
type endNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newEnd(pos Pos) *endNode {
|
||||
return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
|
||||
}
|
||||
|
||||
func (e *endNode) String() string {
|
||||
return "{{end}}"
|
||||
}
|
||||
|
||||
func (e *endNode) tree() *Tree {
|
||||
return e.tr
|
||||
}
|
||||
|
||||
func (e *endNode) Copy() Node {
|
||||
return e.tr.newEnd(e.Pos)
|
||||
}
|
||||
|
||||
// elseNode represents an {{else}} action. Does not appear in the final tree.
|
||||
type elseNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
}
|
||||
|
||||
func (t *Tree) newElse(pos Pos, line int) *elseNode {
|
||||
return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
|
||||
}
|
||||
|
||||
func (e *elseNode) Type() NodeType {
|
||||
return nodeElse
|
||||
}
|
||||
|
||||
func (e *elseNode) String() string {
|
||||
return "{{else}}"
|
||||
}
|
||||
|
||||
func (e *elseNode) tree() *Tree {
|
||||
return e.tr
|
||||
}
|
||||
|
||||
func (e *elseNode) Copy() Node {
|
||||
return e.tr.newElse(e.Pos, e.Line)
|
||||
}
|
||||
|
||||
// BranchNode is the common representation of if, range, and with.
|
||||
type BranchNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Pipe *PipeNode // The pipeline to be evaluated.
|
||||
List *ListNode // What to execute if the value is non-empty.
|
||||
ElseList *ListNode // What to execute if the value is empty (nil if absent).
|
||||
}
|
||||
|
||||
func (b *BranchNode) String() string {
|
||||
name := ""
|
||||
switch b.NodeType {
|
||||
case NodeIf:
|
||||
name = "if"
|
||||
case NodeRange:
|
||||
name = "range"
|
||||
case NodeWith:
|
||||
name = "with"
|
||||
default:
|
||||
panic("unknown branch type")
|
||||
}
|
||||
if b.ElseList != nil {
|
||||
return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
|
||||
}
|
||||
return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
|
||||
}
|
||||
|
||||
func (b *BranchNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BranchNode) Copy() Node {
|
||||
switch b.NodeType {
|
||||
case NodeIf:
|
||||
return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
case NodeRange:
|
||||
return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
case NodeWith:
|
||||
return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
default:
|
||||
panic("unknown branch type")
|
||||
}
|
||||
}
|
||||
|
||||
// IfNode represents an {{if}} action and its commands.
|
||||
type IfNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
|
||||
return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (i *IfNode) Copy() Node {
|
||||
return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// RangeNode represents a {{range}} action and its commands.
|
||||
type RangeNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
|
||||
return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (r *RangeNode) Copy() Node {
|
||||
return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// WithNode represents a {{with}} action and its commands.
|
||||
type WithNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
|
||||
return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (w *WithNode) Copy() Node {
|
||||
return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// TemplateNode represents a {{template}} action.
|
||||
type TemplateNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Name string // The name of the template (unquoted).
|
||||
Pipe *PipeNode // The command to evaluate as dot for the template.
|
||||
}
|
||||
|
||||
func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
|
||||
return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
|
||||
}
|
||||
|
||||
func (t *TemplateNode) String() string {
|
||||
if t.Pipe == nil {
|
||||
return fmt.Sprintf("{{template %q}}", t.Name)
|
||||
}
|
||||
return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
|
||||
}
|
||||
|
||||
func (t *TemplateNode) tree() *Tree {
|
||||
return t.tr
|
||||
}
|
||||
|
||||
func (t *TemplateNode) Copy() Node {
|
||||
return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
|
||||
}
|
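Every node's String method reproduces its source form, which makes a parsed tree easy to inspect. A minimal sketch, assuming the exported Parse entry point from parse.go below and an arbitrary template string:

```
package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	// Parse returns a map from template name to *parse.Tree.
	// Empty delimiters fall back to the defaults "{{" and "}}".
	trees, err := parse.Parse("page", "Hello, {{.Name}}!", "", "")
	if err != nil {
		panic(err)
	}
	for _, n := range trees["page"].Root.Nodes {
		fmt.Printf("%T %s\n", n, n) // e.g. *parse.TextNode, *parse.ActionNode
	}
}
```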
700
vendor/github.com/alecthomas/template/parse/parse.go
generated
vendored
Normal file
@ -0,0 +1,700 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package parse builds parse trees for templates as defined by text/template
|
||||
// and html/template. Clients should use those packages to construct templates
|
||||
// rather than this one, which provides shared internal data structures not
|
||||
// intended for general use.
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Tree is the representation of a single parsed template.
|
||||
type Tree struct {
|
||||
Name string // name of the template represented by the tree.
|
||||
ParseName string // name of the top-level template during parsing, for error messages.
|
||||
Root *ListNode // top-level root of the tree.
|
||||
text string // text parsed to create the template (or its parent)
|
||||
// Parsing only; cleared after parse.
|
||||
funcs []map[string]interface{}
|
||||
lex *lexer
|
||||
token [3]item // three-token lookahead for parser.
|
||||
peekCount int
|
||||
vars []string // variables defined at the moment.
|
||||
}
|
||||
|
||||
// Copy returns a copy of the Tree. Any parsing state is discarded.
|
||||
func (t *Tree) Copy() *Tree {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return &Tree{
|
||||
Name: t.Name,
|
||||
ParseName: t.ParseName,
|
||||
Root: t.Root.CopyList(),
|
||||
text: t.text,
|
||||
}
|
||||
}
|
||||
|
||||
// Parse returns a map from template name to parse.Tree, created by parsing the
|
||||
// templates described in the argument string. The top-level template will be
|
||||
// given the specified name. If an error is encountered, parsing stops and an
|
||||
// empty map is returned with the error.
|
||||
func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
|
||||
treeSet = make(map[string]*Tree)
|
||||
t := New(name)
|
||||
t.text = text
|
||||
_, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
|
||||
return
|
||||
}
|
||||
|
||||
// next returns the next token.
|
||||
func (t *Tree) next() item {
|
||||
if t.peekCount > 0 {
|
||||
t.peekCount--
|
||||
} else {
|
||||
t.token[0] = t.lex.nextItem()
|
||||
}
|
||||
return t.token[t.peekCount]
|
||||
}
|
||||
|
||||
// backup backs the input stream up one token.
|
||||
func (t *Tree) backup() {
|
||||
t.peekCount++
|
||||
}
|
||||
|
||||
// backup2 backs the input stream up two tokens.
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup2(t1 item) {
|
||||
t.token[1] = t1
|
||||
t.peekCount = 2
|
||||
}
|
||||
|
||||
// backup3 backs the input stream up three tokens.
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
|
||||
t.token[1] = t1
|
||||
t.token[2] = t2
|
||||
t.peekCount = 3
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next token.
|
||||
func (t *Tree) peek() item {
|
||||
if t.peekCount > 0 {
|
||||
return t.token[t.peekCount-1]
|
||||
}
|
||||
t.peekCount = 1
|
||||
t.token[0] = t.lex.nextItem()
|
||||
return t.token[0]
|
||||
}
|
||||
|
||||
// nextNonSpace returns the next non-space token.
|
||||
func (t *Tree) nextNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// peekNonSpace returns but does not consume the next non-space token.
|
||||
func (t *Tree) peekNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
t.backup()
|
||||
return token
|
||||
}
|
||||
|
||||
// Parsing.
|
||||
|
||||
// New allocates a new parse tree with the given name.
|
||||
func New(name string, funcs ...map[string]interface{}) *Tree {
|
||||
return &Tree{
|
||||
Name: name,
|
||||
funcs: funcs,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorContext returns a textual representation of the location of the node in the input text.
|
||||
// The receiver is only used when the node does not have a pointer to the tree inside,
|
||||
// which can occur in old code.
|
||||
func (t *Tree) ErrorContext(n Node) (location, context string) {
|
||||
pos := int(n.Position())
|
||||
tree := n.tree()
|
||||
if tree == nil {
|
||||
tree = t
|
||||
}
|
||||
text := tree.text[:pos]
|
||||
byteNum := strings.LastIndex(text, "\n")
|
||||
if byteNum == -1 {
|
||||
byteNum = pos // On first line.
|
||||
} else {
|
||||
byteNum++ // After the newline.
|
||||
byteNum = pos - byteNum
|
||||
}
|
||||
lineNum := 1 + strings.Count(text, "\n")
|
||||
context = n.String()
|
||||
if len(context) > 20 {
|
||||
context = fmt.Sprintf("%.20s...", context)
|
||||
}
|
||||
return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (t *Tree) errorf(format string, args ...interface{}) {
|
||||
t.Root = nil
|
||||
format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// error terminates processing.
|
||||
func (t *Tree) error(err error) {
|
||||
t.errorf("%s", err)
|
||||
}
|
||||
|
||||
// expect consumes the next token and guarantees it has the required type.
|
||||
func (t *Tree) expect(expected itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// expectOneOf consumes the next token and guarantees it has one of the required types.
|
||||
func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected1 && token.typ != expected2 {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// unexpected complains about the token and terminates processing.
|
||||
func (t *Tree) unexpected(token item, context string) {
|
||||
t.errorf("unexpected %s in %s", token, context)
|
||||
}
|
||||
|
||||
// recover is the handler that turns panics into returns from the top level of Parse.
|
||||
func (t *Tree) recover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
if _, ok := e.(runtime.Error); ok {
|
||||
panic(e)
|
||||
}
|
||||
if t != nil {
|
||||
t.stopParse()
|
||||
}
|
||||
*errp = e.(error)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// startParse initializes the parser, using the lexer.
|
||||
func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
|
||||
t.Root = nil
|
||||
t.lex = lex
|
||||
t.vars = []string{"$"}
|
||||
t.funcs = funcs
|
||||
}
|
||||
|
||||
// stopParse terminates parsing.
|
||||
func (t *Tree) stopParse() {
|
||||
t.lex = nil
|
||||
t.vars = nil
|
||||
t.funcs = nil
|
||||
}
|
||||
|
||||
// Parse parses the template definition string to construct a representation of
|
||||
// the template for execution. If either action delimiter string is empty, the
|
||||
// default ("{{" or "}}") is used. Embedded template definitions are added to
|
||||
// the treeSet map.
|
||||
func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
|
||||
defer t.recover(&err)
|
||||
t.ParseName = t.Name
|
||||
t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
|
||||
t.text = text
|
||||
t.parse(treeSet)
|
||||
t.add(treeSet)
|
||||
t.stopParse()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// add adds tree to the treeSet.
|
||||
func (t *Tree) add(treeSet map[string]*Tree) {
|
||||
tree := treeSet[t.Name]
|
||||
if tree == nil || IsEmptyTree(tree.Root) {
|
||||
treeSet[t.Name] = t
|
||||
return
|
||||
}
|
||||
if !IsEmptyTree(t.Root) {
|
||||
t.errorf("template: multiple definition of template %q", t.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// IsEmptyTree reports whether this tree (node) is empty of everything but space.
|
||||
func IsEmptyTree(n Node) bool {
|
||||
switch n := n.(type) {
|
||||
case nil:
|
||||
return true
|
||||
case *ActionNode:
|
||||
case *IfNode:
|
||||
case *ListNode:
|
||||
for _, node := range n.Nodes {
|
||||
if !IsEmptyTree(node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case *RangeNode:
|
||||
case *TemplateNode:
|
||||
case *TextNode:
|
||||
return len(bytes.TrimSpace(n.Text)) == 0
|
||||
case *WithNode:
|
||||
default:
|
||||
panic("unknown node: " + n.String())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parse is the top-level parser for a template, essentially the same
|
||||
// as itemList except it also parses {{define}} actions.
|
||||
// It runs to EOF.
|
||||
func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
|
||||
t.Root = t.newList(t.peek().pos)
|
||||
for t.peek().typ != itemEOF {
|
||||
if t.peek().typ == itemLeftDelim {
|
||||
delim := t.next()
|
||||
if t.nextNonSpace().typ == itemDefine {
|
||||
newT := New("definition") // name will be updated once we know it.
|
||||
newT.text = t.text
|
||||
newT.ParseName = t.ParseName
|
||||
newT.startParse(t.funcs, t.lex)
|
||||
newT.parseDefinition(treeSet)
|
||||
continue
|
||||
}
|
||||
t.backup2(delim)
|
||||
}
|
||||
n := t.textOrAction()
|
||||
if n.Type() == nodeEnd {
|
||||
t.errorf("unexpected %s", n)
|
||||
}
|
||||
t.Root.append(n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseDefinition parses a {{define}} ... {{end}} template definition and
|
||||
// installs the definition in the treeSet map. The "define" keyword has already
|
||||
// been scanned.
|
||||
func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
|
||||
const context = "define clause"
|
||||
name := t.expectOneOf(itemString, itemRawString, context)
|
||||
var err error
|
||||
t.Name, err = strconv.Unquote(name.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
t.expect(itemRightDelim, context)
|
||||
var end Node
|
||||
t.Root, end = t.itemList()
|
||||
if end.Type() != nodeEnd {
|
||||
t.errorf("unexpected %s in %s", end, context)
|
||||
}
|
||||
t.add(treeSet)
|
||||
t.stopParse()
|
||||
}
|
||||
|
||||
// itemList:
|
||||
// textOrAction*
|
||||
// Terminates at {{end}} or {{else}}, returned separately.
|
||||
func (t *Tree) itemList() (list *ListNode, next Node) {
|
||||
list = t.newList(t.peekNonSpace().pos)
|
||||
for t.peekNonSpace().typ != itemEOF {
|
||||
n := t.textOrAction()
|
||||
switch n.Type() {
|
||||
case nodeEnd, nodeElse:
|
||||
return list, n
|
||||
}
|
||||
list.append(n)
|
||||
}
|
||||
t.errorf("unexpected EOF")
|
||||
return
|
||||
}
|
||||
|
||||
// textOrAction:
|
||||
// text | action
|
||||
func (t *Tree) textOrAction() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemElideNewline:
|
||||
return t.elideNewline()
|
||||
case itemText:
|
||||
return t.newText(token.pos, token.val)
|
||||
case itemLeftDelim:
|
||||
return t.action()
|
||||
default:
|
||||
t.unexpected(token, "input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// elideNewline:
|
||||
// Remove newlines trailing rightDelim if \\ is present.
|
||||
func (t *Tree) elideNewline() Node {
|
||||
token := t.peek()
|
||||
if token.typ != itemText {
|
||||
t.unexpected(token, "input")
|
||||
return nil
|
||||
}
|
||||
|
||||
t.next()
|
||||
stripped := strings.TrimLeft(token.val, "\n\r")
|
||||
diff := len(token.val) - len(stripped)
|
||||
if diff > 0 {
|
||||
// This is a bit nasty. We mutate the token in-place to remove
|
||||
// preceding newlines.
|
||||
token.pos += Pos(diff)
|
||||
token.val = stripped
|
||||
}
|
||||
return t.newText(token.pos, token.val)
|
||||
}
|
||||
|
||||
// Action:
|
||||
// control
|
||||
// command ("|" command)*
|
||||
// Left delim is past. Now get actions.
|
||||
// First word could be a keyword such as range.
|
||||
func (t *Tree) action() (n Node) {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemElse:
|
||||
return t.elseControl()
|
||||
case itemEnd:
|
||||
return t.endControl()
|
||||
case itemIf:
|
||||
return t.ifControl()
|
||||
case itemRange:
|
||||
return t.rangeControl()
|
||||
case itemTemplate:
|
||||
return t.templateControl()
|
||||
case itemWith:
|
||||
return t.withControl()
|
||||
}
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
|
||||
}
|
||||
|
||||
// Pipeline:
|
||||
// declarations? command ('|' command)*
|
||||
func (t *Tree) pipeline(context string) (pipe *PipeNode) {
|
||||
var decl []*VariableNode
|
||||
pos := t.peekNonSpace().pos
|
||||
// Are there declarations?
|
||||
for {
|
||||
if v := t.peekNonSpace(); v.typ == itemVariable {
|
||||
t.next()
|
||||
// Since space is a token, we need 3-token look-ahead here in the worst case:
|
||||
// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
|
||||
// argument variable rather than a declaration. So remember the token
|
||||
// adjacent to the variable so we can push it back if necessary.
|
||||
tokenAfterVariable := t.peek()
|
||||
if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
|
||||
t.nextNonSpace()
|
||||
variable := t.newVariable(v.pos, v.val)
|
||||
decl = append(decl, variable)
|
||||
t.vars = append(t.vars, v.val)
|
||||
if next.typ == itemChar && next.val == "," {
|
||||
if context == "range" && len(decl) < 2 {
|
||||
continue
|
||||
}
|
||||
t.errorf("too many declarations in %s", context)
|
||||
}
|
||||
} else if tokenAfterVariable.typ == itemSpace {
|
||||
t.backup3(v, tokenAfterVariable)
|
||||
} else {
|
||||
t.backup2(v)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
|
||||
for {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemRightDelim, itemRightParen:
|
||||
if len(pipe.Cmds) == 0 {
|
||||
t.errorf("missing value for %s", context)
|
||||
}
|
||||
if token.typ == itemRightParen {
|
||||
t.backup()
|
||||
}
|
||||
return
|
||||
case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
|
||||
itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
|
||||
t.backup()
|
||||
pipe.append(t.command())
|
||||
default:
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
|
||||
defer t.popVars(len(t.vars))
|
||||
line = t.lex.lineNumber()
|
||||
pipe = t.pipeline(context)
|
||||
var next Node
|
||||
list, next = t.itemList()
|
||||
switch next.Type() {
|
||||
case nodeEnd: //done
|
||||
case nodeElse:
|
||||
if allowElseIf {
|
||||
// Special case for "else if". If the "else" is followed immediately by an "if",
|
||||
// the elseControl will have left the "if" token pending. Treat
|
||||
// {{if a}}_{{else if b}}_{{end}}
|
||||
// as
|
||||
// {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
|
||||
// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
|
||||
// is assumed. This technique works even for long if-else-if chains.
|
||||
// TODO: Should we allow else-if in with and range?
|
||||
if t.peek().typ == itemIf {
|
||||
t.next() // Consume the "if" token.
|
||||
elseList = t.newList(next.Position())
|
||||
elseList.append(t.ifControl())
|
||||
// Do not consume the next item - only one {{end}} required.
|
||||
break
|
||||
}
|
||||
}
|
||||
elseList, next = t.itemList()
|
||||
if next.Type() != nodeEnd {
|
||||
t.errorf("expected end; found %s", next)
|
||||
}
|
||||
}
|
||||
return pipe.Position(), line, pipe, list, elseList
|
||||
}
|
||||
|
||||
// If:
|
||||
// {{if pipeline}} itemList {{end}}
|
||||
// {{if pipeline}} itemList {{else}} itemList {{end}}
|
||||
// If keyword is past.
|
||||
func (t *Tree) ifControl() Node {
|
||||
return t.newIf(t.parseControl(true, "if"))
|
||||
}
|
||||
|
||||
// Range:
|
||||
// {{range pipeline}} itemList {{end}}
|
||||
// {{range pipeline}} itemList {{else}} itemList {{end}}
|
||||
// Range keyword is past.
|
||||
func (t *Tree) rangeControl() Node {
|
||||
return t.newRange(t.parseControl(false, "range"))
|
||||
}
|
||||
|
||||
// With:
|
||||
// {{with pipeline}} itemList {{end}}
|
||||
// {{with pipeline}} itemList {{else}} itemList {{end}}
|
||||
// If keyword is past.
|
||||
func (t *Tree) withControl() Node {
|
||||
return t.newWith(t.parseControl(false, "with"))
|
||||
}
|
||||
|
||||
// End:
|
||||
// {{end}}
|
||||
// End keyword is past.
|
||||
func (t *Tree) endControl() Node {
|
||||
return t.newEnd(t.expect(itemRightDelim, "end").pos)
|
||||
}
|
||||
|
||||
// Else:
|
||||
// {{else}}
|
||||
// Else keyword is past.
|
||||
func (t *Tree) elseControl() Node {
|
||||
// Special case for "else if".
|
||||
peek := t.peekNonSpace()
|
||||
if peek.typ == itemIf {
|
||||
// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
|
||||
return t.newElse(peek.pos, t.lex.lineNumber())
|
||||
}
|
||||
return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
|
||||
}
|
||||
|
||||
// Template:
|
||||
// {{template stringValue pipeline}}
|
||||
// Template keyword is past. The name must be something that can evaluate
|
||||
// to a string.
|
||||
func (t *Tree) templateControl() Node {
|
||||
var name string
|
||||
token := t.nextNonSpace()
|
||||
switch token.typ {
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
name = s
|
||||
default:
|
||||
t.unexpected(token, "template invocation")
|
||||
}
|
||||
var pipe *PipeNode
|
||||
if t.nextNonSpace().typ != itemRightDelim {
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
pipe = t.pipeline("template")
|
||||
}
|
||||
return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
|
||||
}
|
||||
|
||||
// command:
|
||||
// operand (space operand)*
|
||||
// space-separated arguments up to a pipeline character or right delimiter.
|
||||
// We consume the pipe character but leave the right delim to terminate the action.
|
||||
func (t *Tree) command() *CommandNode {
|
||||
cmd := t.newCommand(t.peekNonSpace().pos)
|
||||
for {
|
||||
t.peekNonSpace() // skip leading spaces.
|
||||
operand := t.operand()
|
||||
if operand != nil {
|
||||
cmd.append(operand)
|
||||
}
|
||||
switch token := t.next(); token.typ {
|
||||
case itemSpace:
|
||||
continue
|
||||
case itemError:
|
||||
t.errorf("%s", token.val)
|
||||
case itemRightDelim, itemRightParen:
|
||||
t.backup()
|
||||
case itemPipe:
|
||||
default:
|
||||
t.errorf("unexpected %s in operand; missing space?", token)
|
||||
}
|
||||
break
|
||||
}
|
||||
if len(cmd.Args) == 0 {
|
||||
t.errorf("empty command")
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
// operand:
|
||||
// term .Field*
|
||||
// An operand is a space-separated component of a command,
|
||||
// a term possibly followed by field accesses.
|
||||
// A nil return means the next item is not an operand.
|
||||
func (t *Tree) operand() Node {
|
||||
node := t.term()
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
if t.peek().typ == itemField {
|
||||
chain := t.newChain(t.peek().pos, node)
|
||||
for t.peek().typ == itemField {
|
||||
chain.Add(t.next().val)
|
||||
}
|
||||
// Compatibility with original API: If the term is of type NodeField
|
||||
// or NodeVariable, just put more fields on the original.
|
||||
// Otherwise, keep the Chain node.
|
||||
// TODO: Switch to Chains always when we can.
|
||||
switch node.Type() {
|
||||
case NodeField:
|
||||
node = t.newField(chain.Position(), chain.String())
|
||||
case NodeVariable:
|
||||
node = t.newVariable(chain.Position(), chain.String())
|
||||
default:
|
||||
node = chain
|
||||
}
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// term:
|
||||
// literal (number, string, nil, boolean)
|
||||
// function (identifier)
|
||||
// .
|
||||
// .Field
|
||||
// $
|
||||
// '(' pipeline ')'
|
||||
// A term is a simple "expression".
|
||||
// A nil return means the next item is not a term.
|
||||
func (t *Tree) term() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemError:
|
||||
t.errorf("%s", token.val)
|
||||
case itemIdentifier:
|
||||
if !t.hasFunction(token.val) {
|
||||
t.errorf("function %q not defined", token.val)
|
||||
}
|
||||
return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
|
||||
case itemDot:
|
||||
return t.newDot(token.pos)
|
||||
case itemNil:
|
||||
return t.newNil(token.pos)
|
||||
case itemVariable:
|
||||
return t.useVar(token.pos, token.val)
|
||||
case itemField:
|
||||
return t.newField(token.pos, token.val)
|
||||
case itemBool:
|
||||
return t.newBool(token.pos, token.val == "true")
|
||||
case itemCharConstant, itemComplex, itemNumber:
|
||||
number, err := t.newNumber(token.pos, token.val, token.typ)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return number
|
||||
case itemLeftParen:
|
||||
pipe := t.pipeline("parenthesized pipeline")
|
||||
if token := t.next(); token.typ != itemRightParen {
|
||||
t.errorf("unclosed right paren: unexpected %s", token)
|
||||
}
|
||||
return pipe
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return t.newString(token.pos, token.val, s)
|
||||
}
|
||||
t.backup()
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasFunction reports if a function name exists in the Tree's maps.
|
||||
func (t *Tree) hasFunction(name string) bool {
|
||||
for _, funcMap := range t.funcs {
|
||||
if funcMap == nil {
|
||||
continue
|
||||
}
|
||||
if funcMap[name] != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// popVars trims the variable list to the specified length
|
||||
func (t *Tree) popVars(n int) {
|
||||
t.vars = t.vars[:n]
|
||||
}
|
||||
|
||||
// useVar returns a node for a variable reference. It errors if the
|
||||
// variable is not defined.
|
||||
func (t *Tree) useVar(pos Pos, name string) Node {
|
||||
v := t.newVariable(pos, name)
|
||||
for _, varName := range t.vars {
|
||||
if varName == v.Ident[0] {
|
||||
return v
|
||||
}
|
||||
}
|
||||
t.errorf("undefined variable %q", v.Ident[0])
|
||||
return nil
|
||||
}
|
217
vendor/github.com/alecthomas/template/template.go
generated
vendored
Normal file
@ -0,0 +1,217 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// common holds the information shared by related templates.
|
||||
type common struct {
|
||||
tmpl map[string]*Template
|
||||
// We use two maps, one for parsing and one for execution.
|
||||
// This separation makes the API cleaner since it doesn't
|
||||
// expose reflection to the client.
|
||||
parseFuncs FuncMap
|
||||
execFuncs map[string]reflect.Value
|
||||
}
|
||||
|
||||
// Template is the representation of a parsed template. The *parse.Tree
|
||||
// field is exported only for use by html/template and should be treated
|
||||
// as unexported by all other clients.
|
||||
type Template struct {
|
||||
name string
|
||||
*parse.Tree
|
||||
*common
|
||||
leftDelim string
|
||||
rightDelim string
|
||||
}
|
||||
|
||||
// New allocates a new template with the given name.
|
||||
func New(name string) *Template {
|
||||
return &Template{
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the template.
|
||||
func (t *Template) Name() string {
|
||||
return t.name
|
||||
}
|
||||
|
||||
// New allocates a new template associated with the given one and with the same
|
||||
// delimiters. The association, which is transitive, allows one template to
|
||||
// invoke another with a {{template}} action.
|
||||
func (t *Template) New(name string) *Template {
|
||||
t.init()
|
||||
return &Template{
|
||||
name: name,
|
||||
common: t.common,
|
||||
leftDelim: t.leftDelim,
|
||||
rightDelim: t.rightDelim,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Template) init() {
|
||||
if t.common == nil {
|
||||
t.common = new(common)
|
||||
t.tmpl = make(map[string]*Template)
|
||||
t.parseFuncs = make(FuncMap)
|
||||
t.execFuncs = make(map[string]reflect.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the template, including all associated
|
||||
// templates. The actual representation is not copied, but the name space of
|
||||
// associated templates is, so further calls to Parse in the copy will add
|
||||
// templates to the copy but not to the original. Clone can be used to prepare
|
||||
// common templates and use them with variant definitions for other templates
|
||||
// by adding the variants after the clone is made.
|
||||
func (t *Template) Clone() (*Template, error) {
|
||||
nt := t.copy(nil)
|
||||
nt.init()
|
||||
nt.tmpl[t.name] = nt
|
||||
for k, v := range t.tmpl {
|
||||
if k == t.name { // Already installed.
|
||||
continue
|
||||
}
|
||||
// The associated templates share nt's common structure.
|
||||
tmpl := v.copy(nt.common)
|
||||
nt.tmpl[k] = tmpl
|
||||
}
|
||||
for k, v := range t.parseFuncs {
|
||||
nt.parseFuncs[k] = v
|
||||
}
|
||||
for k, v := range t.execFuncs {
|
||||
nt.execFuncs[k] = v
|
||||
}
|
||||
return nt, nil
|
||||
}
|
||||
|
||||
// copy returns a shallow copy of t, with common set to the argument.
|
||||
func (t *Template) copy(c *common) *Template {
|
||||
nt := New(t.name)
|
||||
nt.Tree = t.Tree
|
||||
nt.common = c
|
||||
nt.leftDelim = t.leftDelim
|
||||
nt.rightDelim = t.rightDelim
|
||||
return nt
|
||||
}
|
||||
|
||||
// AddParseTree creates a new template with the name and parse tree
|
||||
// and associates it with t.
|
||||
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
|
||||
if t.common != nil && t.tmpl[name] != nil {
|
||||
return nil, fmt.Errorf("template: redefinition of template %q", name)
|
||||
}
|
||||
nt := t.New(name)
|
||||
nt.Tree = tree
|
||||
t.tmpl[name] = nt
|
||||
return nt, nil
|
||||
}
|
||||
|
||||
// Templates returns a slice of the templates associated with t, including t
|
||||
// itself.
|
||||
func (t *Template) Templates() []*Template {
|
||||
if t.common == nil {
|
||||
return nil
|
||||
}
|
||||
// Return a slice so we don't expose the map.
|
||||
m := make([]*Template, 0, len(t.tmpl))
|
||||
for _, v := range t.tmpl {
|
||||
m = append(m, v)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Delims sets the action delimiters to the specified strings, to be used in
|
||||
// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
|
||||
// definitions will inherit the settings. An empty delimiter stands for the
|
||||
// corresponding default: {{ or }}.
|
||||
// The return value is the template, so calls can be chained.
|
||||
func (t *Template) Delims(left, right string) *Template {
|
||||
t.leftDelim = left
|
||||
t.rightDelim = right
|
||||
return t
|
||||
}
|
||||
|
||||
// Funcs adds the elements of the argument map to the template's function map.
|
||||
// It panics if a value in the map is not a function with appropriate return
|
||||
// type. However, it is legal to overwrite elements of the map. The return
|
||||
// value is the template, so calls can be chained.
|
||||
func (t *Template) Funcs(funcMap FuncMap) *Template {
|
||||
t.init()
|
||||
addValueFuncs(t.execFuncs, funcMap)
|
||||
addFuncs(t.parseFuncs, funcMap)
|
||||
return t
|
||||
}
|
||||
|
||||
// Lookup returns the template with the given name that is associated with t,
|
||||
// or nil if there is no such template.
|
||||
func (t *Template) Lookup(name string) *Template {
|
||||
if t.common == nil {
|
||||
return nil
|
||||
}
|
||||
return t.tmpl[name]
|
||||
}
|
||||
|
||||
// Parse parses a string into a template. Nested template definitions will be
|
||||
// associated with the top-level template t. Parse may be called multiple times
|
||||
// to parse definitions of templates to associate with t. It is an error if a
|
||||
// resulting template is non-empty (contains content other than template
|
||||
// definitions) and would replace a non-empty template with the same name.
|
||||
// (In multiple calls to Parse with the same receiver template, only one call
|
||||
// can contain text other than space, comments, and template definitions.)
|
||||
func (t *Template) Parse(text string) (*Template, error) {
|
||||
t.init()
|
||||
trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add the newly parsed trees, including the one for t, into our common structure.
|
||||
for name, tree := range trees {
|
||||
// If the name we parsed is the name of this template, overwrite this template.
|
||||
// The associate method checks it's not a redefinition.
|
||||
tmpl := t
|
||||
if name != t.name {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
// Even if t == tmpl, we need to install it in the common.tmpl map.
|
||||
if replace, err := t.associate(tmpl, tree); err != nil {
|
||||
return nil, err
|
||||
} else if replace {
|
||||
tmpl.Tree = tree
|
||||
}
|
||||
tmpl.leftDelim = t.leftDelim
|
||||
tmpl.rightDelim = t.rightDelim
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// associate installs the new template into the group of templates associated
|
||||
// with t. It is an error to reuse a name except to overwrite an empty
|
||||
// template. The two are already known to share the common structure.
|
||||
// The boolean return value reports whether to store this tree as t.Tree.
|
||||
func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
|
||||
if new.common != t.common {
|
||||
panic("internal error: associate not common")
|
||||
}
|
||||
name := new.name
|
||||
if old := t.tmpl[name]; old != nil {
|
||||
oldIsEmpty := parse.IsEmptyTree(old.Root)
|
||||
newIsEmpty := parse.IsEmptyTree(tree.Root)
|
||||
if newIsEmpty {
|
||||
// Whether old is empty or not, new is empty; no reason to replace old.
|
||||
return false, nil
|
||||
}
|
||||
if !oldIsEmpty {
|
||||
return false, fmt.Errorf("template: redefinition of template %q", name)
|
||||
}
|
||||
}
|
||||
t.tmpl[name] = new
|
||||
return true, nil
|
||||
}
|
19
vendor/github.com/alecthomas/units/COPYING
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
Copyright (C) 2014 Alec Thomas
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
11
vendor/github.com/alecthomas/units/README.md
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
# Units - Helpful unit multipliers and functions for Go
|
||||
|
||||
The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.
|
||||
|
||||
It allows for code like this:
|
||||
|
||||
```go
|
||||
n, err := ParseBase2Bytes("1KB")
|
||||
// n == 1024
|
||||
n = units.Mebibyte * 512
|
||||
```
|
83
vendor/github.com/alecthomas/units/bytes.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
package units
|
||||
|
||||
// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
|
||||
// etc.).
|
||||
type Base2Bytes int64
|
||||
|
||||
// Base-2 byte units.
|
||||
const (
|
||||
Kibibyte Base2Bytes = 1024
|
||||
KiB = Kibibyte
|
||||
Mebibyte = Kibibyte * 1024
|
||||
MiB = Mebibyte
|
||||
Gibibyte = Mebibyte * 1024
|
||||
GiB = Gibibyte
|
||||
Tebibyte = Gibibyte * 1024
|
||||
TiB = Tebibyte
|
||||
Pebibyte = Tebibyte * 1024
|
||||
PiB = Pebibyte
|
||||
Exbibyte = Pebibyte * 1024
|
||||
EiB = Exbibyte
|
||||
)
|
||||
|
||||
var (
|
||||
bytesUnitMap = MakeUnitMap("iB", "B", 1024)
|
||||
oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
|
||||
)
|
||||
|
||||
// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
|
||||
// and KiB are both 1024.
|
||||
func ParseBase2Bytes(s string) (Base2Bytes, error) {
|
||||
n, err := ParseUnit(s, bytesUnitMap)
|
||||
if err != nil {
|
||||
n, err = ParseUnit(s, oldBytesUnitMap)
|
||||
}
|
||||
return Base2Bytes(n), err
|
||||
}
|
||||
|
||||
func (b Base2Bytes) String() string {
|
||||
return ToString(int64(b), 1024, "iB", "B")
|
||||
}
|
||||
|
||||
var (
|
||||
metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
|
||||
)
|
||||
|
||||
// MetricBytes are SI byte units (1000 bytes in a kilobyte).
|
||||
type MetricBytes SI
|
||||
|
||||
// SI base-10 byte units.
|
||||
const (
|
||||
Kilobyte MetricBytes = 1000
|
||||
KB = Kilobyte
|
||||
Megabyte = Kilobyte * 1000
|
||||
MB = Megabyte
|
||||
Gigabyte = Megabyte * 1000
|
||||
GB = Gigabyte
|
||||
Terabyte = Gigabyte * 1000
|
||||
TB = Terabyte
|
||||
Petabyte = Terabyte * 1000
|
||||
PB = Petabyte
|
||||
Exabyte = Petabyte * 1000
|
||||
EB = Exabyte
|
||||
)
|
||||
|
||||
// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
|
||||
func ParseMetricBytes(s string) (MetricBytes, error) {
|
||||
n, err := ParseUnit(s, metricBytesUnitMap)
|
||||
return MetricBytes(n), err
|
||||
}
|
||||
|
||||
func (m MetricBytes) String() string {
|
||||
return ToString(int64(m), 1000, "B", "B")
|
||||
}
|
||||
|
||||
// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
|
||||
// respectively. That is, KiB represents 1024 and KB represents 1000.
|
||||
func ParseStrictBytes(s string) (int64, error) {
|
||||
n, err := ParseUnit(s, bytesUnitMap)
|
||||
if err != nil {
|
||||
n, err = ParseUnit(s, metricBytesUnitMap)
|
||||
}
|
||||
return int64(n), err
|
||||
}
|
13
vendor/github.com/alecthomas/units/doc.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// Package units provides helpful unit multipliers and functions for Go.
|
||||
//
|
||||
// The goal of this package is to have functionality similar to the time [1] package.
|
||||
//
|
||||
//
|
||||
// [1] http://golang.org/pkg/time/
|
||||
//
|
||||
// It allows for code like this:
|
||||
//
|
||||
// n, err := ParseBase2Bytes("1KB")
|
||||
// // n == 1024
|
||||
// n = units.Mebibyte * 512
|
||||
package units
|
26
vendor/github.com/alecthomas/units/si.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
package units
|
||||
|
||||
// SI units.
|
||||
type SI int64
|
||||
|
||||
// SI unit multiples.
|
||||
const (
|
||||
Kilo SI = 1000
|
||||
Mega = Kilo * 1000
|
||||
Giga = Mega * 1000
|
||||
Tera = Giga * 1000
|
||||
Peta = Tera * 1000
|
||||
Exa = Peta * 1000
|
||||
)
|
||||
|
||||
func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
|
||||
return map[string]float64{
|
||||
shortSuffix: 1,
|
||||
"K" + suffix: float64(scale),
|
||||
"M" + suffix: float64(scale * scale),
|
||||
"G" + suffix: float64(scale * scale * scale),
|
||||
"T" + suffix: float64(scale * scale * scale * scale),
|
||||
"P" + suffix: float64(scale * scale * scale * scale * scale),
|
||||
"E" + suffix: float64(scale * scale * scale * scale * scale * scale),
|
||||
}
|
||||
}
|
138
vendor/github.com/alecthomas/units/util.go
generated
vendored
Normal file
@ -0,0 +1,138 @@
|
||||
package units
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
|
||||
)
|
||||
|
||||
func ToString(n int64, scale int64, suffix, baseSuffix string) string {
|
||||
mn := len(siUnits)
|
||||
out := make([]string, mn)
|
||||
for i, m := range siUnits {
|
||||
if n%scale != 0 || i == 0 && n == 0 {
|
||||
s := suffix
|
||||
if i == 0 {
|
||||
s = baseSuffix
|
||||
}
|
||||
out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
|
||||
}
|
||||
n /= scale
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return strings.Join(out, "")
|
||||
}
|
||||
|
||||
// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
|
||||
var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
|
||||
|
||||
// leadingInt consumes the leading [0-9]* from s.
|
||||
func leadingInt(s string) (x int64, rem string, err error) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c < '0' || c > '9' {
|
||||
break
|
||||
}
|
||||
if x >= (1<<63-10)/10 {
|
||||
// overflow
|
||||
return 0, "", errLeadingInt
|
||||
}
|
||||
x = x*10 + int64(c) - '0'
|
||||
}
|
||||
return x, s[i:], nil
|
||||
}
|
||||
|
||||
func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
|
||||
// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
|
||||
orig := s
|
||||
f := float64(0)
|
||||
neg := false
|
||||
|
||||
// Consume [-+]?
|
||||
if s != "" {
|
||||
c := s[0]
|
||||
if c == '-' || c == '+' {
|
||||
neg = c == '-'
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
// Special case: if all that is left is "0", this is zero.
|
||||
if s == "0" {
|
||||
return 0, nil
|
||||
}
|
||||
if s == "" {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
for s != "" {
|
||||
g := float64(0) // this element of the sequence
|
||||
|
||||
var x int64
|
||||
var err error
|
||||
|
||||
// The next character must be [0-9.]
|
||||
if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
// Consume [0-9]*
|
||||
pl := len(s)
|
||||
x, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
g = float64(x)
|
||||
pre := pl != len(s) // whether we consumed anything before a period
|
||||
|
||||
// Consume (\.[0-9]*)?
|
||||
post := false
|
||||
if s != "" && s[0] == '.' {
|
||||
s = s[1:]
|
||||
pl := len(s)
|
||||
x, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
scale := 1.0
|
||||
for n := pl - len(s); n > 0; n-- {
|
||||
scale *= 10
|
||||
}
|
||||
g += float64(x) / scale
|
||||
post = pl != len(s)
|
||||
}
|
||||
if !pre && !post {
|
||||
// no digits (e.g. ".s" or "-.s")
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
|
||||
// Consume unit.
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c == '.' || ('0' <= c && c <= '9') {
|
||||
break
|
||||
}
|
||||
}
|
||||
u := s[:i]
|
||||
s = s[i:]
|
||||
unit, ok := unitMap[u]
|
||||
if !ok {
|
||||
return 0, errors.New("units: unknown unit " + u + " in " + orig)
|
||||
}
|
||||
|
||||
f += g * unit
|
||||
}
|
||||
|
||||
if neg {
|
||||
f = -f
|
||||
}
|
||||
if f < float64(-1<<63) || f > float64(1<<63-1) {
|
||||
return 0, errors.New("units: overflow parsing unit")
|
||||
}
|
||||
return int64(f), nil
|
||||
}
|
191
vendor/github.com/barakmich/glog/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
44
vendor/github.com/barakmich/glog/README
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
glog
|
||||
====
|
||||
|
||||
Leveled execution logs for Go.
|
||||
|
||||
This is an efficient pure Go implementation of leveled logs in the
|
||||
manner of the open source C++ package
|
||||
http://code.google.com/p/google-glog
|
||||
|
||||
By binding methods to booleans it is possible to use the log package
|
||||
without paying the expense of evaluating the arguments to the log.
|
||||
Through the -vmodule flag, the package also provides fine-grained
|
||||
control over logging at the file level.
|
||||
|
||||
The comment from glog.go introduces the ideas:
|
||||
|
||||
Package glog implements logging analogous to the Google-internal
|
||||
C++ INFO/ERROR/V setup. It provides functions Info, Warning,
|
||||
Error, Fatal, plus formatting variants such as Infof. It
|
||||
also provides V-style logging controlled by the -v and
|
||||
-vmodule=file=2 flags.
|
||||
|
||||
Basic examples:
|
||||
|
||||
glog.Info("Prepare to repel boarders")
|
||||
|
||||
glog.Fatalf("Initialization failed: %s", err)
|
||||
|
||||
See the documentation for the V function for an explanation
|
||||
of these examples:
|
||||
|
||||
if glog.V(2) {
|
||||
glog.Info("Starting transaction...")
|
||||
}
|
||||
|
||||
glog.V(2).Infoln("Processed", nItems, "elements")
|
||||
|
||||
|
||||
The repository contains an open source version of the log package
|
||||
used inside Google. The master copy of the source lives inside
|
||||
Google, not here. The code in this repo is for export only and is not itself
|
||||
under development. Feature requests will be ignored.
|
||||
|
||||
Send bug reports to golang-nuts@googlegroups.com.
|
1118
vendor/github.com/barakmich/glog/glog.go
generated
vendored
Normal file
File diff suppressed because it is too large
124
vendor/github.com/barakmich/glog/glog_file.go
generated
vendored
Normal file
@ -0,0 +1,124 @@
|
||||
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
|
||||
//
|
||||
// Copyright 2013 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// File I/O for logs.
|
||||
|
||||
package glog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MaxSize is the maximum size of a log file in bytes.
|
||||
var MaxSize uint64 = 1024 * 1024 * 1800
|
||||
|
||||
// logDirs lists the candidate directories for new log files.
|
||||
var logDirs []string
|
||||
|
||||
// If non-empty, overrides the choice of directory in which to write logs.
|
||||
// See createLogDirs for the full list of possible destinations.
|
||||
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
|
||||
|
||||
func createLogDirs() {
|
||||
if *logDir != "" {
|
||||
logDirs = append(logDirs, *logDir)
|
||||
}
|
||||
logDirs = append(logDirs, os.TempDir())
|
||||
}
|
||||
|
||||
var (
|
||||
pid = os.Getpid()
|
||||
program = filepath.Base(os.Args[0])
|
||||
host = "unknownhost"
|
||||
userName = "unknownuser"
|
||||
)
|
||||
|
||||
func init() {
|
||||
h, err := os.Hostname()
|
||||
if err == nil {
|
||||
host = shortHostname(h)
|
||||
}
|
||||
|
||||
current, err := user.Current()
|
||||
if err == nil {
|
||||
userName = current.Username
|
||||
}
|
||||
|
||||
// Sanitize userName since it may contain filepath separators on Windows.
|
||||
userName = strings.Replace(userName, `\`, "_", -1)
|
||||
}
|
||||
|
||||
// shortHostname returns its argument, truncating at the first period.
|
||||
// For instance, given "www.google.com" it returns "www".
|
||||
func shortHostname(hostname string) string {
|
||||
if i := strings.Index(hostname, "."); i >= 0 {
|
||||
return hostname[:i]
|
||||
}
|
||||
return hostname
|
||||
}
|
||||
|
||||
// logName returns a new log file name containing tag, with start time t, and
|
||||
// the name for the symlink for tag.
|
||||
func logName(tag string, t time.Time) (name, link string) {
|
||||
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
|
||||
program,
|
||||
host,
|
||||
userName,
|
||||
tag,
|
||||
t.Year(),
|
||||
t.Month(),
|
||||
t.Day(),
|
||||
t.Hour(),
|
||||
t.Minute(),
|
||||
t.Second(),
|
||||
pid)
|
||||
return name, program + "." + tag
|
||||
}
|
||||
|
||||
var onceLogDirs sync.Once
|
||||
|
||||
// create creates a new log file and returns the file and its filename, which
|
||||
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
|
||||
// successfully, create also attempts to update the symlink for that tag, ignoring
|
||||
// errors.
|
||||
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
|
||||
onceLogDirs.Do(createLogDirs)
|
||||
if len(logDirs) == 0 {
|
||||
return nil, "", errors.New("log: no log dirs")
|
||||
}
|
||||
name, link := logName(tag, t)
|
||||
var lastErr error
|
||||
for _, dir := range logDirs {
|
||||
fname := filepath.Join(dir, name)
|
||||
f, err := os.Create(fname)
|
||||
if err == nil {
|
||||
symlink := filepath.Join(dir, link)
|
||||
os.Remove(symlink) // ignore err
|
||||
os.Symlink(fname, symlink) // ignore err
|
||||
return f, fname, nil
|
||||
}
|
||||
lastErr = err
|
||||
}
|
||||
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
|
||||
}
|
71
vendor/github.com/barakmich/glog/glog_logstash.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
package glog
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type logstashMessage struct {
|
||||
Type string `json:"type"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// handleLogstashMessages sends logs to logstash.
|
||||
func (l *loggingT) handleLogstashMessages() {
|
||||
var conn net.Conn
|
||||
ticker := time.Tick(1 * time.Second)
|
||||
for {
|
||||
select {
|
||||
case <-l.logstashStop:
|
||||
conn.Close()
|
||||
return
|
||||
case <-ticker:
|
||||
var err error
|
||||
if conn == nil {
|
||||
fmt.Fprintln(os.Stderr, "Trying to connect to logstash server...")
|
||||
conn, err = net.Dial("tcp", l.logstashURL)
|
||||
if err != nil {
|
||||
conn = nil
|
||||
} else {
|
||||
fmt.Fprintln(os.Stderr, "Connected to logstash server.")
|
||||
}
|
||||
}
|
||||
case data := <-l.logstashChan:
|
||||
lm := logstashMessage{}
|
||||
lm.Type = l.logstashType
|
||||
lm.Message = strings.TrimSpace(data)
|
||||
packet, err := json.Marshal(lm)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Failed to marshal logstashMessage.")
|
||||
continue
|
||||
}
|
||||
if conn != nil {
|
||||
_, err := fmt.Fprintln(conn, string(packet))
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Not connected to logstash server, attempting reconnect.")
|
||||
conn = nil
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// There is no connection, so the log line is dropped.
|
||||
// Might be nice to add a buffer here so that we can ship
|
||||
// logs after the connection is up.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// startLogstash creates the logstash channel and kicks off handleLogstashMessages.
|
||||
func (l *loggingT) startLogstash() {
|
||||
l.logstashChan = make(chan string, 100)
|
||||
go l.handleLogstashMessages()
|
||||
}
|
||||
|
||||
// StopLogstash signals handleLogstashMessages to exit.
|
||||
func (l *loggingT) StopLogstash() {
|
||||
l.logstashStop <- true
|
||||
}
|
3
vendor/github.com/boltdb/bolt/.gitignore
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
*.prof
|
||||
*.test
|
||||
/bin/
|
20
vendor/github.com/boltdb/bolt/LICENSE
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Ben Johnson
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
54
vendor/github.com/boltdb/bolt/Makefile
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
TEST=.
|
||||
BENCH=.
|
||||
COVERPROFILE=/tmp/c.out
|
||||
BRANCH=`git rev-parse --abbrev-ref HEAD`
|
||||
COMMIT=`git rev-parse --short HEAD`
|
||||
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
|
||||
|
||||
default: build
|
||||
|
||||
bench:
|
||||
go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH)
|
||||
|
||||
# http://cloc.sourceforge.net/
|
||||
cloc:
|
||||
@cloc --not-match-f='Makefile|_test.go' .
|
||||
|
||||
cover: fmt
|
||||
go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) .
|
||||
go tool cover -html=$(COVERPROFILE)
|
||||
rm $(COVERPROFILE)
|
||||
|
||||
cpuprofile: fmt
|
||||
@go test -c
|
||||
@./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof
|
||||
|
||||
# go get github.com/kisielk/errcheck
|
||||
errcheck:
|
||||
@echo "=== errcheck ==="
|
||||
@errcheck github.com/boltdb/bolt
|
||||
|
||||
fmt:
|
||||
@go fmt ./...
|
||||
|
||||
get:
|
||||
@go get -d ./...
|
||||
|
||||
build: get
|
||||
@mkdir -p bin
|
||||
@go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt
|
||||
|
||||
test: fmt
|
||||
@go get github.com/stretchr/testify/assert
|
||||
@echo "=== TESTS ==="
|
||||
@go test -v -cover -test.run=$(TEST)
|
||||
@echo ""
|
||||
@echo ""
|
||||
@echo "=== CLI ==="
|
||||
@go test -v -test.run=$(TEST) ./cmd/bolt
|
||||
@echo ""
|
||||
@echo ""
|
||||
@echo "=== RACE DETECTOR ==="
|
||||
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
|
||||
|
||||
.PHONY: bench cloc cover cpuprofile fmt memprofile test
|
619
vendor/github.com/boltdb/bolt/README.md
generated
vendored
Normal file
@ -0,0 +1,619 @@
|
||||
Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png)
|
||||
====
|
||||
|
||||
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
|
||||
[LMDB project][lmdb]. The goal of the project is to provide a simple,
|
||||
fast, and reliable database for projects that don't require a full database
|
||||
server such as Postgres or MySQL.
|
||||
|
||||
Since Bolt is meant to be used as such a low-level piece of functionality,
|
||||
simplicity is key. The API will be small and only focus on getting values
|
||||
and setting values. That's it.
|
||||
|
||||
[hyc_symas]: https://twitter.com/hyc_symas
|
||||
[lmdb]: http://symas.com/mdb/
|
||||
|
||||
|
||||
## Project Status
|
||||
|
||||
Bolt is stable and the API is fixed. Full unit test coverage and randomized
|
||||
black box testing are used to ensure database consistency and thread safety.
|
||||
Bolt is currently in high-load production environments serving databases as
|
||||
large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
|
||||
services every day.
|
||||
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Installing
|
||||
|
||||
To start using Bolt, install Go and run `go get`:
|
||||
|
||||
```sh
|
||||
$ go get github.com/boltdb/bolt/...
|
||||
```
|
||||
|
||||
This will retrieve the library and install the `bolt` command line utility into
|
||||
your `$GOBIN` path.
|
||||
|
||||
|
||||
### Opening a database
|
||||
|
||||
The top-level object in Bolt is a `DB`. It is represented as a single file on
|
||||
your disk and represents a consistent snapshot of your data.
|
||||
|
||||
To open your database, simply use the `bolt.Open()` function:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Open the my.db data file in your current directory.
|
||||
// It will be created if it doesn't exist.
|
||||
db, err := bolt.Open("my.db", 0600, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Please note that Bolt obtains a file lock on the data file so multiple processes
|
||||
cannot open the same database at the same time. Opening an already open Bolt
|
||||
database will cause it to hang until the other process closes it. To prevent
|
||||
an indefinite wait you can pass a timeout option to the `Open()` function:
|
||||
|
||||
```go
|
||||
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
|
||||
```
|
||||
|
||||
|
||||
### Transactions
|
||||
|
||||
Bolt allows only one read-write transaction at a time but allows as many
|
||||
read-only transactions as you want at a time. Each transaction has a consistent
|
||||
view of the data as it existed when the transaction started.
|
||||
|
||||
Individual transactions and all objects created from them (e.g. buckets, keys)
|
||||
are not thread safe. To work with data in multiple goroutines you must start
|
||||
a transaction for each one or use locking to ensure only one goroutine accesses
|
||||
a transaction at a time. Creating a transaction from the `DB` is thread safe.
|
||||
|
||||
Read-only transactions and read-write transactions should not depend on one
|
||||
another and generally shouldn't be opened simultaneously in the same goroutine.
|
||||
This can cause a deadlock as the read-write transaction needs to periodically
|
||||
re-map the data file but it cannot do so while a read-only transaction is open.
|
||||
|
||||
|
||||
#### Read-write transactions
|
||||
|
||||
To start a read-write transaction, you can use the `DB.Update()` function:
|
||||
|
||||
```go
|
||||
err := db.Update(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Inside the closure, you have a consistent view of the database. You commit the
|
||||
transaction by returning `nil` at the end. You can also rollback the transaction
|
||||
at any point by returning an error. All database operations are allowed inside
|
||||
a read-write transaction.
|
||||
|
||||
Always check the return error as it will report any disk failures that can cause
|
||||
your transaction to not complete. If you return an error within your closure
|
||||
it will be passed through.
|
||||
|
||||
|
||||
#### Read-only transactions
|
||||
|
||||
To start a read-only transaction, you can use the `DB.View()` function:
|
||||
|
||||
```go
|
||||
err := db.View(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
You also get a consistent view of the database within this closure; however,
|
||||
no mutating operations are allowed within a read-only transaction. You can only
|
||||
retrieve buckets, retrieve values, and copy the database within a read-only
|
||||
transaction.
|
||||
|
||||
|
||||
#### Batch read-write transactions
|
||||
|
||||
Each `DB.Update()` waits for disk to commit the writes. This overhead
|
||||
can be minimized by combining multiple updates with the `DB.Batch()`
|
||||
function:
|
||||
|
||||
```go
|
||||
err := db.Batch(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Concurrent Batch calls are opportunistically combined into larger
|
||||
transactions. Batch is only useful when there are multiple goroutines
|
||||
calling it.
|
||||
|
||||
The trade-off is that `Batch` can call the given
|
||||
function multiple times, if parts of the transaction fail. The
|
||||
function must be idempotent and side effects must take effect only
|
||||
after a successful return from `DB.Batch()`.
|
||||
|
||||
For example: don't display messages from inside the function, instead
|
||||
set variables in the enclosing scope:
|
||||
|
||||
```go
|
||||
var id uint64
|
||||
err := db.Batch(func(tx *bolt.Tx) error {
|
||||
// Find last key in bucket, decode as bigendian uint64, increment
|
||||
// by one, encode back to []byte, and add new key.
|
||||
...
|
||||
id = newValue
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return ...
|
||||
}
|
||||
fmt.Printf("Allocated ID %d\n", id)
|
||||
```
|
||||
|
||||
|
||||
#### Managing transactions manually
|
||||
|
||||
The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
|
||||
function. These helper functions will start the transaction, execute a function,
|
||||
and then safely close your transaction if an error is returned. This is the
|
||||
recommended way to use Bolt transactions.
|
||||
|
||||
However, sometimes you may want to manually start and end your transactions.
|
||||
You can use the `Tx.Begin()` function directly but _please_ be sure to close the
|
||||
transaction.
|
||||
|
||||
```go
|
||||
// Start a writable transaction.
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Use the transaction...
|
||||
_, err := tx.CreateBucket([]byte("MyBucket"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit the transaction and check for error.
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
The first argument to `DB.Begin()` is a boolean indicating whether the transaction
|
||||
should be writable.
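For a read-only transaction, a minimal sketch passes `false` and closes the transaction with `Rollback()`:

```go
// Start a read-only transaction by passing false to DB.Begin().
tx, err := db.Begin(false)
if err != nil {
	return err
}
// Read-only transactions are closed with Rollback(), not Commit().
defer tx.Rollback()

// ... read data through tx ...
```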
|
||||
|
||||
|
||||
### Using buckets
|
||||
|
||||
Buckets are collections of key/value pairs within the database. All keys in a
|
||||
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
|
||||
function:
|
||||
|
||||
```go
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("MyBucket"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket: %s", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
You can also create a bucket only if it doesn't exist by using the
|
||||
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
|
||||
function for all your top-level buckets after you open your database so you can
|
||||
guarantee that they exist for future transactions.
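A sketch of that startup pattern (the bucket name is illustrative):

```go
err := db.Update(func(tx *bolt.Tx) error {
	// Creates the bucket on first run; subsequent runs are a no-op.
	_, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
	if err != nil {
		return fmt.Errorf("create bucket: %s", err)
	}
	return nil
})
```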
|
||||
|
||||
To delete a bucket, simply call the `Tx.DeleteBucket()` function.
|
||||
|
||||
|
||||
### Using key/value pairs
|
||||
|
||||
To save a key/value pair to a bucket, use the `Bucket.Put()` function:
|
||||
|
||||
```go
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
err := b.Put([]byte("answer"), []byte("42"))
|
||||
return err
|
||||
})
|
||||
```
|
||||
|
||||
This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
|
||||
bucket. To retrieve this value, we can use the `Bucket.Get()` function:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
v := b.Get([]byte("answer"))
|
||||
fmt.Printf("The answer is: %s\n", v)
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
The `Get()` function does not return an error because its operation is
|
||||
guaranteed to work (unless there is some kind of system failure). If the key
|
||||
exists then it will return its byte slice value. If it doesn't exist then it
|
||||
will return `nil`. It's important to note that you can have a zero-length value
|
||||
set to a key, which is different from the key not existing.
|
||||
|
||||
Use the `Bucket.Delete()` function to delete a key from the bucket.
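For example, a minimal sketch of deleting the key stored above:

```go
db.Update(func(tx *bolt.Tx) error {
	// Deleting a key that does not exist is not an error.
	return tx.Bucket([]byte("MyBucket")).Delete([]byte("answer"))
})
```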
|
||||
|
||||
Please note that values returned from `Get()` are only valid while the
|
||||
transaction is open. If you need to use a value outside of the transaction
|
||||
then you must use `copy()` to copy it to another byte slice.
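A minimal sketch of copying a value so it remains usable after the transaction
closes:

```go
var answer []byte
db.View(func(tx *bolt.Tx) error {
	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
	if v != nil {
		// Copy into memory we own; v points into the memory-mapped file.
		answer = make([]byte, len(v))
		copy(answer, v)
	}
	return nil
})
```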
|
||||
|
||||
|
||||
### Iterating over keys
|
||||
|
||||
Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
|
||||
iteration over these keys extremely fast. To iterate over keys we'll use a
|
||||
`Cursor`:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
c := b.Cursor()
|
||||
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
The cursor allows you to move to a specific point in the list of keys and move
|
||||
forward or backward through the keys one at a time.
|
||||
|
||||
The following functions are available on the cursor:
|
||||
|
||||
```
|
||||
First() Move to the first key.
|
||||
Last() Move to the last key.
|
||||
Seek() Move to a specific key.
|
||||
Next() Move to the next key.
|
||||
Prev() Move to the previous key.
|
||||
```
|
||||
|
||||
When you have iterated to the end of the cursor, `Next()` will return a `nil` key.
|
||||
You must seek to a position using `First()`, `Last()`, or `Seek()` before
|
||||
calling `Next()` or `Prev()`. If you do not seek to a position then these
|
||||
functions will return `nil`.
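For example, a sketch of iterating in reverse order using `Last()` and `Prev()`:

```go
db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	// Walk the keys from last to first.
	for k, v := c.Last(); k != nil; k, v = c.Prev() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```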
|
||||
|
||||
|
||||
#### Prefix scans
|
||||
|
||||
To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
c := tx.Bucket([]byte("MyBucket")).Cursor()
|
||||
|
||||
prefix := []byte("1234")
|
||||
for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
#### Range scans
|
||||
|
||||
Another common use case is scanning over a range such as a time range. If you
|
||||
use a sortable time encoding such as RFC3339 then you can query a specific
|
||||
date range like this:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume our events bucket has RFC3339 encoded time keys.
|
||||
c := tx.Bucket([]byte("Events")).Cursor()
|
||||
|
||||
// Our time range spans the 90's decade.
|
||||
min := []byte("1990-01-01T00:00:00Z")
|
||||
max := []byte("2000-01-01T00:00:00Z")
|
||||
|
||||
// Iterate over the 90's.
|
||||
for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
|
||||
fmt.Printf("%s: %s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
|
||||
#### ForEach()
|
||||
|
||||
You can also use the function `ForEach()` if you know you'll be iterating over
|
||||
all the keys in a bucket:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
b.ForEach(func(k, v []byte) error {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
|
||||
### Nested buckets
|
||||
|
||||
You can also store a bucket in a key to create nested buckets. The API is the
|
||||
same as the bucket management API on the `DB` object:
|
||||
|
||||
```go
|
||||
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
|
||||
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
|
||||
func (*Bucket) DeleteBucket(key []byte) error
|
||||
```
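A sketch of how this can be used (the bucket names and key are illustrative):

```go
err := db.Update(func(tx *bolt.Tx) error {
	// Create (or open) a top-level bucket, then a bucket nested inside it.
	users, err := tx.CreateBucketIfNotExists([]byte("Users"))
	if err != nil {
		return err
	}
	settings, err := users.CreateBucketIfNotExists([]byte("Settings"))
	if err != nil {
		return err
	}
	return settings.Put([]byte("theme"), []byte("dark"))
})
```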
|
||||
|
||||
|
||||
### Database backups
|
||||
|
||||
Bolt is a single file, so it's easy to back up. You can use the `Tx.WriteTo()`
|
||||
function to write a consistent view of the database to a writer. If you call
|
||||
this from a read-only transaction, it will perform a hot backup and not block
|
||||
your other database reads and writes. It will also use `O_DIRECT` when available
|
||||
to prevent page cache thrashing.
|
||||
|
||||
One common use case is to backup over HTTP so you can use tools like `cURL` to
|
||||
do database backups:
|
||||
|
||||
```go
|
||||
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
|
||||
err := db.View(func(tx *bolt.Tx) error {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
|
||||
w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
|
||||
_, err := tx.WriteTo(w)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Then you can back up using this command:
|
||||
|
||||
```sh
|
||||
$ curl http://localhost/backup > my.db
|
||||
```
|
||||
|
||||
Or you can open your browser to `http://localhost/backup` and it will download
|
||||
automatically.
|
||||
|
||||
If you want to back up to another file you can use the `Tx.CopyFile()` helper
|
||||
function.
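For example, a minimal sketch that snapshots the database to a local file (the path
and file mode are placeholders):

```go
err := db.View(func(tx *bolt.Tx) error {
	// Write a consistent snapshot of the database to disk.
	return tx.CopyFile("backup.db", 0600)
})
```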
|
||||
|
||||
|
||||
### Statistics
|
||||
|
||||
The database keeps a running count of many of the internal operations it
|
||||
performs so you can better understand what's going on. By grabbing a snapshot
|
||||
of these stats at two points in time we can see what operations were performed
|
||||
in that time range.
|
||||
|
||||
For example, we could start a goroutine to log stats every 10 seconds:
|
||||
|
||||
```go
|
||||
go func() {
|
||||
// Grab the initial stats.
|
||||
prev := db.Stats()
|
||||
|
||||
for {
|
||||
// Wait for 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
// Grab the current stats and diff them.
|
||||
stats := db.Stats()
|
||||
diff := stats.Sub(&prev)
|
||||
|
||||
// Encode stats to JSON and print to STDERR.
|
||||
json.NewEncoder(os.Stderr).Encode(diff)
|
||||
|
||||
// Save stats for the next loop.
|
||||
prev = stats
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
It's also useful to pipe these stats to a service such as statsd for monitoring
|
||||
or to provide an HTTP endpoint that will perform a fixed-length sample.
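One possible shape for such an endpoint is sketched below; it simply encodes a
snapshot of `db.Stats()` as JSON, and a fixed-length sample would diff two snapshots
with `Stats.Sub()` exactly as in the logging loop above. `db` is assumed to be a
package-level variable:

```go
http.HandleFunc("/stats", func(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(db.Stats()); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
})
```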
|
||||
|
||||
|
||||
### Read-Only Mode
|
||||
|
||||
Sometimes it is useful to create a shared, read-only Bolt database. To do this,
|
||||
set the `Options.ReadOnly` flag when opening your database. Read-only mode
|
||||
uses a shared lock to allow multiple processes to read from the database but
|
||||
it will block any processes from opening the database in read-write mode.
|
||||
|
||||
```go
|
||||
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Resources
|
||||
|
||||
For more information on getting started with Bolt, check out the following articles:
|
||||
|
||||
* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
|
||||
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
|
||||
|
||||
|
||||
## Comparison with other databases
|
||||
|
||||
### Postgres, MySQL, & other relational databases
|
||||
|
||||
Relational databases structure data into rows and are only accessible through
|
||||
the use of SQL. This approach provides flexibility in how you store and query
|
||||
your data but also incurs overhead in parsing and planning SQL statements. Bolt
|
||||
accesses all data by a byte slice key. This makes Bolt fast to read and write
|
||||
data by key but provides no built-in support for joining values together.
|
||||
|
||||
Most relational databases (with the exception of SQLite) are standalone servers
|
||||
that run separately from your application. This gives your systems
|
||||
flexibility to connect multiple application servers to a single database
|
||||
server but also adds overhead in serializing and transporting data over the
|
||||
network. Bolt runs as a library included in your application so all data access
|
||||
has to go through your application's process. This brings data closer to your
|
||||
application but limits multi-process access to the data.
|
||||
|
||||
|
||||
### LevelDB, RocksDB
|
||||
|
||||
LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
|
||||
they are libraries bundled into the application; however, their underlying
|
||||
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
|
||||
random writes by using a write ahead log and multi-tiered, sorted files called
|
||||
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
|
||||
have trade-offs.
|
||||
|
||||
If you require a high random write throughput (>10,000 w/sec) or you need to use
|
||||
spinning disks then LevelDB could be a good choice. If your application is
|
||||
read-heavy or does a lot of range scans then Bolt could be a good choice.
|
||||
|
||||
One other important consideration is that LevelDB does not have transactions.
|
||||
It supports batch writing of key/value pairs and it supports read snapshots
|
||||
but it will not give you the ability to do a compare-and-swap operation safely.
|
||||
Bolt supports fully serializable ACID transactions.
|
||||
|
||||
|
||||
### LMDB
|
||||
|
||||
Bolt was originally a port of LMDB so it is architecturally similar. Both use
|
||||
a B+tree, have ACID semantics with fully serializable transactions, and support
|
||||
lock-free MVCC using a single writer and multiple readers.
|
||||
|
||||
The two projects have somewhat diverged. LMDB heavily focuses on raw performance
|
||||
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
|
||||
several unsafe actions such as direct writes for the sake of performance. Bolt
|
||||
opts to disallow actions which can leave the database in a corrupted state. The
|
||||
only exception to this in Bolt is `DB.NoSync`.
|
||||
|
||||
There are also a few differences in API. LMDB requires a maximum mmap size when
|
||||
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
|
||||
automatically. LMDB overloads the getter and setter functions with multiple
|
||||
flags whereas Bolt splits these specialized cases into their own functions.
|
||||
|
||||
|
||||
## Caveats & Limitations
|
||||
|
||||
It's important to pick the right tool for the job and Bolt is no exception.
|
||||
Here are a few things to note when evaluating and using Bolt:
|
||||
|
||||
* Bolt is good for read-intensive workloads. Sequential write performance is
|
||||
also fast but random writes can be slow. You can add a write-ahead log or
|
||||
[transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt
|
||||
to mitigate this issue.
|
||||
|
||||
* Bolt uses a B+tree internally so there can be a lot of random page access.
|
||||
SSDs provide a significant performance boost over spinning disks.
|
||||
|
||||
* Try to avoid long running read transactions. Bolt uses copy-on-write so
|
||||
old pages cannot be reclaimed while an old transaction is using them.
|
||||
|
||||
* Byte slices returned from Bolt are only valid during a transaction. Once the
|
||||
transaction has been committed or rolled back then the memory they point to
|
||||
can be reused by a new page or can be unmapped from virtual memory and you'll
|
||||
see an `unexpected fault address` panic when accessing it.
|
||||
|
||||
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
|
||||
buckets that have random inserts will cause your database to have very poor
|
||||
page utilization (see the sketch at the end of this section).
|
||||
|
||||
* Use larger buckets in general. Smaller buckets cause poor page utilization
|
||||
once they become larger than the page size (typically 4KB).
|
||||
|
||||
* Bulk loading a lot of random writes into a new bucket can be slow as the
|
||||
page will not split until the transaction is committed. Randomly inserting
|
||||
more than 100,000 key/value pairs into a single new bucket in a single
|
||||
transaction is not advised.
|
||||
|
||||
* Bolt uses a memory-mapped file so the underlying operating system handles the
|
||||
caching of the data. Typically, the OS will cache as much of the file as it
|
||||
can in memory and will release memory as needed to other processes. This means
|
||||
that Bolt can show very high memory usage when working with large databases.
|
||||
However, this is expected and the OS will release memory as needed. Bolt can
|
||||
handle databases much larger than the available physical RAM.
|
||||
|
||||
* The data structures in the Bolt database are memory mapped so the data file
|
||||
will be endian specific. This means that you cannot copy a Bolt file from a
|
||||
little endian machine to a big endian machine and have it work. For most
|
||||
users this is not a concern since most modern CPUs are little endian.
|
||||
|
||||
* Because of the way pages are laid out on disk, Bolt cannot truncate data files
|
||||
and return free pages back to the disk. Instead, Bolt maintains a free list
|
||||
of unused pages within its data file. These free pages can be reused by later
|
||||
transactions. This works well for many use cases as databases generally tend
|
||||
to grow. However, it's important to note that deleting large chunks of data
|
||||
will not allow you to reclaim that space on disk.
|
||||
|
||||
For more information on page allocation, [see this comment][page-allocation].
|
||||
|
||||
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
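As an illustration of the `Bucket.FillPercent` caveat above, a high fill percent is
only a good idea when keys are inserted in roughly sorted (append-only) order; the
bucket name and payload below are illustrative:

```go
db.Update(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucketIfNotExists([]byte("Events"))
	if err != nil {
		return err
	}
	// Pack pages tightly; safe here because the sequence-derived keys below
	// always land at the end of the key space. Keep the default for buckets
	// with random inserts.
	b.FillPercent = 1.0

	seq, err := b.NextSequence()
	if err != nil {
		return err
	}
	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, seq)
	return b.Put(key, []byte("event payload"))
})
```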
|
||||
|
||||
|
||||
## Other Projects Using Bolt
|
||||
|
||||
Below is a list of public, open source projects that use Bolt:
|
||||
|
||||
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
|
||||
* [Bazil](https://github.com/bazillion/bazil) - A file system that lets your data reside where it is most convenient for it to reside.
|
||||
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
|
||||
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
|
||||
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
|
||||
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
|
||||
* [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
|
||||
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
|
||||
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
|
||||
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
|
||||
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
|
||||
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
|
||||
* [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
|
||||
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
|
||||
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
|
||||
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
|
||||
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
|
||||
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
|
||||
* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
|
||||
* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read.
|
||||
* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics.
|
||||
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
|
||||
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
|
||||
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
|
||||
|
||||
If you are using Bolt in a project please send a pull request to add it to the list.
|
135
vendor/github.com/boltdb/bolt/batch.go
generated
vendored
Normal file
135
vendor/github.com/boltdb/bolt/batch.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Batch calls fn as part of a batch. It behaves similarly to Update,
|
||||
// except:
|
||||
//
|
||||
// 1. concurrent Batch calls can be combined into a single Bolt
|
||||
// transaction.
|
||||
//
|
||||
// 2. the function passed to Batch may be called multiple times,
|
||||
// regardless of whether it returns error or not.
|
||||
//
|
||||
// This means that Batch function side effects must be idempotent and
|
||||
// take permanent effect only after a successful return is seen in
|
||||
// caller.
|
||||
//
|
||||
// Batch is only useful when there are multiple goroutines calling it.
|
||||
func (db *DB) Batch(fn func(*Tx) error) error {
|
||||
errCh := make(chan error, 1)
|
||||
|
||||
db.batchMu.Lock()
|
||||
if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
|
||||
// There is no existing batch, or the existing batch is full; start a new one.
|
||||
db.batch = &batch{
|
||||
db: db,
|
||||
}
|
||||
db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
|
||||
}
|
||||
db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
|
||||
if len(db.batch.calls) >= db.MaxBatchSize {
|
||||
// wake up batch, it's ready to run
|
||||
go db.batch.trigger()
|
||||
}
|
||||
db.batchMu.Unlock()
|
||||
|
||||
err := <-errCh
|
||||
if err == trySolo {
|
||||
err = db.Update(fn)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type call struct {
|
||||
fn func(*Tx) error
|
||||
err chan<- error
|
||||
}
|
||||
|
||||
type batch struct {
|
||||
db *DB
|
||||
timer *time.Timer
|
||||
start sync.Once
|
||||
calls []call
|
||||
}
|
||||
|
||||
// trigger runs the batch if it hasn't already been run.
|
||||
func (b *batch) trigger() {
|
||||
b.start.Do(b.run)
|
||||
}
|
||||
|
||||
// run performs the transactions in the batch and communicates results
|
||||
// back to DB.Batch.
|
||||
func (b *batch) run() {
|
||||
b.db.batchMu.Lock()
|
||||
b.timer.Stop()
|
||||
// Make sure no new work is added to this batch, but don't break
|
||||
// other batches.
|
||||
if b.db.batch == b {
|
||||
b.db.batch = nil
|
||||
}
|
||||
b.db.batchMu.Unlock()
|
||||
|
||||
retry:
|
||||
for len(b.calls) > 0 {
|
||||
var failIdx = -1
|
||||
err := b.db.Update(func(tx *Tx) error {
|
||||
for i, c := range b.calls {
|
||||
if err := safelyCall(c.fn, tx); err != nil {
|
||||
failIdx = i
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if failIdx >= 0 {
|
||||
// take the failing transaction out of the batch. it's
|
||||
// safe to shorten b.calls here because db.batch no longer
|
||||
// points to us, and we hold the mutex anyway.
|
||||
c := b.calls[failIdx]
|
||||
b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
|
||||
// tell the submitter to re-run it solo and continue with the rest of the batch
|
||||
c.err <- trySolo
|
||||
continue retry
|
||||
}
|
||||
|
||||
// pass success, or bolt internal errors, to all callers
|
||||
for _, c := range b.calls {
|
||||
if c.err != nil {
|
||||
c.err <- err
|
||||
}
|
||||
}
|
||||
break retry
|
||||
}
|
||||
}
|
||||
|
||||
// trySolo is a special sentinel error value used for signaling that a
|
||||
// transaction function should be re-run. It should never be seen by
|
||||
// callers.
|
||||
var trySolo = errors.New("batch function returned an error and should be re-run solo")
|
||||
|
||||
type panicked struct {
|
||||
reason interface{}
|
||||
}
|
||||
|
||||
func (p panicked) Error() string {
|
||||
if err, ok := p.reason.(error); ok {
|
||||
return err.Error()
|
||||
}
|
||||
return fmt.Sprintf("panic: %v", p.reason)
|
||||
}
|
||||
|
||||
func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
err = panicked{p}
|
||||
}
|
||||
}()
|
||||
return fn(tx)
|
||||
}
|
7
vendor/github.com/boltdb/bolt/bolt_386.go
generated
vendored
Normal file
7
vendor/github.com/boltdb/bolt/bolt_386.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
package bolt
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
7
vendor/github.com/boltdb/bolt/bolt_amd64.go
generated
vendored
Normal file
7
vendor/github.com/boltdb/bolt/bolt_amd64.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
package bolt
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
7
vendor/github.com/boltdb/bolt/bolt_arm.go
generated
vendored
Normal file
7
vendor/github.com/boltdb/bolt/bolt_arm.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
package bolt
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
12
vendor/github.com/boltdb/bolt/bolt_linux.go
generated
vendored
Normal file
12
vendor/github.com/boltdb/bolt/bolt_linux.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var odirect = syscall.O_DIRECT
|
||||
|
||||
// fdatasync flushes written data to a file descriptor.
|
||||
func fdatasync(db *DB) error {
|
||||
return syscall.Fdatasync(int(db.file.Fd()))
|
||||
}
|
29
vendor/github.com/boltdb/bolt/bolt_openbsd.go
generated
vendored
Normal file
29
vendor/github.com/boltdb/bolt/bolt_openbsd.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
msAsync = 1 << iota // perform asynchronous writes
|
||||
msSync // perform synchronous writes
|
||||
msInvalidate // invalidate cached data
|
||||
)
|
||||
|
||||
var odirect int
|
||||
|
||||
func msync(db *DB) error {
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
|
||||
if errno != 0 {
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func fdatasync(db *DB) error {
|
||||
if db.data != nil {
|
||||
return msync(db)
|
||||
}
|
||||
return db.file.Sync()
|
||||
}
|
86
vendor/github.com/boltdb/bolt/bolt_unix.go
generated
vendored
Normal file
86
vendor/github.com/boltdb/bolt/bolt_unix.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
||||
// +build !windows,!plan9
|
||||
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// flock acquires an advisory lock on a file descriptor.
|
||||
func flock(f *os.File, exclusive bool, timeout time.Duration) error {
|
||||
var t time.Time
|
||||
for {
|
||||
// If we're beyond our timeout then return an error.
|
||||
// This can only occur after we've attempted a flock once.
|
||||
if t.IsZero() {
|
||||
t = time.Now()
|
||||
} else if timeout > 0 && time.Since(t) > timeout {
|
||||
return ErrTimeout
|
||||
}
|
||||
flag := syscall.LOCK_SH
|
||||
if exclusive {
|
||||
flag = syscall.LOCK_EX
|
||||
}
|
||||
|
||||
// Otherwise attempt to obtain an exclusive lock.
|
||||
err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB)
|
||||
if err == nil {
|
||||
return nil
|
||||
} else if err != syscall.EWOULDBLOCK {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for a bit and try again.
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
// funlock releases an advisory lock on a file descriptor.
|
||||
func funlock(f *os.File) error {
|
||||
return syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
|
||||
}
|
||||
|
||||
// mmap memory maps a DB's data file.
|
||||
func mmap(db *DB, sz int) error {
|
||||
// Truncate and fsync to ensure file size metadata is flushed.
|
||||
// https://github.com/boltdb/bolt/issues/284
|
||||
if !db.NoGrowSync && !db.readOnly {
|
||||
if err := db.file.Truncate(int64(sz)); err != nil {
|
||||
return fmt.Errorf("file resize error: %s", err)
|
||||
}
|
||||
if err := db.file.Sync(); err != nil {
|
||||
return fmt.Errorf("file sync error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Map the data file to memory.
|
||||
b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save the original byte slice and convert to a byte array pointer.
|
||||
db.dataref = b
|
||||
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
|
||||
db.datasz = sz
|
||||
return nil
|
||||
}
|
||||
|
||||
// munmap unmaps a DB's data file from memory.
|
||||
func munmap(db *DB) error {
|
||||
// Ignore the unmap if we have no mapped data.
|
||||
if db.dataref == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmap using the original byte slice.
|
||||
err := syscall.Munmap(db.dataref)
|
||||
db.dataref = nil
|
||||
db.data = nil
|
||||
db.datasz = 0
|
||||
return err
|
||||
}
|
76
vendor/github.com/boltdb/bolt/bolt_windows.go
generated
vendored
Normal file
76
vendor/github.com/boltdb/bolt/bolt_windows.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var odirect int
|
||||
|
||||
// fdatasync flushes written data to a file descriptor.
|
||||
func fdatasync(db *DB) error {
|
||||
return db.file.Sync()
|
||||
}
|
||||
|
||||
// flock acquires an advisory lock on a file descriptor.
|
||||
func flock(f *os.File, _ bool, _ time.Duration) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// funlock releases an advisory lock on a file descriptor.
|
||||
func funlock(f *os.File) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// mmap memory maps a DB's data file.
|
||||
// Based on: https://github.com/edsrzf/mmap-go
|
||||
func mmap(db *DB, sz int) error {
|
||||
if !db.readOnly {
|
||||
// Truncate the database to the size of the mmap.
|
||||
if err := db.file.Truncate(int64(sz)); err != nil {
|
||||
return fmt.Errorf("truncate: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Open a file mapping handle.
|
||||
sizelo := uint32(sz >> 32)
|
||||
sizehi := uint32(sz) & 0xffffffff
|
||||
h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
|
||||
if h == 0 {
|
||||
return os.NewSyscallError("CreateFileMapping", errno)
|
||||
}
|
||||
|
||||
// Create the memory map.
|
||||
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
|
||||
if addr == 0 {
|
||||
return os.NewSyscallError("MapViewOfFile", errno)
|
||||
}
|
||||
|
||||
// Close mapping handle.
|
||||
if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
|
||||
return os.NewSyscallError("CloseHandle", err)
|
||||
}
|
||||
|
||||
// Convert to a byte array.
|
||||
db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
|
||||
db.datasz = sz
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// munmap unmaps a pointer from a file.
|
||||
// Based on: https://github.com/edsrzf/mmap-go
|
||||
func munmap(db *DB) error {
|
||||
if db.data == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
addr := (uintptr)(unsafe.Pointer(&db.data[0]))
|
||||
if err := syscall.UnmapViewOfFile(addr); err != nil {
|
||||
return os.NewSyscallError("UnmapViewOfFile", err)
|
||||
}
|
||||
return nil
|
||||
}
|