Merge branch 'master' of https://github.com/coreos/clair into erik-v2

commit bcc2929cc1

.travis.yml (11 changed lines)
@@ -1,14 +1,23 @@
 language: go
 
 go:
-  - 1.8
+  - "1.10"
 
 sudo: required
 
+env:
+  global:
+    - PATH=$HOME/.local/bin:$PATH
+
 install:
   - curl https://glide.sh/get | sh
+  - mkdir -p $HOME/.local/bin
+  - curl -o $HOME/.local/bin/prototool -sSL https://github.com/uber/prototool/releases/download/v0.1.0/prototool-$(uname -s)-$(uname -m)
+  - chmod +x $HOME/.local/bin/prototool
 
 script:
+  - prototool format -d api/v3/clairpb/clair.proto
+  - prototool lint api/v3/clairpb/clair.proto
   - go test $(glide novendor | grep -v contrib)
 
 dist: trusty

Dockerfile

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM golang:1.8-alpine
+FROM golang:1.10-alpine
 
 VOLUME /config
 EXPOSE 6060 6061
@@ -20,7 +20,7 @@ EXPOSE 6060 6061
 ADD . /go/src/github.com/coreos/clair/
 WORKDIR /go/src/github.com/coreos/clair/
 
-RUN apk add --no-cache git bzr rpm xz dumb-init && \
+RUN apk add --no-cache git rpm xz dumb-init && \
     go install -v github.com/coreos/clair/cmd/clair && \
    mv /go/bin/clair /clair && \
    rm -rf /go /usr/local/go

@@ -10,6 +10,8 @@ This document tracks projects that integrate with Clair. [Join the community](ht
 
 [Yair](https://github.com/yfoelling/yair): a lightweight command-line for working with clair with many different outputs. Mainly designed for usage in a CI Job.
 
+[Paclair](https://github.com/yebinama/paclair): a Python3 CLI tool to interact with Clair (easily configurable to access private registries).
+
 [Clairctl](https://github.com/jgsqware/clairctl): a lightweight command-line tool for working locally with Clair and generate HTML report.
 
 [Clair-SQS](https://github.com/zalando-incubator/clair-sqs): a container containing Clair and additional processes that integrate Clair with [Amazon SQS][sqs].

@@ -44,7 +44,7 @@ A [PostgreSQL 9.4+] database instance is required for all instructions.
 #### Kubernetes (Helm)
 
 If you don't have a local Kubernetes cluster already, check out [minikube].
-This assumes you've already ran `helm init` and you have access to a currently running instance of Tiller.
+This assumes you've already ran `helm init`, you have access to a currently running instance of Tiller and that you are running the latest version of helm.
 
 [minikube]: https://github.com/kubernetes/minikube
 
@@ -53,6 +53,7 @@ git clone https://github.com/coreos/clair
 cd clair/contrib/helm
 cp clair/values.yaml ~/my_custom_values.yaml
 vi ~/my_custom_values.yaml
+helm dependency update clair
 helm install clair -f ~/my_custom_values.yaml
 ```
 
@@ -86,14 +87,12 @@ To build Clair, you need to latest stable version of [Go] and a working [Go envi
 In addition, Clair requires some additional binaries be installed on the system [$PATH] as runtime dependencies:
 
 * [git]
-* [bzr]
 * [rpm]
 * [xz]
 
 [Go]: https://github.com/golang/go/releases
 [Go environment]: https://golang.org/doc/code.html
 [git]: https://git-scm.com
-[bzr]: http://bazaar.canonical.com/en
 [rpm]: http://www.rpm.org
 [xz]: http://tukaani.org/xz
 [$PATH]: https://en.wikipedia.org/wiki/PATH_(variable)

@@ -47,7 +47,7 @@ Thus, the project was named `Clair` after the French term which translates to *c
 ## Contact
 
 - IRC: #[clair](irc://irc.freenode.org:6667/#clair) on freenode.org
-- Bugs: [issues](https://github.com/coreos/etcd/issues)
+- Bugs: [issues](https://github.com/coreos/clair/issues)
 
 ## Contributing
 

api/v3/clairpb/Dockerfile (new file, 7 lines)

@@ -0,0 +1,7 @@
+FROM golang:alpine
+
+RUN apk add --update --no-cache git bash protobuf-dev
+
+RUN go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
+RUN go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
+RUN go get -u github.com/golang/protobuf/protoc-gen-go

(deleted Makefile)

@@ -1,17 +0,0 @@
-all:
-	protoc -I/usr/local/include -I. \
-		-I${GOPATH}/src \
-		-I${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
-		--go_out=plugins=grpc:. \
-		clair.proto
-	protoc -I/usr/local/include -I. \
-		-I${GOPATH}/src \
-		-I${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
-		--grpc-gateway_out=logtostderr=true:. \
-		clair.proto
-	protoc -I/usr/local/include -I. \
-		-I${GOPATH}/src \
-		-I${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
-		--swagger_out=logtostderr=true:. \
-		clair.proto
-	go generate .

(File diff suppressed because it is too large)

api/v3/clairpb/clair.pb.gw.go (regenerated)

@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-grpc-gateway
-// source: clair.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: api/v3/clairpb/clair.proto
 
 /*
 Package clairpb is a reverse proxy.
@@ -29,19 +28,6 @@ var _ status.Status
 var _ = runtime.String
 var _ = utilities.NewDoubleArray
 
-func request_AncestryService_PostAncestry_0(ctx context.Context, marshaler runtime.Marshaler, client AncestryServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
-	var protoReq PostAncestryRequest
-	var metadata runtime.ServerMetadata
-
-	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
-		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
-	}
-
-	msg, err := client.PostAncestry(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
-	return msg, metadata, err
-
-}
-
 var (
 	filter_AncestryService_GetAncestry_0 = &utilities.DoubleArray{Encoding: map[string]int{"ancestry_name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
 )
@@ -65,7 +51,7 @@ func request_AncestryService_GetAncestry_0(ctx context.Context, marshaler runtim
 	protoReq.AncestryName, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, err
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ancestry_name", err)
 	}
 
 	if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_AncestryService_GetAncestry_0); err != nil {
@@ -77,6 +63,21 @@ func request_AncestryService_GetAncestry_0(ctx context.Context, marshaler runtim
 
 }
 
+func request_AncestryService_PostAncestry_0(ctx context.Context, marshaler runtime.Marshaler, client AncestryServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq PostAncestryRequest
+	var metadata runtime.ServerMetadata
+
+	if req.ContentLength > 0 {
+		if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
+			return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+		}
+	}
+
+	msg, err := client.PostAncestry(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
 var (
 	filter_NotificationService_GetNotification_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
 )
@@ -100,7 +101,7 @@ func request_NotificationService_GetNotification_0(ctx context.Context, marshale
 	protoReq.Name, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, err
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
 	}
 
 	if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_NotificationService_GetNotification_0); err != nil {
@@ -131,7 +132,7 @@ func request_NotificationService_MarkNotificationAsRead_0(ctx context.Context, m
 	protoReq.Name, err = runtime.String(val)
 
 	if err != nil {
-		return nil, metadata, err
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
 	}
 
 	msg, err := client.MarkNotificationAsRead(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
@@ -167,39 +168,18 @@ func RegisterAncestryServiceHandlerFromEndpoint(ctx context.Context, mux *runtim
 // RegisterAncestryServiceHandler registers the http handlers for service AncestryService to "mux".
 // The handlers forward requests to the grpc endpoint over "conn".
 func RegisterAncestryServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
-	client := NewAncestryServiceClient(conn)
+	return RegisterAncestryServiceHandlerClient(ctx, mux, NewAncestryServiceClient(conn))
+}
 
-	mux.Handle("POST", pattern_AncestryService_PostAncestry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
-		ctx, cancel := context.WithCancel(ctx)
-		defer cancel()
-		if cn, ok := w.(http.CloseNotifier); ok {
-			go func(done <-chan struct{}, closed <-chan bool) {
-				select {
-				case <-done:
-				case <-closed:
-					cancel()
-				}
-			}(ctx.Done(), cn.CloseNotify())
-		}
-		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
-		rctx, err := runtime.AnnotateContext(ctx, mux, req)
-		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
-			return
-		}
-		resp, md, err := request_AncestryService_PostAncestry_0(rctx, inboundMarshaler, client, req, pathParams)
-		ctx = runtime.NewServerMetadataContext(ctx, md)
-		if err != nil {
-			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
-			return
-		}
-
-		forward_AncestryService_PostAncestry_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
-	})
-
+// RegisterAncestryServiceHandler registers the http handlers for service AncestryService to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "AncestryServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AncestryServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "AncestryServiceClient" to call the correct interceptors.
+func RegisterAncestryServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AncestryServiceClient) error {
 	mux.Handle("GET", pattern_AncestryService_GetAncestry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
-		ctx, cancel := context.WithCancel(ctx)
+		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		if cn, ok := w.(http.CloseNotifier); ok {
 			go func(done <-chan struct{}, closed <-chan bool) {
@@ -227,19 +207,48 @@ func RegisterAncestryServiceHandler(ctx context.Context, mux *runtime.ServeMux,
 
 	})
 
+	mux.Handle("POST", pattern_AncestryService_PostAncestry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		if cn, ok := w.(http.CloseNotifier); ok {
+			go func(done <-chan struct{}, closed <-chan bool) {
+				select {
+				case <-done:
+				case <-closed:
+					cancel()
+				}
+			}(ctx.Done(), cn.CloseNotify())
+		}
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_AncestryService_PostAncestry_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_AncestryService_PostAncestry_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
 	return nil
 }
 
 var (
-	pattern_AncestryService_PostAncestry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0}, []string{"ancestry"}, ""))
 
 	pattern_AncestryService_GetAncestry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 1, 0, 4, 1, 5, 1}, []string{"ancestry", "ancestry_name"}, ""))
 
+	pattern_AncestryService_PostAncestry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0}, []string{"ancestry"}, ""))
 )
 
 var (
-	forward_AncestryService_PostAncestry_0 = runtime.ForwardResponseMessage
 
 	forward_AncestryService_GetAncestry_0 = runtime.ForwardResponseMessage
 
+	forward_AncestryService_PostAncestry_0 = runtime.ForwardResponseMessage
 )
@@ -270,10 +279,18 @@ func RegisterNotificationServiceHandlerFromEndpoint(ctx context.Context, mux *ru
 // RegisterNotificationServiceHandler registers the http handlers for service NotificationService to "mux".
 // The handlers forward requests to the grpc endpoint over "conn".
 func RegisterNotificationServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
-	client := NewNotificationServiceClient(conn)
+	return RegisterNotificationServiceHandlerClient(ctx, mux, NewNotificationServiceClient(conn))
+}
 
+// RegisterNotificationServiceHandler registers the http handlers for service NotificationService to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "NotificationServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "NotificationServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "NotificationServiceClient" to call the correct interceptors.
+func RegisterNotificationServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client NotificationServiceClient) error {
 
 	mux.Handle("GET", pattern_NotificationService_GetNotification_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
-		ctx, cancel := context.WithCancel(ctx)
+		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		if cn, ok := w.(http.CloseNotifier); ok {
 			go func(done <-chan struct{}, closed <-chan bool) {
@@ -302,7 +319,7 @@ func RegisterNotificationServiceHandler(ctx context.Context, mux *runtime.ServeM
 	})
 
 	mux.Handle("DELETE", pattern_NotificationService_MarkNotificationAsRead_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
-		ctx, cancel := context.WithCancel(ctx)
+		ctx, cancel := context.WithCancel(req.Context())
 		defer cancel()
 		if cn, ok := w.(http.CloseNotifier); ok {
 			go func(done <-chan struct{}, closed <-chan bool) {
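
For context, here is a minimal sketch (not part of this diff) of how the regenerated registration helpers can be wired into an HTTP gateway. The registration functions and the `clairpb` package come from the generated code above; the listen address and gRPC endpoint are assumptions for the example.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/coreos/clair/api/v3/clairpb"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The gateway mux translates REST calls into gRPC calls.
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}

	// "localhost:6060" is an assumed gRPC endpoint for a running Clair instance.
	if err := clairpb.RegisterAncestryServiceHandlerFromEndpoint(ctx, mux, "localhost:6060", opts); err != nil {
		log.Fatal(err)
	}
	if err := clairpb.RegisterNotificationServiceHandlerFromEndpoint(ctx, mux, "localhost:6060", opts); err != nil {
		log.Fatal(err)
	}

	// Serve the REST gateway; the port is arbitrary for this sketch.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```

The `...HandlerClient` variants added in this diff additionally allow passing an `AncestryServiceClient` or `NotificationServiceClient` implementation directly, instead of dialing a connection.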

api/v3/clairpb/clair.proto

@@ -1,4 +1,4 @@
-// Copyright 2017 clair authors
+// Copyright 2018 clair authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -13,150 +13,197 @@
 // limitations under the License.
 
 syntax = "proto3";
-option go_package = "clairpb";
-
-package clairpb;
-import "google/api/annotations.proto";
-import "google/protobuf/empty.proto";
 import "google/protobuf/timestamp.proto";
 
+import "google/api/annotations.proto";
+
+package coreos.clair;
+
+option go_package = "clairpb";
+option java_package = "com.coreos.clair.pb";
 
 message Vulnerability {
+  // The name of the vulnerability.
   string name = 1;
+  // The name of the namespace in which the vulnerability was detected.
   string namespace_name = 2;
+  // A description of the vulnerability according to the source for the namespace.
  string description = 3;
+  // A link to the vulnerability according to the source for the namespace.
   string link = 4;
+  // How dangerous the vulnerability is.
   string severity = 5;
+  // Namespace agnostic metadata about the vulnerability.
   string metadata = 6;
-  // fixed_by exists when vulnerability is under feature.
+  // The feature that fixes this vulnerability.
+  // This field only exists when a vulnerability is a part of a Feature.
   string fixed_by = 7;
-  // affected_versions exists when vulnerability is under notification.
+  // The Features that are affected by the vulnerability.
+  // This field only exists when a vulnerability is a part of a Notification.
   repeated Feature affected_versions = 8;
 }
 
-message ClairStatus {
-  // listers and detectors are processors implemented in this Clair and used to
-  // scan ancestries
-  repeated string listers = 1;
-  repeated string detectors = 2;
-  google.protobuf.Timestamp last_update_time = 3;
-}
-
-message Feature{
+message Feature {
+  // The name of the feature.
   string name = 1;
+  // The name of the namespace in which the feature is detected.
   string namespace_name = 2;
+  // The specific version of this feature.
   string version = 3;
+  // The format used to parse version numbers for the feature.
   string version_format = 4;
+  // The list of vulnerabilities that affect the feature.
   repeated Vulnerability vulnerabilities = 5;
 }
 
-message Ancestry {
-  string name = 1;
-  repeated Feature features = 2;
-  repeated Layer layers = 3;
-
-  // scanned_listers and scanned_detectors are used to scan this ancestry, it
-  // may be different from listers and detectors in ClairStatus since the
-  // ancestry could be scanned by previous version of Clair.
-  repeated string scanned_listers = 4;
-  repeated string scanned_detectors = 5;
-}
-
 message Layer {
+  // The sha256 tarsum for the layer.
   string hash = 1;
 }
 
-message Notification {
-  string name = 1;
-  string created = 2;
-  string notified = 3;
-  string deleted = 4;
-  PagedVulnerableAncestries old = 5;
-  PagedVulnerableAncestries new = 6;
-}
-
-message IndexedAncestryName {
-  // index is unique to name in all streams simultaneously streamed, increasing
-  // and larger than all indexes in previous page in same stream.
-  int32 index = 1;
-  string name = 2;
-}
-
-message PagedVulnerableAncestries {
-  string current_page = 1;
-  // if next_page is empty, it signals the end of all pages.
-  string next_page = 2;
-  int32 limit = 3;
-  Vulnerability vulnerability = 4;
-  repeated IndexedAncestryName ancestries = 5;
-}
-
-message PostAncestryRequest {
-  message PostLayer {
-    string hash = 1;
-    string path = 2;
-    map<string, string> headers = 3;
+service AncestryService {
+  // The RPC used to read the results of scanning for a particular ancestry.
+  rpc GetAncestry(GetAncestryRequest) returns (GetAncestryResponse) {
+    option (google.api.http) = { get: "/ancestry/{ancestry_name}" };
   }
-  string ancestry_name = 1;
-  string format = 2;
-  repeated PostLayer layers = 3;
-}
-
-message PostAncestryResponse {
-  ClairStatus status = 1;
-}
-
-message GetAncestryRequest {
-  string ancestry_name = 1;
-  bool with_vulnerabilities = 2;
-  bool with_features = 3;
-}
-
-message GetAncestryResponse {
-  Ancestry ancestry = 1;
-  ClairStatus status = 2;
-}
-
-message GetNotificationRequest {
-  // if the vulnerability_page is empty, it implies the first page.
-  string old_vulnerability_page = 1;
-  string new_vulnerability_page = 2;
-  int32 limit = 3;
-  string name = 4;
-}
-
-message GetNotificationResponse {
-  Notification notification = 1;
-}
-
-message MarkNotificationAsReadRequest {
-  string name = 1;
-}
-
-service AncestryService{
+  // The RPC used to create a new scan of an ancestry.
   rpc PostAncestry(PostAncestryRequest) returns (PostAncestryResponse) {
     option (google.api.http) = {
       post: "/ancestry"
       body: "*"
     };
   }
-
-  rpc GetAncestry(GetAncestryRequest) returns (GetAncestryResponse) {
-    option (google.api.http) = {
-      get: "/ancestry/{ancestry_name}"
-    };
-  }
 }
 
-service NotificationService{
+message ClairStatus {
+  // The configured list of feature listers used to scan an ancestry.
+  repeated string listers = 1;
+  // The configured list of namespace detectors used to scan an ancestry.
+  repeated string detectors = 2;
+  // The time at which the updater last ran.
+  google.protobuf.Timestamp last_update_time = 3;
+}
+
+message GetAncestryRequest {
+  // The name of the desired ancestry.
+  string ancestry_name = 1;
+  // Whether to include vulnerabilities or not in the response.
+  bool with_vulnerabilities = 2;
+  // Whether to include features or not in the response.
+  bool with_features = 3;
+}
+
+message GetAncestryResponse {
+  message Ancestry {
+    // The name of the desired ancestry.
+    string name = 1;
+    // The list of features present in the ancestry.
+    // This will only be provided if requested.
+    repeated Feature features = 2;
+    // The layers present in the ancestry.
+    repeated Layer layers = 3;
+    // The configured list of feature listers used to scan this ancestry.
+    repeated string scanned_listers = 4;
+    // The configured list of namespace detectors used to scan an ancestry.
+    repeated string scanned_detectors = 5;
+  }
+  // The ancestry requested.
+  Ancestry ancestry = 1;
+  // The status of Clair at the time of the request.
+  ClairStatus status = 2;
+}
+
+message PostAncestryRequest {
+  message PostLayer {
+    // The hash of the layer.
+    string hash = 1;
+    // The location of the layer (URL or filepath).
+    string path = 2;
+    // Any HTTP Headers that need to be used if requesting a layer over HTTP(S).
+    map<string, string> headers = 3;
+  }
+  // The name of the ancestry being scanned.
+  // If scanning OCI images, this should be the hash of the manifest.
+  string ancestry_name = 1;
+  // The format of the image being uploaded.
+  string format = 2;
+  // The layers to be scanned for this particular ancestry.
+  repeated PostLayer layers = 3;
+}
+
+message PostAncestryResponse {
+  // The status of Clair at the time of the request.
+  ClairStatus status = 1;
+}
+
+service NotificationService {
+  // The RPC used to get a particularly Notification.
   rpc GetNotification(GetNotificationRequest) returns (GetNotificationResponse) {
-    option (google.api.http) = {
-      get: "/notifications/{name}"
-    };
+    option (google.api.http) = { get: "/notifications/{name}" };
   }
-  rpc MarkNotificationAsRead(MarkNotificationAsReadRequest) returns (google.protobuf.Empty) {
-    option (google.api.http) = {
-      delete: "/notifications/{name}"
-    };
+  // The RPC used to mark a Notification as read after it has been processed.
+  rpc MarkNotificationAsRead(MarkNotificationAsReadRequest) returns (MarkNotificationAsReadResponse) {
+    option (google.api.http) = { delete: "/notifications/{name}" };
   }
 }
+
+message GetNotificationRequest {
+  // The current page of previous vulnerabilities for the ancestry.
+  // This will be empty when it is the first page.
+  string old_vulnerability_page = 1;
+  // The current page of vulnerabilities for the ancestry.
+  // This will be empty when it is the first page.
+  string new_vulnerability_page = 2;
+  // The requested maximum number of results per page.
+  int32 limit = 3;
+  // The name of the notification being requested.
+  string name = 4;
+}
+
+message GetNotificationResponse {
+  message Notification {
+    // The name of the requested notification.
+    string name = 1;
+    // The time at which the notification was created.
+    string created = 2;
+    // The time at which the notification was last sent out.
+    string notified = 3;
+    // The time at which a notification has been deleted.
+    string deleted = 4;
+    // The previous vulnerability and a paginated view of the ancestries it affects.
+    PagedVulnerableAncestries old = 5;
+    // The newly updated vulnerability and a paginated view of the ancestries it affects.
+    PagedVulnerableAncestries new = 6;
+  }
+  // The notification as requested.
+  Notification notification = 1;
+}
+
+message PagedVulnerableAncestries {
+  message IndexedAncestryName {
+    // The index is an ever increasing number associated with the particular ancestry.
+    // This is useful if you're processing notifications, and need to keep track of the progress of paginating the results.
+    int32 index = 1;
+    // The name of the ancestry.
+    string name = 2;
+  }
+  // The identifier for the current page.
+  string current_page = 1;
+  // The token used to request the next page.
+  // This will be empty when there are no more pages.
+  string next_page = 2;
+  // The requested maximum number of results per page.
+  int32 limit = 3;
+  // The vulnerability that affects a given set of ancestries.
+  Vulnerability vulnerability = 4;
+  // The ancestries affected by a vulnerability.
+  repeated IndexedAncestryName ancestries = 5;
+}
+
+message MarkNotificationAsReadRequest {
+  // The name of the Notification that has been processed.
+  string name = 1;
+}
+
+message MarkNotificationAsReadResponse {}
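
To illustrate the reorganized schema: the messages that became nested types (`Ancestry`, `PostLayer`, `IndexedAncestryName`, `Notification`) turn into `Parent_Nested` names in the generated Go code, as the convert.go changes further below show. The following is a hedged sketch (not part of this diff) of a client call against the v3 API using that generated code; the address, image name, layer URL, and hash are invented for the example.

```go
package main

import (
	"context"
	"log"

	"github.com/coreos/clair/api/v3/clairpb"
	"google.golang.org/grpc"
)

func main() {
	// "localhost:6060" is an assumed address for Clair's gRPC API.
	conn, err := grpc.Dial("localhost:6060", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := clairpb.NewAncestryServiceClient(conn)

	// Equivalent to POST /ancestry over the gateway: submit layers for scanning.
	_, err = client.PostAncestry(context.Background(), &clairpb.PostAncestryRequest{
		AncestryName: "example-image",
		Format:       "Docker",
		Layers: []*clairpb.PostAncestryRequest_PostLayer{
			{Hash: "sha256:...", Path: "https://registry.example.com/layer.tar"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent to GET /ancestry/{ancestry_name}: read back the scan results.
	resp, err := client.GetAncestry(context.Background(), &clairpb.GetAncestryRequest{
		AncestryName:        "example-image",
		WithVulnerabilities: true,
		WithFeatures:        true,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resp.GetAncestry().GetName())
}
```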

api/v3/clairpb/clair.swagger.json

@@ -1,7 +1,7 @@
 {
   "swagger": "2.0",
   "info": {
-    "title": "clair.proto",
+    "title": "api/v3/clairpb/clair.proto",
     "version": "version not set"
   },
   "schemes": [
@@ -17,12 +17,13 @@
   "paths": {
     "/ancestry": {
       "post": {
+        "summary": "The RPC used to create a new scan of an ancestry.",
         "operationId": "PostAncestry",
         "responses": {
           "200": {
             "description": "",
             "schema": {
-              "$ref": "#/definitions/clairpbPostAncestryResponse"
+              "$ref": "#/definitions/clairPostAncestryResponse"
             }
           }
         },
@@ -32,7 +33,7 @@
             "in": "body",
             "required": true,
             "schema": {
-              "$ref": "#/definitions/clairpbPostAncestryRequest"
+              "$ref": "#/definitions/clairPostAncestryRequest"
             }
           }
         ],
@@ -43,12 +44,13 @@
     },
     "/ancestry/{ancestry_name}": {
       "get": {
+        "summary": "The RPC used to read the results of scanning for a particular ancestry.",
         "operationId": "GetAncestry",
         "responses": {
           "200": {
             "description": "",
             "schema": {
-              "$ref": "#/definitions/clairpbGetAncestryResponse"
+              "$ref": "#/definitions/clairGetAncestryResponse"
             }
           }
         },
@@ -61,6 +63,7 @@
           },
           {
             "name": "with_vulnerabilities",
+            "description": "Whether to include vulnerabilities or not in the response.",
             "in": "query",
             "required": false,
             "type": "boolean",
@@ -68,6 +71,7 @@
           },
           {
             "name": "with_features",
+            "description": "Whether to include features or not in the response.",
             "in": "query",
             "required": false,
             "type": "boolean",
@@ -81,12 +85,13 @@
     },
     "/notifications/{name}": {
       "get": {
+        "summary": "The RPC used to get a particularly Notification.",
         "operationId": "GetNotification",
         "responses": {
           "200": {
             "description": "",
             "schema": {
-              "$ref": "#/definitions/clairpbGetNotificationResponse"
+              "$ref": "#/definitions/clairGetNotificationResponse"
             }
           }
         },
@@ -99,19 +104,21 @@
           },
           {
             "name": "old_vulnerability_page",
-            "description": "if the vulnerability_page is empty, it implies the first page.",
+            "description": "The current page of previous vulnerabilities for the ancestry.\nThis will be empty when it is the first page.",
             "in": "query",
             "required": false,
             "type": "string"
           },
           {
             "name": "new_vulnerability_page",
+            "description": "The current page of vulnerabilities for the ancestry.\nThis will be empty when it is the first page.",
             "in": "query",
             "required": false,
             "type": "string"
           },
           {
             "name": "limit",
+            "description": "The requested maximum number of results per page.",
             "in": "query",
             "required": false,
             "type": "integer",
@@ -123,12 +130,13 @@
           ]
         },
         "delete": {
+          "summary": "The RPC used to mark a Notification as read after it has been processed.",
           "operationId": "MarkNotificationAsRead",
           "responses": {
             "200": {
               "description": "",
               "schema": {
-                "$ref": "#/definitions/protobufEmpty"
+                "$ref": "#/definitions/clairMarkNotificationAsReadResponse"
               }
             }
           },
@@ -147,57 +155,107 @@
       }
     },
   "definitions": {
-    "PostAncestryRequestPostLayer": {
-      "type": "object",
-      "properties": {
-        "hash": {
-          "type": "string"
-        },
-        "path": {
-          "type": "string"
-        },
-        "headers": {
-          "type": "object",
-          "additionalProperties": {
-            "type": "string"
-          }
-        }
-      }
-    },
-    "clairpbAncestry": {
+    "GetAncestryResponseAncestry": {
       "type": "object",
       "properties": {
         "name": {
-          "type": "string"
+          "type": "string",
+          "description": "The name of the desired ancestry."
        },
         "features": {
           "type": "array",
           "items": {
-            "$ref": "#/definitions/clairpbFeature"
-          }
+            "$ref": "#/definitions/clairFeature"
+          },
+          "description": "The list of features present in the ancestry.\nThis will only be provided if requested."
         },
         "layers": {
           "type": "array",
           "items": {
-            "$ref": "#/definitions/clairpbLayer"
-          }
+            "$ref": "#/definitions/clairLayer"
+          },
+          "description": "The layers present in the ancestry."
         },
         "scanned_listers": {
           "type": "array",
           "items": {
             "type": "string"
           },
-          "description": "scanned_listers and scanned_detectors are used to scan this ancestry, it\nmay be different from listers and detectors in ClairStatus since the\nancestry could be scanned by previous version of Clair."
+          "description": "The configured list of feature listers used to scan this ancestry."
         },
         "scanned_detectors": {
           "type": "array",
           "items": {
             "type": "string"
-          }
+          },
+          "description": "The configured list of namespace detectors used to scan an ancestry."
         }
       }
     },
-    "clairpbClairStatus": {
+    "GetNotificationResponseNotification": {
+      "type": "object",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the requested notification."
+        },
+        "created": {
+          "type": "string",
+          "description": "The time at which the notification was created."
+        },
+        "notified": {
+          "type": "string",
+          "description": "The time at which the notification was last sent out."
+        },
+        "deleted": {
+          "type": "string",
+          "description": "The time at which a notification has been deleted."
+        },
+        "old": {
+          "$ref": "#/definitions/clairPagedVulnerableAncestries",
+          "description": "The previous vulnerability and a paginated view of the ancestries it affects."
+        },
+        "new": {
+          "$ref": "#/definitions/clairPagedVulnerableAncestries",
+          "description": "The newly updated vulnerability and a paginated view of the ancestries it affects."
+        }
+      }
+    },
+    "PagedVulnerableAncestriesIndexedAncestryName": {
+      "type": "object",
+      "properties": {
+        "index": {
+          "type": "integer",
+          "format": "int32",
+          "description": "The index is an ever increasing number associated with the particular ancestry.\nThis is useful if you're processing notifications, and need to keep track of the progress of paginating the results."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the ancestry."
+        }
+      }
+    },
+    "PostAncestryRequestPostLayer": {
+      "type": "object",
+      "properties": {
+        "hash": {
+          "type": "string",
+          "description": "The hash of the layer."
+        },
+        "path": {
+          "type": "string",
+          "description": "The location of the layer (URL or filepath)."
+        },
+        "headers": {
+          "type": "object",
+          "additionalProperties": {
+            "type": "string"
+          },
+          "description": "Any HTTP Headers that need to be used if requesting a layer over HTTP(S)."
+        }
+      }
+    },
+    "clairClairStatus": {
       "type": "object",
       "properties": {
         "listers": {
@@ -205,194 +263,181 @@
           "items": {
             "type": "string"
           },
-          "title": "listers and detectors are processors implemented in this Clair and used to\nscan ancestries"
+          "description": "The configured list of feature listers used to scan an ancestry."
         },
         "detectors": {
           "type": "array",
           "items": {
             "type": "string"
-          }
+          },
+          "description": "The configured list of namespace detectors used to scan an ancestry."
         },
         "last_update_time": {
           "type": "string",
-          "format": "date-time"
+          "format": "date-time",
+          "description": "The time at which the updater last ran."
         }
       }
     },
-    "clairpbFeature": {
+    "clairFeature": {
      "type": "object",
       "properties": {
         "name": {
-          "type": "string"
+          "type": "string",
+          "description": "The name of the feature."
         },
         "namespace_name": {
-          "type": "string"
+          "type": "string",
+          "description": "The name of the namespace in which the feature is detected."
         },
         "version": {
-          "type": "string"
+          "type": "string",
+          "description": "The specific version of this feature."
         },
         "version_format": {
-          "type": "string"
+          "type": "string",
+          "description": "The format used to parse version numbers for the feature."
         },
         "vulnerabilities": {
           "type": "array",
           "items": {
-            "$ref": "#/definitions/clairpbVulnerability"
-          }
+            "$ref": "#/definitions/clairVulnerability"
+          },
+          "description": "The list of vulnerabilities that affect the feature."
         }
       }
     },
-    "clairpbGetAncestryResponse": {
+    "clairGetAncestryResponse": {
       "type": "object",
       "properties": {
         "ancestry": {
-          "$ref": "#/definitions/clairpbAncestry"
+          "$ref": "#/definitions/GetAncestryResponseAncestry",
+          "description": "The ancestry requested."
         },
         "status": {
-          "$ref": "#/definitions/clairpbClairStatus"
+          "$ref": "#/definitions/clairClairStatus",
+          "description": "The status of Clair at the time of the request."
         }
       }
     },
-    "clairpbGetNotificationResponse": {
       "type": "object",
+    "clairGetNotificationResponse": {
+      "type": "object",
       "properties": {
         "notification": {
-          "$ref": "#/definitions/clairpbNotification"
+          "$ref": "#/definitions/GetNotificationResponseNotification",
+          "description": "The notification as requested."
         }
       }
     },
-    "clairpbIndexedAncestryName": {
-      "type": "object",
-      "properties": {
-        "index": {
-          "type": "integer",
-          "format": "int32",
-          "description": "index is unique to name in all streams simultaneously streamed, increasing\nand larger than all indexes in previous page in same stream."
-        },
-        "name": {
-          "type": "string"
-        }
-      }
-    },
-    "clairpbLayer": {
+    "clairLayer": {
       "type": "object",
       "properties": {
         "hash": {
-          "type": "string"
+          "type": "string",
+          "description": "The sha256 tarsum for the layer."
         }
       }
     },
-    "clairpbNotification": {
-      "type": "object",
-      "properties": {
-        "name": {
-          "type": "string"
+    "clairMarkNotificationAsReadResponse": {
+      "type": "object"
     },
-        "created": {
-          "type": "string"
-        },
-        "notified": {
-          "type": "string"
-        },
-        "deleted": {
-          "type": "string"
-        },
-        "old": {
-          "$ref": "#/definitions/clairpbPagedVulnerableAncestries"
-        },
-        "new": {
-          "$ref": "#/definitions/clairpbPagedVulnerableAncestries"
-        }
-      }
-    },
-    "clairpbPagedVulnerableAncestries": {
+    "clairPagedVulnerableAncestries": {
       "type": "object",
       "properties": {
         "current_page": {
-          "type": "string"
+          "type": "string",
+          "description": "The identifier for the current page."
        },
         "next_page": {
           "type": "string",
-          "description": "if next_page is empty, it signals the end of all pages."
+          "description": "The token used to request the next page.\nThis will be empty when there are no more pages."
         },
         "limit": {
           "type": "integer",
-          "format": "int32"
+          "format": "int32",
+          "description": "The requested maximum number of results per page."
         },
         "vulnerability": {
-          "$ref": "#/definitions/clairpbVulnerability"
+          "$ref": "#/definitions/clairVulnerability",
+          "description": "The vulnerability that affects a given set of ancestries."
        },
         "ancestries": {
           "type": "array",
           "items": {
-            "$ref": "#/definitions/clairpbIndexedAncestryName"
-          }
+            "$ref": "#/definitions/PagedVulnerableAncestriesIndexedAncestryName"
+          },
+          "description": "The ancestries affected by a vulnerability."
         }
       }
     },
-    "clairpbPostAncestryRequest": {
+    "clairPostAncestryRequest": {
       "type": "object",
       "properties": {
         "ancestry_name": {
-          "type": "string"
+          "type": "string",
+          "description": "The name of the ancestry being scanned.\nIf scanning OCI images, this should be the hash of the manifest."
         },
         "format": {
-          "type": "string"
+          "type": "string",
+          "description": "The format of the image being uploaded."
         },
         "layers": {
           "type": "array",
           "items": {
             "$ref": "#/definitions/PostAncestryRequestPostLayer"
-          }
+          },
+          "description": "The layers to be scanned for this particular ancestry."
         }
       }
     },
-    "clairpbPostAncestryResponse": {
+    "clairPostAncestryResponse": {
       "type": "object",
       "properties": {
         "status": {
-          "$ref": "#/definitions/clairpbClairStatus"
+          "$ref": "#/definitions/clairClairStatus",
+          "description": "The status of Clair at the time of the request."
         }
       }
     },
-    "clairpbVulnerability": {
+    "clairVulnerability": {
       "type": "object",
       "properties": {
         "name": {
-          "type": "string"
+          "type": "string",
+          "description": "The name of the vulnerability."
        },
         "namespace_name": {
-          "type": "string"
+          "type": "string",
+          "description": "The name of the namespace in which the vulnerability was detected."
        },
         "description": {
-          "type": "string"
+          "type": "string",
+          "description": "A description of the vulnerability according to the source for the namespace."
        },
         "link": {
-          "type": "string"
+          "type": "string",
+          "description": "A link to the vulnerability according to the source for the namespace."
        },
         "severity": {
-          "type": "string"
+          "type": "string",
+          "description": "How dangerous the vulnerability is."
        },
         "metadata": {
-          "type": "string"
+          "type": "string",
+          "description": "Namespace agnostic metadata about the vulnerability."
        },
         "fixed_by": {
           "type": "string",
-          "description": "fixed_by exists when vulnerability is under feature."
+          "description": "The feature that fixes this vulnerability.\nThis field only exists when a vulnerability is a part of a Feature."
         },
         "affected_versions": {
           "type": "array",
           "items": {
-            "$ref": "#/definitions/clairpbFeature"
+            "$ref": "#/definitions/clairFeature"
           },
-          "description": "affected_versions exists when vulnerability is under notification."
+          "description": "The Features that are affected by the vulnerability.\nThis field only exists when a vulnerability is a part of a Notification."
         }
       }
-    },
-    "protobufEmpty": {
-      "type": "object",
-      "description": "service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.",
-      "title": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:"
     }
   }
 }

api/v3/clairpb/convert.go

@@ -48,7 +48,7 @@ func PagedVulnerableAncestriesFromDatabaseModel(dbVuln *database.PagedVulnerable
 	}
 
 	for index, ancestryName := range dbVuln.Affected {
-		indexedAncestry := IndexedAncestryName{
+		indexedAncestry := PagedVulnerableAncestries_IndexedAncestryName{
 			Name:  ancestryName,
 			Index: int32(index),
 		}
@@ -60,9 +60,9 @@ func PagedVulnerableAncestriesFromDatabaseModel(dbVuln *database.PagedVulnerable
 
 // NotificationFromDatabaseModel converts database notification, old and new
 // vulnerabilities' paged vulnerable ancestries to be api notification.
-func NotificationFromDatabaseModel(dbNotification database.VulnerabilityNotificationWithVulnerable) (*Notification, error) {
+func NotificationFromDatabaseModel(dbNotification database.VulnerabilityNotificationWithVulnerable) (*GetNotificationResponse_Notification, error) {
 	var (
-		noti Notification
+		noti GetNotificationResponse_Notification
 		err  error
 	)
 
@@ -123,8 +123,8 @@ func VulnerabilityWithFixedInFromDatabaseModel(dbVuln database.VulnerabilityWith
 }
 
 // AncestryFromDatabaseModel converts database ancestry to api ancestry.
-func AncestryFromDatabaseModel(dbAncestry database.Ancestry) *Ancestry {
-	ancestry := &Ancestry{
+func AncestryFromDatabaseModel(dbAncestry database.Ancestry) *GetAncestryResponse_Ancestry {
+	ancestry := &GetAncestryResponse_Ancestry{
 		Name: dbAncestry.Name,
 	}
 	for _, layer := range dbAncestry.Layers {
28
api/v3/clairpb/generate-protobuf.sh
Executable file
28
api/v3/clairpb/generate-protobuf.sh
Executable file
@ -0,0 +1,28 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# Copyright 2018 clair authors
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
set -o errexit
|
||||||
|
set -o nounset
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
DOCKER_REPO_ROOT="$GOPATH/src/github.com/coreos/clair"
|
||||||
|
IMAGE=${IMAGE:-"quay.io/coreos/clair-gen-proto"}
|
||||||
|
|
||||||
|
docker run --rm -it \
|
||||||
|
-v "$DOCKER_REPO_ROOT":"$DOCKER_REPO_ROOT" \
|
||||||
|
-w "$DOCKER_REPO_ROOT" \
|
||||||
|
"$IMAGE" \
|
||||||
|
"./api/v3/clairpb/run_in_docker.sh"
|
3
api/v3/clairpb/prototool.yaml
Normal file
3
api/v3/clairpb/prototool.yaml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
protoc_version: 3.5.1
|
||||||
|
protoc_includes:
|
||||||
|
- ../../../vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis
|
39
api/v3/clairpb/run_in_docker.sh
Executable file
39
api/v3/clairpb/run_in_docker.sh
Executable file
@ -0,0 +1,39 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# Copyright 2018 clair authors
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
set -o errexit
|
||||||
|
set -o nounset
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
protoc -I/usr/include -I. \
|
||||||
|
-I"${GOPATH}/src" \
|
||||||
|
-I"${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis" \
|
||||||
|
--go_out=plugins=grpc:. \
|
||||||
|
./api/v3/clairpb/clair.proto
|
||||||
|
|
||||||
|
protoc -I/usr/include -I. \
|
||||||
|
-I"${GOPATH}/src" \
|
||||||
|
-I"${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis" \
|
||||||
|
--grpc-gateway_out=logtostderr=true:. \
|
||||||
|
./api/v3/clairpb/clair.proto
|
||||||
|
|
||||||
|
protoc -I/usr/include -I. \
|
||||||
|
-I"${GOPATH}/src" \
|
||||||
|
-I"${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis" \
|
||||||
|
--swagger_out=logtostderr=true:. \
|
||||||
|
./api/v3/clairpb/clair.proto
|
||||||
|
|
||||||
|
go generate .
|
@ -18,7 +18,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/golang/protobuf/ptypes"
|
"github.com/golang/protobuf/ptypes"
|
||||||
google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
@ -227,7 +226,7 @@ func (s *NotificationServer) GetNotification(ctx context.Context, req *pb.GetNot
|
|||||||
|
|
||||||
// MarkNotificationAsRead implements deleting a notification via the Clair gRPC
|
// MarkNotificationAsRead implements deleting a notification via the Clair gRPC
|
||||||
// service.
|
// service.
|
||||||
func (s *NotificationServer) MarkNotificationAsRead(ctx context.Context, req *pb.MarkNotificationAsReadRequest) (*google_protobuf1.Empty, error) {
|
func (s *NotificationServer) MarkNotificationAsRead(ctx context.Context, req *pb.MarkNotificationAsReadRequest) (*pb.MarkNotificationAsReadResponse, error) {
|
||||||
if req.GetName() == "" {
|
if req.GetName() == "" {
|
||||||
return nil, status.Error(codes.InvalidArgument, "notification name should not be empty")
|
return nil, status.Error(codes.InvalidArgument, "notification name should not be empty")
|
||||||
}
|
}
|
||||||
@ -249,5 +248,5 @@ func (s *NotificationServer) MarkNotificationAsRead(ctx context.Context, req *pb
|
|||||||
return nil, status.Error(codes.Internal, err.Error())
|
return nil, status.Error(codes.Internal, err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
return &google_protobuf1.Empty{}, nil
|
return &pb.MarkNotificationAsReadResponse{}, nil
|
||||||
}
|
}
|
||||||
|
@ -180,7 +180,7 @@ func main() {
|
|||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
// Check for dependencies.
|
// Check for dependencies.
|
||||||
for _, bin := range []string{"git", "bzr", "rpm", "xz"} {
|
for _, bin := range []string{"git", "rpm", "xz"} {
|
||||||
_, err := exec.LookPath(bin)
|
_, err := exec.LookPath(bin)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithError(err).WithField("dependency", bin).Fatal("failed to find dependency")
|
log.WithError(err).WithField("dependency", bin).Fatal("failed to find dependency")
|
||||||
|
61
code-of-conduct.md
Normal file
61
code-of-conduct.md
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
## CoreOS Community Code of Conduct
|
||||||
|
|
||||||
|
### Contributor Code of Conduct
|
||||||
|
|
||||||
|
As contributors and maintainers of this project, and in the interest of
|
||||||
|
fostering an open and welcoming community, we pledge to respect all people who
|
||||||
|
contribute through reporting issues, posting feature requests, updating
|
||||||
|
documentation, submitting pull requests or patches, and other activities.
|
||||||
|
|
||||||
|
We are committed to making participation in this project a harassment-free
|
||||||
|
experience for everyone, regardless of level of experience, gender, gender
|
||||||
|
identity and expression, sexual orientation, disability, personal appearance,
|
||||||
|
body size, race, ethnicity, age, religion, or nationality.
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery
|
||||||
|
* Personal attacks
|
||||||
|
* Trolling or insulting/derogatory comments
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as physical or electronic addresses, without explicit permission
|
||||||
|
* Other unethical or unprofessional conduct.
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or
|
||||||
|
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||||
|
that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
|
||||||
|
project maintainers commit themselves to fairly and consistently applying these
|
||||||
|
principles to every aspect of managing this project. Project maintainers who do
|
||||||
|
not follow or enforce the Code of Conduct may be permanently removed from the
|
||||||
|
project team.
|
||||||
|
|
||||||
|
This code of conduct applies both within project spaces and in public spaces
|
||||||
|
when an individual is representing the project or its community.
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported by contacting a project maintainer, Brandon Philips
|
||||||
|
<brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the Contributor Covenant
|
||||||
|
(http://contributor-covenant.org), version 1.2.0, available at
|
||||||
|
http://contributor-covenant.org/version/1/2/0/
|
||||||
|
|
||||||
|
### CoreOS Events Code of Conduct
|
||||||
|
|
||||||
|
CoreOS events are working conferences intended for professional networking and
|
||||||
|
collaboration in the CoreOS community. Attendees are expected to behave
|
||||||
|
according to professional standards and in accordance with their employer’s
|
||||||
|
policies on appropriate workplace behavior.
|
||||||
|
|
||||||
|
While at CoreOS events or related social networking opportunities, attendees
|
||||||
|
should not engage in discriminatory or offensive speech or actions including
|
||||||
|
but not limited to gender, sexuality, race, age, disability, or religion.
|
||||||
|
Speakers should be especially aware of these concerns.
|
||||||
|
|
||||||
|
CoreOS does not condone any statements by speakers contrary to these standards.
|
||||||
|
CoreOS reserves the right to deny entrance and/or eject from an event (without
|
||||||
|
refund) any individual found to be engaging in discriminatory or offensive
|
||||||
|
speech or actions.
|
||||||
|
|
||||||
|
Please bring any concerns to the immediate attention of designated on-site
|
||||||
|
staff, Brandon Philips <brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
|
@ -8,4 +8,4 @@ sources:
|
|||||||
- https://github.com/coreos/clair
|
- https://github.com/coreos/clair
|
||||||
maintainers:
|
maintainers:
|
||||||
- name: Jimmy Zelinskie
|
- name: Jimmy Zelinskie
|
||||||
- email: jimmy.zelinskie@coreos.com
|
email: jimmy.zelinskie@coreos.com
|
||||||
|
@ -14,7 +14,7 @@ data:
|
|||||||
# PostgreSQL Connection string
|
# PostgreSQL Connection string
|
||||||
# https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
|
# https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
|
||||||
# This should be done using secrets or Vault, but for now this will also work
|
# This should be done using secrets or Vault, but for now this will also work
|
||||||
{{- if .Values.config.postgresURI -}}
|
{{- if .Values.config.postgresURI }}
|
||||||
source: "{{ .Values.config.postgresURI }}"
|
source: "{{ .Values.config.postgresURI }}"
|
||||||
{{ else }}
|
{{ else }}
|
||||||
source: "postgres://{{ .Values.postgresql.postgresUser }}:{{ .Values.postgresql.postgresPassword }}@{{ template "postgresql.fullname" . }}:5432/{{ .Values.postgresql.postgresDatabase }}?sslmode=disable"
|
source: "postgres://{{ .Values.postgresql.postgresUser }}:{{ .Values.postgresql.postgresPassword }}@{{ template "postgresql.fullname" . }}:5432/{{ .Values.postgresql.postgresDatabase }}?sslmode=disable"
|
||||||
|
@ -30,11 +30,11 @@ ingress:
|
|||||||
# - chart-example.local
|
# - chart-example.local
|
||||||
resources:
|
resources:
|
||||||
limits:
|
limits:
|
||||||
cpu: 100m
|
cpu: 200m
|
||||||
memory: 1Gi
|
memory: 1500Mi
|
||||||
requests:
|
requests:
|
||||||
cpu: 100m
|
cpu: 100m
|
||||||
memory: 128Mi
|
memory: 500Mi
|
||||||
config:
|
config:
|
||||||
# postgresURI: "postgres://user:password@host:5432/postgres?sslmode=disable"
|
# postgresURI: "postgres://user:password@host:5432/postgres?sslmode=disable"
|
||||||
paginationKey: "XxoPtCUzrUv4JV5dS+yQ+MdW7yLEJnRMwigVY/bpgtQ="
|
paginationKey: "XxoPtCUzrUv4JV5dS+yQ+MdW7yLEJnRMwigVY/bpgtQ="
|
||||||
@ -59,6 +59,9 @@ config:
|
|||||||
# Configuration values for the postgresql dependency.
|
# Configuration values for the postgresql dependency.
|
||||||
# ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
|
# ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
|
||||||
postgresql:
|
postgresql:
|
||||||
|
# The dependant Postgres chart can be disabled, to connect to
|
||||||
|
# an existing database by defining config.postgresURI
|
||||||
|
enabled: true
|
||||||
cpu: 1000m
|
cpu: 1000m
|
||||||
memory: 1Gi
|
memory: 1Gi
|
||||||
# These values are hardcoded until Helm supports secrets.
|
# These values are hardcoded until Helm supports secrets.
|
||||||
|
@ -71,8 +71,8 @@ type Namespace struct {
|
|||||||
// determined.
|
// determined.
|
||||||
//
|
//
|
||||||
// e.g. Name: OpenSSL, Version: 1.0, VersionFormat: dpkg.
|
// e.g. Name: OpenSSL, Version: 1.0, VersionFormat: dpkg.
|
||||||
// dpkg implies the installer package manager but the namespace (might be
|
// dpkg is the version format of the installer package manager, which in this
|
||||||
// debian:7, debian:8, ...) could not be determined.
|
// case could be dpkg or apk.
|
||||||
type Feature struct {
|
type Feature struct {
|
||||||
Name string
|
Name string
|
||||||
Version string
|
Version string
|
||||||
|
@ -45,4 +45,5 @@ var UbuntuReleasesMapping = map[string]string{
|
|||||||
"yakkety": "16.10",
|
"yakkety": "16.10",
|
||||||
"zesty": "17.04",
|
"zesty": "17.04",
|
||||||
"artful": "17.10",
|
"artful": "17.10",
|
||||||
|
"bionic": "18.04",
|
||||||
}
|
}
|
||||||
|
@ -30,7 +30,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
oracleReleaseRegexp = regexp.MustCompile(`(?P<os>[^\s]*) (Linux Server release) (?P<version>[\d]+)`)
|
oracleReleaseRegexp = regexp.MustCompile(`(?P<os>Oracle) (Linux Server release) (?P<version>[\d]+)`)
|
||||||
centosReleaseRegexp = regexp.MustCompile(`(?P<os>[^\s]*) (Linux release|release) (?P<version>[\d]+)`)
|
centosReleaseRegexp = regexp.MustCompile(`(?P<os>[^\s]*) (Linux release|release) (?P<version>[\d]+)`)
|
||||||
redhatReleaseRegexp = regexp.MustCompile(`(?P<os>Red Hat Enterprise Linux) (Client release|Server release|Workstation release) (?P<version>[\d]+)`)
|
redhatReleaseRegexp = regexp.MustCompile(`(?P<os>Red Hat Enterprise Linux) (Client release|Server release|Workstation release) (?P<version>[\d]+)`)
|
||||||
)
|
)
|
||||||
|
@ -42,6 +42,12 @@ func TestDetector(t *testing.T) {
|
|||||||
"etc/centos-release": []byte(`CentOS release 6.6 (Final)`),
|
"etc/centos-release": []byte(`CentOS release 6.6 (Final)`),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
ExpectedNamespace: &database.Namespace{Name: "centos:7"},
|
||||||
|
Files: tarutil.FilesMap{
|
||||||
|
"etc/redhat-release": []byte(`Red Hat Enterprise Linux Server release 7.2 (Maipo)`),
|
||||||
|
},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
ExpectedNamespace: &database.Namespace{Name: "centos:7"},
|
ExpectedNamespace: &database.Namespace{Name: "centos:7"},
|
||||||
Files: tarutil.FilesMap{
|
Files: tarutil.FilesMap{
|
||||||
|
@ -58,19 +58,20 @@ func (u *updater) Update(db database.Datastore) (resp vulnsrc.UpdateResponse, er
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ask the database for the latest commit we successfully applied.
|
// Open a database transaction.
|
||||||
var dbCommit string
|
|
||||||
tx, err := db.Begin()
|
tx, err := db.Begin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer tx.Rollback()
|
defer tx.Rollback()
|
||||||
|
|
||||||
dbCommit, ok, err := tx.FindKeyValue(updaterFlag)
|
// Ask the database for the latest commit we successfully applied.
|
||||||
|
var dbCommit string
|
||||||
|
var ok bool
|
||||||
|
dbCommit, ok, err = tx.FindKeyValue(updaterFlag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !ok {
|
if !ok {
|
||||||
dbCommit = ""
|
dbCommit = ""
|
||||||
}
|
}
|
||||||
@ -193,7 +194,7 @@ func (u *updater) pullRepository() (commit string, err error) {
|
|||||||
cmd.Dir = u.repositoryLocalPath
|
cmd.Dir = u.repositoryLocalPath
|
||||||
if out, err := cmd.CombinedOutput(); err != nil {
|
if out, err := cmd.CombinedOutput(); err != nil {
|
||||||
u.Clean()
|
u.Clean()
|
||||||
log.WithError(err).WithField("output", string(out)).Error("could not pull alpine-secdb repository")
|
log.WithError(err).WithField("output", string(out)).Error("could not clone alpine-secdb repository")
|
||||||
return "", commonerr.ErrCouldNotDownload
|
return "", commonerr.ErrCouldNotDownload
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -18,14 +18,13 @@ package ubuntu
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
@ -38,8 +37,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
trackerURI = "https://launchpad.net/ubuntu-cve-tracker"
|
trackerURI = "https://git.launchpad.net/ubuntu-cve-tracker"
|
||||||
trackerRepository = "https://launchpad.net/ubuntu-cve-tracker"
|
|
||||||
updaterFlag = "ubuntuUpdater"
|
updaterFlag = "ubuntuUpdater"
|
||||||
cveURL = "http://people.ubuntu.com/~ubuntu-security/cve/%s"
|
cveURL = "http://people.ubuntu.com/~ubuntu-security/cve/%s"
|
||||||
)
|
)
|
||||||
@ -74,6 +72,8 @@ var (
|
|||||||
|
|
||||||
affectsCaptureRegexp = regexp.MustCompile(`(?P<release>.*)_(?P<package>.*): (?P<status>[^\s]*)( \(+(?P<note>[^()]*)\)+)?`)
|
affectsCaptureRegexp = regexp.MustCompile(`(?P<release>.*)_(?P<package>.*): (?P<status>[^\s]*)( \(+(?P<note>[^()]*)\)+)?`)
|
||||||
affectsCaptureRegexpNames = affectsCaptureRegexp.SubexpNames()
|
affectsCaptureRegexpNames = affectsCaptureRegexp.SubexpNames()
|
||||||
|
|
||||||
|
errUnknownRelease = errors.New("found packages with CVEs for a verison of Ubuntu that Clair doesn't know about")
|
||||||
)
|
)
|
||||||
|
|
||||||
type updater struct {
|
type updater struct {
|
||||||
@ -84,211 +84,179 @@ func init() {
|
|||||||
vulnsrc.RegisterUpdater("ubuntu", &updater{})
|
vulnsrc.RegisterUpdater("ubuntu", &updater{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *updater) Update(datastore database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
|
func (u *updater) Update(db database.Datastore) (resp vulnsrc.UpdateResponse, err error) {
|
||||||
log.WithField("package", "Ubuntu").Info("Start fetching vulnerabilities")
|
log.WithField("package", "Ubuntu").Info("Start fetching vulnerabilities")
|
||||||
|
|
||||||
// Pull the bzr repository.
|
// Pull the master branch.
|
||||||
if err = u.pullRepository(); err != nil {
|
var commit string
|
||||||
return resp, err
|
commit, err = u.pullRepository()
|
||||||
}
|
|
||||||
|
|
||||||
// Get revision number.
|
|
||||||
revisionNumber, err := getRevisionNumber(u.repositoryLocalPath)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
|
||||||
tx, err := datastore.Begin()
|
// Open a database transaction.
|
||||||
|
tx, err := db.Begin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
// Get the latest revision number we successfully applied in the database.
|
// Ask the database for the latest commit we successfully applied.
|
||||||
dbRevisionNumber, ok, err := tx.FindKeyValue("ubuntuUpdater")
|
var dbCommit string
|
||||||
|
var ok bool
|
||||||
|
dbCommit, ok, err = tx.FindKeyValue(updaterFlag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return resp, err
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := tx.Rollback(); err != nil {
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !ok {
|
if !ok {
|
||||||
dbRevisionNumber = ""
|
dbCommit = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the updaterFlag to equal the commit processed.
|
||||||
|
resp.FlagName = updaterFlag
|
||||||
|
resp.FlagValue = commit
|
||||||
|
|
||||||
|
// Short-circuit if there have been no updates.
|
||||||
|
if commit == dbCommit {
|
||||||
|
log.WithField("package", "ubuntu").Debug("no update")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the list of vulnerabilities that we have to update.
|
// Get the list of vulnerabilities that we have to update.
|
||||||
modifiedCVE, err := collectModifiedVulnerabilities(revisionNumber, dbRevisionNumber, u.repositoryLocalPath)
|
var modifiedCVE map[string]struct{}
|
||||||
|
modifiedCVE, err = collectModifiedVulnerabilities(commit, dbCommit, u.repositoryLocalPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return resp, err
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
notes := make(map[string]struct{})
|
// Get the list of vulnerabilities.
|
||||||
for cvePath := range modifiedCVE {
|
resp.Vulnerabilities, resp.Notes, err = collectVulnerabilitiesAndNotes(u.repositoryLocalPath, modifiedCVE)
|
||||||
// Open the CVE file.
|
|
||||||
file, err := os.Open(u.repositoryLocalPath + "/" + cvePath)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// This can happen when a file is modified and then moved in another
|
return
|
||||||
// commit.
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse the vulnerability.
|
// The only notes we take are if we encountered unknown Ubuntu release.
|
||||||
v, unknownReleases, err := parseUbuntuCVE(file)
|
// We don't want the commit to be considered as managed in that case.
|
||||||
if err != nil {
|
if len(resp.Notes) != 0 {
|
||||||
return resp, err
|
resp.FlagValue = dbCommit
|
||||||
}
|
|
||||||
|
|
||||||
// Add the vulnerability to the response.
|
|
||||||
resp.Vulnerabilities = append(resp.Vulnerabilities, v)
|
|
||||||
|
|
||||||
// Store any unknown releases as notes.
|
|
||||||
for k := range unknownReleases {
|
|
||||||
note := fmt.Sprintf("Ubuntu %s is not mapped to any version number (eg. trusty->14.04). Please update me.", k)
|
|
||||||
notes[note] = struct{}{}
|
|
||||||
|
|
||||||
// If we encountered unknown Ubuntu release, we don't want the revision
|
|
||||||
// number to be considered as managed.
|
|
||||||
dbRevisionNumberInt, _ := strconv.Atoi(dbRevisionNumber)
|
|
||||||
revisionNumber = dbRevisionNumberInt
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close the file manually.
|
|
||||||
//
|
|
||||||
// We do that instead of using defer because defer works on a function-level scope.
|
|
||||||
// We would open many files and close them all at once at the end of the function,
|
|
||||||
// which could lead to exceed fs.file-max.
|
|
||||||
file.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add flag and notes.
|
|
||||||
resp.FlagName = updaterFlag
|
|
||||||
resp.FlagValue = strconv.Itoa(revisionNumber)
|
|
||||||
for note := range notes {
|
|
||||||
resp.Notes = append(resp.Notes, note)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *updater) Clean() {
|
func (u *updater) Clean() {
|
||||||
|
if u.repositoryLocalPath != "" {
|
||||||
os.RemoveAll(u.repositoryLocalPath)
|
os.RemoveAll(u.repositoryLocalPath)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *updater) pullRepository() (err error) {
|
func (u *updater) pullRepository() (commit string, err error) {
|
||||||
// Determine whether we should branch or pull.
|
// Determine whether we should branch or pull.
|
||||||
if _, pathExists := os.Stat(u.repositoryLocalPath); u.repositoryLocalPath == "" || os.IsNotExist(pathExists) {
|
if _, pathExists := os.Stat(u.repositoryLocalPath); u.repositoryLocalPath == "" || os.IsNotExist(pathExists) {
|
||||||
// Create a temporary folder to store the repository.
|
// Create a temporary folder to store the repository.
|
||||||
if u.repositoryLocalPath, err = ioutil.TempDir(os.TempDir(), "ubuntu-cve-tracker"); err != nil {
|
if u.repositoryLocalPath, err = ioutil.TempDir(os.TempDir(), "ubuntu-cve-tracker"); err != nil {
|
||||||
return vulnsrc.ErrFilesystem
|
return "", vulnsrc.ErrFilesystem
|
||||||
}
|
}
|
||||||
|
cmd := exec.Command("git", "clone", trackerURI, ".")
|
||||||
// Branch repository.
|
|
||||||
cmd := exec.Command("bzr", "branch", "--use-existing-dir", trackerRepository, ".")
|
|
||||||
cmd.Dir = u.repositoryLocalPath
|
cmd.Dir = u.repositoryLocalPath
|
||||||
if out, err := cmd.CombinedOutput(); err != nil {
|
if out, err := cmd.CombinedOutput(); err != nil {
|
||||||
log.WithError(err).WithField("output", string(out)).Error("could not branch Ubuntu repository")
|
u.Clean()
|
||||||
return commonerr.ErrCouldNotDownload
|
log.WithError(err).WithField("output", string(out)).Error("could not clone ubuntu-cve-tracker repository")
|
||||||
|
return "", commonerr.ErrCouldNotDownload
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
return nil
|
// The repository already exists and it needs to be refreshed via a pull.
|
||||||
}
|
cmd := exec.Command("git", "pull")
|
||||||
|
|
||||||
// Pull repository.
|
|
||||||
cmd := exec.Command("bzr", "pull", "--overwrite")
|
|
||||||
cmd.Dir = u.repositoryLocalPath
|
cmd.Dir = u.repositoryLocalPath
|
||||||
if out, err := cmd.CombinedOutput(); err != nil {
|
if _, err := cmd.CombinedOutput(); err != nil {
|
||||||
os.RemoveAll(u.repositoryLocalPath)
|
return "", vulnsrc.ErrGitFailure
|
||||||
log.WithError(err).WithField("output", string(out)).Error("could not pull Ubuntu repository")
|
}
|
||||||
return commonerr.ErrCouldNotDownload
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
cmd := exec.Command("git", "rev-parse", "HEAD")
|
||||||
}
|
cmd.Dir = u.repositoryLocalPath
|
||||||
|
|
||||||
func getRevisionNumber(pathToRepo string) (int, error) {
|
|
||||||
cmd := exec.Command("bzr", "revno")
|
|
||||||
cmd.Dir = pathToRepo
|
|
||||||
out, err := cmd.CombinedOutput()
|
out, err := cmd.CombinedOutput()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithError(err).WithField("output", string(out)).Error("could not get Ubuntu repository's revision number")
|
return "", vulnsrc.ErrGitFailure
|
||||||
return 0, commonerr.ErrCouldNotDownload
|
|
||||||
}
|
}
|
||||||
|
|
||||||
revno, err := strconv.Atoi(strings.TrimSpace(string(out)))
|
commit = strings.TrimSpace(string(out))
|
||||||
if err != nil {
|
return
|
||||||
log.WithError(err).WithField("output", string(out)).Error("could not parse Ubuntu repository's revision number")
|
|
||||||
return 0, commonerr.ErrCouldNotDownload
|
|
||||||
}
|
|
||||||
|
|
||||||
return revno, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func collectModifiedVulnerabilities(revision int, dbRevision, repositoryLocalPath string) (map[string]struct{}, error) {
|
func collectModifiedVulnerabilities(commit, dbCommit, repositoryLocalPath string) (map[string]struct{}, error) {
|
||||||
modifiedCVE := make(map[string]struct{})
|
modifiedCVE := make(map[string]struct{})
|
||||||
|
for _, dirName := range []string{"active", "retired"} {
|
||||||
|
if err := processDirectory(repositoryLocalPath, dirName, modifiedCVE); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return modifiedCVE, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Handle a brand new database.
|
func processDirectory(repositoryLocalPath, dirName string, modifiedCVE map[string]struct{}) error {
|
||||||
if dbRevision == "" {
|
// Open the directory.
|
||||||
for _, folder := range []string{"active", "retired"} {
|
d, err := os.Open(repositoryLocalPath + "/" + dirName)
|
||||||
d, err := os.Open(repositoryLocalPath + "/" + folder)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithError(err).Error("could not open Ubuntu vulnerabilities repository's folder")
|
log.WithError(err).Error("could not open Ubuntu vulnerabilities repository's folder")
|
||||||
return nil, vulnsrc.ErrFilesystem
|
return vulnsrc.ErrFilesystem
|
||||||
}
|
}
|
||||||
|
defer d.Close()
|
||||||
|
|
||||||
// Get the FileInfo of all the files in the directory.
|
// Get the FileInfo of all the files in the directory.
|
||||||
names, err := d.Readdirnames(-1)
|
names, err := d.Readdirnames(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithError(err).Error("could not read Ubuntu vulnerabilities repository's folder")
|
log.WithError(err).Error("could not read Ubuntu vulnerabilities repository's folder")
|
||||||
return nil, vulnsrc.ErrFilesystem
|
return vulnsrc.ErrFilesystem
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add the vulnerabilities to the list.
|
// Add the vulnerabilities to the list.
|
||||||
for _, name := range names {
|
for _, name := range names {
|
||||||
if strings.HasPrefix(name, "CVE-") {
|
if strings.HasPrefix(name, "CVE-") {
|
||||||
modifiedCVE[folder+"/"+name] = struct{}{}
|
modifiedCVE[dirName+"/"+name] = struct{}{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close the file manually.
|
return nil
|
||||||
//
|
}
|
||||||
// We do that instead of using defer because defer works on a function-level scope.
|
|
||||||
// We would open many files and close them all at once at the end of the function,
|
|
||||||
// which could lead to exceed fs.file-max.
|
|
||||||
d.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
return modifiedCVE, nil
|
func collectVulnerabilitiesAndNotes(repositoryLocalPath string, modifiedCVE map[string]struct{}) ([]database.VulnerabilityWithAffected, []string, error) {
|
||||||
}
|
vulns := make([]database.VulnerabilityWithAffected, 0)
|
||||||
|
noteSet := make(map[string]struct{})
|
||||||
|
|
||||||
// Handle an up to date database.
|
for cvePath := range modifiedCVE {
|
||||||
dbRevisionInt, _ := strconv.Atoi(dbRevision)
|
// Open the CVE file.
|
||||||
if revision == dbRevisionInt {
|
file, err := os.Open(repositoryLocalPath + "/" + cvePath)
|
||||||
log.WithField("package", "Ubuntu").Debug("no update")
|
|
||||||
return modifiedCVE, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle a database that needs upgrading.
|
|
||||||
cmd := exec.Command("bzr", "log", "--verbose", "-r"+strconv.Itoa(dbRevisionInt+1)+"..", "-n0")
|
|
||||||
cmd.Dir = repositoryLocalPath
|
|
||||||
out, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithError(err).WithField("output", string(out)).Error("could not get Ubuntu vulnerabilities repository logs")
|
// This can happen when a file is modified then moved in another commit.
|
||||||
return nil, commonerr.ErrCouldNotDownload
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
scanner := bufio.NewScanner(bytes.NewReader(out))
|
// Parse the vulnerability.
|
||||||
for scanner.Scan() {
|
v, unknownReleases, err := parseUbuntuCVE(file)
|
||||||
text := strings.TrimSpace(scanner.Text())
|
if err != nil {
|
||||||
if strings.Contains(text, "CVE-") && (strings.HasPrefix(text, "active/") || strings.HasPrefix(text, "retired/")) {
|
file.Close()
|
||||||
if strings.Contains(text, " => ") {
|
return nil, nil, err
|
||||||
text = text[strings.Index(text, " => ")+4:]
|
|
||||||
}
|
|
||||||
modifiedCVE[text] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return modifiedCVE, nil
|
// Add the vulnerability to the response.
|
||||||
|
vulns = append(vulns, v)
|
||||||
|
|
||||||
|
// Store any unknown releases as notes.
|
||||||
|
for k := range unknownReleases {
|
||||||
|
noteSet[errUnknownRelease.Error()+": "+k] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
file.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert the note set into a slice.
|
||||||
|
var notes []string
|
||||||
|
for note := range noteSet {
|
||||||
|
notes = append(notes, note)
|
||||||
|
}
|
||||||
|
|
||||||
|
return vulns, notes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseUbuntuCVE(fileContent io.Reader) (vulnerability database.VulnerabilityWithAffected, unknownReleases map[string]struct{}, err error) {
|
func parseUbuntuCVE(fileContent io.Reader) (vulnerability database.VulnerabilityWithAffected, unknownReleases map[string]struct{}, err error) {
|
||||||
|
Loading…
Reference in New Issue
Block a user