mirror of https://github.com/aquasecurity/kube-bench.git synced 2025-05-29 12:18:55 +00:00
Carter Williamson 2025-04-05 04:25:06 +01:00 committed by GitHub
commit 8f0ddd51c1
24 changed files with 1643 additions and 21 deletions

.gitignore

@@ -5,6 +5,7 @@ dist
.vscode/
hack/kind.test.yaml
coverage.txt
venv/
.idea/
@@ -13,4 +14,4 @@ coverage.txt
thumbs.db
/kubeconfig.kube-bench
/test.data
*.iml
*.iml


@@ -449,6 +449,11 @@ target_mapping:
- "controlplane"
- "policies"
- "managedservices"
"gke-stig-kubernetes-v2r2":
- "node"
- "controlplane"
- "policies"
- "managedservices"
"tkgi-1.2.53":
- "master"
- "etcd"


@@ -0,0 +1,18 @@
---
## Version-specific settings that override the values in cfg/config.yaml
## These settings are required if you are using the --gscc option to report findings to GCP Security Command Center
## GCP Organization ID is required.
GCP_SCC_SOURCE_ID: "<GCP_SCC_SOURCE_ID>"
## GCP project ID is required.
GCP_PROJECT_ID: "<GCP_PROJECT_ID>"
## GCP region is required.
GCP_REGION: "<GCP_REGION>"
## GKE Cluster Name is required.
CLUSTER_NAME: "<CLUSTER_NAME>"
node:
kubelet:
confs:
- "/home/kubernetes/kubelet-config.yaml"
- "/etc/kubernetes/kubelet-config.yaml"


@@ -0,0 +1,51 @@
---
controls:
version: "gke-stig-kubernetes-v1r6"
id: 2
text: "Control Plane Configuration"
type: "controlplane"
groups:
- id: 2.1
text: "DISA Category Code I - API Server Security"
checks:
- id: V-242400
text: "The Kubernetes API server must have Alpha APIs disabled"
type: "manual"
remediation: |
Check the release channel using the GCP gcloud CLI.
gcloud container clusters describe <ClusterName> --region <RegionName> --format json | jq -r '.releaseChannel.channel'
This should be set to "STABLE". Any "Alpha" clusters will need to be rebuilt on the STABLE release channel.
- id: 2.2
text: "DISA Category Code II - Controller Manager Security"
checks:
- id: V-242443
text: " Kubernetes must contain the latest updates as authorized by IAVMs, CTOs, DTMs, and STIGs. (Manual)"
type: "manual"
remediation: |
Upgrade Kubernetes to a supported version.
- id: V-242461
text: "Kubernetes API Server audit logs must be enabled. (Manual)"
type: "manual"
remediation: |
Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
Ref: https://cloud.google.com/kubernetes-engine/docs/how-to/view-logs#control-plane-access-logs
- id: V-242462
text: "The Kubernetes API Server must be set to audit log max size | Component of GKE Control Plane"
type: "skip"
- id: V-242463
text: "The Kubernetes API Server must be set to audit log maximum backup | Component of GKE Control Plane"
type: "skip"
- id: V-242464
text: "The Kubernetes API Server audit log retention must be set | Component of GKE Control Plane"
type: "skip"
- id: V-242394
text: "The Kubernetes API Server audit log path must be set | Component of GKE Control Plane"
type: "skip"


@@ -0,0 +1,245 @@
---
controls:
version: "gke-stig-kubernetes-v2r2"
id: 5
text: "Managed Services"
type: "managedservices"
groups:
- id: 5.1
text: "DISA Category Code I"
checks:
- id: V-242386
text: "The Kubernetes API server must have the insecure port flag disabled | Component of GKE Control Plane"
type: "skip"
- id: V-242388
text: "The Kubernetes API server must have the insecure bind address not set | Component of GKE Control Plane"
type: "skip"
- id: V-242436
text: "The Kubernetes API server must have the ValidatingAdmissionWebhook enabled | Component of GKE Control Plane"
type: "skip"
- id: V-242437
text: "[Deprecated] Kubernetes must have a pod security policy set. policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+"
type: "skip"
- id: V-245542
text: "Kubernetes API Server must disable basic authentication to protect information in transit | Component of GKE Control Plane"
type: "skip"
- id: 5.2
text: "DISA Category Code II"
checks:
- id: V-242376
text: "The Kubernetes Controller Manager must use TLS 1.2, at a minimum | Component of GKE Control Plane"
type: "skip"
- id: V-242377
text: "The Kubernetes Scheduler must use TLS 1.2, at a minimum | Component of GKE Control Plane"
type: "skip"
- id: V-242378
text: "The Kubernetes API Server must use TLS 1.2, at a minimum | Component of GKE Control Plane"
type: "skip"
- id: V-242379
text: "The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of GKE Control Plane"
type: "skip"
- id: V-242380
text: "The Kubernetes API Server must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of GKE Control Plane"
type: "skip"
- id: V-242382
text: "The Kubernetes API Server must enable Node,RBAC as the authorization mode | Component of GKE Control Plane"
type: "skip"
- id: V-242384
text: "The Kubernetes Scheduler must have secure binding | Component of GKE Control Plane"
type: "skip"
- id: V-242385
text: "The Kubernetes Controller Manager must have secure binding | Component of GKE Control Plane"
type: "skip"
- id: V-242389
text: "The Kubernetes API server must have the secure port set | Component of GKE Control Plane"
type: "skip"
- id: V-242401
text: "The Kubernetes API Server must have an audit policy set | Component of GKE Control Plane"
type: "skip"
- id: V-242402
text: "The Kubernetes API Server must have an audit log path set | Component of GKE Control Plane"
type: "skip"
- id: V-242403
text: "Kubernetes API Server must generate audit records | Component of GKE Control Plane"
type: "skip"
- id: V-242405
text: "The Kubernetes manifests must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242408
text: "The Kubernetes manifests must have least privileges | Component of GKE Control Plane"
type: "skip"
- id: V-242409
text: "Kubernetes Controller Manager must disable profiling | Component of GKE Control Plane"
type: "skip"
- id: V-242410
text: "The Kubernetes API Server must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane"
type: "skip"
- id: V-242411
text: "The Kubernetes Scheduler must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane"
type: "skip"
- id: V-242412
text: "The Kubernetes Controllers must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane"
type: "skip"
- id: V-242413
text: "The Kubernetes etcd must enforce PPS that adhere to PPSM CAL | Component of GKE Control Plane"
type: "skip"
- id: V-242418
text: "The Kubernetes API server must use approved cipher suites | Component of GKE Control Plane"
type: "skip"
- id: V-242419
text: "Kubernetes API Server must have the SSL Certificate Authority set | Component of GKE Control Plane"
type: "skip"
- id: V-242421
text: "Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of GKE Control Plane"
type: "skip"
- id: V-242422
text: "Kubernetes API Server must have a certificate for communication | Component of GKE Control Plane"
type: "skip"
- id: V-242423
text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane"
type: "skip"
- id: V-242424
text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane"
type: "skip"
- id: V-242425
text: "Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of GKE Control Plane"
type: "skip"
- id: V-242426
text: "Kubernetes etcd must enable client authentication to secure service | Component of GKE Control Plane"
type: "skip"
- id: V-242427
text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane"
type: "skip"
- id: V-242428
text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane"
type: "skip"
- id: V-242429
text: "Kubernetes etcd must have the SSL Certificate Authority set | Component of GKE Control Plane"
type: "skip"
- id: V-242430
text: "Kubernetes etcd must have a certificate for communication | Component of GKE Control Plane"
type: "skip"
- id: V-242431
text: "Kubernetes etcd must have a key file for secure communication | Component of GKE Control Plane"
type: "skip"
- id: V-242432
text: "Kubernetes etcd must have peer-cert-file set for secure communication | Component of GKE Control Plane"
type: "skip"
- id: V-242433
text: "Kubernetes etcd must have a peer-key-file set for secure communication | Component of GKE Control Plane"
type: "skip"
- id: V-242438
text: "Kubernetes API Server must configure timeouts to limit attack surface | Component of GKE Control Plane"
type: "skip"
- id: V-242444
text: "The Kubernetes component manifests must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242445
text: "The Kubernetes component etcd must be owned by etcd | Component of GKE Control Plane"
type: "skip"
- id: V-242446
text: "The Kubernetes conf files must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242447
text: "The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242448
text: "The Kubernetes Kube Proxy must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242449
text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242450
text: "The Kubernetes Kubelet certificate authority must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242451
text: "The Kubernetes component PKI must be owned by root | Component of GKE Control Plane"
type: "skip"
- id: V-242459
text: "The Kubernetes etcd must have file permissions set to 644 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242460
text: "The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242466
text: "The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242467
text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of GKE Control Plane"
type: "skip"
- id: V-242468
text: "The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of GKE Control Plane"
type: "skip"
- id: V-245543
text: "Kubernetes API Server must disable token authentication to protect information in transit | Component of GKE Control Plane"
type: "skip"
- id: V-245544
text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of GKE Control Plane"
type: "skip"
- id: V-254800
text: "Kubernetes must have a Pod Security Admission control file configured. | Component of GKE Control Plane"
type: "skip"
- id: V-254801
text: "Kubernetes must enable PodSecurity admission controller on static pods and Kubelets. | Component of GKE Control Plane"
type: "skip"
- id: V-242394
text: "Kubernetes Worker Nodes must not have the sshd service enabled | Component of GKE Control Plane"
type: "skip"


@@ -0,0 +1,620 @@
---
controls:
version: "gke-stig-kubernetes-v1r6"
id: 3
text: "Node Configuration"
type: "node"
groups:
- id: 3.1
text: "DISA Category Code I"
checks:
- id: V-242387 # CIS 3.2.4
text: "The Kubernetes Kubelet must have the read-only port flag disabled"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
severity: high
tests:
test_items:
- flag: "--read-only-port"
path: '{.readOnlyPort}'
set: false
- path: '{.readOnlyPort}'
compare:
op: eq
value: 0
bin_op: or
remediation: |
If modifying the Kubelet config file, edit the kubelet-config.json file
$kubeletconf and set the below parameter to 0
"readOnlyPort": 0
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--read-only-port=0
For each remediation:
Based on your system, restart the kubelet service and check status
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: V-242391 # CIS 3.2.1
text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--anonymous-auth"
path: '{.authentication.anonymous.enabled}'
compare:
op: eq
value: false
remediation: |
Remediation Method 1:
If configuring via the Kubelet config file, you first need to locate the file.
To do this, SSH to each node and execute the following command to find the kubelet
process:
ps -ef | grep kubelet
The output of the above command provides details of the active kubelet process, from
which we can see the location of the configuration file provided to the kubelet service
with the --config argument. The file can be viewed with a command such as more or
less, like so:
sudo less $kubeletconf
Disable Anonymous Authentication by setting the following parameter:
"authentication": { "anonymous": { "enabled": false } }
Remediation Method 2:
If using executable arguments, edit the kubelet service file on each worker node and
ensure the below parameters are part of the KUBELET_ARGS variable string.
For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
Bottlerocket AMIs, then this file can be found at
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
you may need to look up documentation for your chosen operating system to determine
which service manager is configured:
--anonymous-auth=false
For Both Remediation Steps:
Based on your system, restart the kubelet service and check the service status.
The following example is for operating systems using systemd, such as the Amazon
EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
command. If systemctl is not available then you will need to look up documentation for
your chosen operating system to determine which service manager is configured:
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: V-242392 # CIS 3.2.2
text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --authorization-mode
path: '{.authorization.mode}'
compare:
op: nothave
value: AlwaysAllow
remediation: |
Remediation Method 1:
If configuring via the Kubelet config file, you first need to locate the file.
To do this, SSH to each node and execute the following command to find the kubelet
process:
ps -ef | grep kubelet
The output of the above command provides details of the active kubelet process, from
which we can see the location of the configuration file provided to the kubelet service
with the --config argument. The file can be viewed with a command such as more or
less, like so:
sudo less /path/to/kubelet-config.json
Enable Webhook Authentication by setting the following parameter:
"authentication": { "webhook": { "enabled": true } }
Next, set the Authorization Mode to Webhook by setting the following parameter:
"authorization": { "mode": "Webhook }
Finer detail of the authentication and authorization fields can be found in the
Kubelet Configuration documentation (https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).
Remediation Method 2:
If using executable arguments, edit the kubelet service file on each worker node and
ensure the below parameters are part of the KUBELET_ARGS variable string.
For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
Bottlerocket AMIs, then this file can be found at
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
you may need to look up documentation for your chosen operating system to determine
which service manager is configured:
--authentication-token-webhook
--authorization-mode=Webhook
For Both Remediation Steps:
Based on your system, restart the kubelet service and check the service status.
The following example is for operating systems using systemd, such as the Amazon
EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
command. If systemctl is not available then you will need to look up documentation for
your chosen operating system to determine which service manager is configured:
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: V-242395
text: "Kubernetes dashboard must not be enabled."
audit: "kubectl get pods --all-namespaces -l k8s-app=kubernetes-dashboard"
tests:
test_items:
- flag: "k8s-app=kubernetes-dashboard"
set: false
remediation: |
Delete the Kubernetes dashboard deployment with the following command:
kubectl delete deployment kubernetes-dashboard --namespace=kube-system
scored: true
- id: V-242396
text: "Kubernetes Kubectl cp command must give expected access and results. (Manual)"
type: "manual"
# audit: "kubectl version --client --output=yaml | grep 'gitVersion' | sed -E 's/.*v([0-9]+)\\.([0-9]+)\\.([0-9]+)/major=\\1\\nminor=\\2\\npatch=\\3/'"
# tests:
# bin_op: or
# test_items:
# - flag: "major="
# compare:
# op: gte
# value: 1
# - flag: "minor="
# compare:
# op: gte
# value: 12
# - flag: "patch="
# compare:
# op: gte
# value: 9
remediation: |
If any Worker nodes are not using kubectl version 1.12.9 or newer, this is a finding.
Upgrade the Master and Worker nodes to the latest version of kubectl.
scored: false
- id: V-242397
text: "The Kubernetes kubelet staticPodPath must not enable static pods."
audit: "ps -ef | grep $kubeletbin | grep -- --config"
tests:
bin_op: or
test_items:
- flag: "staticPodPath"
set: false
- path: '{.staticPodPath}'
set: false
remediation: |
Edit the Kubernetes kubelet configuration file.
Remove the setting "staticPodPath".
Restart the kubelet service using:
systemctl daemon-reload && systemctl restart kubelet
scored: true
- id: V-242398
text: "Kubernetes DynamicAuditing must not be enabled. (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: or
test_items:
- flag: "--feature-gates"
compare:
op: nothave
value: "DynamicAuditing=true"
set: true
- flag: "--feature-gates"
set: false
remediation: |
Edit any manifest files or kubelet config files that contain the feature-gates
setting with DynamicAuditing set to "true".
Set the flag to "false" or remove the "DynamicAuditing" setting
completely. Restart the kubelet service if the kubelet config file
is changed.
scored: true
- id: V-242399
text: "Kubernetes DynamicKubeletConfig must not be enabled. (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: or
test_items:
- flag: "--feature-gates"
compare:
op: nothave
value: "DynamicKubeletConfig=true"
set: true
- flag: "--feature-gates"
set: false
remediation: |
Edit any manifest files or $kubeletconf that contain the feature-gates
setting with DynamicKubeletConfig set to "true".
Set the flag to "false" or remove the "DynamicKubeletConfig" setting
completely. Restart the kubelet service if the kubelet config file
is changed.
scored: true
- id: V-242404 # CIS 3.2.8
text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --rotate-certificates
path: '{.rotateCertificates}'
compare:
op: eq
value: true
- flag: --rotate-certificates
path: '{.rotateCertificates}'
set: false
bin_op: or
remediation: |
Remediation Method 1:
If modifying the Kubelet config file, edit the kubelet-config.yaml file
/etc/kubernetes/kubelet/kubelet-config.yaml and set the below parameter to
true
"RotateCertificate":true
Additionally, ensure that the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate
executable argument to false because this would override the Kubelet
config file.
Remediation Method 2:
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--RotateCertificate=true
scored: true
- id: V-242406
text: "The Kubernetes kubelet configuration file must be owned by root (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
tests:
test_items:
- flag: root:root
remediation: |
Run the below command (based on the file location on your system) on each worker node.
For example,
chown root:root $kubeletkubeconfig
scored: true
- id: V-242407
text: "The Kubernetes kubelet configuration files must have file permissions set to 644 or more restrictive (Automated)"
audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
tests:
test_items:
- flag: "permissions"
compare:
op: bitmask
value: "644"
remediation: |
Run the following command (using the config file location identified in the Audit step)
chmod 644 $kubeletconf
scored: true
- id: V-242414
text: "The Kubernetes cluster must use non-privileged host ports for user pods. (Manual)"
type: "manual"
remediation: |
For any of the pods that are using ports below 1024,
reconfigure the pod to use a service to map a host non-privileged
port to the pod port or reconfigure the image to use non-privileged ports.
kubectl get services -A -o json | jq '.items[].spec.ports'
Note: this should exempt non-configurable services from the GKE managed service, such as anthos, gatekeeper, kubelet, etc.
scored: false
- id: V-242415
text: "Secrets in Kubernetes must not be stored as environment variables.(Manual)"
type: "manual"
remediation: |
Run the following command:
kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A
If any of the values returned reference environment variables
rewrite application code to read secrets from mounted secret files, rather than
from environment variables.
scored: false
- id: V-242442
text: "Kubernetes must remove old components after updated versions have been installed. (Manual)"
type: "manual"
remediation: |
To view all pods and the images used to create the pods, from the Master node, run the following command:
kubectl get pods --all-namespaces -o jsonpath="{..image}" | \
tr -s '[[:space:]]' '\n' | \
sort | \
uniq -c
Review the images used for pods running within Kubernetes.
Remove any old pods that are using older images.
scored: false
- id: 3.2
text: "DISA Category Code II - Node Security"
checks:
# TODO Verify this.. seems to be failing but also not sure if this can be disabled with GKE
- id: V-242393
text: "Kubernetes Worker Nodes must not have sshd service running. (Automated)"
audit: 'ps aux | grep sshd'
severity: medium
tests:
test_items:
- flag: bin/sshd
set: false
remediation: |
To stop the sshd service, run the command: systemctl stop sshd
scored: true
# TODO Verify this, low confidence this will work
# Both of these are not working at the moment
# - id: V-242394
# text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)"
# audit: "/bin/sh -c 'systemctl list-unit-files | grep sshd'"
# tests:
# bin_op:
# test_items:
# - flag: "disabled"
# - flag: "sshd"
# set: false
# remediation: |
# To disable the sshd service, run the command:
# chkconfig sshd off
# scored: true
# - id: V-242394
# text: "Kubernetes Worker Nodes must not have the sshd service enabled."
# audit: "systemctl is-enabled sshd"
# tests:
# test_items:
# - flag: "sshd"
# compare:
# op: eq
# value: "disabled"
# remediation: |
# To disable the sshd service, run the command:
# systemctl disable sshd
# scored: true
- id: V-242434 # CIS 3.2.6
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
severity: high
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
compare:
op: eq
value: true
- flag: --make-iptables-util-chains
path: '{.makeIPTablesUtilChains}'
set: false
bin_op: or
remediation: |
Remediation Method 1:
If modifying the Kubelet config file, edit the kubelet-config.json file
/etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
true
"makeIPTablesUtilChains": true
Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
does not set the --make-iptables-util-chains argument because that would
override your Kubelet config file.
Remediation Method 2:
If using executable arguments, edit the kubelet service file
/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
worker node and add the below parameter at the end of the KUBELET_ARGS variable
string.
--make-iptables-util-chains=true
Remediation Method 3:
If using the api configz endpoint consider searching for the status of
"makeIPTablesUtilChains.: true by extracting the live configuration from the nodes
running kubelet.
**See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/),
and then rerun the curl statement from audit process to check for kubelet
configuration changes
kubectl proxy --port=8001 &
export HOSTNAME_PORT=localhost:8001 (example host and port number)
export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from
"kubectl get nodes")
curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
For all three remediations:
Based on your system, restart the kubelet service and check status
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet -l
scored: true
- id: V-242420
severity: medium
text: "Kubernetes Kubelet must have the SSL Certificate Authority set."
audit: "ps -ef | grep kubelet"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: or
test_items:
- flag: "--client-ca-file"
set: true
- path: "{.authentication.x509.clientCAFile}"
set: true
remediation: |
On the Control Plane, run the command:
ps -ef | grep kubelet
If the "--client-ca-file" option exists, verify its value is correctly set.
Note the path to the config file (identified by --config).
Edit the Kubernetes Kubelet config file:
Set the value of "clientCAFile" to a path containing an Approved Organizational Certificate.
Restart the kubelet service using the following command:
systemctl daemon-reload && systemctl restart kubelet
scored: false
- id: V-242452
severity: medium
text: "The Kubernetes kubelet KubeConfig must have file permissions set to 644 or more restrictive."
audit: "stat -c %a $kubeletconf"
tests:
test_items:
- flag: "644"
compare:
op: lte
value: "644"
remediation: |
Change the permissions of the Kubelet KubeConfig file to 644 by executing the command:
chmod 644 $kubeletconf
scored: false
- id: V-242453
severity: medium
text: "The Kubernetes kubelet KubeConfig file must be owned by root."
audit: "stat -c %U:%G $kubeletconf"
tests:
test_items:
- flag: "root:root"
set: true
remediation: |
Change the ownership of the kubelet.conf file to root:root by executing the command:
chown root:root $kubeletconf
scored: false
- id: V-242454
severity: medium
text: "The Kubernetes kubeadm.conf must be owned by root."
audit: "stat -c %U:%G $kubeletsvc"
tests:
test_items:
- flag: "root:root"
set: true
remediation: |
Change the ownership of the kubeadm.conf to root:root by executing the command:
chown root:root $kubeletsvc
scored: false
- id: V-242455
severity: medium
text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive."
audit: "stat -c %a $kubeletsvc"
tests:
test_items:
- flag: "644"
compare:
op: lte
value: "644"
remediation: |
Change the permissions of the kubeadm.conf to 644 by executing the command:
chmod 644 $kubeletsvc
scored: false
- id: V-242456
severity: medium
text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive."
audit: "stat -c %a $kubeletconf"
tests:
test_items:
- flag: "644"
compare:
op: lte
value: "644"
remediation: |
Change the permissions of the config.yaml to 644 by executing the command:
chmod 644 $kubeletconf
scored: false
- id: V-242457
severity: medium
text: "The Kubernetes kubelet config must be owned by root."
audit: "stat -c %U:%G $kubeletconf"
tests:
test_items:
- flag: "root:root"
set: true
remediation: |
Change the ownership of the kubelet config file to root:root by executing the command:
chown root:root $kubeletconf
scored: false
- id: V-245541
severity: medium
text: "Kubernetes Kubelet must not disable timeouts."
audit: "ps -ef | grep kubelet"
audit_config: "/bin/cat $kubeletconf"
tests:
bin_op: and
test_items:
- flag: "--streaming-connection-idle-timeout"
set: false
- path: "{.streamingConnectionIdleTimeout}"
set: true
compare:
op: gte
value: "5m"
remediation: |
On the Control Plane, run the command:
ps -ef | grep kubelet
If the "--streaming-connection-idle-timeout" option exists, verify its value.
Edit the Kubernetes Kubelet config file:
Set the argument "streamingConnectionIdleTimeout" to a value of "5m" or greater.
Restart the kubelet service using the following command:
systemctl daemon-reload && systemctl restart kubelet
scored: true
- id: V-242390 # Similar to CIS 3.2.1
severity: high
text: "The Kubernetes API server must have anonymous authentication disabled (Automated)"
# audit: "/bin/ps -fC kubelet"
audit: "/bin/ps -fC $kubeletbin"
# audit_config: "/bin/cat /etc/kubernetes/kubelet-config.yaml"
audit_config: "/bin/cat $kubeletconf"
tests:
test_items:
- flag: "--anonymous-auth"
path: '{.authentication.anonymous.enabled}'
set: true
compare:
op: eq
value: false
remediation: |
If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to
false.
If using executable arguments, edit the kubelet service file
$kubeletsvc on each worker node and
set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
--anonymous-auth=false
Based on your system, restart the kubelet service. For example:
systemctl daemon-reload
systemctl restart kubelet.service


@@ -0,0 +1,34 @@
---
controls:
version: "gke-stig-kubernetes-v1r6"
id: 4
text: "Kubernetes Security Policies"
type: "policies"
groups:
- id: 4.1
text: "DISA Category Code I - Pod Security Policies"
checks:
- id: V-242381
text: "The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)"
type: "manual"
remediation: |
Create explicit service accounts wherever a Kubernetes workload requires specific access
to the Kubernetes API server.
Modify the configuration of each default service account to include this value
automountServiceAccountToken: false
scored: false
- id: V-242383
text: "User-managed resources must be created in dedicated namespaces. (Manual)"
type: "manual"
remediation: |
Move any user-managed resources from the default, kube-public and kube-node-lease namespaces, to user namespaces.
scored: false
- id: V-242417
text: "Kubernetes must separate user functionality. (Manual)"
type: "manual"
remediation: |
Move any user pods that are present in the Kubernetes system namespaces to user specific namespaces.
scored: false


@@ -85,6 +85,7 @@ type Check struct {
AuditEnvOutput string `json:"-"`
AuditConfigOutput string `json:"-"`
DisableEnvTesting bool `json:"-"`
Severity string `json:"severity,omitempty"`
}
// Runner wraps the basic Run method.


@@ -94,6 +94,33 @@ func TestCheck_Run(t *testing.T) {
},
Expected: FAIL,
},
{
name: "Scored checks that pass should FAIL when config file is not present",
check: Check{
Scored: true,
AuditConfig: "/test/config.yaml",
Tests: &tests{TestItems: []*testItem{{
Flag: "hello",
Set: true,
}}},
Severity: "medium",
},
Expected: FAIL,
},
{
name: "Scored checks that pass should PASS when config file is not present",
check: Check{
Scored: true,
Audit: "echo hello",
AuditConfig: "/test/config.yaml",
Tests: &tests{TestItems: []*testItem{{
Flag: "hello",
Set: true,
}}},
Severity: "high",
},
Expected: PASS,
},
}
for _, testCase := range testCases {


@@ -19,13 +19,19 @@ import (
"encoding/json"
"encoding/xml"
"fmt"
"log"
"strings"
"time"
securitypb "cloud.google.com/go/securitycenter/apiv1/securitycenterpb"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/securityhub/types"
"github.com/golang/glog"
"github.com/google/uuid"
"github.com/onsi/ginkgo/reporters"
"github.com/spf13/viper"
"google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/timestamppb"
"gopkg.in/yaml.v2"
)
@@ -291,6 +297,85 @@ func (controls *Controls) ASFF() ([]types.AwsSecurityFinding, error) {
return fs, nil
}
func (controls *Controls) GSCC() ([]*securitypb.Finding, error) {
fs := []*securitypb.Finding{}
project, err := getConfig("GCP_PROJECT_ID")
if err != nil {
return nil, err
}
region, err := getConfig("GCP_REGION")
if err != nil {
return nil, err
}
cluster, err := getConfig("CLUSTER_NAME")
if err != nil {
return nil, err
}
resourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", project, region, cluster)
ti := timestamppb.Now()
for _, g := range controls.Groups {
for _, check := range g.Checks {
if check.State == FAIL || check.State == WARN {
actualValue := check.ActualValue
remediation := check.Remediation
reason := check.Reason
severity := securitypb.Finding_HIGH
if len(actualValue) > 1024 {
actualValue = actualValue[:1023]
}
if len(remediation) > 512 {
remediation = remediation[:511]
}
if len(reason) > 1024 {
reason = reason[:1023]
}
if strings.ToLower(check.Severity) == "medium" {
severity = securitypb.Finding_MEDIUM
}
if strings.ToLower(check.Severity) == "low" {
severity = securitypb.Finding_LOW
}
// id := fmt.Sprintf("%s/stig/%s/%s", cluster, controls.Version, check.ID)
id := strings.Replace(uuid.New().String(), "-", "", -1)
// Create SourceProperties map with structpb.NewValue() properly handled
sourceProperties, err := structpb.NewStruct(map[string]interface{}{
"Reason": reason,
"ActualResult": actualValue,
"ExpectedResult": check.ExpectedResult,
"Section": fmt.Sprintf("%s %s", controls.ID, controls.Text),
"Subsection": fmt.Sprintf("%s %s", g.ID, g.Text),
"Remediation": remediation,
})
if err != nil {
log.Fatalf("Failed to create SourceProperties: %v", err)
}
f := &securitypb.Finding{
Name: id,
Category: "KUBERNETES_BENCHMARK",
ResourceName: resourceName,
FindingClass: securitypb.Finding_MISCONFIGURATION,
Severity: severity,
State: securitypb.Finding_ACTIVE,
EventTime: ti,
Description: fmt.Sprintf("%s - %s", check.ID, check.Text),
SourceProperties: sourceProperties.GetFields(),
}
fs = append(fs, f)
}
}
}
return fs, nil
}
func getConfig(name string) (string, error) {
r := viper.GetString(name)
if len(r) == 0 {


@@ -193,6 +193,7 @@ groups:
remediation: |
Edit the config file /this/is/a/file/path and set SomeSampleFlag to true.
scored: true
severity: medium
`)
// and
controls, err := NewControls(MASTER, in, "")
@@ -224,6 +225,7 @@
assert.Equal(t, "SomeSampleFlag=true", G2.Checks[0].Tests.TestItems[0].Flag)
assert.Equal(t, "Edit the config file /this/is/a/file/path and set SomeSampleFlag to true.\n", G2.Checks[0].Remediation)
assert.Equal(t, true, G2.Checks[0].Scored)
assert.Equal(t, "medium", G2.Checks[0].Severity)
assertEqualGroupSummary(t, 0, 1, 0, 0, G2)
// and
assert.Equal(t, 1, controls.Summary.Pass)


@@ -416,6 +416,9 @@ func writeOutput(controlsCollection []*check.Controls) {
writeASFFOutput(controlsCollection)
return
}
if GSCC {
writeGSCCOutput(controlsCollection)
}
writeStdoutOutput(controlsCollection)
}
@@ -468,12 +471,24 @@ func writeASFFOutput(controlsCollection []*check.Controls) {
if err != nil {
exitWithError(fmt.Errorf("failed to format findings as ASFF: %v", err))
}
if err := writeFinding(out); err != nil {
if err := writeASSFFinding(out); err != nil {
exitWithError(fmt.Errorf("failed to output to ASFF: %v", err))
}
}
}
func writeGSCCOutput(controlsCollection []*check.Controls) {
for _, controls := range controlsCollection {
out, err := controls.GSCC()
if err != nil {
exitWithError(fmt.Errorf("failed to format findings as GSCC: %v", err))
}
if err := writeGSCCFinding(out); err != nil {
exitWithError(fmt.Errorf("failed to output to GSCC: %v", err))
}
}
}
func writeStdoutOutput(controlsCollection []*check.Controls) {
for _, controls := range controlsCollection {
summary := controls.Summary


@@ -44,6 +44,7 @@ var (
junitFmt bool
pgSQL bool
aSFF bool
GSCC bool
masterFile = "master.yaml"
nodeFile = "node.yaml"
etcdFile = "etcd.yaml"
@@ -66,8 +67,8 @@ var (
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: os.Args[0],
Short: "Run CIS Benchmarks checks against a Kubernetes deployment",
Long: `This tool runs the CIS Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`,
Short: "Run CIS and STIG Benchmarks checks against a Kubernetes deployment",
Long: `This tool runs the CIS and STIG Kubernetes Benchmark (https://www.cisecurity.org/benchmark/kubernetes/)`,
Run: func(cmd *cobra.Command, args []string) {
bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformInfo(), viper.GetViper())
if err != nil {
@@ -168,6 +169,7 @@ func init() {
RootCmd.PersistentFlags().BoolVar(&junitFmt, "junit", false, "Prints the results as JUnit")
RootCmd.PersistentFlags().BoolVar(&pgSQL, "pgsql", false, "Save the results to PostgreSQL")
RootCmd.PersistentFlags().BoolVar(&aSFF, "asff", false, "Send the results to AWS Security Hub")
RootCmd.PersistentFlags().BoolVar(&GSCC, "gscc", false, "Send the results to GCP Security Command Center")
RootCmd.PersistentFlags().BoolVar(&filterOpts.Scored, "scored", true, "Run the scored CIS checks")
RootCmd.PersistentFlags().BoolVar(&filterOpts.Unscored, "unscored", true, "Run the unscored CIS checks")
RootCmd.PersistentFlags().StringVar(&skipIds, "skip", "", "List of comma separated values of checks to be skipped")


@@ -0,0 +1,56 @@
package cmd
import (
"context"
"fmt"
"log"
securitycenter "cloud.google.com/go/securitycenter/apiv1"
securitypb "cloud.google.com/go/securitycenter/apiv1/securitycenterpb"
"github.com/aquasecurity/kube-bench/internal/findings"
"github.com/spf13/viper"
)
const GCP_REGION = "GCP_REGION"
const GCP_PROJECT_ID = "GCP_PROJECT_ID"
const GCP_SCC_SOURCE_ID = "GCP_SCC_SOURCE_ID"
func writeGSCCFinding(in []*securitypb.Finding) error {
r := viper.GetString(GCP_REGION)
if len(r) == 0 {
return fmt.Errorf("%s not set", GCP_REGION)
}
projectId := viper.GetString(GCP_PROJECT_ID)
if len(projectId) == 0 {
return fmt.Errorf("%s not set", GCP_PROJECT_ID)
}
sccSourceId := viper.GetString(GCP_SCC_SOURCE_ID)
if len(sccSourceId) == 0 {
return fmt.Errorf("%s not set", GCP_SCC_SOURCE_ID)
}
ctx := context.Background()
client, err := securitycenter.NewClient(ctx)
if err != nil {
return fmt.Errorf("failed to create SCC client: %w", err)
}
defer client.Close()
p := findings.NewGSCC(client, sccSourceId)
out, perr := p.PublishFinding(in)
printGSCC(out)
return perr
}
func printGSCC(out *findings.GSCCPublisherOutput) {
if out.SuccessCount > 0 {
log.Printf("Number of findings that were successfully imported:%v\n", out.SuccessCount)
}
if out.FailedCount > 0 {
log.Printf("Number of findings that failed to import:%v\n", out.FailedCount)
for _, f := range out.FailedFindings {
log.Printf("ID:%s", f.Finding.GetName())
log.Printf("Message:%s", f.Error)
}
}
}


@@ -13,12 +13,12 @@ import (
)
// REGION ...
const REGION = "AWS_REGION"
const AWS_REGION = "AWS_REGION"
func writeFinding(in []types.AwsSecurityFinding) error {
r := viper.GetString(REGION)
func writeASSFFinding(in []types.AwsSecurityFinding) error {
r := viper.GetString(AWS_REGION)
if len(r) == 0 {
return fmt.Errorf("%s not set", REGION)
return fmt.Errorf("%s not set", AWS_REGION)
}
cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(r))
if err != nil {
@@ -28,11 +28,11 @@ func writeFinding(in []types.AwsSecurityFinding) error {
svc := securityhub.NewFromConfig(cfg)
p := findings.New(*svc)
out, perr := p.PublishFinding(in)
print(out)
printASSF(out)
return perr
}
func print(out *findings.PublisherOutput) {
func printASSF(out *findings.PublisherOutput) {
if out.SuccessCount > 0 {
log.Printf("Number of findings that were successfully imported:%v\n", out.SuccessCount)
}


@@ -104,7 +104,7 @@ command line, with the flag `--group` or `-g`.
## Check
The CIS Kubernetes Benchmark recommends configurations to harden Kubernetes components. These recommendations are usually configuration options and can be
The STIG/CIS Kubernetes Benchmarks recommend configurations to harden Kubernetes components. These recommendations are usually configuration options and can be
specified by flags to Kubernetes binaries, or in configuration files.
The Benchmark also provides commands to audit a Kubernetes installation, identify
@@ -130,11 +130,16 @@ remediation: |
on the master node and set the below parameter.
--anonymous-auth=false
scored: false
severity: high
```
A `check` object has `id`, `text`, `audit`, `tests`, `remediation`,
and `scored` fields.
Optionally, `severity` can be provided. The severity will default to `high` if not set.
This field is used for sending GCP SCC results. AWS Security Hub does not currently support setting severity.
Valid options are `high`, `medium` or `low`.
`kube-bench` supports running individual checks by specifying the check's `id`
as a comma-delimited list on the command line with the `--check` flag.
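For example, an invocation might look like the following (illustrative only; the benchmark name is the `gke-stig-kubernetes-v2r2` entry added to `target_mapping` in this change, and the check IDs are taken from the GKE STIG node checks above):
```bash
# Run only two of the GKE STIG checks, selecting the benchmark explicitly.
kube-bench --benchmark gke-stig-kubernetes-v2r2 --check V-242387,V-242393
```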

docs/gscc.md

@@ -0,0 +1,57 @@
# Integrating kube-bench with GCP Security Command Center
You can run kube-bench with the `--gscc` flag to send findings to GCP Security Command Center (SCC). There are some additional steps required so that kube-bench has the information and permissions needed to send these findings.
A few notes before getting started:
- There are multiple ways to assign pod identity in GCP. This walkthrough uses [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity).
- The SCC `source` for kube-bench is created using a Python script. This needs to be run before executing kube-bench.
- Creating sources is not currently supported in the gcloud CLI.
- Creating a source requires an organization-level permission, which is excessive for the kube-bench pod; this is why source creation is not part of the kube-bench application.
## Create the GCP SCC Source for kube-bench
This only needs to be done once per GCP organization.
This script requires the user to have the following permission: `securitycenter.sources.update` at the organization scope. The role currently associated with this permission is `roles/securitycenter.sourcesEditor`.
```bash
python3 -m venv venv
source venv/bin/activate
pip install -r ./helper_scripts/create_gcp_source/requirements.txt
python ./helper_scripts/create_gcp_source/__main__.py <YOUR GCP ORG ID>
```
The output of this script is the name/ID of the source, in the format `organizations/<ORG_ID>/sources/<SOURCE_ID>`.
## Enable API Access to the GCP Security Command Center
_You will need GCP Security Command Center to be enabled in your project._
The details for assigning roles to the Workload Identity service account created by the job deployment are [documented here](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#authenticating_to).
This step can be taken before you create the service account.
```bash
PROJECT_NUMBER="1234567890"
PROJECT_ID="my_gcp_project_id"
NAMESPACE="kube-bench"
KSA_NAME="kube-bench-sa"
ROLE="roles/securitycenter.findingsEditor"
gcloud projects add-iam-policy-binding projects/$PROJECT_ID --role=$ROLE \
--member=principal://iam.googleapis.com/projects/$PROJECT_NUMBER/locations/global/workloadIdentityPools/$PROJECT_ID.svc.id.goog/subject/ns/$NAMESPACE/sa/$KSA_NAME
```
### Modify the job configuration
- Modify the kube-bench ConfigMap in `job-gke-stig-gscc.yaml` to specify the project ID, region, cluster name and source ID (see the sketch after this list).
- In the same file, modify the image specified in the Job to use the kube-bench image pushed to your GCP Artifact Registry.
- You may also need to modify the volume mount location for `kube-bench-gke-config` to match the version of the GKE STIG benchmark you are using.
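As a rough sketch, the ConfigMap carries the same settings as the version-specific `cfg/gke-stig-kubernetes-v2r2/config.yaml` shown earlier in this change; the values below are placeholders, not defaults:
```yaml
# Placeholder values - substitute your own SCC source, project, region and cluster.
GCP_SCC_SOURCE_ID: "organizations/123456789012/sources/9876543210"
GCP_PROJECT_ID: "my-gcp-project"
GCP_REGION: "us-central1"
CLUSTER_NAME: "my-gke-cluster"
```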
You can now run kube-bench as a pod in your cluster: `kubectl apply -f job-gke-stig-gscc.yaml`
Findings will be generated for any kube-bench test that generates a `[FAIL]` or `[WARN]` output. If all tests pass, no findings will be generated. However, it's recommended that you consult the pod log output to check whether any findings were generated but could not be written to Security Command Center.
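For example, assuming the Job keeps the default `kube-bench` name and runs in the `kube-bench` namespace used earlier, the pod logs can be inspected with something like:
```bash
# Hypothetical job name and namespace - adjust to match your job-gke-stig-gscc.yaml.
kubectl -n kube-bench logs job/kube-bench
```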
Query findings in SCC with the following:
```
state="ACTIVE" AND NOT mute="MUTED" AND parent_display_name="KubeBench" AND category="KUBERNETES_BENCHMARK"
```

go.mod

@@ -22,6 +22,13 @@ require (
)
require (
cloud.google.com/go v0.118.3 // indirect
cloud.google.com/go/auth v0.15.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
cloud.google.com/go/iam v1.4.1 // indirect
cloud.google.com/go/longrunning v0.6.5 // indirect
cloud.google.com/go/securitycenter v1.36.1 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
@@ -35,18 +42,23 @@ require (
github.com/aws/smithy-go v1.22.2 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.5 // indirect
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
@@ -75,18 +87,29 @@ require (
github.com/stretchr/objx v0.5.2 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.35.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/net v0.36.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/term v0.29.0 // indirect
golang.org/x/text v0.22.0 // indirect
golang.org/x/time v0.7.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
golang.org/x/net v0.37.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.30.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.10.0 // indirect
google.golang.org/api v0.224.0 // indirect
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/grpc v1.71.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect

go.sum

@@ -1,3 +1,17 @@
cloud.google.com/go v0.118.3 h1:jsypSnrE/w4mJysioGdMBg4MiW/hHx/sArFpaBWHdME=
cloud.google.com/go v0.118.3/go.mod h1:Lhs3YLnBlwJ4KA6nuObNMZ/fCbOQBPuWKPoE0Wa/9Vc=
cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
cloud.google.com/go/iam v1.4.1 h1:cFC25Nv+u5BkTR/BT1tXdoF2daiVbZ1RLx2eqfQ9RMM=
cloud.google.com/go/iam v1.4.1/go.mod h1:2vUEJpUG3Q9p2UdsyksaKpDzlwOrnMzS30isdReIcLM=
cloud.google.com/go/longrunning v0.6.5 h1:sD+t8DO8j4HKW4QfouCklg7ZC1qC4uzVZt8iz3uTW+Q=
cloud.google.com/go/longrunning v0.6.5/go.mod h1:Et04XK+0TTLKa5IPYryKf5DkpwImy6TluQ1QTLwlKmY=
cloud.google.com/go/securitycenter v1.36.1 h1:QOXZRilyXK80/61Szse35K1w3SU5mzBlEM8/XVJOkzI=
cloud.google.com/go/securitycenter v1.36.1/go.mod h1:SxE1r7Y5V9AVPa+DU0d+4QAOIJzcKglO3Vc4zvcQtPo=
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0=
@@ -36,6 +50,8 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -44,8 +60,11 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
@@ -79,13 +98,21 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.5 h1:VgzTY2jogw3xt39CusEnFJWm7rlsq5yL5q9XdLOuP5g=
github.com/googleapis/enterprise-certificate-proxy v0.3.5/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -194,6 +221,18 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
@@ -203,6 +242,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@ -215,14 +256,20 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -236,14 +283,22 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@ -255,6 +310,16 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.224.0 h1:Ir4UPtDsNiwIOHdExr3fAj4xZ42QjK7uQte3lORLJwU=
google.golang.org/api v0.224.0/go.mod h1:3V39my2xAGkodXy0vEqcEtkqgw2GtrFL5WuBZlCTCOQ=
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE=
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -263,6 +328,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@ -0,0 +1,40 @@
import sys
from google.cloud import securitycenter_v2
def create_source(organization_id) -> None:
    """
    Create a new findings source if one does not already exist.
    Args:
        organization_id: the numeric ID of the organization, e.g. "111122222444"
    """
client = securitycenter_v2.SecurityCenterClient()
org_name = f"organizations/{organization_id}"
response = client.list_sources(parent=org_name)
source_exists = False
for source in response:
if source.display_name == "KubeBench":
print(f"Found exisitng source: {source.name}")
source_exists = True
break
if not source_exists:
response = client.create_source(
request={
"parent": org_name,
"source": {
"display_name": "KubeBench",
"description": "KubeBench is an open-source CIS and STIG scanning tool for Kubernetes",
},
}
)
print(f"Created Source: {response.name}")
if __name__ == "__main__":
if len(sys.argv) == 2:
create_source(sys.argv[1])
else:
print("Syntax: python __main__.py <GCP_ORGANIZATION_ID>")

View File

@ -0,0 +1 @@
google-cloud-securitycenter

View File

@ -0,0 +1,75 @@
package findings
import (
"context"
"fmt"
securitycenter "cloud.google.com/go/securitycenter/apiv1"
securitypb "cloud.google.com/go/securitycenter/apiv1/securitycenterpb"
"github.com/pkg/errors"
)
// GSCCPublisher publishes findings to GCP Security Command Center (SCC).
type GSCCPublisher struct {
client *securitycenter.Client // GCP SCC Client
sourceID string // SCC Source ID
}
// FailedFinding captures an error together with the finding that caused it.
type FailedFinding struct {
Error string `json:"error"`
Finding *securitypb.Finding `json:"finding"`
}
type GSCCPublisherOutput struct {
// The number of findings that failed to import.
//
// FailedCount is a required field
FailedCount int32
// The list of findings that failed to import.
FailedFindings []FailedFinding
// The number of findings that were successfully imported.
//
// SuccessCount is a required field
SuccessCount int32
}
// NewGSCC creates a new GSCCPublisher.
func NewGSCC(client *securitycenter.Client, sourceID string) *GSCCPublisher {
return &GSCCPublisher{
client: client,
sourceID: sourceID,
}
}
// PublishFinding publishes findings to GCP SCC.
func (p *GSCCPublisher) PublishFinding(findings []*securitypb.Finding) (*GSCCPublisherOutput, error) {
o := GSCCPublisherOutput{}
var errs error
ctx := context.Background()
for _, finding := range findings {
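		// Build one CreateFinding request per finding; the finding's name is
		// reused as the finding ID under the configured SCC source.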
req := &securitypb.CreateFindingRequest{
Parent: p.sourceID,
FindingId: finding.GetName(),
Finding: finding,
}
resp, err := p.client.CreateFinding(ctx, req)
if err != nil {
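			// Record the failure: only the most recent wrapped error is kept in errs,
			// while every failed finding is appended to FailedFindings.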
errs = errors.Wrap(err, "finding publish failed")
o.FailedCount++
o.FailedFindings = append(o.FailedFindings, FailedFinding{
Error: err.Error(),
Finding: finding,
})
continue
}
fmt.Printf("Finding created: %s\n", resp.Name)
o.SuccessCount++
}
return &o, errs
}

105
job-gke-stig-gscc.yaml Normal file
View File

@ -0,0 +1,105 @@
# Service account role required for V-242395
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-bench-sa
namespace: kube-bench
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kube-bench-list-pods
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list"]
resourceNames: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-bench-sa-binding
subjects:
- kind: ServiceAccount
name: kube-bench-sa
namespace: kube-bench
roleRef:
kind: ClusterRole
name: kube-bench-list-pods
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-bench-gke-config
data:
config.yaml: |
GCP_PROJECT_ID: "<PROJECT_ID>"
GCP_REGION: "<REGION>"
CLUSTER_NAME: "<CLUSTER_NAME>"
GCP_SCC_SOURCE_ID: "projects/<PROJECT_ID>/sources/<SOURCE_ID>"
---
apiVersion: batch/v1
kind: Job
metadata:
name: kube-bench
spec:
template:
spec:
serviceAccountName: kube-bench-sa
hostPID: true
containers:
- name: kube-bench
imagePullPolicy: Always
# Push the image to your GCP Artifact Registry and then refer to it here
# image: <region>-docker.pkg.dev/<registry>/<repository>/kube-bench:latest
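          # e.g. (hypothetical values): us-central1-docker.pkg.dev/my-project/kube-bench/kube-bench:latest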
image: docker.io/aquasec/kube-bench:latest
command:
[
"kube-bench",
"run",
"--benchmark",
"gke-stig-kubernetes-v2r2",
"--gscc",
]
volumeMounts:
- name: var-lib-kubelet
mountPath: /var/lib/kubelet
readOnly: true
- name: etc-systemd
mountPath: /etc/systemd
readOnly: true
- name: etc-kubernetes
mountPath: /etc/kubernetes
readOnly: true
- name: home-kubernetes
mountPath: /home/kubernetes
readOnly: true
- name: kube-bench-gke-config
mountPath: "/opt/kube-bench/cfg/gke-stig-kubernetes-v2r2/config.yaml"
subPath: config.yaml
readOnly: true
restartPolicy: Never
volumes:
- name: var-lib-kubelet
hostPath:
path: "/var/lib/kubelet"
- name: etc-systemd
hostPath:
path: "/etc/systemd"
- name: etc-kubernetes
hostPath:
path: "/etc/kubernetes"
- name: home-kubernetes
hostPath:
path: "/home/kubernetes"
- name: kube-bench-gke-config
configMap:
name: kube-bench-gke-config
items:
- key: config.yaml
path: config.yaml

87
job-gke-stig.yaml Normal file
View File

@ -0,0 +1,87 @@
# Service account role required for V-242395
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-bench-sa
namespace: kube-bench
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kube-bench-list-pods
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list"]
resourceNames: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-bench-sa-binding
subjects:
- kind: ServiceAccount
name: kube-bench-sa
namespace: kube-bench
roleRef:
kind: ClusterRole
name: kube-bench-list-pods
apiGroup: rbac.authorization.k8s.io
---
apiVersion: batch/v1
kind: Job
metadata:
name: kube-bench
spec:
template:
spec:
serviceAccountName: kube-bench-sa
hostPID: true
containers:
- name: kube-bench
imagePullPolicy: Always
# Push the image to your GCP Artifact Registry and then refer to it here
# image: <region>-docker.pkg.dev/<registry>/<repository>/kube-bench:latest
image: docker.io/aquasec/kube-bench:latest
command:
["kube-bench", "run", "--benchmark", "gke-stig-kubernetes-v2r2"]
volumeMounts:
- name: var-lib-kubelet
mountPath: /var/lib/kubelet
readOnly: true
- name: etc-systemd
mountPath: /etc/systemd
readOnly: true
- name: etc-kubernetes
mountPath: /etc/kubernetes
readOnly: true
- name: home-kubernetes
mountPath: /home/kubernetes
readOnly: true
- name: kube-bench-gke-config
mountPath: "/opt/kube-bench/cfg/gke-stig-kubernetes-v2r2/config.yaml"
subPath: config.yaml
readOnly: true
restartPolicy: Never
volumes:
- name: var-lib-kubelet
hostPath:
path: "/var/lib/kubelet"
- name: etc-systemd
hostPath:
path: "/etc/systemd"
- name: etc-kubernetes
hostPath:
path: "/etc/kubernetes"
- name: home-kubernetes
hostPath:
path: "/home/kubernetes"
- name: kube-bench-gke-config
configMap:
name: kube-bench-gke-config
items:
- key: config.yaml
path: config.yaml