1
0
mirror of https://github.com/aquasecurity/kube-bench.git synced 2025-06-20 15:08:52 +00:00

Completed testing for GCP SCC publisher, documentation, test updates and deployment scripts. Added helper script for creating the SCC Source

This commit is contained in:
Carter Williamson 2025-03-21 14:14:14 -07:00
parent 414f456de7
commit 8e151b75c0
15 changed files with 330 additions and 56 deletions

3
.gitignore vendored
View File

@ -5,6 +5,7 @@ dist
.vscode/
hack/kind.test.yaml
coverage.txt
venv/
.idea/
@ -13,4 +14,4 @@ coverage.txt
thumbs.db
/kubeconfig.kube-bench
/test.data
*.iml
*.iml

View File

@ -1,5 +1,16 @@
---
## Version-specific settings that override the values in cfg/config.yaml
## These settings are required if you are using the --gscc option to report findings to GCP Security Command Center
## GCP Organization ID is required.
GCP_SCC_SOURCE_ID: "<GCP_SCC_SOURCE_ID>"
## GCP project ID is required.
GCP_PROJECT_ID: "<GCP_PROJECT_ID>"
## GCP region is required.
GCP_REGION: "<GCP_REGION>"
## GKE Cluster Name is required.
CLUSTER_NAME: "<CLUSTER_NAME>"
node:
kubelet:
confs:

View File

@ -12,6 +12,7 @@ groups:
text: "The Kubernetes Kubelet must have the read-only port flag disabled"
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
severity: high
tests:
test_items:
- flag: "--read-only-port"
@ -354,6 +355,7 @@ groups:
- id: V-242393
text: "Kubernetes Worker Nodes must not have sshd service running. (Automated)"
audit: 'ps aux | grep sshd'
severity: medium
tests:
test_items:
- flag: bin/sshd
@ -395,6 +397,7 @@ groups:
- id: V-242434 # CIS 3.2.6
text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
severity: high
audit: "/bin/ps -fC $kubeletbin"
audit_config: "/bin/cat $kubeletconf"
tests:
@ -453,6 +456,7 @@ groups:
scored: true
- id: V-242420
severity: medium
text: "Kubernetes Kubelet must have the SSL Certificate Authority set."
audit: "ps -ef | grep kubelet"
audit_config: "/bin/cat $kubeletconf"
@ -478,6 +482,7 @@ groups:
scored: false
- id: V-242452
severity: medium
text: "The Kubernetes kubelet KubeConfig must have file permissions set to 644 or more restrictive."
audit: "stat -c %a $kubeletconf"
tests:
@ -492,6 +497,7 @@ groups:
scored: false
- id: V-242453
severity: medium
text: "The Kubernetes kubelet KubeConfig file must be owned by root."
audit: "stat -c %U:%G $kubeletconf"
tests:
@ -504,6 +510,7 @@ groups:
scored: false
- id: V-242454
severity: medium
text: "The Kubernetes kubeadm.conf must be owned by root."
audit: "stat -c %U:%G $kubeletsvc"
tests:
@ -516,6 +523,7 @@ groups:
scored: false
- id: V-242455
severity: medium
text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive."
audit: "stat -c %a $kubeletsvc"
tests:
@ -530,6 +538,7 @@ groups:
scored: false
- id: V-242456
severity: medium
text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive."
audit: "stat -c %a $kubeletconf"
tests:
@ -544,6 +553,7 @@ groups:
scored: false
- id: V-242457
severity: medium
text: "The Kubernetes kubelet config must be owned by root."
audit: "stat -c %U:%G $kubeletconf"
tests:
@ -556,6 +566,7 @@ groups:
scored: false
- id: V-245541
severity: medium
text: "Kubernetes Kubelet must not disable timeouts."
audit: "ps -ef | grep kubelet"
audit_config: "/bin/cat $kubeletconf"
@ -583,6 +594,7 @@ groups:
scored: true
- id: V-242390 # Similar to CIS 3.2.1
severity: high
text: "The Kubernetes API server must have anonymous authentication disabled (Automated)"
# audit: "/bin/ps -fC kubelet"
audit: "/bin/ps -fC $kubeletbin"

View File

@ -85,6 +85,7 @@ type Check struct {
AuditEnvOutput string `json:"-"`
AuditConfigOutput string `json:"-"`
DisableEnvTesting bool `json:"-"`
Severity string `json:"severity,omitempty"`
}
// Runner wraps the basic Run method.

View File

@ -94,6 +94,33 @@ func TestCheck_Run(t *testing.T) {
},
Expected: FAIL,
},
{
name: "Scored checks that pass should FAIL when config file is not present",
check: Check{
Scored: true,
AuditConfig: "/test/config.yaml",
Tests: &tests{TestItems: []*testItem{{
Flag: "hello",
Set: true,
}}},
Severity: "medium",
},
Expected: FAIL,
},
{
name: "Scored checks that pass should PASS when config file is not present",
check: Check{
Scored: true,
Audit: "echo hello",
AuditConfig: "/test/config.yaml",
Tests: &tests{TestItems: []*testItem{{
Flag: "hello",
Set: true,
}}},
Severity: "high",
},
Expected: PASS,
},
}
for _, testCase := range testCases {

View File

@ -20,12 +20,14 @@ import (
"encoding/xml"
"fmt"
"log"
"strings"
"time"
securitypb "cloud.google.com/go/securitycenter/apiv1/securitycenterpb"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/securityhub/types"
"github.com/golang/glog"
"github.com/google/uuid"
"github.com/onsi/ginkgo/reporters"
"github.com/spf13/viper"
"google.golang.org/protobuf/types/known/structpb"
@ -297,7 +299,7 @@ func (controls *Controls) ASFF() ([]types.AwsSecurityFinding, error) {
func (controls *Controls) GSCC() ([]*securitypb.Finding, error) {
fs := []*securitypb.Finding{}
project, err := getConfig("GCP_PROJECT")
project, err := getConfig("GCP_PROJECT_ID")
if err != nil {
return nil, err
}
@ -318,6 +320,7 @@ func (controls *Controls) GSCC() ([]*securitypb.Finding, error) {
actualValue := check.ActualValue
remediation := check.Remediation
reason := check.Reason
severity := securitypb.Finding_HIGH
if len(actualValue) > 1024 {
actualValue = actualValue[:1023]
@ -329,15 +332,26 @@ func (controls *Controls) GSCC() ([]*securitypb.Finding, error) {
reason = reason[:1023]
}
id := fmt.Sprintf("%s/stig-kubernetes-benchmark/%s/%s", resourceName, controls.Version, check.ID)
if strings.ToLower(check.Severity) == "medium" {
severity = securitypb.Finding_MEDIUM
}
if strings.ToLower(check.Severity) == "low" {
severity = securitypb.Finding_LOW
}
// id := fmt.Sprintf("%s/stig/%s/%s", cluster, controls.Version, check.ID)
id := strings.Replace(uuid.New().String(), "-", "", -1)
// Create SourceProperties map with structpb.NewValue() properly handled
sourceProperties, err := structpb.NewStruct(map[string]interface{}{
"Reason": reason,
"Actual result": actualValue,
"Expected result": check.ExpectedResult,
"ActualResult": actualValue,
"ExpectedResult": check.ExpectedResult,
"Section": fmt.Sprintf("%s %s", controls.ID, controls.Text),
"Subsection": fmt.Sprintf("%s %s", g.ID, g.Text),
"Remediation": remediation,
})
if err != nil {
log.Fatalf("Failed to create SourceProperties: %v", err)
@ -345,12 +359,13 @@ func (controls *Controls) GSCC() ([]*securitypb.Finding, error) {
f := &securitypb.Finding{
Name: id,
Category: "CIS_KUBERNETES_BENCHMARK",
Category: "KUBERNETES_BENCHMARK",
ResourceName: resourceName,
Severity: securitypb.Finding_HIGH,
FindingClass: securitypb.Finding_MISCONFIGURATION,
Severity: severity,
State: securitypb.Finding_ACTIVE,
EventTime: ti,
Description: check.Text,
Description: fmt.Sprintf("%s - %s", check.ID, check.Text),
SourceProperties: sourceProperties.GetFields(),
}
fs = append(fs, f)

View File

@ -193,6 +193,7 @@ groups:
remediation: |
Edit the config file /this/is/a/file/path and set SomeSampleFlag to true.
scored: true
severity: medium
`)
// and
controls, err := NewControls(MASTER, in, "")
@ -224,6 +225,7 @@ groups:
assert.Equal(t, "SomeSampleFlag=true", G2.Checks[0].Tests.TestItems[0].Flag)
assert.Equal(t, "Edit the config file /this/is/a/file/path and set SomeSampleFlag to true.\n", G2.Checks[0].Remediation)
assert.Equal(t, true, G2.Checks[0].Scored)
assert.Equal(t, "medium", G2.Checks[0].Severity)
assertEqualGroupSummary(t, 0, 1, 0, 0, G2)
// and
assert.Equal(t, 1, controls.Summary.Pass)

View File

@ -11,63 +11,46 @@ import (
"github.com/spf13/viper"
)
// GCP_REGION and ORG_ID should be set in the config
const GCP_REGION = "GCP_REGION"
const ORG_ID = "GCP_ORG_ID"
const GCP_PROJECT_ID = "GCP_PROJECT_ID"
const GCP_SCC_SOURCE_ID = "GCP_SCC_SOURCE_ID"
func writeGSCCFinding(in []*securitypb.Finding) error {
r := viper.GetString(GCP_REGION)
if len(r) == 0 {
return fmt.Errorf("%s not set", GCP_REGION)
}
orgId := viper.GetString(ORG_ID)
if len(orgId) == 0 {
return fmt.Errorf("%s not set", ORG_ID)
projectId := viper.GetString(GCP_PROJECT_ID)
if len(projectId) == 0 {
return fmt.Errorf("%s not set", GCP_PROJECT_ID)
}
sccSourceId := viper.GetString(GCP_SCC_SOURCE_ID)
if len(sccSourceId) == 0 {
return fmt.Errorf("%s not set", GCP_SCC_SOURCE_ID)
}
ctx := context.Background()
client, err := securitycenter.NewClient(ctx)
if err != nil {
return fmt.Errorf("failed to create SCC client: %w", err)
}
defer client.Close()
// SCC Source ID - replace with your actual SCC source ID
sourceID := fmt.Sprintf("organizations/%s/sources/1234567890", orgId)
// Iterate over findings and publish them
for _, f := range in {
req := &securitypb.CreateFindingRequest{
Parent: sourceID,
FindingId: f.GetName(), // Ensure unique finding ID
Finding: f,
}
resp, err := client.CreateFinding(ctx, req)
if err != nil {
return fmt.Errorf("failed to create finding %s: %w", f.GetName(), err)
}
fmt.Printf("Finding created: %s\n", resp.Name)
}
return nil
// svc := securityhub.NewFromConfig(cfg)
// p := findings.New(*svc)
// out, perr := p.GSCCPublishFinding(in)
// printGSCC(out)
// return perr
p := findings.NewGSCC(client, sccSourceId)
out, perr := p.PublishFinding(in)
printGSCC(out)
return perr
}
func printGSCC(out *findings.PublisherOutput) {
func printGSCC(out *findings.GSCCPublisherOutput) {
if out.SuccessCount > 0 {
log.Printf("Number of findings that were successfully imported:%v\n", out.SuccessCount)
}
if out.FailedCount > 0 {
log.Printf("Number of findings that failed to import:%v\n", out.FailedCount)
for _, f := range out.FailedFindings {
log.Printf("ID:%s", *f.Id)
log.Printf("Message:%s", *f.ErrorMessage)
log.Printf("Error Code:%s", *f.ErrorCode)
log.Printf("ID:%s", f.Finding.GetName())
log.Printf("Message:%s", f.Error)
}
}
}

View File

@ -104,7 +104,7 @@ command line, with the flag `--group` or `-g`.
## Check
The CIS Kubernetes Benchmark recommends configurations to harden Kubernetes components. These recommendations are usually configuration options and can be
The STIG/CIS Kubernetes Benchmarks recommend configurations to harden Kubernetes components. These recommendations are usually configuration options and can be
specified by flags to Kubernetes binaries, or in configuration files.
The Benchmark also provides commands to audit a Kubernetes installation, identify
@ -130,11 +130,16 @@ remediation: |
on the master node and set the below parameter.
--anonymous-auth=false
scored: false
severity: high
```
A `check` object has an `id`, a `text`, an `audit`, a `tests`, `remediation`
and `scored` fields.
Optionally, `severity` can be provided. The severity will default to `high` if not set.
This field is used for sending GCP SCC results. AWS Security Hub does not currently support setting severity.
Valid options are `high`, `medium` or `low`.
`kube-bench` supports running individual checks by specifying the check's `id`
as a comma-delimited list on the command line with the `--check` flag.

57
docs/gscc.md Normal file
View File

@ -0,0 +1,57 @@
# Integrating kube-bench with GCP Security Command Center
You can configure kube-bench with the `--gscc` to send findings to GCP Security Command Center (SCC). There are some additional steps required so that kube-bench has information and permissions to send these findings.
A few notes before getting started:
- There are multiple ways to assign pod identity in GCP. For this walkthrough we are using [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity).
- The SCC `source` for kube-bench is created using a Python script. This needs to be run prior to executing kube-bench.
- Creating sources is not currently supported in the gcloud cli.
- Creating a source is an organizational permission, which is excessive for the kube-bench pod. This is why it is not part of the kube-bench application.
## Create the GCP SCC Source for kube-bench
This only needs to be done once per GCP organization.
This script requires the user to have the following permission: `securitycenter.sources.update` at the organization scope. The current role associated with this is `roles/securitycenter.sourcesEditor`.
```bash
python3 -m venv venv
source venv/bin/activate
pip install -r ./helper_scripts/create_gcp_source/requirements.txt
python ./helper_scripts/create_gcp_source/__main__.py <YOUR GCP ORG ID>
```
The output of this script is the name/id for the source. Format `organizations/<ORG_ID>/sources/<SOURCE_ID>`
## Enable API Access the GCP Security Command Center
_You will need GCP Security Command Center to be enabled in your project._
The details for assigning roles to the workload identity service account created by the job deployment is [documented here.](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#authenticating_to)
This step can be taken before you create the service account.
```bash
PROJECT_NUMBER="1234567890"
PROJECT_ID="my_gcp_project_id"
NAMESPACE="kube-bench"
KSA_NAME="kube-bench-sa"
ROLE="roles/securitycenter.findingsEditor"
gcloud projects add-iam-policy-binding projects/$PROJECT_ID --role=$ROLE \
--member=principal://iam.googleapis.com/projects/$PROJECT_NUMBER/locations/global/workloadIdentityPools/$PROJECT_ID.svc.id.goog/subject/ns/$NAMESPACE/sa/$KSA_NAME
```
### Modify the job configuration
- Modify the kube-bench Configmap in `job-gke-stig-gscc.yaml` to specify the project ID, region, cluster name and source ID.
- In the same file, modify the image specified in the Job to use the kube-bench image pushed to your GCP Artifact Registry.
- You may also need to modify the volume mount location for `kube-bench-gke-config` to match the version of the GKE STIG benchmark you are using.
You can now run kube-bench as a pod in your cluster: `kubectl apply -f job-gke-stig-gscc.yaml`
Findings will be generated for any kube-bench test that generates a `[FAIL]` or `[WARN]` output. If all tests pass, no findings will be generated. However, it's recommended that you consult the pod log output to check whether any findings were generated but could not be written to Security Command Center.
Query findings in SCC with the following:
```
state="ACTIVE" AND NOT mute="MUTED" AND parent_display_name="KubeBench" AND category="KUBERNETES_BENCHMARK"
```

View File

@ -0,0 +1,40 @@
import sys
from google.cloud import securitycenter_v2
def create_source(organization_id) -> str:
    """
    Create (or reuse) the "KubeBench" findings source in GCP Security Command Center.

    The operation is idempotent: if a source with display name "KubeBench"
    already exists in the organization, it is reused instead of creating a
    duplicate.

    Args:
        organization_id: numeric ID of the organization, e.g. "111122222444"

    Returns:
        The fully qualified source name, in the form
        "organizations/<ORG_ID>/sources/<SOURCE_ID>".
    """
    client = securitycenter_v2.SecurityCenterClient()
    org_name = f"organizations/{organization_id}"
    # Reuse an existing source so repeated runs don't create duplicates.
    for source in client.list_sources(parent=org_name):
        if source.display_name == "KubeBench":
            print(f"Found existing source: {source.name}")
            return source.name
    response = client.create_source(
        request={
            "parent": org_name,
            "source": {
                "display_name": "KubeBench",
                "description": "KubeBench is an open-source CIS and STIG scanning tool for Kubernetes",
            },
        }
    )
    print(f"Created Source: {response.name}")
    return response.name
if __name__ == "__main__":
if len(sys.argv) == 2:
create_source(sys.argv[1])
else:
print("Syntax: python __main__.py <GCP_ORGANIZATION_ID>")

View File

@ -0,0 +1 @@
google-cloud-securitycenter

View File

@ -11,8 +11,14 @@ import (
// Publisher represents an object that publishes findings to GCP Security Command Center (SCC).
type GSCCPublisher struct {
client *securitycenter.Client // GCP SCC Client
sourceID string // SCC Source ID
client *securitycenter.Client // GCP SCC Client
sourceID string // SCC Source ID
}
// Capture the error and the finding which threw the error
type FailedFinding struct {
Error string `json:"error"`
Finding *securitypb.Finding `json:"finding"`
}
type GSCCPublisherOutput struct {
@ -22,7 +28,7 @@ type GSCCPublisherOutput struct {
FailedCount int32
// The list of findings that failed to import.
FailedFindings []string
FailedFindings []FailedFinding
// The number of findings that were successfully imported.
//
@ -47,7 +53,7 @@ func (p *GSCCPublisher) PublishFinding(findings []*securitypb.Finding) (*GSCCPub
for _, finding := range findings {
req := &securitypb.CreateFindingRequest{
Parent: p.sourceID,
FindingId: finding.GetName(), // Ensure unique finding ID
FindingId: finding.GetName(),
Finding: finding,
}
@ -55,7 +61,10 @@ func (p *GSCCPublisher) PublishFinding(findings []*securitypb.Finding) (*GSCCPub
if err != nil {
errs = errors.Wrap(err, "finding publish failed")
o.FailedCount++
o.FailedFindings = append(o.FailedFindings, finding.GetName())
o.FailedFindings = append(o.FailedFindings, FailedFinding{
Error: err.Error(),
Finding: finding,
})
continue
}
fmt.Printf("Finding created: %s\n", resp.Name)

105
job-gke-stig-gscc.yaml Normal file
View File

@ -0,0 +1,105 @@
# Service account role required for V-242395
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-bench-sa
namespace: kube-bench
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kube-bench-list-pods
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list"]
resourceNames: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-bench-sa-binding
subjects:
- kind: ServiceAccount
name: kube-bench-sa
namespace: kube-bench
roleRef:
kind: ClusterRole
name: kube-bench-list-pods
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-bench-gke-config
data:
config.yaml: |
GCP_PROJECT_ID: "<PROJECT_ID>"
GCP_REGION: "<REGION>"
CLUSTER_NAME: "<CLUSTER_NAME>"
GCP_SCC_SOURCE_ID: "projects/<PROJECT_ID>/sources/<SOURCE_ID>"
---
apiVersion: batch/v1
kind: Job
metadata:
name: kube-bench
spec:
template:
spec:
serviceAccountName: kube-bench-sa
hostPID: true
containers:
- name: kube-bench
imagePullPolicy: Always
# Push the image to your GCP Artifact Registry and then refer to it here
# image: <region>-docker.pkg.dev/<registry>/<repository>/kube-bench:latest
image: docker.io/aquasec/kube-bench:latest
command:
[
"kube-bench",
"run",
"--benchmark",
"gke-stig-kubernetes-v2r2",
"--gscc",
]
volumeMounts:
- name: var-lib-kubelet
mountPath: /var/lib/kubelet
readOnly: true
- name: etc-systemd
mountPath: /etc/systemd
readOnly: true
- name: etc-kubernetes
mountPath: /etc/kubernetes
readOnly: true
- name: home-kubernetes
mountPath: /home/kubernetes
readOnly: true
- name: kube-bench-gke-config
mountPath: "/opt/kube-bench/cfg/gke-stig-kubernetes-v2r2/config.yaml"
subPath: config.yaml
readOnly: true
restartPolicy: Never
volumes:
- name: var-lib-kubelet
hostPath:
path: "/var/lib/kubelet"
- name: etc-systemd
hostPath:
path: "/etc/systemd"
- name: etc-kubernetes
hostPath:
path: "/etc/kubernetes"
- name: home-kubernetes
hostPath:
path: "/home/kubernetes"
- name: kube-bench-gke-config
configMap:
name: kube-bench-gke-config
items:
- key: config.yaml
path: config.yaml

View File

@ -1,4 +1,4 @@
# Service account role required for 242395
# Service account role required for V-242395
apiVersion: v1
kind: ServiceAccount
metadata:
@ -38,7 +38,7 @@ metadata:
spec:
template:
spec:
serviceAccountName: kube-bench-sa
serviceAccountName: kube-bench-sa
hostPID: true
containers:
- name: kube-bench
@ -47,12 +47,7 @@ spec:
# image: <region>-docker.pkg.dev/<registry>/<repository>/kube-bench:latest
image: docker.io/aquasec/kube-bench:latest
command:
[
"kube-bench",
"run",
"--benchmark",
"gke-stig-kubernetes-v2r2"
]
["kube-bench", "run", "--benchmark", "gke-stig-kubernetes-v2r2"]
volumeMounts:
- name: var-lib-kubelet
mountPath: /var/lib/kubelet
@ -66,6 +61,10 @@ spec:
- name: home-kubernetes
mountPath: /home/kubernetes
readOnly: true
- name: kube-bench-gke-config
mountPath: "/opt/kube-bench/cfg/gke-stig-kubernetes-v2r2/config.yaml"
subPath: config.yaml
readOnly: true
restartPolicy: Never
volumes:
- name: var-lib-kubelet
@ -80,3 +79,9 @@ spec:
- name: home-kubernetes
hostPath:
path: "/home/kubernetes"
- name: kube-bench-gke-config
configMap:
name: kube-bench-gke-config
items:
- key: config.yaml
path: config.yaml