diff --git a/.github/workflows/release-helm-charts.yaml b/.github/workflows/release-helm-charts.yaml index 131aa76d..f4aa9432 100644 --- a/.github/workflows/release-helm-charts.yaml +++ b/.github/workflows/release-helm-charts.yaml @@ -42,3 +42,4 @@ jobs: charts_url: https://5gsec.github.io/charts/ commit_username: "github-actions[bot]" commit_email: "github-actions[bot]@users.noreply.github.com" + dependencies: nimbus-kubearmor,https://5gsec.github.io/charts/;nimbus-netpol,https://5gsec.github.io/charts/;nimbus-kyverno,https://5gsec.github.io/charts/;nimbus-k8tls,https://5gsec.github.io/charts/ diff --git a/PROJECT b/PROJECT index 1d759f06..ef2ebb5e 100644 --- a/PROJECT +++ b/PROJECT @@ -10,36 +10,42 @@ repo: github.com/5GSEC/nimbus resources: - api: crdVersion: v1 - namespaced: true controller: true domain: security.nimbus.com group: intent kind: SecurityIntent - path: github.com/5GSEC/nimbus/api/v1 - version: v1 + path: github.com/5GSEC/nimbus/api/v1alpha1 + version: v1alpha1 - api: crdVersion: v1 namespaced: true controller: true domain: security.nimbus.com group: intent + kind: SecurityIntentBinding + path: github.com/5GSEC/nimbus/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + domain: security.nimbus.com + group: intent kind: NimbusPolicy - path: github.com/5GSEC/nimbus/api/v1 - version: v1 + path: github.com/5GSEC/nimbus/api/v1alpha1 + version: v1alpha1 - api: crdVersion: v1 - controller: true domain: security.nimbus.com group: intent kind: ClusterNimbusPolicy - path: github.com/5GSEC/nimbus/api/v1 - version: v1 + path: github.com/5GSEC/nimbus/api/v1alpha1 + version: v1alpha1 - api: crdVersion: v1 controller: true domain: security.nimbus.com group: intent kind: ClusterSecurityIntentBinding - path: github.com/5GSEC/nimbus/api/v1 - version: v1 + path: github.com/5GSEC/nimbus/api/v1alpha1 + version: v1alpha1 version: "3" diff --git a/docs/assets/1.jpg b/docs/assets/1.jpg new file mode 100644 index 00000000..b64e43bf Binary files /dev/null and b/docs/assets/1.jpg differ diff --git a/docs/assets/2.jpg b/docs/assets/2.jpg new file mode 100644 index 00000000..7dbda0fc Binary files /dev/null and b/docs/assets/2.jpg differ diff --git a/docs/assets/3.jpg b/docs/assets/3.jpg new file mode 100644 index 00000000..f57df298 Binary files /dev/null and b/docs/assets/3.jpg differ diff --git a/docs/assets/4.jpg b/docs/assets/4.jpg new file mode 100644 index 00000000..e7f891b0 Binary files /dev/null and b/docs/assets/4.jpg differ diff --git a/docs/crd/Readme.md b/docs/crd/Readme.md new file mode 100644 index 00000000..887b024b --- /dev/null +++ b/docs/crd/Readme.md @@ -0,0 +1,33 @@ +# Nimbus API + +This document provides guidance on extending and maintaining the [Nimbus API](../../api) + +## Concepts + +* https://kubernetes.io/docs/reference/using-api/api-concepts/ +* https://kubernetes.io/docs/reference/using-api/ +* https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/ +* https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md + +## API Groups + +All Nimbus resources are currently defined in the `intent.security.nimbus.com` API group. + +## API Versions + +This `intent.security.nimbus.com` has the following versions: + +* v1alpha1 + +## Adding a new attribute + +New attributes can be added to existing resources without impacting compatibility. They do not require a new version. + +## Deleting an attribute + +Attributes cannot be deleted in a version. 
They should be marked for deprecation and removed after 3 releases. + +## Modifying an attribute + +Attributes cannot be modified in a version. The existing attribute should be marked for deprecation and a new attribute +should be added following version compatibility guidelines. diff --git a/docs/crd/v1alpha1/clustersecurityintentbinding.md b/docs/crd/v1alpha1/clustersecurityintentbinding.md new file mode 100644 index 00000000..43f01a23 --- /dev/null +++ b/docs/crd/v1alpha1/clustersecurityintentbinding.md @@ -0,0 +1,77 @@ +# Nimbus ClusterSecurityIntentBinding Specification + +## Description + +A `ClusterSecurityIntentBinding` binds specific `SecurityIntent` resources to targeted resources within a cluster. +Unlike its namespaced counterpart (`SecurityIntentBinding`), it operates at the cluster level, enabling intent +application across multiple namespaces. + +```text +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: ClusterSecurityIntentBinding +metadata: + name: [ ClusterSecurityIntentBinding name ] +spec: + intents: + - name: [ intent-to-bind-name ] + selector: + workloadSelector: # --> optional + matchLabels: + [ key1 ]: [ value1 ] + [ keyN ]: [ valueN ] + nsSelector: # --> optional + excludeNames: # --> optional + - [ namespace-to-exclude ] + matchNames: # --> optional + - [ namespace-to-include ] +``` + +### Explanation of Fields + +### Common Fields + +- `apiVersion`: Identifies the version of the API group for this resource. This remains constant for all Nimbus + policies. +- `kind`: Specifies the resource type, which is always `ClusterSecurityIntentBinding` in this case. +- `metadata`: Contains standard Kubernetes metadata like the resource name, which you define in the `.metadata` + placeholder. + +```yaml +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: ClusterSecurityIntentBinding +metadata: + name: cluster-security-intent-binding-name +``` + +### Intents + +- `.spec.intents` **(Required)**: An array containing one or more objects specifying the names of `SecurityIntent` + resources to be + bound. Each object has a single field: + - `name` **(Required)**: The name of the `SecurityIntent` that should be applied to resources selected by this + binding. + +```yaml +... +spec: + intents: + - name: assess-tls-scheduled +... +``` + +### Selector + +`ClusterSecurityIntentBinding` has different selector to bind intent(s) to resources across namespaces. + +- `.spec.selector` **(Required)**: Defines resources targeted by the bound `SecurityIntent` policies. + - `workloadSelector` **(Optional)**: Same selector as `SecurityIntentBinding`. + - `nsSelector` **(Optional)**: Namespace selection criteria. + - `excludeNames` **(Optional)**: Exclude namespaces from the binding. + - `matchNames` **(Optional)**: Include namespaces in the binding. + Note: At least one of `matchNames` or `excludeNames` must be specified in `nsSelector`. 
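+
+For instance, a binding that applies the `assess-tls-scheduled` intent to every namespace except `kube-system` might look like the following sketch (the intent name and the excluded namespace are illustrative):
+
+```yaml
+apiVersion: intent.security.nimbus.com/v1alpha1
+kind: ClusterSecurityIntentBinding
+metadata:
+  name: assess-tls-cluster-binding
+spec:
+  intents:
+    - name: assess-tls-scheduled
+  selector:
+    nsSelector:
+      excludeNames:
+        - kube-system
+```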
+ +Here are some examples: + +- [Apply to all namespaces](../../../examples/clusterscoped/csib-1-all-ns-selector.yaml) +- [Apply to specific namespaces](../../../examples/clusterscoped/csib-2-match-names.yaml) +- [Apply to all namespaces excluding specific namespaces](../../../examples/clusterscoped/csib-3-exclude-names.yaml) diff --git a/docs/crd/v1alpha1/securityintent.md b/docs/crd/v1alpha1/securityintent.md new file mode 100644 index 00000000..0ecdea20 --- /dev/null +++ b/docs/crd/v1alpha1/securityintent.md @@ -0,0 +1,61 @@ +# Nimbus `SecurityIntent` Specification + +## Description + +A `SecurityIntent` resource defines the desired security state for your Kubernetes cluster at a high level. It describes +**_what security outcome you want_**, not how to achieve it. This resource is cluster-scoped resource. + +## Spec + +```text +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: SecurityIntent +metadata: + name: [SecurityIntent name] +spec: + intent: + id: [supported intent ID] # ID from the predefined pool + action: [Audit|Block] # Block by default. + params: # Optional. Parameters allows fine-tuning of intents to specific requirements. + key: ["value1", "value2"] +``` + +### Explanation of Fields + +### Common Fields + +- `apiVersion`: Identifies the version of the API group for this resource. This remains constant for all Nimbus + policies. +- `kind`: Specifies the resource type, which is always `SecurityIntent` in this case. +- `metadata`: Contains standard Kubernetes metadata like the resource name, which you define in the `.metadata.name` + placeholder. + +```yaml +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: SecurityIntent +metadata: + name: securityIntent-name +``` + +### Intent Fields + +The `.spec.intent` field defines the specific security behavior you want: + +- `id` **(Required)**: This refers to a predefined intent ID from the [pool]( ../../intents/supportedIntents). + Security engines use this ID to generate corresponding security policies. +- `action` **(Required)**: This defines how the generated policy will be enforced. Supported actions are `Audit` (logs + the violation) and `Block` (prevents the violation). By default, the action is set to `Block`. +- `params` **(Optional)**: Parameters are key-value pairs that allow you to customize the chosen intent for your + specific needs. Refer to the [supported intents]( ../../intents/supportedIntents) for details on available + parameters and their valid values. + +```yaml +... +spec: + intent: + id: assessTLS + action: Audit + params: + schedule: [ "* * * * *" ] +... +``` diff --git a/docs/crd/v1alpha1/securityintentbinding.md b/docs/crd/v1alpha1/securityintentbinding.md new file mode 100644 index 00000000..29520465 --- /dev/null +++ b/docs/crd/v1alpha1/securityintentbinding.md @@ -0,0 +1,76 @@ +# Nimbus `SecurityIntentBinding` Specification + +## Description + +A `SecurityIntentBinding` object defines how a specific `SecurityIntent` is applied to resources within a namespace. It +essentially binds an intent to target resources like pods. + +## Spec + +```text +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: SecurityIntentBinding +metadata: + name: [ securityIntentBinding name ] + namespace: [ namespace name ] # Namespace where the binding applies +spec: + intents: + - name: [ intent-to-bind-name ] # Name of the SecurityIntent to apply + selector: + workloadSelector: + matchLabels: + key1: value1 + # ... 
(additional label selectors) +``` + +### Explanation of Fields + +### Common Fields + +- `apiVersion`: Identifies the version of the API group for this resource. This remains constant for all Nimbus + policies. +- `kind`: Specifies the resource type, which is always `SecurityIntentBinding` in this case. +- `metadata`: Contains standard Kubernetes metadata like the resource name, which you define in the `.metadata` + placeholder. + +```yaml +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: SecurityIntentBinding +metadata: + name: securityIntentBinding-name +``` + +### Intents + +- `.spec.intents` **(Required)**: An array containing one or more objects specifying the names of `SecurityIntent` + resources to be + bound. Each object has a single field: + - `name` **(Required)**: The name of the `SecurityIntent` that should be applied to resources selected by this + binding. + +```yaml +... +spec: + intents: + - name: dns-manipulation +... +``` + +### Selector + +- `spec.selector` **(Required)**: Defines the Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/) that + will be + subject to the bound `SecurityIntent` policies. + - `workloadSelector` : Selects resources based on labels. + - `matchLabels`: A key-value map where each key represents a label on the target resource and its corresponding + value specifies the expected value for that label. Resources with matching labels will be targeted by the + bound `SecurityIntent`. + +```yaml +... +selector: + workloadSelector: + matchLabels: + key1: value +... +``` diff --git a/docs/getting-started.md b/docs/getting-started.md index 5cf928e3..e5a1a400 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -9,9 +9,10 @@ Before you begin, set up the following: - [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) version 1.26 or later. - A Kubernetes cluster running version 1.26 or later. -- In case of kind clusters, bpf-lsm module needs to be installed ([bpf-lsm](https://docs.kubearmor.io/kubearmor/documentation/faq#how-to-enable-kubearmorhostpolicy-for-k8s-cluster)). +- Make sure that the bpf-lsm module needs is installed ([bpf-lsm](https://docs.kubearmor.io/kubearmor/documentation/faq#how-to-enable-kubearmorhostpolicy-for-k8s-cluster)). - The Kubernetes clusters should be configured with a CNI that supports network policy. - - For kind clusters, this reference ([kind-calico](https://docs.tigera.io/calico/latest/getting-started/kubernetes/kind)) has the details. + - For kind clusters, this reference ([calico-kind](https://docs.tigera.io/calico/latest/getting-started/kubernetes/kind)) has the details. + - For on-prem clusters, this reference ([calico-onprem](https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises)) has the details. - For AWS EKS clusters, the VPC CNI supports kubernetes network policies ([vpc-cni-policy](https://aws.amazon.com/blogs/containers/amazon-vpc-cni-now-supports-kubernetes-network-policies/)). - K8s cluster nodes need to have nested virtualization enabled for the confidential containers intent. Additionally kvm needs to be installed ([ubuntu-kvm](https://help.ubuntu.com/community/KVM/Installation)). - For GCP VMs, nested virtualization can be enabled at create time using below command. The machine types which support nested virtualization are listed here ([cpu-virt](https://cloud.google.com/compute/docs/machine-resource#machine_type_comparison)). 
diff --git a/docs/intents/assess-tls.md b/docs/intents/assess-tls.md new file mode 100644 index 00000000..87d2e93a --- /dev/null +++ b/docs/intents/assess-tls.md @@ -0,0 +1,287 @@ +# Objective + +**Attack vectors**: An attacker who controls an external Application Function (AF) could potentially present a +fraudulent OAuth access token to gain unauthorized access to Network Exposure Function (NEF) services. This highlights +the importance of securing communication channels within your network. + +**Mitigation**: The `Assess TLS` `SecurityIntent` focuses on mitigating this risk by assessing the TLS configuration of +your resources in all namespaces. This includes verifying aspects like: + +- TLS protocol version +- Certificate validity +- Cipher suite compliance with FIPS-140-3 standards + +## Tutorial + +### Prerequisites + +**Nimbus suite**: Follow [this](../../deployments/nimbus/Readme.md) guide to install complete suite. + +#### 1. Create a SecurityIntent and ClusterSecurityIntentBinding: + +```shell +cat < +``` + +This SecurityIntent triggers a periodically scheduled CronJob within the `nimbus-k8tls-env` namespace. This CronJob is +responsible for running the TLS assessment. + +#### 3. Validation: + +```shell +$ kubectl -n nimbus-k8tls-env get cj +NAME SCHEDULE TIMEZONE SUSPEND ACTIVE LAST SCHEDULE AGE +assess-tls-default-assesstls @weekly False 0 8m22s +``` + +> [!Note]: By default, the CronJob runs weekly. You can adjust the schedule using the `schedule` parameter in the +> SecurityIntent definition. Refer to this [example](../../examples/clusterscoped/assesstls-with-schedule.yaml). + +If the `SecurityIntent` and its policy are working correctly, you should see a `CronJob` in `nimbus-k8tls-env` +namespace. + +#### 4. Assessment Report: + +- By default, this `CronJob` logs its assessment report to `STDOUT`. You can access these + logs using `kubectl logs` command from the specific CronJob's pod. + +
+ +```shell +$ kubectl -n nimbus-k8tls-env logs -c k8tls +unsupported protocol UDP +checking [10.96.0.1:443 default/kubernetes[https]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.99.11.127:5000 default/web-server-service]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.108.204.179:80 ingress-nginx/ingress-nginx-controller[http]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.108.204.179:443 ingress-nginx/ingress-nginx-controller[https]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.97.149.132:443 ingress-nginx/ingress-nginx-controller-admission[https-webhook]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.96.0.10:53 kube-system/kube-dns[dns-tcp]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.96.0.10:9153 kube-system/kube-dns[metrics]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.111.234.163:443 kube-system/metrics-server[https]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.109.26.113:32767 nimbus/kubearmor]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.103.33.142:8443 nimbus/kubearmor-controller-metrics-service[https]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.99.183.240:443 nimbus/kubearmor-controller-webhook-service]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.108.53.146:8000 nimbus/kyverno-background-controller-metrics[metrics-port]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.96.240.82:443 nimbus/kyverno-cleanup-controller[https]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.98.180.87:8000 nimbus/kyverno-cleanup-controller-metrics[metrics-port]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.110.185.62:8000 nimbus/kyverno-reports-controller-metrics[metrics-port]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.96.61.200:443 nimbus/nimbus-operator-kyverno-svc[https]]... 
+ executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.106.165.242:8000 nimbus/nimbus-operator-kyverno-svc-metrics[metrics-port]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.102.109.113:8080 sentryflow/sentryflow[exporter]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +checking [10.102.109.113:8081 sentryflow/sentryflow[filter-server]]... + executing [k8tls_tls_00chktls] tool=tls... + executing [k8tls_tls_01checkversion] tool=tls... + executing [k8tls_tls_02certificateChecks] tool=tls... +json report generated at [/tmp/report.json] +2024/10/07 16:18:19 rendering MD +2024/10/07 16:18:19 rendering HTML +┌─────────────────────────────────────────────────────────────────┬─────────────────────┬────────────┬─────────┬────────────────────────┬────────┬───────────┬──────────────────────────────────────────────┬──────────────────────┐ +│ NAME │ ADDRESS │ STATUS │ VERSION │ CIPHERSUITE │ HASH │ SIGNATURE │ VERIFICATION │ FIPS_140_3_COMPLIANT │ +├─────────────────────────────────────────────────────────────────┼─────────────────────┼────────────┼─────────┼────────────────────────┼────────┼───────────┼──────────────────────────────────────────────┼──────────────────────┤ +│ default/kubernetes[https] │ 10.96.0.1:443 │ TLS │ TLSv1.3 │ TLS_AES_128_GCM_SHA256 │ SHA256 │ RSA-PSS │ unable to verify the first certificate │ OK │ +│ default/web-server-service │ 10.99.11.127:5000 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +│ ingress-nginx/ingress-nginx-controller[http] │ 10.108.204.179:80 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +│ ingress-nginx/ingress-nginx-controller[https] │ 10.108.204.179:443 │ TLS │ TLSv1.3 │ TLS_AES_256_GCM_SHA384 │ SHA256 │ RSA-PSS │ self-signed certificate │ OK │ +│ ingress-nginx/ingress-nginx-controller-admission[https-webhook] │ 10.97.149.132:443 │ TLS │ TLSv1.3 │ TLS_AES_128_GCM_SHA256 │ SHA256 │ ECDSA │ unable to verify the first certificate │ OK │ +│ kube-system/kube-dns[dns-tcp] │ 10.96.0.10:53 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +│ kube-system/kube-dns[metrics] │ 10.96.0.10:9153 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +│ kube-system/metrics-server[https] │ 10.111.234.163:443 │ TLS │ TLSv1.3 │ TLS_AES_128_GCM_SHA256 │ SHA256 │ RSA-PSS │ self-signed certificate in certificate chain │ OK │ +│ nimbus/kubearmor │ 10.109.26.113:32767 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +│ nimbus/kubearmor-controller-metrics-service[https] │ 10.103.33.142:8443 │ TLS │ TLSv1.3 │ TLS_AES_128_GCM_SHA256 │ SHA256 │ RSA-PSS │ self-signed certificate in certificate chain │ OK │ +│ nimbus/kubearmor-controller-webhook-service │ 10.99.183.240:443 │ TLS │ TLSv1.3 │ TLS_AES_128_GCM_SHA256 │ SHA256 │ RSA-PSS │ unable to verify the first certificate │ OK │ +│ nimbus/kyverno-background-controller-metrics[metrics-port] │ 10.108.53.146:8000 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +│ nimbus/kyverno-cleanup-controller[https] │ 10.96.240.82:443 │ TLS │ TLSv1.3 │ TLS_AES_128_GCM_SHA256 │ SHA256 │ RSA-PSS │ unable to verify the first certificate │ OK │ +│ nimbus/kyverno-cleanup-controller-metrics[metrics-port] │ 10.98.180.87:8000 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +│ nimbus/kyverno-reports-controller-metrics[metrics-port] │ 10.110.185.62:8000 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +│ nimbus/nimbus-operator-kyverno-svc[https] │ 
10.96.61.200:443 │ TLS │ TLSv1.3 │ TLS_AES_128_GCM_SHA256 │ SHA256 │ RSA-PSS │ unable to verify the first certificate │ OK │ +│ nimbus/nimbus-operator-kyverno-svc-metrics[metrics-port] │ 10.106.165.242:8000 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +│ sentryflow/sentryflow[exporter] │ 10.102.109.113:8080 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +│ sentryflow/sentryflow[filter-server] │ 10.102.109.113:8081 │ PLAIN_TEXT │ │ │ │ │ │ FAIL │ +└─────────────────────────────────────────────────────────────────┴─────────────────────┴────────────┴─────────┴────────────────────────┴────────┴───────────┴──────────────────────────────────────────────┴──────────────────────┘ + +Summary: +┌──────────────────────────┬───────┐ +│ STATUS │ COUNT │ +├──────────────────────────┼───────┤ +│ self-signed certificate │ 3 │ +│ insecure port │ 11 │ +│ FIPS 140-3 non-compliant │ 11 │ +``` + +
+ +
+
+- You can send the assessment report to [Elasticsearch](https://www.elastic.co/elasticsearch). Follow the steps below to send the report there:
+
+  - Install the full Nimbus suite except the `nimbus-k8tls` adapter:
+
+  ```shell
+  helm upgrade --install nimbus-operator 5gsec/nimbus -n nimbus --create-namespace --set autoDeploy.k8tls=false
+  ```
+
+  - Get the `nimbus-k8tls` adapter `values.yaml` file:
+
+  ```shell
+  helm show values 5gsec/nimbus-k8tls > values.yaml
+  ```
+
+  - Update the `values.yaml` file with the required Elasticsearch values under `.output.elasticsearch`.
+
+  - Deploy the adapter:
+    ```shell
+    helm install --values values.yaml nimbus-k8tls 5gsec/nimbus-k8tls -n nimbus
+    ```
+
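+
+A rough sketch of what the Elasticsearch section of `values.yaml` might look like is shown below. Apart from `index` (referenced in the Kibana steps that follow), the field names here are assumptions — confirm the actual schema in the output of `helm show values 5gsec/nimbus-k8tls`:
+
+```yaml
+output:
+  elasticsearch:
+    enabled: true                                # assumed field name
+    url: https://elasticsearch.example.com:9200  # assumed field name; your Elasticsearch endpoint
+    index: k8tls-report                          # index the assessment reports are written to
+    user: elastic                                # assumed; credentials may instead come from a Secret
+    password: changeme                           # assumed
+```
+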
+
+- Check the report in the [Kibana](https://www.elastic.co/kibana) UI:
+  - Search for `indices` on the homepage, as shown in the following screenshot:
+    ![HomePage](../assets/1.jpg)
+
+ + - Select the index that you've configured at `.output.elasticsearch.index` while installing the `nimbus-k8tls` + adapter: + ![Index](../assets/2.jpg) + +
+
+  - Click the `Documents` tab at the top to show the documents:
+    ![Documents](../assets/3.jpg)
+
+
+  - Click the icons on the right side to show the full report, as shown in the following screenshot:
+    ![Report](../assets/4.jpg)
+
+**_The [k8tls](https://github.com/kubearmor/k8tls) engine is used for assessing TLS configurations._**
+
+#### 5. Cleanup
+
+Once done, delete the created resources:
+
+```shell
+kubectl delete si assess-tls-default
+kubectl delete csib assess-tls-default
+```
diff --git a/docs/intents/coco-workload.md b/docs/intents/coco-workload.md
new file mode 100644
index 00000000..ca340cd1
--- /dev/null
+++ b/docs/intents/coco-workload.md
@@ -0,0 +1,30 @@
+## Objective
+
+The cocoWorkload intent aims to enhance security by ensuring that sensitive workloads are executed in environments that provide confidentiality and isolation. It leverages technologies like Confidential VMs, which are designed to protect data in use, thereby reducing the risk of data exposure or leakage.
+
+**Note**: For the cocoWorkload intent one needs to have the [nimbus-kyverno](../../deployments/nimbus-kyverno/Readme.md) adapter running in their cluster. To install the complete suite with all the adapters, follow the steps mentioned [here](../getting-started.md#nimbus).
+
+## Policy Creation
+
+### Kyverno Policy
+
+#### Prereq
+
+- K8s cluster nodes need to have nested virtualization enabled for the confidential containers intent. Additionally, KVM needs to be installed ([ubuntu-kvm](https://help.ubuntu.com/community/KVM/Installation)).
+
+- One should have the [ConfidentialContainers](../getting-started.md#confidential-containers) runtime installed in their cluster.
+
+#### Policy Description
+
+- The policy operates during the admission phase (`admission: true`), meaning it enforces rules when workloads (like Deployments) are created. The `background: true` setting indicates that the policy also applies to existing resources in the background, ensuring compliance over time: for existing resources it generates policy reports for workloads that do not meet the compliance the policy defines.
+
+- The key action in this policy is to mutate the workload by adding `runtimeClassName: kata-clh` to the Deployment's spec. This is crucial because `kata-clh` refers to a runtime class configured to use Confidential VMs. By ensuring that the workload runs under this runtime, the policy enforces that the deployment is secured within a Confidential VM. Users can apply a different `runtimeClassName` by specifying it as an intent param:
+
+  ```
+  params:
+    runtimeClass: ["kata-qemu"]
+  ```
+
diff --git a/docs/intents/deny-external-network-access.md b/docs/intents/deny-external-network-access.md
new file mode 100644
index 00000000..492b921b
--- /dev/null
+++ b/docs/intents/deny-external-network-access.md
@@ -0,0 +1,30 @@
+## Objective
+
+- The denyExternalNetworkAccess intent focuses on enhancing security by restricting external network access for specific applications, such as those labeled with selectors. This intent aims to ensure that these applications can only communicate with trusted internal resources while preventing unauthorized access from external networks.
+
+- The goal of the denyExternalNetworkAccess intent is to create a secure environment for the application by limiting both ingress and egress traffic. This is critical for minimizing the attack surface and protecting sensitive data from external threats. A minimal intent and binding pair is sketched below.
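+
+The following is a minimal sketch of a `SecurityIntent` and `SecurityIntentBinding` pair requesting this intent; the resource names, description, and the `app: prod` label are illustrative, so adjust them to your workloads (see the [namespaced examples](../../examples/namespaced) for maintained examples):
+
+```yaml
+apiVersion: intent.security.nimbus.com/v1alpha1
+kind: SecurityIntent
+metadata:
+  name: deny-external-network-access
+spec:
+  intent:
+    id: denyExternalNetworkAccess
+    description: "Restrict workload traffic to trusted in-cluster endpoints"
+    action: Block
+---
+apiVersion: intent.security.nimbus.com/v1alpha1
+kind: SecurityIntentBinding
+metadata:
+  name: deny-external-network-access-binding
+spec:
+  intents:
+    - name: deny-external-network-access
+  selector:
+    workloadSelector:
+      matchLabels:
+        app: prod
+```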
+ +**Note** : For the denyExternalNetworkAccess intent one needs to have [nimbus-netpol](../../deployments/nimbus-netpol/Readme.md) adapter running in their cluster. To install the complete suite with all the adapters pls follow the steps mentioned [here](../getting-started.md#nimbus) + +## Policy Creation + +The denyExternalNetworkAccess intent results in `NetworkPolicy`. Below is the behaviour of intent in terms of policy: + +### Network Policy + +#### Prereq + +- For the `NetworkPolicy` to work, one should have a [Calico-CNI](https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises) installed in their cluster. + +#### Policy Description + +- The NetworkPolicy created as a result of this intent defines rules that enforce restricted network access: + + - **Egress Rules:** The policy allows outbound traffic only to specific IP ranges and the kube-dns service, enabling the application to resolve DNS queries while restricting communication to the external network. + + - **Ingress Rules:** The policy specifies that only traffic from defined internal IP ranges can reach the pods, ensuring that only trusted sources can communicate with them. + +- By limiting both ingress and egress traffic, this policy significantly reduces the risk of data exfiltration and unauthorized access. + +- The application can securely operate within a controlled environment while still being able to resolve DNS queries necessary for its functionality. + diff --git a/docs/intents/dns-manipulation.md b/docs/intents/dns-manipulation.md new file mode 100644 index 00000000..38a14a8a --- /dev/null +++ b/docs/intents/dns-manipulation.md @@ -0,0 +1,62 @@ +## Objective + +- The primary goal of the dns-manipulation intent is to enforce policies that manage and restrict how pods interact with DNS services, particularly regarding outbound DNS queries. This can be crucial for security, compliance, and operational integrity. + +- The dns-manipulation intent presumably defines the requirement for the application to interact with DNS services, potentially to manipulate DNS requests or responses based on the application’s needs. + +- The dnsManipulation intent emphasizes the importance of safeguarding DNS resolution mechanisms. This is crucial because adversaries might exploit vulnerabilities to alter DNS requests, redirect traffic, or expose sensitive user activity. + +**Note** : For the dns-manipulation intent one needs to have either [nimbus-netpol](../../deployments/nimbus-netpol/Readme.md) adapter or [nimbus-kubearmor](../../deployments/nimbus-kubearmor/Readme.md) adapter or both adapters running in their cluster + +## Policy Creation + +The dns-manipulation intent results in two policies `NetworkPolicy` and a `KubeArmorPolicy`. Below are the behaviours of intent in terms of policy: + +### KubeArmor Policy + +#### Prereq + +- For the `KubeArmorPolicy` to work, one should have a [BPF-LSM](https://github.com/kubearmor/KubeArmor/blob/main/getting-started/FAQ.md#checking-and-enabling-support-for-bpf-lsm) enabled for each node in their cluster. + + +#### Policy Description + +- The `KubeArmorPolicy` is configured to block any unauthorized access or modifications to critical files, particularly `/etc/resolv`.conf, which is essential for DNS resolution in Linux-based systems. + +- This file is where the system looks for DNS servers and configuration details. Ensuring it is read-only protects against malicious changes that could redirect DNS requests. 
+ +- The policy defines a Block action, indicating that any attempts to modify or write to the specified file `/etc/resolv.conf` will be denied. + +- This approach minimizes the attack surface of the pods by limiting egress traffic strictly to defined endpoints, which helps in maintaining a secure network posture. + +- By securing `/etc/resolv.conf`, the policy effectively mitigates the risk of DNS spoofing or hijacking, which can lead to compromised network traffic and potential data leakage. + +---- + +### Network Policy + +#### Prereq + +- For the `NetworkPolicy` to work, one should have a [Calico-CNI](https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises) installed in their cluster. + + +#### Policy Description + +- The `NetworkPolicy` created as a result of this intent includes specific egress rules that align with the intent’s goals. It reflects the desire to secure and control DNS traffic. + +- By specifying the egress rules to allow traffic to the kube-dns service, the policy ensures that pods can resolve DNS queries through the designated DNS service within the cluster. + +- By allowing access to kube-dns, the intent ensures that the pods can perform DNS lookups necessary for their operation without exposing them to arbitrary external IPs. + +- This approach minimizes the attack surface of the pods by limiting egress traffic strictly to defined endpoints, which helps in maintaining a secure network posture. + +- It ensures compliance with security policies that require minimal exposure of services to the outside world while allowing necessary functionality. + + +---- + +Together with the NetworkPolicy, this KubeArmorPolicy creates a layered defense strategy: + +- The NetworkPolicy restricts egress traffic to legitimate DNS services and IP addresses. + +- The KubeArmorPolicy protects the integrity of the DNS configuration itself, ensuring that no unauthorized processes can alter it. \ No newline at end of file diff --git a/docs/intents/escape-to-host.md b/docs/intents/escape-to-host.md new file mode 100644 index 00000000..c54821cf --- /dev/null +++ b/docs/intents/escape-to-host.md @@ -0,0 +1,77 @@ +## Objective + +- The escapeToHost intent focuses on preventing potential vulnerabilities that could allow an attacker to escape from a containerized environment and gain unauthorized access to the host machine. This is a crucial aspect of container security, as it aims to ensure that the isolation provided by containers is not compromised. + +- The goal of the escapeToHost intent is to enforce strict security standards on pods to prevent any breaches that could lead to container escape. This is particularly relevant for applications, which may have extensive network exposure. + +- The escapeToHost intent is also focused on preventing attackers from breaching container boundaries and gaining access to the host machine. This is a critical aspect of securing containerized environments, as it helps maintain the isolation provided by containers, thereby protecting the host and other workloads from potential compromises. + +**Note** : For the escapeToHost intent one needs to have either [nimbus-kyverno](../../deployments/nimbus-kyverno/Readme.md) adapter or [nimbus-kubearmor](../../deployments/nimbus-kubrarmor/Readme.md) or both adapters running in their cluster. (it is recommended to have both the adapters in place to make sure that the intent work with its full potential). 
To install the complete suite with all the adapters, follow the steps mentioned [here](../getting-started.md#nimbus).
+
+## Policy Creation
+
+The escapeToHost intent results in a `KyvernoPolicy` and a set of `KubeArmorPolicy` resources. Below is the behaviour of the intent in terms of policies:
+
+### Kyverno Policy
+
+#### Policy Description
+
+- The KyvernoPolicy defined here implements rules that align with the escape prevention objectives:
+
+  - **Admission Control:** The policy is applied at admission time, meaning it validates pod creation requests before they are accepted by the Kubernetes API.
+
+  - **Background Scanning:** The policy also runs in the background to continuously validate existing pods against the specified security criteria and to provide PolicyReports for them.
+
+- The policy enforces a pod security standard at the baseline level, which includes basic security measures to mitigate risks associated with container escape.
+
+- By requiring a baseline pod security level, the policy enforces essential security practices, such as ensuring that pods do not run as root or have excessive privileges. This helps mitigate the risk of attackers exploiting container vulnerabilities to gain access to the host system.
+
+- Users can change the pod security level by specifying any level supported by the Kyverno policy in the intent params:
+
+  ```
+  params:
+    psaLevel: ["restricted"]
+  ```
+
+- The `escapeToHost` intent and corresponding policy work together to establish a strong security posture for the application. By enforcing pod security standards, the policy reduces the risk of container escape, which is critical for maintaining the integrity of the host system.
+
+- The use of admission control ensures that potential security issues are addressed before they can affect the running environment.
+
+### KubeArmor Policy
+
+#### Prereq
+
+- For the `KubeArmorPolicy` to work, one should have [BPF-LSM](https://github.com/kubearmor/KubeArmor/blob/main/getting-started/FAQ.md#checking-and-enabling-support-for-bpf-lsm) enabled for each node in their cluster.
+
+#### Policy Description
+
+The intent is implemented through three distinct KubeArmorPolicy configurations, each addressing different aspects of container security:
+
+- **Disallow Capabilities Policy**
+
+  - Capabilities: The policy blocks specific Linux capabilities that are critical for system-level access:
+
+    - `sys_admin`: Allows a process to perform administrative tasks.
+    - `sys_ptrace`: Enables processes to observe and control other processes.
+    - `sys_module`: Allows loading and unloading of kernel modules.
+    - `dac_read_search` and `dac_override`: Affect discretionary access control.
+
+- **Disallow Chroot Policy**
+
+  - Process: Specifically blocks access to chroot binaries, which can be used to change the root filesystem of a process. This could allow an attacker to escape from their container environment.
+
+  - Match Paths: Includes `/usr/sbin/chroot` and `/sbin/chroot`.
+
+- **Disallow Deployment Tools Policy**
+
+  - Process: Blocks access to various package management and build tools that could be exploited to manipulate the container or the host.
+
+  - Match Paths: Includes package managers like `apt`, `yum`, `dnf`, `zypper`, as well as build tools like `make`, and network utilities like `curl` and `wget`.
+
+- Each policy is designed to actively block actions that could lead to a compromise of the host system.
By preventing access to critical capabilities and processes, these policies effectively reduce the risk of container escape. + +- These policies together create a robust security mechanism to protect the host from potential breaches originating from the application. They ensure that even if an attacker manages to compromise a container, their ability to impact the host is severely limited. diff --git a/docs/intents/exploit-pfa.md b/docs/intents/exploit-pfa.md new file mode 100644 index 00000000..0d3c2123 --- /dev/null +++ b/docs/intents/exploit-pfa.md @@ -0,0 +1,38 @@ +## Objective + +- The `exploit-pfa` (Prevent Exploitation of Public-Facing Applications) intent is focused on securing applications that are exposed to the internet. This intent aims to mitigate risks associated with malicious actors potentially exploiting vulnerabilities in public-facing applications, such as web servers, to execute harmful actions. + +- The main goal of the exploit-pfa intent is to prevent the execution of unauthorized or harmful binaries that could be uploaded to or executed from temporary or log directories. This is particularly relevant for applications like nginx, which may handle external requests and could be targets for exploitation. + +**Note** : For the exploit-pfa intent one needs to have [nimbus-kubearmor](../../deployments/nimbus-kubearmor/Readme.md) adapter running in their cluster. To install the complete suite with all the adapters pls follow the steps mentioned [here](../getting-started.md#nimbus) + +## Policy Creation + +The exploit-pfa intent results in `KubeArmorPolicy`. Below is the behaviour of intent in terms of policy: + +### KubeArmorPolicy + +#### Prereq + +- For the `KubeArmorPolicy` to work, one should have a [BPF-LSM](https://github.com/kubearmor/KubeArmor/blob/main/getting-started/FAQ.md#checking-and-enabling-support-for-bpf-lsm) enabled for each node in their cluster. + +#### Policy Description + +- The KubeArmorPolicy created here implements strict controls on where executables can be run within the containerized environment + +- The policy is set to Block, any attempts to execute binaries from specified directories will be denied. + +- Process Matching: + + - `/var/tmp/` + - `/tmp/` + - `/var/log/` + - `/app/logs/` + - `/logs/` + - `/etc/` + +- All these directories are marked as recursive, meaning that the policy applies to all files and subdirectories within them. This comprehensive approach helps ensure that any harmful binaries, regardless of their specific location, cannot be executed. + +- By blocking execution from these critical directories, the policy significantly reduces the attack surface for the application. This prevents attackers from executing potentially malicious scripts or binaries that could lead to data breaches or further compromises. + +- This policy serves as an additional layer of defense, particularly important for applications exposed to the internet, which are more vulnerable to exploitation. \ No newline at end of file diff --git a/docs/intents/pkg-mgr-execution.md b/docs/intents/pkg-mgr-execution.md new file mode 100644 index 00000000..8c56cc52 --- /dev/null +++ b/docs/intents/pkg-mgr-execution.md @@ -0,0 +1,21 @@ +## Objective + +- The `pkg-mgr-execution` intent likely aims to prevent unauthorized or potentially harmful package management operations. This is critical in a Kubernetes environment, where package managers can be exploited by adversaries to install malicious software or manipulate existing applications. 
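+
+Judging by the supported-intents table, this intent maps to the `swDeploymentTools` intent ID; treat that mapping as an assumption and confirm it against the adapter ID pool (`pkg/adapter/idpool`). A minimal sketch of requesting it, with illustrative resource names, description, and labels:
+
+```yaml
+apiVersion: intent.security.nimbus.com/v1alpha1
+kind: SecurityIntent
+metadata:
+  name: block-pkg-mgr-execution
+spec:
+  intent:
+    id: swDeploymentTools
+    description: "Block package managers and other deployment tools inside containers"
+    action: Block
+---
+apiVersion: intent.security.nimbus.com/v1alpha1
+kind: SecurityIntentBinding
+metadata:
+  name: block-pkg-mgr-execution-binding
+spec:
+  intents:
+    - name: block-pkg-mgr-execution
+  selector:
+    workloadSelector:
+      matchLabels:
+        app: prod
+```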
+
+**Note**: For the pkg-mgr-execution intent one needs to have the [nimbus-kubearmor](../../deployments/nimbus-kubearmor/Readme.md) adapter running in their cluster. To install the complete suite with all the adapters, follow the steps mentioned [here](../getting-started.md#nimbus).
+
+## Policy Creation
+
+The pkg-mgr-execution intent results in a `KubeArmorPolicy`. Below is the behaviour of the intent in terms of policy:
+
+### KubeArmorPolicy
+
+#### Prereq
+
+- For the `KubeArmorPolicy` to work, one should have [BPF-LSM](https://github.com/kubearmor/KubeArmor/blob/main/getting-started/FAQ.md#checking-and-enabling-support-for-bpf-lsm) enabled for each node in their cluster.
+
+#### Policy Description
+
+- The KubeArmorPolicy created here specifies that any attempt to execute certain package management commands will be blocked. This is a proactive security measure to prevent unauthorized changes to the system.
+
+- By blocking execution of these critical package management tools, the policy significantly reduces the attack surface for the application. This prevents attackers from executing potentially malicious scripts or binaries that could lead to data breaches or further compromises.
\ No newline at end of file
diff --git a/docs/intents/supportedIntents.md b/docs/intents/supportedIntents.md
new file mode 100644
index 00000000..ea551881
--- /dev/null
+++ b/docs/intents/supportedIntents.md
@@ -0,0 +1,18 @@
+# Supported Intents
+
+| IntentID | Parameters | Description |
+|----------|------------|-------------|
+| `dnsManipulation` | NA | An adversary can manipulate DNS requests to redirect network traffic, exfiltrate data, and potentially reveal end user activity. |
+| `swDeploymentTools` | NA | Adversaries may gain access to and use third-party software suites installed within an enterprise network, such as administration, monitoring, and deployment systems, to move laterally through the network. |
+| `assessTLS` | `schedule` | Assess the TLS configuration to ensure compliance with the security standards. |
+| `unAuthorizedSaTokenAccess` | NA | K8s mounts the service account token by default in each pod even if there is no app using it. Attackers use these service account tokens to do lateral movements. |
+| `escapeToHost` | Todo @Ved | |
+| `preventExecutionFromTempOrLogsFolders` | Todo @Ved | |
+| `denyExternalNetworkAccess` | Todo @Ved | |
+| `cocoWorkload` | Todo @Ved | |
+
+Here are the examples and tutorials:
+
+- [Namespace scoped](../../examples/namespaced)
+- [Cluster scoped](../../examples/clusterscoped)
+- [Detailed examples](../intents)
diff --git a/docs/quick-tutorials.md b/docs/quick-tutorials.md
index ac5a6fe5..0523282e 100644
--- a/docs/quick-tutorials.md
+++ b/docs/quick-tutorials.md
@@ -266,4 +266,4 @@ No resources found
 
 ## Next steps
 
 - Try out other [SecurityIntents](../examples/namespaced) and review the policy generation.
-- Checkout [Security Intents](https://github.com/5GSEC/security-intents).
+- Check out [Security Intents](https://github.com/5GSEC/security-intents). 
For description of intents, pls see [here](intents) diff --git a/examples/clusterscoped/escape-to-host-si-csib-with-params.yaml b/examples/clusterscoped/escape-to-host-si-csib-with-params.yaml index 6d9b7f10..3e82d76c 100644 --- a/examples/clusterscoped/escape-to-host-si-csib-with-params.yaml +++ b/examples/clusterscoped/escape-to-host-si-csib-with-params.yaml @@ -11,7 +11,7 @@ spec: description: "A attacker can breach container boundaries and can gain access to the host machine" action: Block params: - psa_level: ["restricted"] + psaLevel: ["restricted"] --- apiVersion: intent.security.nimbus.com/v1alpha1 kind: ClusterSecurityIntentBinding diff --git a/examples/namespaced/escape-to-host-with-params.yaml b/examples/namespaced/escape-to-host-with-params.yaml index f6a09a0b..a43903c6 100644 --- a/examples/namespaced/escape-to-host-with-params.yaml +++ b/examples/namespaced/escape-to-host-with-params.yaml @@ -11,7 +11,7 @@ spec: description: "A attacker can breach container boundaries and can gain access to the host machine" action: Block params: - psa_level: ["restricted"] + psaLevel: ["restricted"] --- apiVersion: intent.security.nimbus.com/v1alpha1 kind: SecurityIntentBinding diff --git a/examples/namespaced/virtual-patch-si-sib.yaml b/examples/namespaced/virtual-patch-si-sib.yaml new file mode 100644 index 00000000..31504de8 --- /dev/null +++ b/examples/namespaced/virtual-patch-si-sib.yaml @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright 2023 Authors of Nimbus + +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: SecurityIntent +metadata: + name: virtual-patch +spec: + intent: + id: virtualPatch + description: > + There might exist CVE's associated with certain images, adversaries might exploit these CVE and can cause potential threat, + to any production server. Check and apply virtual patch for a given set of CVEs as per a schedule + action: Block + params: + cveList: + - "CVE-2024-4439" + - "CVE-2024-27268" + schedule: ["0 23 * * SUN"] + +--- + +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: SecurityIntentBinding +metadata: + name: virtual-patch-binding +spec: + intents: + - name: virtual-patch + selector: + workloadSelector: + matchLabels: + app: prod \ No newline at end of file diff --git a/pkg/adapter/idpool/idpool.go b/pkg/adapter/idpool/idpool.go index c5d4f939..414250db 100644 --- a/pkg/adapter/idpool/idpool.go +++ b/pkg/adapter/idpool/idpool.go @@ -19,6 +19,7 @@ const ( CocoWorkload = "cocoWorkload" AssessTLS = "assessTLS" DenyENAccess = "denyExternalNetworkAccess" + VirtualPatch = "virtualPatch" ) // KaIds are IDs supported by KubeArmor. @@ -45,6 +46,7 @@ var NetPolIDs = []string{ var KyvIds = []string{ EscapeToHost, CocoWorkload, + VirtualPatch, } // k8tlsIds are IDs supported by k8tls. 
diff --git a/pkg/adapter/nimbus-kyverno/go.mod b/pkg/adapter/nimbus-kyverno/go.mod index 8627bb36..790eca3a 100644 --- a/pkg/adapter/nimbus-kyverno/go.mod +++ b/pkg/adapter/nimbus-kyverno/go.mod @@ -202,6 +202,7 @@ require ( github.com/puzpuzpuz/xsync/v2 v2.5.1 // indirect github.com/r3labs/diff v1.1.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/robfig/cron/v3 v3.0.1 github.com/sagikazarmark/locafero v0.3.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sassoftware/relic v7.2.1+incompatible // indirect diff --git a/pkg/adapter/nimbus-kyverno/go.sum b/pkg/adapter/nimbus-kyverno/go.sum index 10acb72c..481c93fd 100644 --- a/pkg/adapter/nimbus-kyverno/go.sum +++ b/pkg/adapter/nimbus-kyverno/go.sum @@ -1225,6 +1225,9 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5X github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA= github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= diff --git a/pkg/adapter/nimbus-kyverno/manager/manager.go b/pkg/adapter/nimbus-kyverno/manager/manager.go index 23e0167f..cbb5de08 100644 --- a/pkg/adapter/nimbus-kyverno/manager/manager.go +++ b/pkg/adapter/nimbus-kyverno/manager/manager.go @@ -59,6 +59,7 @@ func Run(ctx context.Context) { deletedKpCh := make(chan common.Request) go watcher.WatchKps(ctx, updatedKpCh, deletedKpCh) + for { select { case <-ctx.Done(): @@ -431,6 +432,9 @@ func createTriggerForKp(ctx context.Context, nameNamespace common.Request) { ObjectMeta: metav1.ObjectMeta{ Name: nameNamespace.Name + "-trigger-configmap", Namespace: nameNamespace.Namespace, + Labels: map[string]string { + "trigger" : "configmap", + }, }, Data: map[string]string{ "data": "dummy", diff --git a/pkg/adapter/nimbus-kyverno/processor/kcpbuilder.go b/pkg/adapter/nimbus-kyverno/processor/kcpbuilder.go index b13f8c37..7710c752 100644 --- a/pkg/adapter/nimbus-kyverno/processor/kcpbuilder.go +++ b/pkg/adapter/nimbus-kyverno/processor/kcpbuilder.go @@ -121,7 +121,7 @@ func clusterCocoRuntimeAddition(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1 } matchFilters = append(matchFilters, resourceFilter) } - } else if namespaces[0] == "*" && len(labels) == 0 { + } else if namespaces[0] == "*" && len(labels) == 0 { if len(excludeNamespaces) > 0 { resourceFilter = kyvernov1.ResourceFilter{ ResourceDescription: kyvernov1.ResourceDescription{ @@ -167,7 +167,7 @@ func clusterCocoRuntimeAddition(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1 }, Mutation: kyvernov1.Mutation{ Targets: []kyvernov1.TargetResourceSpec{ - kyvernov1.TargetResourceSpec{ + { ResourceSpec: kyvernov1.ResourceSpec{ APIVersion: "apps/v1", Kind: "Deployment", @@ -185,16 +185,16 @@ func 
clusterCocoRuntimeAddition(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1 } func clusterEscapeToHost(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1.Rule) kyvernov1.ClusterPolicy { - var psa_level api.Level = api.LevelBaseline + var psaLevel api.Level = api.LevelBaseline - if rule.Params["psa_level"] != nil { + if rule.Params["psaLevel"] != nil { - switch rule.Params["psa_level"][0] { + switch rule.Params["psaLevel"][0] { case "restricted": - psa_level = api.LevelRestricted + psaLevel = api.LevelRestricted default: - psa_level = api.LevelBaseline + psaLevel = api.LevelBaseline } } @@ -241,7 +241,7 @@ func clusterEscapeToHost(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1.Rule) } else if namespaces[0] == "*" && len(labels) > 0 { if len(excludeNamespaces) > 0 { resourceFilter = kyvernov1.ResourceFilter{ - ResourceDescription: kyvernov1.ResourceDescription { + ResourceDescription: kyvernov1.ResourceDescription{ Namespaces: excludeNamespaces, }, } @@ -296,7 +296,7 @@ func clusterEscapeToHost(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1.Rule) }, Validation: kyvernov1.Validation{ PodSecurity: &kyvernov1.PodSecurity{ - Level: psa_level, + Level: psaLevel, Version: "latest", }, }, diff --git a/pkg/adapter/nimbus-kyverno/processor/kpbuilder.go b/pkg/adapter/nimbus-kyverno/processor/kpbuilder.go index 8a559ad3..a21d9a7a 100644 --- a/pkg/adapter/nimbus-kyverno/processor/kpbuilder.go +++ b/pkg/adapter/nimbus-kyverno/processor/kpbuilder.go @@ -6,13 +6,18 @@ package processor import ( "context" "encoding/json" + "fmt" + "os" + "strconv" "strings" v1alpha1 "github.com/5GSEC/nimbus/api/v1alpha1" "github.com/5GSEC/nimbus/pkg/adapter/idpool" "github.com/5GSEC/nimbus/pkg/adapter/k8s" + "github.com/5GSEC/nimbus/pkg/adapter/nimbus-kyverno/utils" "github.com/go-logr/logr" kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1" + "github.com/robfig/cron/v3" "go.uber.org/multierr" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,25 +37,23 @@ func init() { func BuildKpsFrom(logger logr.Logger, np *v1alpha1.NimbusPolicy) []kyvernov1.Policy { // Build KPs based on given IDs var allkps []kyvernov1.Policy - admission := true background := true for _, nimbusRule := range np.Spec.NimbusRules { id := nimbusRule.ID if idpool.IsIdSupportedBy(id, "kyverno") { - kps, err := buildKpFor(id, np) + kps, err := buildKpFor(id, np, logger) if err != nil { logger.Error(err, "error while building kyverno policies") } for _, kp := range kps { - if id != "cocoWorkload" { + if id != "cocoWorkload" && id != "virtualPatch" { kp.Name = np.Name + "-" + strings.ToLower(id) } kp.Namespace = np.Namespace kp.Annotations = make(map[string]string) kp.Annotations["policies.kyverno.io/description"] = nimbusRule.Description - kp.Spec.Admission = &admission kp.Spec.Background = &background - + if nimbusRule.Rule.RuleAction == "Block" { kp.Spec.ValidationFailureAction = kyvernov1.ValidationFailureAction("Enforce") } else { @@ -68,21 +71,130 @@ func BuildKpsFrom(logger logr.Logger, np *v1alpha1.NimbusPolicy) []kyvernov1.Pol } // buildKpFor builds a KyvernoPolicy based on intent ID supported by Kyverno Policy Engine. 
-func buildKpFor(id string, np *v1alpha1.NimbusPolicy) ([]kyvernov1.Policy, error) { +func buildKpFor(id string, np *v1alpha1.NimbusPolicy, logger logr.Logger) ([]kyvernov1.Policy, error) { var kps []kyvernov1.Policy switch id { case idpool.EscapeToHost: - kps = append(kps, escapeToHost(np, np.Spec.NimbusRules[0].Rule)) + kps = append(kps, escapeToHost(np)) case idpool.CocoWorkload: kpols, err := cocoRuntimeAddition(np) if err != nil { return kps, err } kps = append(kps, kpols...) + case idpool.VirtualPatch: + kpols, err := virtualPatch(np, logger) + if err != nil { + return kps, err + } + kps = append(kps, kpols...) + watchCVES(np, logger) } return kps, nil } +func watchCVES(np *v1alpha1.NimbusPolicy, logger logr.Logger) { + rule := np.Spec.NimbusRules[0].Rule + schedule := "0 0 * * *" + if rule.Params["schedule"] != nil { + schedule = rule.Params["schedule"][0] + } + // Schedule the deletion of the Nimbus policy + c := cron.New() + _, err := c.AddFunc(schedule, func() { + logger.Info("Checking for CVE updates and updation of policies") + err := deleteNimbusPolicy(np, logger) + if err != nil { + logger.Error(err, "error while updating policies") + } + }) + if err != nil { + logger.Error(err, "error while adding the schedule to update policies") + os.Exit(1) + } + c.Start() + +} + +func deleteNimbusPolicy(np *v1alpha1.NimbusPolicy, logger logr.Logger) error { + nimbusPolicyGVR := schema.GroupVersionResource{Group: "intent.security.nimbus.com", Version: "v1alpha1", Resource: "nimbuspolicies"} + err := client.Resource(nimbusPolicyGVR).Namespace(np.Namespace).Delete(context.TODO(), np.Name, metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("failed to delete Nimbus Policy: %s", err.Error()) + } + logger.Info("Nimbus policy deleted successfully") + return nil +} + +func escapeToHost(np *v1alpha1.NimbusPolicy) kyvernov1.Policy { + rule := np.Spec.NimbusRules[0].Rule + var psaLevel api.Level = api.LevelBaseline + var matchResourceFilters []kyvernov1.ResourceFilter + + if rule.Params["psaLevel"] != nil { + + switch rule.Params["psaLevel"][0] { + case "restricted": + psaLevel = api.LevelRestricted + + default: + psaLevel = api.LevelBaseline + } + } + + labels := np.Spec.Selector.MatchLabels + + if len(labels) > 0 { + for key, value := range labels { + resourceFilter := kyvernov1.ResourceFilter{ + ResourceDescription: kyvernov1.ResourceDescription{ + Kinds: []string{ + "v1/Pod", + }, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + key: value, + }, + }, + }, + } + matchResourceFilters = append(matchResourceFilters, resourceFilter) + } + } else { + resourceFilter := kyvernov1.ResourceFilter{ + ResourceDescription: kyvernov1.ResourceDescription{ + Kinds: []string{ + "v1/Pod", + }, + }, + } + matchResourceFilters = append(matchResourceFilters, resourceFilter) + } + + background := true + kp := kyvernov1.Policy{ + Spec: kyvernov1.Spec{ + Background: &background, + Rules: []kyvernov1.Rule{ + { + Name: "pod-security-standard", + MatchResources: kyvernov1.MatchResources{ + Any: matchResourceFilters, + }, + Validation: kyvernov1.Validation{ + PodSecurity: &kyvernov1.PodSecurity{ + Level: psaLevel, + Version: "latest", + }, + }, + }, + }, + }, + } + + return kp +} + func cocoRuntimeAddition(np *v1alpha1.NimbusPolicy) ([]kyvernov1.Policy, error) { var kps []kyvernov1.Policy var errs []error @@ -93,7 +205,7 @@ func cocoRuntimeAddition(np *v1alpha1.NimbusPolicy) ([]kyvernov1.Policy, error) runtimeClass := "kata-clh" params := np.Spec.NimbusRules[0].Rule.Params["runtimeClass"] 
if params != nil { - runtimeClass = params[0] + runtimeClass = params[0] } patchStrategicMerge := map[string]interface{}{ "spec": map[string]interface{}{ @@ -239,75 +351,322 @@ func cocoRuntimeAddition(np *v1alpha1.NimbusPolicy) ([]kyvernov1.Policy, error) return kps, multierr.Combine(errs...) } -func escapeToHost(np *v1alpha1.NimbusPolicy, rule v1alpha1.Rule) kyvernov1.Policy { - - var psa_level api.Level = api.LevelBaseline - var matchResourceFilters []kyvernov1.ResourceFilter - - if rule.Params["psa_level"] != nil { +func virtualPatch(np *v1alpha1.NimbusPolicy, logger logr.Logger) ([]kyvernov1.Policy, error) { + rule := np.Spec.NimbusRules[0].Rule + requiredCVES := rule.Params["cveList"] + var kps []kyvernov1.Policy + resp, err := utils.FetchVirtualPatchData[[]map[string]any]() + if err != nil { + return kps, err + } + for _, currObj := range resp { + image := currObj["image"].(string) + cves := currObj["cves"].([]any) + for _, obj := range cves { + cveData := obj.(map[string]any) + cve := cveData["cve"].(string) + if utils.Contains(requiredCVES, cve) { + // create generate kyverno policies which will generate the native virtual patch policies based on the CVE's + karmorPolCount := 1 + kyvPolCount := 1 + netPolCount := 1 + virtualPatch := cveData["virtual_patch"].([]any) + for _, policy := range virtualPatch { + pol := policy.(map[string]any) + policyData, ok := pol["karmor"].(map[string]any) + if ok { + karmorPol, err := generatePol("karmor", cve, image, np, policyData, karmorPolCount, logger) + if err != nil { + logger.V(2).Error(err, "Error while generating karmor policy") + } else { + kps = append(kps, karmorPol) + karmorPolCount += 1 + } - switch rule.Params["psa_level"][0] { - case "restricted": - psa_level = api.LevelRestricted + } + policyData, ok = pol["kyverno"].(map[string]any) + if ok { + kyvernoPol, err := generatePol("kyverno", cve, image, np, policyData, kyvPolCount, logger) + if err != nil { + logger.V(2).Error(err, "Error while generating kyverno policy") + } else { + kps = append(kps, kyvernoPol) + kyvPolCount += 1 + } + } - default: - psa_level = api.LevelBaseline + policyData, ok = pol["netpol"].(map[string]any) + if ok { + netPol, err := generatePol("netpol", cve, image, np, policyData, netPolCount, logger) + if err != nil { + logger.V(2).Error(err, "Error while generating network policy") + } else { + kps = append(kps, netPol) + netPolCount += 1 + } + } + } + } } } + return kps, nil +} + +func addManagedByAnnotation(kp *kyvernov1.Policy) { + kp.Annotations["app.kubernetes.io/managed-by"] = "nimbus-kyverno" +} +func generatePol(polengine string, cve string, image string, np *v1alpha1.NimbusPolicy, policyData map[string]any, count int, logger logr.Logger) (kyvernov1.Policy, error) { + var pol kyvernov1.Policy labels := np.Spec.Selector.MatchLabels + cve = strings.ToLower(cve) + uid := np.ObjectMeta.GetUID() + ownerShipList := []any{ + map[string]any{ + "apiVersion": "intent.security.nimbus.com/v1alpha1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "NimbusPolicy", + "name": np.GetName(), + "uid": uid, + }, + } - if len(labels) > 0 { + preConditionMap := map[string]any{ + "all": []any{ + map[string]any{ + "key": image, + "operator": "AnyIn", + "value": "{{ request.object.spec.containers[].image }}", + }, + }, + } + preconditionBytes, _ := json.Marshal(preConditionMap) + + getPodName := kyvernov1.ContextEntry{ + Name: "podName", + Variable: &kyvernov1.Variable{ + JMESPath: "request.object.metadata.name", + }, + } + + metadataMap := 
policyData["metadata"].(map[string]any) + + // set OwnerShipRef for the generatedPol + + metadataMap["ownerReferences"] = ownerShipList + + specMap := policyData["spec"].(map[string]any) + + jmesPathContainerNameQuery := "request.object.spec.containers[?(@.image=='" + image + "')].name | [0]" + + delete(policyData, "apiVersion") + delete(policyData, "kind") + + generatorPolicyName := np.Name + "-" + cve + "-" + polengine + "-" + strconv.Itoa(count) + + // kubearmor policy generation + + if polengine == "karmor" { + generatedPolicyName := metadataMap["name"].(string) + "-{{ podName }}" + selector := specMap["selector"].(map[string]any) + delete(selector, "matchLabels") + selectorLabels := make(map[string]any) for key, value := range labels { - resourceFilter := kyvernov1.ResourceFilter { - ResourceDescription: kyvernov1.ResourceDescription{ - Kinds: []string{ - "v1/Pod", - }, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - key: value, + selectorLabels[key] = value + } + selectorLabels["kubearmor.io/container.name"] = "{{ containerName }}" + selector["matchLabels"] = selectorLabels + + policyBytes, err := json.Marshal(policyData) + if err != nil { + return pol, err + } + pol = kyvernov1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: generatorPolicyName, + }, + Spec: kyvernov1.Spec{ + GenerateExisting: true, + Rules: []kyvernov1.Rule{ + { + Name: cve + "virtual-patch-karmor", + MatchResources: kyvernov1.MatchResources{ + Any: kyvernov1.ResourceFilters{ + { + ResourceDescription: kyvernov1.ResourceDescription{ + Kinds: []string{ + "v1/Pod", + }, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + }, + }, + }, + }, + RawAnyAllConditions: &v1.JSON{Raw: preconditionBytes}, + Context: []kyvernov1.ContextEntry{ + { + Name: "containerName", + Variable: &kyvernov1.Variable{ + JMESPath: jmesPathContainerNameQuery, + }, + }, + getPodName, + }, + Generation: kyvernov1.Generation{ + ResourceSpec: kyvernov1.ResourceSpec{ + APIVersion: "security.kubearmor.com/v1", + Kind: "KubeArmorPolicy", + Name: generatedPolicyName, + Namespace: np.GetNamespace(), + }, + RawData: &v1.JSON{Raw: policyBytes}, }, }, }, - } - matchResourceFilters = append(matchResourceFilters, resourceFilter) + }, } - } else { - resourceFilter := kyvernov1.ResourceFilter{ - ResourceDescription: kyvernov1.ResourceDescription{ - Kinds: []string{ - "v1/Pod", + } + + // kyverno policy generation + + if polengine == "kyverno" { + + generatedPolicyName := metadataMap["name"].(string) + selectorMap := map[string]any{ + "matchLabels": labels, + } + + kindMap := map[string]any{ + "kinds": []any{ + "Pod", + }, + "selector": selectorMap, + } + + newMatchMap := map[string]any{ + "any": []any{ + map[string]any{ + "resources": kindMap, }, }, } - matchResourceFilters = append(matchResourceFilters, resourceFilter) - } + rulesMap := specMap["rules"].([]any) + rule := rulesMap[0].(map[string]any) - background := true - kp := kyvernov1.Policy{ - Spec: kyvernov1.Spec{ - Background: &background, - Rules: []kyvernov1.Rule{ - { - Name: "pod-security-standard", - MatchResources: kyvernov1.MatchResources{ - Any: matchResourceFilters, - }, - Validation: kyvernov1.Validation{ - PodSecurity: &kyvernov1.PodSecurity{ - Level: psa_level, - Version: "latest", + // adding resources as Pod and ommitting all the incoming resource types + delete(rule, "match") + rule["match"] = newMatchMap + + // appending the image matching precondition to the existing preconditions + preCndMap := rule["preconditions"].(map[string]any) + conditionsList, 
ok := preCndMap["any"].([]any) + if ok { + preConditionMap["all"] = append(preConditionMap["all"].([]any), conditionsList...) + } + + delete(rule, "preconditions") + + rule["preconditions"] = preConditionMap + + policyBytes, err := json.Marshal(policyData) + if err != nil { + return pol, err + } + + pol = kyvernov1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: generatorPolicyName, + }, + Spec: kyvernov1.Spec{ + GenerateExisting: true, + Rules: []kyvernov1.Rule{ + { + Name: cve + "-virtual-patch-kyverno", + MatchResources: kyvernov1.MatchResources{ + Any: kyvernov1.ResourceFilters{ + { + ResourceDescription: kyvernov1.ResourceDescription{ + Kinds: []string{ + "v1/Pod", + }, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + }, + }, + }, + }, + Generation: kyvernov1.Generation{ + ResourceSpec: kyvernov1.ResourceSpec{ + APIVersion: "kyverno.io/v1", + Kind: "Policy", + Name: generatedPolicyName, + Namespace: np.GetNamespace(), + }, + RawData: &v1.JSON{Raw: policyBytes}, }, }, }, }, - }, + } } - return kp -} + // network policy generation -func addManagedByAnnotation(kp *kyvernov1.Policy) { - kp.Annotations["app.kubernetes.io/managed-by"] = "nimbus-kyverno" + if polengine == "netpol" { + generatedPolicyName := metadataMap["name"].(string) + selector := specMap["podSelector"].(map[string]any) + delete(selector, "matchLabels") + selector["matchLabels"] = labels + + policyBytes, err := json.Marshal(policyData) + + if err != nil { + return pol, err + } + pol = kyvernov1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: generatorPolicyName, + }, + Spec: kyvernov1.Spec{ + GenerateExisting: true, + Rules: []kyvernov1.Rule{ + { + Name: cve + "virtual-patch-netpol", + MatchResources: kyvernov1.MatchResources{ + Any: kyvernov1.ResourceFilters{ + { + ResourceDescription: kyvernov1.ResourceDescription{ + Kinds: []string{ + "v1/Pod", + }, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + }, + }, + }, + }, + RawAnyAllConditions: &v1.JSON{Raw: preconditionBytes}, + Context: []kyvernov1.ContextEntry{ + getPodName, + }, + Generation: kyvernov1.Generation{ + ResourceSpec: kyvernov1.ResourceSpec{ + APIVersion: "networking.k8s.io/v1", + Kind: "NetworkPolicy", + Name: generatedPolicyName, + Namespace: np.GetNamespace(), + }, + RawData: &v1.JSON{Raw: policyBytes}, + }, + }, + }, + }, + } + } + return pol, nil } diff --git a/pkg/adapter/nimbus-kyverno/utils/utils.go b/pkg/adapter/nimbus-kyverno/utils/utils.go index 73ac5bb6..c619658d 100644 --- a/pkg/adapter/nimbus-kyverno/utils/utils.go +++ b/pkg/adapter/nimbus-kyverno/utils/utils.go @@ -4,8 +4,11 @@ package utils import ( + "encoding/json" "fmt" + "os" "reflect" + "slices" "strings" kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1" @@ -14,6 +17,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +var VirtualPatchData []map[string]any + func GetGVK(kind string) string { // Map to store the mappings of kinds to their corresponding API versions kindToAPIVersion := map[string]string{ @@ -122,3 +127,42 @@ func Title(input string) string { return toTitle.String(input) } + +func FetchVirtualPatchData[T any]()(T, error) { + var out T + // Open the JSON file + file, err := os.Open("../../../vp.json") + if err != nil { + return out, err + } + defer file.Close() + + // Read the file contents + bytes, err := os.ReadFile("../../../vp.json") + if err != nil { + return out, err + } + + err = json.Unmarshal(bytes, &out) + if err != nil { + return out, err + } + + return out, nil +} + +func Contains(slice []string, value string) bool { + 
return slices.Contains(slice, value) +} + +func ParseImageString(imageString string) (string, string) { + parts := strings.SplitN(imageString, ":", 2) + repository := parts[0] + tag := "latest" // Default tag + + if len(parts) > 1 { + tag = parts[1] + } + + return repository, tag +} diff --git a/pkg/adapter/nimbus-kyverno/watcher/kpwatcher.go b/pkg/adapter/nimbus-kyverno/watcher/kpwatcher.go index 9ddc0522..9bfc7e0b 100644 --- a/pkg/adapter/nimbus-kyverno/watcher/kpwatcher.go +++ b/pkg/adapter/nimbus-kyverno/watcher/kpwatcher.go @@ -11,6 +11,7 @@ import ( "github.com/5GSEC/nimbus/pkg/adapter/common" "github.com/5GSEC/nimbus/pkg/adapter/k8s" "github.com/5GSEC/nimbus/pkg/adapter/nimbus-kyverno/utils" + adapterutil "github.com/5GSEC/nimbus/pkg/adapter/util" kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -122,5 +123,3 @@ func WatchKps(ctx context.Context, updatedKpCh, deletedKpCh chan common.Request) logger.Info("KyvernoPolicy watcher started") informer.Run(ctx.Done()) } - - diff --git a/virtual_patch_si.yaml b/virtual_patch_si.yaml new file mode 100644 index 00000000..fdfccb78 --- /dev/null +++ b/virtual_patch_si.yaml @@ -0,0 +1,13 @@ +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: SecurityIntent +metadata: + name: virtual-patch +spec: + intent: + id: virtualPatch + description: "Check and apply virtual patch for a given set of CVEs as per a schedule" + action: Block + params: + cve_list: + - "CVE-2024-4439" + - "CVE-2024-27268" \ No newline at end of file diff --git a/virtual_patch_sib.yaml b/virtual_patch_sib.yaml new file mode 100644 index 00000000..4596f3a1 --- /dev/null +++ b/virtual_patch_sib.yaml @@ -0,0 +1,11 @@ +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: SecurityIntentBinding +metadata: + name: virtual-patch-binding +spec: + intents: + - name: virtual-patch + selector: + workloadSelector: + matchLabels: + app: prod \ No newline at end of file diff --git a/vp.json b/vp.json new file mode 100644 index 00000000..1b44e807 --- /dev/null +++ b/vp.json @@ -0,0 +1,169 @@ +[ + { + "image": "nginx:latest", + "cves": [ + { + "cve": "CVE-2024-4439", + "virtual_patch": [ + { + "karmor": { + "apiVersion": "security.kubearmor.com/v1", + "kind": "KubeArmorPolicy", + "metadata": { + "name": "block-pkg-mgmt-tools-exec" + }, + "spec": { + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "process": { + "matchPaths": [ + { + "path": "/usr/bin/apt" + }, + { + "path": "/usr/bin/apt-get" + } + ] + }, + "action": "Block" + } + } + }, + { + "kyverno": { + "apiVersion": "kyverno.io/v1", + "kind": "ClusterPolicy", + "name": "CVE_NUMBER-Virtual-Patch-Kyverno", + "metadata": { + "name": "disallow-latest-tag" + }, + "spec": { + "validationFailureAction": "Enforce", + "background": true, + "rules": [ + { + "name": "validate-image-tag", + "match": { + "any": [ + { + "resources": { + "kinds": [ + "Pod" + ], + "selector": { + "matchLabels": { + "app": "test" + } + } + } + + } + ] + }, + "preconditions": { + "all": [ + { + "key": "busybox", + "operator": "AnyIn", + "value": "{{ images.containers.*.name }}" + } + ] + }, + "validate": { + "message": "Using a mutable image tag e.g. 
'latest' is not allowed.", + "pattern": { + "spec": { + "containers": [ + { + "image": "!*:latest" + } + ] + } + } + } + } + ] + } + } + }, + { + "netpol": { + "apiVersion": "networking.k8s.io/v1", + "kind": "NetworkPolicy", + "metadata": { + "name": "test-network-policy" + }, + "spec": { + "podSelector": { + "matchLabels": { + "role": "db", + "app": "dsfsdf" + } + }, + "policyTypes": [ + "Ingress", + "Egress" + ], + "ingress": [ + { + "from": [ + { + "ipBlock": { + "cidr": "172.17.0.0/16", + "except": [ + "172.17.1.0/24" + ] + } + }, + { + "namespaceSelector": { + "matchLabels": { + "project": "myproject" + } + } + }, + { + "podSelector": { + "matchLabels": { + "role": "frontend" + } + } + } + ], + "ports": [ + { + "protocol": "TCP", + "port": 6379 + } + ] + } + ], + "egress": [ + { + "to": [ + { + "ipBlock": { + "cidr": "10.0.0.0/24" + } + } + ], + "ports": [ + { + "protocol": "TCP", + "port": 5978 + } + ] + } + ] + } + } + } + ] + } + ] +} +] +