Links
--
- https://www.redhat.com/en/blog/openshift-and-network-security-zones-coexistence-approaches
- https://docs.cilium.io/en/latest/network/l2-announcements/
- https://docs.cilium.io/en/latest/network/egress-gateway/#id1
- https://docs.cilium.io/en/latest/network/servicemesh/ingress/#gs-ingress
- https://docs.cilium.io/en/latest/network/concepts/ipam/multi-pool/
- https://docs.cilium.io/en/latest/network/kubernetes/ipam-multi-pool/#enable-multi-pool-ipam-mode
- https://stackoverflow.com/questions/52487333/how-to-assign-a-namespace-to-certain-nodes

Install Cilium CNI
--
- with L2 Announcements enabled
- with Egress Gateway enabled
- with Multi-Pool IPAM enabled

Configuration
--
- Create two LB pools (DMZ and LAN)
- Create two Ingress Controllers within their LB pools (DMZ and LAN)
- Create two Cilium Egress Gateways within their LB pools (DMZ and LAN)
- Create network policies for ingress/egress communication separation
- Create two IP pools (DMZ and LAN) for network separation in Kubernetes

The IP pools are walked through step by step below; a hedged sketch of the LB pool, L2 announcement, egress gateway, and network policy objects follows this list.

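None of the names, labels, or CIDRs in this sketch come from the cluster in this note: the dmz pool name, the zone: dmz label, 10.30.0.0/24, and the egress IP are placeholders, and the LAN side would mirror the same objects with its own labels and ranges. Also note that current Cilium releases use spec.blocks in CiliumLoadBalancerIPPool, while older ones used spec.cidrs. With those caveats, a minimal sketch:

cat <<EOF | kubectl apply -f -
# LB pool for the DMZ zone: LoadBalancer Services labeled zone=dmz get their IP from here
apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
  name: dmz
spec:
  blocks:
    - cidr: 10.30.0.0/24
  serviceSelector:
    matchLabels:
      zone: dmz
---
# Announce those LB IPs on L2 (requires l2announcements.enabled=true from the values below)
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
  name: dmz-l2
spec:
  serviceSelector:
    matchLabels:
      zone: dmz
  loadBalancerIPs: true
---
# Route egress traffic of zone=dmz pods through one gateway node and source IP
# (requires egressGateway.enabled=true, bpf.masquerade=true and kube-proxy replacement);
# the egressIP must exist on an interface of the selected node
apiVersion: cilium.io/v2
kind: CiliumEgressGatewayPolicy
metadata:
  name: dmz-egress
spec:
  selectors:
    - podSelector:
        matchLabels:
          zone: dmz
  destinationCIDRs:
    - 0.0.0.0/0
  egressGateway:
    nodeSelector:
      matchLabels:
        kubernetes.io/hostname: alma8.mydomain.intra
    egressIP: 10.30.0.100
---
# Only endpoints labeled zone=dmz may talk to other zone=dmz endpoints
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: dmz-isolation
  namespace: default
spec:
  endpointSelector:
    matchLabels:
      zone: dmz
  ingress:
    - fromEndpoints:
        - matchLabels:
            zone: dmz
EOF
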
Allow workloads to schedule on the control-plane node(s) (single-node/lab setup) by removing the taints:

kubectl taint nodes --all node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint nodes --all node-role.kubernetes.io/master:NoSchedule-
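To confirm the taints are gone:

kubectl describe nodes | grep -i taints
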
kubeProxyReplacement: true

k8sServiceHost: 10.0.2.15
k8sServicePort: 6443
containerRuntime:
  integration: containerd
rollOutCiliumPods: true
priorityClassName: system-cluster-critical

ipv4:
  enabled: true
ipv6:
  enabled: false

# L2 LoadBalancer service
l2announcements:
  enabled: true

# Policy audit log
config:
  policyAuditMode: true

# egressGateway
egressGateway:
  enabled: true
bpf:
  masquerade: true
l7Proxy: false

endpointStatus:
  enabled: true
  status: policy

dashboards:
  enabled: false
  namespace: "monitoring-system"
  annotations:
    grafana_folder: "cilium"

hubble:
  metrics:
    enableOpenMetrics: true
    enabled:
      - dns
      - drop
      - tcp
      - flow:sourceContext=workload-name|reserved-identity;destinationContext=workload-name|reserved-identity
      - port-distribution
      - icmp
      - kafka:labelsContext=source_namespace,source_workload,destination_namespace,destination_workload,traffic_direction;sourceContext=workload-name|reserved-identity;destinationContext=workload-name|reserved-identity
      - policy:sourceContext=app|workload-name|pod|reserved-identity;destinationContext=app|workload-name|pod|dns|reserved-identity;labelsContext=source_namespace,destination_namespace
      - httpV2:exemplars=true;labelsContext=source_ip,source_namespace,source_workload,destination_ip,destination_namespace,destination_workload,traffic_direction
    serviceMonitor:
      enabled: false
    dashboards:
      enabled: false
      namespace: "monitoring-system"
      annotations:
        grafana_folder: "cilium"

  ui:
    enabled: true
    replicas: 1
    ingress:
      enabled: true
      hosts:
        - hubble.k8s.intra
      annotations:
        kubernetes.io/ingress.class: nginx
        cert-manager.io/cluster-issuer: ca-issuer
      tls:
        - secretName: hubble-ingress-tls
          hosts:
            - hubble.k8s.intra
    tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
        effect: "NoSchedule"
      - key: "node-role.kubernetes.io/control-plane"
        operator: "Exists"
        effect: "NoSchedule"
    backend:
      resources:
        limits:
          cpu: 60m
          memory: 300Mi
        requests:
          cpu: 20m
          memory: 64Mi
    frontend:
      resources:
        limits:
          cpu: 1000m
          memory: 1024M
        requests:
          cpu: 100m
          memory: 64Mi
    proxy:
      resources:
        limits:
          cpu: 1000m
          memory: 1024M
        requests:
          cpu: 100m
          memory: 64Mi

  relay:
    enabled: true
    tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
        effect: "NoSchedule"
      - key: "node-role.kubernetes.io/control-plane"
        operator: "Exists"
        effect: "NoSchedule"
    resources:
      limits:
        cpu: 100m
        memory: 500Mi
    prometheus:
      enabled: true
      serviceMonitor:
        enabled: false

operator:
  replicas: 1
  resources:
    limits:
      cpu: 1000m
      memory: 1Gi
    requests:
      cpu: 100m
      memory: 128Mi
  prometheus:
    enabled: true
    serviceMonitor:
      enabled: false
  dashboards:
    enabled: false
    namespace: "monitoring-system"
    annotations:
      grafana_folder: "cilium"

ipam:
  mode: "multi-pool"
  operator:
    clusterPoolIPv4PodCIDR: "10.43.0.0/16"
    clusterPoolIPv4MaskSize: 24
    clusterPoolIPv6PodCIDR: "fd00::/104"
    clusterPoolIPv6MaskSize: 120
    autoCreateCiliumPodIPPools:
      default:
        ipv4:
          cidrs:
            - 10.10.0.0/16
          maskSize: 27

routingMode: native
autoDirectNodeRoutes: true
endpointRoutes:
  enabled: true

ipv4NativeRoutingCIDR: 10.0.0.0/8
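
These values follow the Cilium Helm chart layout. Assuming they are saved as values.yaml, the install could look like this (pick a chart version that matches your cluster; the status check at the end is optional and requires the Cilium CLI):

helm repo add cilium https://helm.cilium.io/
helm repo update
helm upgrade --install cilium cilium/cilium \
  --namespace kube-system \
  -f values.yaml

# optional: wait until the datapath reports healthy
cilium status --wait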
Create two pod IP pools (default and damz)
--
These are CiliumPodIPPool objects (pod IP pools, not the LB pools above). The default pool was created automatically by ipam.operator.autoCreateCiliumPodIPPools in the Helm values:

kubectl get ciliumpodippool default -o yaml
apiVersion: cilium.io/v2alpha1
kind: CiliumPodIPPool
metadata:
  creationTimestamp: "2024-05-15T11:00:27Z"
  generation: 1
  name: default
  resourceVersion: "3188"
  uid: ce4288fa-e458-4ed6-b03c-c46af24e2a92
spec:
  ipv4:
    cidrs:
      - 10.10.0.0/16
    maskSize: 27

Create the damz pool:

cat <<EOF | kubectl apply -f -
apiVersion: cilium.io/v2alpha1
kind: CiliumPodIPPool
metadata:
  name: damz
spec:
  ipv4:
    cidrs:
      - 10.20.0.0/16
    maskSize: 27
EOF

kubectl get ciliumpodippools
NAME      AGE
damz      26s
default   66m

Test the two IP pools with sample apps; the nginx-damz deployment selects the damz pool via the ipam.cilium.io/ip-pool annotation:
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-default
  namespace: default
spec:
  selector:
    matchLabels:
      app: nginx-default
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx-default
    spec:
      containers:
        - name: nginx
          image: nginx:1.25.1
          ports:
            - containerPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-damz
  namespace: default
spec:
  selector:
    matchLabels:
      app: nginx-damz
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx-damz
      annotations:
        ipam.cilium.io/ip-pool: damz
    spec:
      containers:
        - name: nginx
          image: nginx:1.25.1
          ports:
            - containerPort: 80
EOF

kubectl get po -o wide
NAME                             READY   STATUS    RESTARTS   AGE   IP           NODE                   NOMINATED NODE   READINESS GATES
nginx-damz-769c564fc8-br85b      1/1     Running   0          30s   10.20.0.28   alma8.mydomain.intra   <none>           <none>
nginx-damz-769c564fc8-zqxgw      1/1     Running   0          30s   10.20.0.10   alma8.mydomain.intra   <none>           <none>
nginx-default-7d4b875895-7pz9f   1/1     Running   0          31s   10.10.0.30   alma8.mydomain.intra   <none>           <none>
nginx-default-7d4b875895-tbstn   1/1     Running   0          31s   10.10.0.15   alma8.mydomain.intra   <none>           <none>
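
Per the multi-pool IPAM docs linked at the top, the ipam.cilium.io/ip-pool annotation can also be set on a namespace instead of on each pod template (pod-level annotations win when both are present). A hypothetical dmz namespace pinned to the damz pool:

kubectl create namespace dmz
kubectl annotate namespace dmz ipam.cilium.io/ip-pool=damz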