An example of GKE cluster creation using the gcloud CLI can be found in the /gke-deploy directory. The 5-namespace application example below was run on 3x n2-standard-8 instances.
- 5 namespace isolated applications
- using nicholasjackson/fake-service application
- 3-tier application, 4 deployments per namespace, 1 replica per deployment
- A > B1,B2 > C
- CPU requests: 700m // CPU limits: 700m (guaranteed QoS)
- MEM requests: 500Mi // MEM limits: 500Mi (guaranteed QoS)
# Add the official Istio Helm repository and refresh the local chart index.
helm repo add istio https://istio-release.storage.googleapis.com/charts
helm repo update
# Install the Istio base chart (CRDs and cluster-wide resources).
# Pin the chart version; --version=1.21.0 matches the form used by the
# other chart installs below for consistency.
helm upgrade --install istio-base istio/base -n istio-system --version=1.21.0 --create-namespace
echo "installing Kubernetes Gateway CRDs"
# Install the Gateway API CRDs only if they are not already present on the cluster.
if ! kubectl get crd gateways.gateway.networking.k8s.io &> /dev/null; then
  kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.0.0" | kubectl apply -f -
fi
# Install the Istio CNI node agent with the ambient profile.
# Values are passed as YAML on stdin via the heredoc; note that
# cniBinDir must be nested under the cni: key to take effect.
helm upgrade --install istio-cni istio/cni \
-n kube-system \
--version=1.21.0 \
-f -<<EOF
profile: ambient
# The cniBinDir override below is required on GKE, where CNI binaries
# live under /home/kubernetes/bin; remove it on other platforms.
cni:
  cniBinDir: /home/kubernetes/bin
EOF
# Install the istiod control plane with the ambient profile enabled.
helm upgrade --install istiod istio/istiod \
  --namespace istio-system \
  --version=1.21.0 \
  -f - <<'EOF'
profile: ambient
EOF
On GKE, ztunnel is expected to be deployed in the kube-system namespace:
# Install the ztunnel node proxy. Values are passed as YAML on stdin;
# the resource requests, volumeMounts, and volumes entries must be
# properly nested for the chart to pick them up.
helm upgrade --install ztunnel istio/ztunnel \
-n kube-system \
--version=1.21.0 \
-f -<<EOF
hub: docker.io/istio
tag: 1.21.0
# Resource requests sized for this load test; adjust to node capacity.
resources:
  requests:
    cpu: 500m
    memory: 2048Mi
istioNamespace: istio-system
# Additional volumeMounts to the ztunnel container
volumeMounts:
  - name: tmp
    mountPath: /tmp
# Additional volumes to the ztunnel pod
volumes:
  - name: tmp
    emptyDir:
      medium: Memory
EOF
# Deploy the test client and httpbin workloads into the ambient mesh.
kubectl apply -k client/ambient
kubectl apply -k httpbin/ambient
# Open a shell in the sleep pod. '--' separates kubectl's own flags from
# the command to run in the container (required by current kubectl).
kubectl exec -it deploy/sleep -n client -c sleep -- sh
# Run inside the sleep pod: verify connectivity to httpbin through the mesh.
curl httpbin.httpbin.svc.cluster.local:8000/get
# Remove httpbin and deploy the 5-namespace tiered application.
kubectl delete -k httpbin/ambient
kubectl apply -k tiered-app/5-namespace-app/ambient
kubectl exec -it deploy/sleep -n client -c sleep -- sh
# Run inside the sleep pod: hit the tier-1 entry point of namespace ns-1.
curl http://tier-1-app-a.ns-1.svc.cluster.local:8080
# Follow the ztunnel DaemonSet logs to observe the proxied traffic.
kubectl logs -n kube-system ds/ztunnel -f
Output should look similar to the lines below; note the SPIFFE ID of the client sleep pod in the peer_id field:
2024-03-07T00:45:13.205154Z INFO inbound{id=75f92cf47e739b015f76405a976d0359 peer_ip=10.32.1.7 peer_id=spiffe://cluster.local/ns/client/sa/sleep}: ztunnel::proxy::inbound: got CONNECT request to 10.32.3.6:80
2024-03-07T00:45:13.839633Z INFO inbound{id=e737b49726c8c0a5b92e20c0ae6b7872 peer_ip=10.32.1.7 peer_id=spiffe://cluster.local/ns/client/sa/sleep}: ztunnel::proxy::inbound: got CONNECT request to 10.32.3.6:80
2024-03-07T00:45:14.377121Z INFO inbound{id=1c8da768ba34c2eba072911c6a17b892 peer_ip=10.32.1.7 peer_id=spiffe://cluster.local/ns/client/sa/sleep}: ztunnel::proxy::inbound: got CONNECT request to 10.32.3.6:80
# Start the load generators (one vegeta deployment per namespace).
kubectl apply -k loadgenerators/5-loadgenerators
# Stream vegeta output from namespace ns-1.
kubectl logs -f -l app=vegeta -n ns-1
# Watch application and ztunnel resource usage while the load runs.
watch kubectl top pods -n ns-1
watch kubectl top pods --sort-by cpu -n kube-system
# Open a shell in the vegeta container for ad-hoc test runs.
kubectl exec -it deploy/vegeta -n ns-1 -c vegeta -- /bin/sh
Example test run (inside the vegeta container):
# Fire 500 req/s for 2s at the tier-1 service, save the raw results,
# and print a text report. -dns-ttl=0 disables DNS caching in vegeta.
echo "GET http://tier-1-app-a.ns-1.svc.cluster.local:8080" \
  | vegeta attack -dns-ttl=0 -rate=500/1s -duration=2s \
  | tee results.bin \
  | vegeta report -type=text
# Tear down in reverse install order: ztunnel, istiod, CNI, base.
helm uninstall ztunnel -n kube-system
helm uninstall istiod -n istio-system
helm uninstall istio-cni -n kube-system
helm uninstall istio-base -n istio-system
# WARNING: this deletes the cluster-wide Gateway API CRDs (and every
# resource of those kinds). Skip if anything else on the cluster uses them.
kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.0.0" | kubectl delete -f -