This commit is contained in:
2024-02-20 17:15:27 +08:00
committed by huty
parent 6706e1a633
commit 34158042ad
1529 changed files with 177765 additions and 0 deletions

View File

@@ -0,0 +1,7 @@
# Compose file that builds and tags the ch19 memory-stress image.
version: "3.7"
services:
  ch19-stress:
    image: kiamol/ch19-stress
    build:
      # Dockerfile and stress-memory.sh live in ./stress
      context: ./stress

View File

@@ -0,0 +1,16 @@
# Image that allocates memory with the Debian 'stress' tool.
# 'bc' is needed by stress-memory.sh for fractional stress-factor maths.
FROM debian:buster-slim

RUN apt-get update \
 && apt-get install -y --no-install-recommends \
    bc \
    stress \
 && rm -rf /var/lib/apt/lists/*

# Defaults: stress 50MB unless a factor of available memory is set;
# keep memory allocated for 59 minutes, exit after an hour.
ENV MEMORY_STRESS_MB="50" \
    MEMORY_STRESS_FACTOR="" \
    STRESS_HANG="3540" \
    STRESS_TIMEOUT="3600"

COPY ./stress-memory.sh .

# make the script executable once at build time, instead of
# re-running chmod on every container start in the CMD
RUN chmod +x ./stress-memory.sh

CMD ["./stress-memory.sh"]

View File

@@ -0,0 +1,38 @@
#!/bin/bash
# Reproduces what the kubelet does to calculate memory.available relative
# to the root cgroup, then runs 'stress' to allocate a target amount of memory.
#
# Env vars:
#   MEMORY_STRESS_MB     - fixed MB to allocate (used when no factor is set)
#   MEMORY_STRESS_FACTOR - fraction of available memory to allocate instead
#   STRESS_HANG          - seconds the stress worker keeps memory allocated
#   STRESS_TIMEOUT       - seconds before stress exits

# kubelet formula: working set = cgroup usage - inactive file cache
memory_capacity_in_kb=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
memory_capacity_in_bytes=$((memory_capacity_in_kb * 1024))
memory_usage_in_bytes=$(cat /sys/fs/cgroup/memory/memory.usage_in_bytes)
memory_total_inactive_file=$(awk '/total_inactive_file/ {print $2}' /sys/fs/cgroup/memory/memory.stat)

memory_working_set=${memory_usage_in_bytes}
if [ "$memory_working_set" -lt "$memory_total_inactive_file" ]; then
    memory_working_set=0
else
    memory_working_set=$((memory_usage_in_bytes - memory_total_inactive_file))
fi

memory_available_in_bytes=$((memory_capacity_in_bytes - memory_working_set))
memory_available_in_kb=$((memory_available_in_bytes / 1024))
memory_available_in_mb=$((memory_available_in_kb / 1024))

# how much to stress: a factor of what's available, or a fixed amount
if [ -n "$MEMORY_STRESS_FACTOR" ]; then
    stress_mb=$(echo "$memory_available_in_mb * $MEMORY_STRESS_FACTOR" | bc -l)
else
    stress_mb=$MEMORY_STRESS_MB
fi

# bc -l can return a fraction (even a leading-dot value like ".5");
# truncate to whole MB, defaulting to 0 if nothing is left of the dot
stress_mb_int=${stress_mb%%.*}
stress_mb_int=${stress_mb_int:-0}

echo '----------------'
echo "Memory available: ${memory_available_in_mb}M"
echo "Stress factor: ${MEMORY_STRESS_FACTOR}"
echo "Stressing memory: ${stress_mb_int}M"
echo '----------------'

# run stress behind a signal trap so the container stops cleanly on TERM/INT
exec /bin/sh -c "trap : TERM INT; (stress -q --vm 1 --vm-bytes ${stress_mb_int}M --vm-hang $STRESS_HANG -t $STRESS_TIMEOUT) & wait"

View File

@@ -0,0 +1,11 @@
# kind cluster with an aggressive kubelet eviction threshold,
# used to demonstrate evictions under memory pressure.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
    kubeadmConfigPatches:
      - |
        kind: InitConfiguration
        nodeRegistration:
          kubeletExtraArgs:
            # evict Pods when less than 40% of node memory is available
            eviction-hard: "memory.available<40%"
            eviction-pressure-transition-period: "30s"

View File

@@ -0,0 +1,30 @@
#!/bin/bash
# Reproduces what the kubelet does to calculate memory.available relative
# to the root cgroup, and prints node capacity vs. available memory.

# kubelet formula: working set = cgroup usage - inactive file cache
memory_capacity_in_kb=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
memory_capacity_in_bytes=$((memory_capacity_in_kb * 1024))
memory_usage_in_bytes=$(cat /sys/fs/cgroup/memory/memory.usage_in_bytes)
memory_total_inactive_file=$(awk '/total_inactive_file/ {print $2}' /sys/fs/cgroup/memory/memory.stat)

memory_working_set=${memory_usage_in_bytes}
if [ "$memory_working_set" -lt "$memory_total_inactive_file" ]; then
    memory_working_set=0
else
    memory_working_set=$((memory_usage_in_bytes - memory_total_inactive_file))
fi

memory_available_in_bytes=$((memory_capacity_in_bytes - memory_working_set))
memory_available_in_kb=$((memory_available_in_bytes / 1024))
memory_available_in_mb=$((memory_available_in_kb / 1024))
memory_capacity_in_mb=$((memory_capacity_in_kb / 1024))

echo '----------------'
echo "Memory capacity : ${memory_capacity_in_mb}M"
echo "Memory available: ${memory_available_in_mb}M"
echo '----------------'

View File

@@ -0,0 +1,73 @@
# ch19 lab
## Setup
Deploy metrics-server if you need it (check with `kubectl top nodes` - no stats means you need it):
```
kubectl apply -f metrics-server/
```
Run the app:
```
kubectl apply -f lab/pi/
```
Confirm the metrics are coming through:
```
kubectl top pods -l app=pi-web-lab
```
> Browse to the app and check the CPU spikes - e.g. http://localhost:8032/?dp=100000
## Sample Solution
You need to label your node to indicate it's in the EU region - you can use any key and value for this, but you'll need to use the same in your affinity rules:
```
kubectl label node --all kiamol.net/region=eu
```
### Pod with affinity rules
The updated deployment in [solution/pi.yaml](./solution/pi.yaml) adds these settings:
- **node affinity** - require to run on nodes with region=eu
- **pod anti-affinity** - prefer to run on nodes without any other Pi pods
- **resources** - add memory request for the HPA to use
- **replicas** - start with 2 as that's the desired minimum
```
kubectl apply -f lab/solution/pi.yaml
```
> You'll have two Pods running; browse to the app in a few tabs and both will spike CPU
### HPA for scaling on CPU
The HPA spec in [solution/hpa-cpu.yaml](./solution/hpa-cpu.yaml) scales from 2 to 5 Pods based on target CPU utilization of 50%.
```
kubectl apply -f lab/solution/hpa-cpu.yaml
```
> Make lots of browser requests in different tabs (or adapt the `ch19/loadpi` script) and you'll see the Pods scale up to a maximum of five replicas:
![Horizontal pod autoscaling in action](./solution/hpa.png)
## Teardown
Delete all the resources:
```
kubectl delete all,hpa -l kiamol=ch19-lab
```
And metrics-server if you deployed it:
```
kubectl delete -f metrics-server/
```

View File

@@ -0,0 +1,40 @@
# LoadBalancer Service publishing the lab Pi app on port 8032.
apiVersion: v1
kind: Service
metadata:
  name: pi-web
  labels:
    kiamol: ch19-lab
spec:
  ports:
    - port: 8032
      targetPort: http
  selector:
    app: pi-web-lab
  type: LoadBalancer
---
# Starting point for the lab: a single Pi replica with a CPU limit
# but no scheduling rules - the solution adds affinity and an HPA.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pi-web-lab
  labels:
    kiamol: ch19-lab
spec:
  selector:
    matchLabels:
      app: pi-web-lab
  replicas: 1
  template:
    metadata:
      labels:
        app: pi-web-lab
    spec:
      containers:
        - image: kiamol/ch05-pi
          command: ["dotnet", "Pi.Web.dll", "-m", "web"]
          name: web
          ports:
            - containerPort: 80
              name: http
          resources:
            limits:
              cpu: 200m

View File

@@ -0,0 +1,14 @@
# Lab solution HPA: scales the Pi Deployment between 2 and 5 replicas,
# targeting 50% average CPU utilization.
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: pi-cpu
  labels:
    kiamol: ch19-lab
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: pi-web-lab
  minReplicas: 2
  maxReplicas: 5
  targetCPUUtilizationPercentage: 50

Binary file not shown.

After

Width:  |  Height:  |  Size: 44 KiB

View File

@@ -0,0 +1,48 @@
# Lab solution Deployment: adds node affinity (must run in region=eu),
# Pod anti-affinity (prefer spreading Pi Pods), and a CPU request for the HPA.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pi-web-lab
  labels:
    kiamol: ch19-lab
spec:
  selector:
    matchLabels:
      app: pi-web-lab
  replicas: 2
  template:
    metadata:
      labels:
        app: pi-web-lab
    spec:
      containers:
        - image: kiamol/ch05-pi
          command: ["dotnet", "Pi.Web.dll", "-m", "web"]
          name: web
          ports:
            - containerPort: 80
              name: http
          resources:
            limits:
              cpu: 200m
            # request is the baseline the HPA utilization target measures against
            requests:
              cpu: 100m
      affinity:
        nodeAffinity:
          # hard rule: only schedule on nodes labeled with the EU region
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kiamol.net/region
                    operator: In
                    values:
                      - eu
        podAntiAffinity:
          # soft rule: prefer nodes not already running a Pi Pod
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - pi-web-lab
                topologyKey: "kubernetes.io/hostname"

View File

@@ -0,0 +1,6 @@
# Fires two concurrent requests at the Pi Service to spike CPU and trigger the HPA.
# pi @ 100K dp (decimal places) is hard on CPU:
$url = $(kubectl get svc pi-web -o jsonpath='http://{.status.loadBalancer.ingress[0].*}:8031/?dp=100000')
# two calls is enough to trigger HPA:
Start-Process -NoNewWindow curl $url
Start-Process -NoNewWindow curl $url

View File

@@ -0,0 +1,8 @@
#!/bin/bash
# Fires two concurrent requests at the Pi Service to spike CPU and trigger the HPA.
# pi @ 100K dp (decimal places) is hard on CPU:
URL=$(kubectl get svc pi-web -o jsonpath='http://{.status.loadBalancer.ingress[0].*}:8031/?dp=100000')
# two calls is enough to trigger HPA
# (quote the URL - it contains '?', which is a glob character when unquoted)
curl -s "$URL" > /dev/null &
curl -s "$URL" > /dev/null &

View File

@@ -0,0 +1,154 @@
---
# RBAC for metrics-server: exposes the metrics API to the built-in
# view/edit/admin roles, and delegates auth checks to the API server.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
# lets metrics-server read the extension-apiserver auth config
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
# Registers the metrics.k8s.io/v1beta1 API, served by the metrics-server Service.
# apiregistration.k8s.io/v1 replaces v1beta1, which was removed in Kubernetes 1.22
# (v1 has been available with an identical schema since 1.17).
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  # metrics-server serves a self-signed cert in this setup
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
# identity the metrics-server Pod runs as
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
# metrics-server collects resource usage from kubelets and serves the
# metrics.k8s.io API - used by 'kubectl top' and HorizontalPodAutoscalers.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
        # mount in tmp so we can safely use from-scratch images and/or read-only containers
        - name: tmp-dir
          emptyDir: {}
      containers:
        - name: metrics-server
          # registry.k8s.io is the successor to the frozen k8s.gcr.io registry;
          # same image, future-proof pull location
          image: registry.k8s.io/metrics-server-amd64:v0.3.6
          imagePullPolicy: IfNotPresent
          args:
            - --cert-dir=/tmp
            - --secure-port=4443
            # kubelets use self-signed certs in lab/kind clusters
            - --kubelet-insecure-tls
            - --kubelet-preferred-address-types=InternalIP
            - --v=2
          ports:
            - name: main-port
              containerPort: 4443
              protocol: TCP
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          volumeMounts:
            - name: tmp-dir
              mountPath: /tmp
      nodeSelector:
        kubernetes.io/os: linux
        kubernetes.io/arch: "amd64"
---
# ClusterIP Service fronting metrics-server for the aggregated API
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: metrics-server
  ports:
    - port: 443
      protocol: TCP
      targetPort: main-port
---
# read access metrics-server needs to collect stats from the cluster
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
      - configmaps
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system

View File

@@ -0,0 +1,13 @@
# ClusterIP Service for the random-number API, consumed by the web Pods.
apiVersion: v1
kind: Service
metadata:
  name: numbers-api
  labels:
    kiamol: ch19
spec:
  ports:
    - port: 80
  selector:
    app: numbers
    component: api
  type: ClusterIP

View File

@@ -0,0 +1,21 @@
# Random-number API with no scheduling rules - baseline for the affinity exercises.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: numbers-api
  labels:
    kiamol: ch19
    app: numbers
spec:
  selector:
    matchLabels:
      app: numbers
      component: api
  template:
    metadata:
      labels:
        app: numbers
        component: api
    spec:
      containers:
        - name: api
          image: kiamol/ch03-numbers-api

View File

@@ -0,0 +1,35 @@
# API Deployment with a hard anti-affinity rule: no two API Pods
# may run on the same node (topology key = hostname).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: numbers-api
  labels:
    kiamol: ch19
    app: numbers
spec:
  selector:
    matchLabels:
      app: numbers
      component: api
  template:
    metadata:
      labels:
        app: numbers
        component: api
    spec:
      containers:
        - name: api
          image: kiamol/ch03-numbers-api
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            # matches other Pods labeled app=numbers AND component=api
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - numbers
                  - key: component
                    operator: In
                    values:
                      - api
              topologyKey: "kubernetes.io/hostname"

View File

@@ -0,0 +1,48 @@
# Web Deployment with both affinity types: each web Pod must run on a node
# that has an API Pod, and must not share a node with another web Pod.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: numbers-web
  labels:
    kiamol: ch19
    app: numbers
spec:
  selector:
    matchLabels:
      app: numbers
      component: web
  template:
    metadata:
      labels:
        app: numbers
        component: web
    spec:
      containers:
        - name: web
          image: kiamol/ch03-numbers-web
      affinity:
        podAffinity:
          # hard rule: co-locate with an API Pod on the same node
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - numbers
                  - key: component
                    operator: In
                    values:
                      - api
              topologyKey: "kubernetes.io/hostname"
        podAntiAffinity:
          # hard rule: spread web Pods across nodes
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - numbers
                  - key: component
                    operator: In
                    values:
                      - web
              topologyKey: "kubernetes.io/hostname"

View File

@@ -0,0 +1,14 @@
# LoadBalancer Service publishing the numbers web app on port 8090.
apiVersion: v1
kind: Service
metadata:
  name: numbers-web
  labels:
    kiamol: ch19
spec:
  ports:
    - port: 8090
      targetPort: 80
  selector:
    app: numbers
    component: web
  type: LoadBalancer

View File

@@ -0,0 +1,35 @@
# Web Deployment with a hard co-location rule: each web Pod must run
# on a node that is already running an API Pod.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: numbers-web
  labels:
    kiamol: ch19
    app: numbers
spec:
  selector:
    matchLabels:
      app: numbers
      component: web
  template:
    metadata:
      labels:
        app: numbers
        component: web
    spec:
      containers:
        - name: web
          image: kiamol/ch03-numbers-web
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - numbers
                  - key: component
                    operator: In
                    values:
                      - api
              topologyKey: "kubernetes.io/hostname"

View File

@@ -0,0 +1,27 @@
# Pi web app with a CPU request and limit - the request is the baseline
# the HPA's utilization target is measured against.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pi-web
  labels:
    kiamol: ch19
spec:
  selector:
    matchLabels:
      app: pi-web
  template:
    metadata:
      labels:
        app: pi-web
    spec:
      containers:
        - image: kiamol/ch05-pi
          command: ["dotnet", "Pi.Web.dll", "-m", "web"]
          name: web
          ports:
            - containerPort: 80
              name: http
          resources:
            limits:
              cpu: 250m
            requests:
              cpu: 125m

View File

@@ -0,0 +1,14 @@
# HPA (v1 API): scales pi-web between 1 and 5 replicas,
# targeting 75% average CPU utilization.
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: pi-cpu
  labels:
    kiamol: ch19
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: pi-web
  minReplicas: 1
  maxReplicas: 5
  targetCPUUtilizationPercentage: 75

View File

@@ -0,0 +1,13 @@
# LoadBalancer Service publishing the Pi app on port 8031.
apiVersion: v1
kind: Service
metadata:
  name: pi-web
  labels:
    kiamol: ch19
spec:
  ports:
    - port: 8031
      targetPort: http
  selector:
    app: pi-web
  type: LoadBalancer

View File

@@ -0,0 +1,27 @@
# HPA using v2 features: a resource metric target plus scale-down behavior tuning.
# autoscaling/v2 replaces v2beta2, which was removed in Kubernetes 1.26
# (v2 is GA since 1.23 with an identical schema for these fields).
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: pi-cpu
  labels:
    kiamol: ch19
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: pi-web
  minReplicas: 1
  maxReplicas: 5
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 75
  behavior:
    scaleDown:
      # react quickly for the demo: 30s window, remove up to 50% of Pods every 15s
      stabilizationWindowSeconds: 30
      policies:
        - type: Percent
          value: 50
          periodSeconds: 15

View File

@@ -0,0 +1,9 @@
# High priority class - Pods using this may cause eviction of lower-priority Pods.
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: kiamol-high
  labels:
    kiamol: ch19
value: 10000
globalDefault: false
description: "High priority - may evict low priority"

View File

@@ -0,0 +1,9 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: kiamol-low
labels:
kiamol: ch19
value: 100
globalDefault: true
description: "Low priority - OK to be evicted"

View File

@@ -0,0 +1,18 @@
# Minimal sleep Deployment - baseline Pod for the scheduling exercises.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sleep
  labels:
    kiamol: ch19
spec:
  selector:
    matchLabels:
      app: sleep
  template:
    metadata:
      labels:
        app: sleep
    spec:
      containers:
        - name: sleep
          image: kiamol/ch03-sleep

View File

@@ -0,0 +1,18 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sleep2
labels:
kiamol: ch19
spec:
selector:
matchLabels:
app: sleep2
template:
metadata:
labels:
app: sleep2
spec:
containers:
- name: sleep
image: kiamol/ch03-sleep

View File

@@ -0,0 +1,62 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sleep2
labels:
kiamol: ch19
spec:
selector:
matchLabels:
app: sleep2
template:
metadata:
labels:
app: sleep2
spec:
containers:
- name: sleep
image: kiamol/ch03-sleep
tolerations:
- key: "kiamol-disk"
operator: "Equal"
value: "hdd"
effect: "NoSchedule"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values:
- amd64
- key: kubernetes.io/os
operator: In
values:
- linux
- windows
- matchExpressions:
- key: beta.kubernetes.io/arch
operator: In
values:
- amd64
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- windows
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- weight: 1
preference:
matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux

View File

@@ -0,0 +1,37 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sleep2
labels:
kiamol: ch19
spec:
selector:
matchLabels:
app: sleep2
template:
metadata:
labels:
app: sleep2
spec:
containers:
- name: sleep
image: kiamol/ch03-sleep
tolerations:
- key: "kiamol-disk"
operator: "Equal"
value: "hdd"
effect: "NoSchedule"
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values:
- amd64
- matchExpressions:
- key: beta.kubernetes.io/arch
operator: In
values:
- amd64

View File

@@ -0,0 +1,25 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sleep2
labels:
kiamol: ch19
spec:
selector:
matchLabels:
app: sleep2
template:
metadata:
labels:
app: sleep2
spec:
containers:
- name: sleep
image: kiamol/ch03-sleep
tolerations:
- key: "kiamol-disk"
operator: "Equal"
value: "hdd"
effect: "NoSchedule"
nodeSelector:
kubernetes.io/arch: zxSpectrum

View File

@@ -0,0 +1,23 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sleep2
labels:
kiamol: ch19
spec:
selector:
matchLabels:
app: sleep2
template:
metadata:
labels:
app: sleep2
spec:
containers:
- name: sleep
image: kiamol/ch03-sleep
tolerations:
- key: "kiamol-disk"
operator: "Equal"
value: "hdd"
effect: "NoSchedule"

View File

@@ -0,0 +1,23 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: stress
labels:
kiamol: ch19
spec:
replicas: 4
selector:
matchLabels:
app: stress
template:
metadata:
labels:
app: stress
spec:
containers:
- name: stress
image: kiamol/ch19-stress
env:
- name: MEMORY_STRESS_MB
value: "300"

View File

@@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: stress-high
labels:
kiamol: ch19
spec:
replicas: 2
selector:
matchLabels:
app: stress
level: high
template:
metadata:
labels:
app: stress
level: high
spec:
priorityClassName: kiamol-low
containers:
- name: stress
image: kiamol/ch19-stress
env:
- name: MEMORY_STRESS_MB
value: "300"
resources:
requests:
memory: 250Mi
limits:
memory: 350Mi

View File

@@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: stress-low
labels:
kiamol: ch19
spec:
replicas: 4
selector:
matchLabels:
app: stress
level: low
template:
metadata:
labels:
app: stress
level: low
spec:
priorityClassName: kiamol-low
containers:
- name: stress
image: kiamol/ch19-stress
env:
- name: MEMORY_STRESS_MB
value: "300"
resources:
requests:
memory: 50Mi
limits:
memory: 350Mi