Development

Core Concepts

List all Kubernetes objects

$ kubectl api-resources --sort-by=name -o name | wc -l
67

$ kubectl api-resources --sort-by=name -o name
apiservices.apiregistration.k8s.io
bgpconfigurations.crd.projectcalico.org
bgppeers.crd.projectcalico.org
bindings
blockaffinities.crd.projectcalico.org
certificatesigningrequests.certificates.k8s.io
clusterinformations.crd.projectcalico.org
clusterrolebindings.rbac.authorization.k8s.io
clusterroles.rbac.authorization.k8s.io
componentstatuses
configmaps
controllerrevisions.apps
cronjobs.batch
csidrivers.storage.k8s.io
csinodes.storage.k8s.io
customresourcedefinitions.apiextensions.k8s.io
daemonsets.apps
deployments.apps
endpoints
endpointslices.discovery.k8s.io
events
events.events.k8s.io
felixconfigurations.crd.projectcalico.org
globalnetworkpolicies.crd.projectcalico.org
globalnetworksets.crd.projectcalico.org
horizontalpodautoscalers.autoscaling
hostendpoints.crd.projectcalico.org
ingresses.extensions
ingresses.networking.k8s.io
ipamblocks.crd.projectcalico.org
ipamconfigs.crd.projectcalico.org
ipamhandles.crd.projectcalico.org
ippools.crd.projectcalico.org
jobs.batch
leases.coordination.k8s.io
limitranges
localsubjectaccessreviews.authorization.k8s.io
mutatingwebhookconfigurations.admissionregistration.k8s.io
namespaces
networkpolicies.networking.k8s.io
networkpolicies.crd.projectcalico.org
networksets.crd.projectcalico.org
nodes
persistentvolumeclaims
persistentvolumes
poddisruptionbudgets.policy
pods
podsecuritypolicies.policy
podtemplates
priorityclasses.scheduling.k8s.io
replicasets.apps
replicationcontrollers
resourcequotas
rolebindings.rbac.authorization.k8s.io
roles.rbac.authorization.k8s.io
runtimeclasses.node.k8s.io
secrets
selfsubjectaccessreviews.authorization.k8s.io
selfsubjectrulesreviews.authorization.k8s.io
serviceaccounts
services
statefulsets.apps
storageclasses.storage.k8s.io
subjectaccessreviews.authorization.k8s.io
tokenreviews.authentication.k8s.io
validatingwebhookconfigurations.admissionregistration.k8s.io
volumeattachments.storage.k8s.io
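The listing can also be narrowed down; two variations using standard kubectl flags (added here as a convenience, not part of the original capture):

// namespaced resources only
kubectl api-resources --namespaced=true -o name

// resources in a single API group
kubectl api-resources --api-group=apps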

Create a container in a namespace

Create a namespace named mynamespace, and in that namespace create a Pod named nginx whose container uses the nginx image.

kubectl create ns mynamespace
kubectl run nginx --image=nginx --generator=run-pod/v1 -n mynamespace
kubectl delete ns mynamespace

Create and delete a Pod using a YAML file

kubectl run nginx --image=nginx --generator=run-pod/v1 --dry-run -o yaml > pod.yaml
kubectl create -f pod.yaml
kubectl delete -f pod.yaml

Create a Pod and view its environment variables

kubectl run busybox --image=busybox --generator=run-pod/v1 --command -- env
kubectl logs busybox > env.log

Create a busybox Pod that keeps running

// create pod
kubectl run busybox --image=busybox:1.28 --generator=run-pod/v1 --command -- sh -c "echo Hello Kubernetes! && sleep 3600"

// get pod
kubectl get pods -o wide

Create a Pod that runs an echo command and then exits

kubectl run busybox --image=busybox --generator=run-pod/v1 --restart=Never --command -- echo "Hello World"

Create a Pod that runs an echo command and is deleted afterwards

kubectl run busybox --image=busybox -it --rm --generator=run-pod/v1 --restart=Never --command -- echo "Hello World"

Create a ResourceQuota limiting the default namespace to at most 1 GB of memory, 1 CPU, and 2 Pods

kubectl create quota myrq --hard=cpu=1,memory=1G,pods=2
kubectl delete quota myrq
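The quota and its current usage can be inspected with describe before deleting it (a small addition to the original steps):

kubectl describe quota myrq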

Create a Pod in the web namespace using the nginx image, with containerPort 80, running the nginx command with the arguments -g daemon off; and -q

// yaml
cat <<EOF > ./pod-nginx.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - command: ["nginx"]
    args: ["-g", "daemon off;", "-q"]
    image: nginx
    name: nginx
    ports:
    - containerPort: 80
EOF

// create
kubectl create -f pod-nginx.yaml -n web

// verify
kubectl get pods -n web

View nodes

// get node
kubectl get nodes

// view details
kubectl describe nodes

Configuration

Create a ConfigMap containing two key-value pairs (foo=love, bar=puppy)

kubectl create configmap my-config --from-literal=foo=love --from-literal=bar=puppy
kubectl get cm
kubectl get cm my-config -o yaml
kubectl delete cm my-config

Create a ConfigMap containing UAT and PROD config files

// prepare config
mkdir -p configmap
echo "{state: 101, config: {cur: 12, weight: 25}}" > configmap/UAT.config
echo "{state: 102, config: {cur: 12, weight: 25}}" > configmap/PROD.config

// ConfigMap
kubectl create configmap my-config --from-file=configmap/
kubectl get cm
kubectl describe cm my-config
kubectl get cm my-config -o yaml
kubectl delete cm my-config

Create a ConfigMap with two key-value pairs (foo=love, bar=puppy) and pass them to a busybox Pod as environment variables

kubectl create configmap my-config --from-literal=foo=love --from-literal=bar=puppy

cat <<EOF > ./busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: busybox
  name: busybox
spec:
  containers:
  - image: busybox
    name: busybox
    command: ["env"]
    env:
    - name: CM_VALUE_FOO
      valueFrom:
        configMapKeyRef:
          name: my-config
          key: foo
    - name: CM_VALUE_BAR
      valueFrom:
        configMapKeyRef:
          name: my-config
          key: bar
  restartPolicy: Never
EOF

kubectl create -f busybox.yaml

kubectl logs busybox | grep CM_VALUE

Use the ConfigMap's two key-value pairs (foo=love, bar=puppy) directly as Pod environment variables

kubectl create configmap my-config --from-literal=foo=love --from-literal=bar=puppy

cat <<EOF > ./busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: busybox
  name: busybox
spec:
  containers:
  - image: busybox
    name: busybox
    command: ["env"]
    envFrom:
    - configMapRef:
        name: my-config
  restartPolicy: Never
EOF

kubectl create -f busybox.yaml

kubectl logs busybox | grep foo
kubectl logs busybox | grep bar

Create a ConfigMap containing UAT and PROD config files and mount it into a Pod at /etc/data

// prepare config
mkdir -p configmap
echo "{state: 101, config: {cur: 12, weight: 25}}" > configmap/UAT.config
echo "{state: 102, config: {cur: 12, weight: 25}}" > configmap/PROD.config

// ConfigMap
kubectl create configmap my-config --from-file=configmap/

// pod
cat <<EOF > ./busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: busybox
  name: busybox
spec:
  containers:
  - image: busybox
    name: busybox
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: config-volume
      mountPath: /etc/data
  volumes:
  - name: config-volume
    configMap:
      name: my-config
EOF

kubectl create -f busybox.yaml

// view
kubectl exec busybox -- cat /etc/data/UAT.config
kubectl exec busybox -- cat /etc/data/PROD.config

Create a ConfigMap containing UAT and PROD config files and mount them into a Pod at /etc/data/uat and /etc/data/prod respectively

// prepare config
mkdir -p configmap
echo "{state: 101, config: {cur: 12, weight: 25}}" > configmap/UAT.config
echo "{state: 102, config: {cur: 12, weight: 25}}" > configmap/PROD.config

// ConfigMap
kubectl create configmap my-config --from-file=configmap/

// pod
cat <<EOF > ./busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: busybox
  name: busybox
spec:
  containers:
  - image: busybox
    name: busybox
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: prod-config-volume
      mountPath: /etc/data/prod
    - name: uat-config-volume
      mountPath: /etc/data/uat
  volumes:
  - name: prod-config-volume
    configMap:
      name: my-config
      items:
      - key: PROD.config
        path: PROD.config
  - name: uat-config-volume
    configMap:
      name: my-config
      items:
      - key: UAT.config
        path: UAT.config
EOF

kubectl create -f busybox.yaml

// view
kubectl exec busybox -- cat /etc/data/uat/UAT.config
kubectl exec busybox -- cat /etc/data/prod/PROD.config

Redis config file (Volume backed by a ConfigMap)

This part demonstrates passing Redis's configuration file through a ConfigMap.

1. Create redis-config
// prepare files
cat <<EOF > ./redis-config
maxmemory 2mb
maxmemory-policy allkeys-lru
EOF

// create configMap
kubectl create configmap redis-config --from-file=redis-config
2. Create a Pod
// prepare yaml files
cat <<EOF > ./pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: redis
spec:
  containers:
  - name: redis
    image: redis:5.0.4
    command:
      - redis-server
      - "/redis-master/redis.conf"
    env:
    - name: MASTER
      value: "true"
    ports:
    - containerPort: 6379
    resources:
      limits:
        cpu: "0.1"
    volumeMounts:
    - mountPath: /redis-master-data
      name: data
    - mountPath: /redis-master
      name: config
  volumes:
    - name: data
      emptyDir: {}
    - name: config
      configMap:
        name: redis-config
        items:
        - key: redis-config
          path: redis.conf
EOF

// create pod
kubectl create -f pod.yaml
3. Verify
# kubectl exec -it redis redis-cli
127.0.0.1:6379> CONFIG GET maxmemory
1) "maxmemory"
2) "2097152"
127.0.0.1:6379> CONFIG GET maxmemory-policy
1) "maxmemory-policy"
2) "allkeys-lru"

Nginx runtime parameters (ENV variables backed by a ConfigMap)

1. Create nginx-config
kubectl create configmap nginx-config --from-literal=username=kylin --from-literal=password=password
2. Create a Pod
// prepare yaml files
cat <<EOF > ./pod-nginx.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
    ports:
    - containerPort: 80
    resources: {}
    env:
    - name: NGINX_USERNAME
      valueFrom:
        configMapKeyRef:
          name: nginx-config
          key: username
    - name: NGINX_PASSWORD
      valueFrom:
        configMapKeyRef:
          name: nginx-config
          key: password
  dnsPolicy: ClusterFirst
  restartPolicy: Always
EOF

// create pod
kubectl create -f pod-nginx.yaml
3. Verify
# kubectl exec nginx env
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=nginx
NGINX_PASSWORD=password
NGINX_USERNAME=kylin

SecurityContexts: defining a container's access to the underlying file system

SecurityContexts define how a Pod or container interacts with the underlying security mechanisms. The container deployed in this part needs to read files on the underlying node that are owned by a specific user and group.

1. Prepare users, groups, and files on the nodes
// a. init users, groups, and files on both worker nodes
# for i in 2 3 ; do ssh root@machine0$i "useradd -u 2000 container-user-0; groupadd -g 3000 container-group-0 ; useradd -u 2001 container-user-1 ; groupadd -g 3001 container-group-1"; done

// b. check created users
# for i in 2 3 ; do ssh root@machine0$i "grep container /etc/passwd"; done;
container-user-0:x:2000:2000::/home/container-user-0:/bin/sh
container-user-1:x:2001:2001::/home/container-user-1:/bin/sh
container-user-0:x:2000:2000::/home/container-user-0:/bin/sh
container-user-1:x:2001:2001::/home/container-user-1:/bin/sh

// c. check created groups
# for i in 2 3 ; do ssh root@machine0$i "grep container /etc/group"; done;
container-user-0:x:2000:
container-group-0:x:3000:
container-user-1:x:2001:
container-group-1:x:3001:
container-user-0:x:2000:
container-group-0:x:3000:
container-user-1:x:2001:
container-group-1:x:3001:

// d. create file and grant rights
# for i in 2 3 ; do ssh root@machine0$i "mkdir -p /etc/message/"; done
# for i in 2 3 ; do ssh root@machine0$i "echo 'Hello, World' | tee -a /etc/message/message.txt "; done
# for i in 2 3 ; do ssh root@machine0$i "chown 2000:3000 /etc/message/message.txt ; chmod 640 /etc/message/message.txt"; done

// e. check files grants
# for i in 2 3 ; do ssh root@machine0$i "ls -l /etc/message/message.txt"; done;
-rw-r----- 1 container-user-0 container-group-0 13 Mar  9 00:16 /etc/message/message.txt
-rw-r----- 1 container-user-0 container-group-0 13 Mar  9 00:16 /etc/message/message.txt

// f. check the content
for i in 2 3 ; do ssh root@machine0$i "cat /etc/message/message.txt"; done;
Hello, World
Hello, World
2. Without a securityContext, the container accesses the underlying file as root; access succeeds
// yaml
cat <<EOF > ./securitycontext-test-1.yaml
apiVersion: v1
kind: Pod
metadata:
  name: securitycontext-test-1
spec:
  containers:
  - name: app
    image: busybox
    command: ['sh', '-c', "id && cat /message/message.txt && sleep 3600"]
    volumeMounts:
    - name: message-volume
      mountPath: /message
  volumes:
  - name: message-volume
    hostPath:
      path: /etc/message
EOF

// create
kubectl create -f securitycontext-test-1.yaml

// verify
# kubectl logs securitycontext-test-1
uid=0(root) gid=0(root) groups=10(wheel)
Hello, World
3. With a securityContext specifying a non-matching user, access to the underlying file fails
// yaml
cat <<EOF > ./securitycontext-test-2.yaml
apiVersion: v1
kind: Pod
metadata:
  name: securitycontext-test-2
spec:
  securityContext:
    runAsUser: 2001
    fsGroup: 3001
  containers:
  - name: app
    image: busybox
    command: ['sh', '-c', "id && cat /message/message.txt && sleep 3600"]
    volumeMounts:
    - name: message-volume
      mountPath: /message
  volumes:
  - name: message-volume
    hostPath:
      path: /etc/message
EOF

// create
kubectl create -f securitycontext-test-2.yaml

// verify
# kubectl get pods securitycontext-test-2 --no-headers
securitycontext-test-2   0/1   CrashLoopBackOff   3     2m17s

# kubectl logs securitycontext-test-2
cat: can't open '/message/message.txt': Permission denied
uid=2001 gid=0(root) groups=300
4. With a securityContext specifying the file owner's user and group, access succeeds
// yaml
cat <<EOF > ./securitycontext-test-3.yaml
apiVersion: v1
kind: Pod
metadata:
  name: securitycontext-test-3
spec:
  securityContext:
    runAsUser: 2000
    fsGroup: 3000
  containers:
  - name: app
    image: busybox
    command: ['sh', '-c', "id && cat /message/message.txt && sleep 3600"]
    volumeMounts:
    - name: message-volume
      mountPath: /message
  volumes:
  - name: message-volume
    hostPath:
      path: /etc/message
EOF

// create
kubectl create -f securitycontext-test-3.yaml

// verify
# kubectl logs securitycontext-test-3
uid=2000 gid=0(root) groups=3000
Hello, World
5. Clean up
kubectl delete pod $(kubectl get pods --no-headers | awk '{print $1}')

Write a YAML manifest for a Pod that runs nginx as user ID 101

// yaml
cat <<EOF > ./securitycontext-nginx.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  securityContext:
    runAsUser: 101
  containers:
  - image: nginx
    name: nginx
    ports:
    - containerPort: 80
EOF

// create
kubectl create -f securitycontext-nginx.yaml

// verify
# kubectl logs nginx
2020/03/12 03:20:55 [warn] 1#1: the "user" directive makes sense only if the master process runs with super-user privileges, ignored in /etc/nginx/nginx.conf:2
nginx: [warn] the "user" directive makes sense only if the master process runs with super-user privileges, ignored in /etc/nginx/nginx.conf:2
2020/03/12 03:20:55 [emerg] 1#1: mkdir() "/var/cache/nginx/client_temp" failed (13: Permission denied)
nginx: [emerg] mkdir() "/var/cache/nginx/client_temp" failed (13: Permission denied)

Write a YAML manifest for a Pod whose single container has the NET_ADMIN and SYS_TIME capabilities

// yaml
cat <<EOF > ./securitycontext-capability.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx
  name: nginx
spec:
  containers:
  - image: nginx
    name: nginx
    ports:
    - containerPort: 80
    securityContext:
      capabilities:
        add: ["NET_ADMIN", "SYS_TIME"]
EOF

// create
kubectl create -f securitycontext-capability.yaml

Resource Requests vs. Resource Limits

Kubernetes allows us to specify the resource requirements of a container in the Pod spec. A container's memory and CPU requirements are defined in terms of resource requests and resource limits:

  • Resource request - the amount of resources necessary to run a container. A Pod will only be scheduled on a node that has enough available resources to run the Pod's containers.

  • Resource limit - the maximum amount of a resource a container is allowed to use.

Deploy a busybox container from the command line, requesting 64 MB of memory and 250m CPU, with limits of 128 MB of memory and 500m CPU

kubectl run busybox --image=busybox --limits='cpu=500m,memory=128Mi' --requests='cpu=250m,memory=64Mi' --generator=run-pod/v1
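To confirm the request and limit landed on the Pod spec, a quick check (sketch):

// verify
kubectl get pod busybox -o jsonpath='{.spec.containers[0].resources}'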

Deploy the busybox container from YAML, requesting 64 MB of memory and 250m CPU, with limits of 128 MB of memory and 500m CPU

// yaml
cat <<EOF > ./resource-limits.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
spec:
  containers:
  - name: busybox
    image: busybox
    command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600']
    resources:
      requests:
        memory: "64Mi"
        cpu: "250m"
      limits:
        memory: "128Mi"
        cpu: "500m"
EOF

// run
kubectl create -f resource-limits.yaml

Create a Secret with the content password=mypass

kubectl create secret generic my-secret --from-literal=password=mypass
kubectl get secret my-secret -o yaml

Create a Secret whose password is stored in a file

echo "myPassword" > password
kubectl create secret generic my-secret --from-file=password
kubectl get secret my-secret -o yaml

Mount a Secret into a container at /etc/foo

1. Create my-secret
echo "This is a password" > password
echo "This is a username" > username

kubectl create secret generic my-secret --from-file=username --from-file=password
kubectl get secret my-secret -o yaml
2. Deploy
// yaml
cat <<EOF > ./secret-volumes.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: secret-volumes
  name: secret-volumes
spec:
  containers:
  - image: busybox
    name: secret-volumes
    command: ["sh", "-c", "ls -l /etc/foo && cat /etc/foo/username && cat /etc/foo/password"]
    volumeMounts:
    - name: secret-volume
      mountPath: /etc/foo
      readOnly: true
  volumes:
  - name: secret-volume
    secret:
      secretName: my-secret
  restartPolicy: Never
EOF

// create
kubectl create -f secret-volumes.yaml
3. Verify
# kubectl logs secret-volumes
total 0
lrwxrwxrwx    1 root     root            15 Mar 12 09:09 password -> ..data/password
lrwxrwxrwx    1 root     root            15 Mar 12 09:09 username -> ..data/username
This is a username
This is a password

Pass a Secret to a container as environment variables named MY_SECRET_TEST_USERNAME and MY_SECRET_TEST_PASSWORD

1. Create my-secret
echo "This is a password" > password
echo "This is a username" > username

kubectl create secret generic my-secret --from-file=username --from-file=password
kubectl get secret my-secret -o yaml
2. Deploy
// yaml
cat <<EOF > ./secret-envs.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: secret-envs
  name: secret-envs
spec:
  containers:
  - image: busybox
    name: secret-envs
    command: ["sh", "-c", "env | grep MY_SECRET_TEST"]
    env:
    - name: MY_SECRET_TEST_USERNAME
      valueFrom:
        secretKeyRef:
          name: my-secret
          key: username
    - name: MY_SECRET_TEST_PASSWORD
      valueFrom:
        secretKeyRef:
          name: my-secret
          key: password
  restartPolicy: Never
EOF

// create
kubectl create -f secret-envs.yaml
3. Verify
# kubectl logs secret-envs
MY_SECRET_TEST_PASSWORD=This is a password
MY_SECRET_TEST_USERNAME=This is a username

List all ServiceAccounts in the cluster

kubectl get serviceaccount --all-namespaces
kubectl get serviceaccount default -o yaml

Create a ServiceAccount named myuser

kubectl create sa myuser

Use a ServiceAccount in a Pod

A ServiceAccount allows processes inside a container to call the Kubernetes API.

// yaml
cat <<EOF > ./sa.yaml
apiVersion: v1
kind: Pod
metadata:
  name: serviceaccount-pod
spec:
  serviceAccountName: myuser
  containers:
  - name: myapp
    image: busybox
    command: ['sh', '-c', "echo Hello, Kubernetes! && sleep 3600"]
EOF

// create
kubectl create -f sa.yaml

// verify
kubectl get pod serviceaccount-pod  -o yaml

Deploy the candy-themed game in containers

Deploy the candy-themed game, meeting the following requirements:

  • The container uses the image linuxacademycontent/candy-service:1

  • The game needs the config file /etc/candy-service/candy.cfg, with the content below, mounted into the container via a ConfigMap named candy-service-config

candy.peppermint.power=100000000
candy.nougat-armor.strength=10
  • The container must access the file system as group ID 2000, set via a securityContext

  • The container requests 64MiB of memory and 250m CPU

  • The container is limited to at most 128MiB of memory and 500m CPU

  • The container connects to a backend database whose password is Kub3rn3t3sRul3s!; the password must be stored in a Secret named db-password and passed to the container via the DB_PASSWORD variable

  • The container needs the ServiceAccount candy-svc to access the Kubernetes API

1. Create candy.cfg
cat <<EOF > ./candy.cfg
candy.peppermint.power=100000000
candy.nougat-armor.strength=10
EOF
2. Create the ConfigMap candy-service-config
kubectl create configmap candy-service-config --from-file=candy.cfg
3. Create the Secret db-password
kubectl create secret generic db-password --from-literal=password=Kub3rn3t3sRul3s!
4. Create the ServiceAccount candy-svc
kubectl create sa candy-svc
5. candy.yaml
cat <<EOF > ./candy.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: candy
  name: candy
spec:
  securityContext:
    fsGroup: 2000
  volumes:
  - name: config-volume
    configMap:
      name: candy-service-config
  containers:
  - image: linuxacademycontent/candy-service:1
    name: candy
    env:
    - name: DB_PASSWORD
      valueFrom:
        secretKeyRef:
          name: db-password
          key: password
    resources:
      limits:
        cpu: 500m
        memory: 128Mi
      requests:
        cpu: 250m
        memory: 64Mi
    volumeMounts:
    - name: config-volume
      mountPath: /etc/candy-service
  dnsPolicy: ClusterFirst
  restartPolicy: Always
  serviceAccountName: candy-svc
EOF
6. Create the candy service
kubectl create -f candy.yaml
7. Verify the deployment
kubectl get pods

kubectl exec candy -- cat /etc/candy-service/candy.cfg

kubectl exec candy -- env | grep DB_PASSWORD

kubectl exec candy -- id

kubectl describe pod candy
8. Clean up
kubectl delete pod candy
kubectl delete cm candy-service-config
kubectl delete secret db-password
kubectl delete sa candy-svc

Multi-Container Pods

Three multi-container design patterns

Containers in one Pod share:

  • the network

  • storage

  • the process namespace

Three design patterns (a minimal Sidecar sketch follows this list):

  • Sidecar - a Sidecar container supplements or extends the capabilities of the main container

  • Ambassador - a reverse proxy at the network entrance; traffic bound for the main container first passes through the Ambassador container

  • Adapter - transforms the main container's output
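A minimal Sidecar sketch in the style of the other examples here (the Pod name, images, and log path are illustrative, not from the original notes): the main container writes a log file to a shared emptyDir volume, and the sidecar streams it.

// yaml
cat <<EOF > ./sidecar-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: sidecar-pod
spec:
  containers:
  - name: main
    image: busybox
    command: ['sh', '-c', 'while true; do date >> /var/log/app.log; sleep 5; done']
    volumeMounts:
    - name: log-volume
      mountPath: /var/log
  - name: sidecar
    image: busybox
    command: ['sh', '-c', 'touch /var/log/app.log; tail -f /var/log/app.log']
    volumeMounts:
    - name: log-volume
      mountPath: /var/log
  volumes:
  - name: log-volume
    emptyDir: {}
EOF

// create
kubectl create -f sidecar-pod.yaml

// the sidecar's log is the main container's output
kubectl logs sidecar-pod -c sidecar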

HAProxy Ambassador pattern - an HAProxy container in front of the service container

The legacy fruit-service only serves on port 8775, but the service needs to be reachable on port 80. This part uses the multi-container Ambassador pattern: an HAProxy container sits in front of the service container and serves port 80, with traffic flowing through port 80 on the HAProxy container to port 8775 on fruit-service. The requirements are:

  • The Pod is named fruit-service

  • The container in the Pod named fruit-service uses the image linuxacademycontent/legacy-fruit-service:1

  • The Ambassador HAProxy container uses the image haproxy:1.7; the proxy accepts requests on port 80 and forwards them to the backend on port 8775, per the HAProxy config file below

global
    daemon
    maxconn 256

defaults
    mode http
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms

listen http-in
    bind *:80
    server server1 127.0.0.1:8775 maxconn 32
  • The HAProxy config file must be passed in via a ConfigMap; the HAProxy container expects the config file at /usr/local/etc/haproxy/haproxy.cfg

  • A busybox Pod is used for testing, with the YAML below

apiVersion: v1
kind: Pod
metadata:
  name: busybox
spec:
  containers:
  - name: myapp-container
    image: radial/busyboxplus:curl
    command: ['sh', '-c', 'while true; do sleep 3600; done']
1. Create the HAProxy config file
cat <<EOF > ./haproxy.cfg
global
    daemon
    maxconn 256

defaults
    mode http
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms

listen http-in
    bind *:80
    server server1 127.0.0.1:8775 maxconn 32
EOF
2. Create a ConfigMap holding haproxy.cfg
kubectl create configmap haproxy-cfg --from-file=haproxy.cfg
3. Ambassador-pattern Pod YAML
cat <<EOF > ./service.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: fruit-service
  name: fruit-service
spec:
  containers:
  - image: linuxacademycontent/legacy-fruit-service:1
    name: fruit-service
    resources: {}
  - name: haproxy
    image: haproxy:1.7
    ports:
    - containerPort: 80
    volumeMounts:
    - name: cfg-volume
      mountPath: /usr/local/etc/haproxy
  volumes:
  - name: cfg-volume
    configMap:
      name: haproxy-cfg
EOF
4. Create the Ambassador-pattern Pod
kubectl create -f service.yaml
5. Test
kubectl run busybox --image=radial/busyboxplus:curl --generator=run-pod/v1 --command -- sh -c "sleep 3600"
kubectl exec busybox -- curl http://$(kubectl get pod fruit-service -o=custom-columns=IP:.status.podIP --no-headers):80
6. Clean up
kubectl delete all --all
kubectl delete cm haproxy-cfg

fluentd Adapter pattern - fluentd collects the service container's logs

Create a fluentd Adapter-pattern Pod, with the following requirements:

  • Save the Pod manifest to adapter-pod.yml

  • The Pod is named counter

  • The count container uses the busybox image and produces logs, with the arguments below

- /bin/sh
- -c
- >
  i=0;
  while true;
  do
    echo "$i: $(date)" >> /var/log/1.log;
    echo "$(date) INFO $i" >> /var/log/2.log;
    i=$((i+1));
    sleep 1;
  done
  • The adapter container is named adapter and uses the image k8s.gcr.io/fluentd-gcp:1.30

  • The fluentd config file has the content below; it must be loaded via a ConfigMap named fluentd-config, and the ConfigMap must be mounted into the adapter container so the config lands at /fluentd/etc/fluent.conf

<source>
  type tail
  format none
  path /var/log/1.log
  pos_file /var/log/1.log.pos
  tag count.format1
</source>

<source>
  type tail
  format none
  path /var/log/2.log
  pos_file /var/log/2.log.pos
  tag count.format2
</source>

<match **>
  @type file
  path /var/logout/count
  time_slice_format %Y%m%d%H%M%S
  flush_interval 5s
  log_level trace
</match>
  • Add the environment variable FLUENTD_ARGS to the adapter container, with the value "-c /fluentd/etc/fluent.conf"

  • Create a volume for the Pod that is removed when the Pod exits, mounted into both containers at /var/log, so that the count container writes and the adapter container reads

  • Create a hostPath volume; the adapter writes its output to /tmp/count_output on the node, mounted into the adapter container at /var/logout

1. Create fluentd-config
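The create command below assumes the fluentd configuration listed above has been written to ./fluent.conf, for example:

cat <<EOF > ./fluent.conf
<source>
  type tail
  format none
  path /var/log/1.log
  pos_file /var/log/1.log.pos
  tag count.format1
</source>

<source>
  type tail
  format none
  path /var/log/2.log
  pos_file /var/log/2.log.pos
  tag count.format2
</source>

<match **>
  @type file
  path /var/logout/count
  time_slice_format %Y%m%d%H%M%S
  flush_interval 5s
  log_level trace
</match>
EOF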
kubectl create configmap fluentd-config --from-file=fluent.conf

kubectl get cm fluentd-config -o yaml
2. Write adapter-pod.yml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: counter
  name: counter
spec:
  containers:
  - image: busybox
    name: count
    command:
    - /bin/sh
    - -c
    - >
      i=0;
      while true;
      do
        echo "$i: $(date)" >> /var/log/1.log;
        echo "$(date) INFO $i" >> /var/log/2.log;
        i=$((i+1));
        sleep 1;
      done
    volumeMounts:
    - name: share-volume
      mountPath: /var/log
  - image: k8s.gcr.io/fluentd-gcp:1.30
    name: adapter
    volumeMounts:
    - name: config-volume
      mountPath: /fluentd/etc
    - name: share-volume
      mountPath: /var/log
    - name: output-volume
      mountPath: /var/logout
    env:
    - name: FLUENTD_ARGS
      value: "-c /fluentd/etc/fluent.conf"
  volumes:
  - name: config-volume
    configMap:
      name: fluentd-config
  - name: share-volume
    emptyDir: {}
  - name: output-volume
    hostPath:
      path: /tmp/count_output
3. Create
kubectl create -f adapter-pod.yml
4. Verify
// make sure pod is running
kubectl get pods

// check count log
kubectl exec counter -c count -- cat /var/log/1.log
kubectl exec counter -c count -- cat /var/log/2.log

// check adapter log & files
# kubectl exec counter -c adapter -- cat /fluentd/etc/fluent.conf
# kubectl exec counter -c adapter -- ls /var/logout | wc -l
65
#kubectl exec counter -c adapter -- cat /var/logout/count.20200311092543_0.log
2020-03-11T09:25:43+00:00	count.format1	{"message":"89: Wed Mar 11 09:25:43 UTC 2020"}
2020-03-11T09:25:43+00:00	count.format2	{"message":"Wed Mar 11 09:25:43 UTC 2020 INFO 89"}

// check from host path
# ssh $(kubectl get pod -o wide --no-headers | awk '{print $7}')
# ls -l /tmp/count_output | wc -l
82
# cat /tmp/count_output/count.20200311092918_0.log
2020-03-11T09:29:18+00:00	count.format2	{"message":"Wed Mar 11 09:29:18 UTC 2020 INFO 132"}
2020-03-11T09:29:18+00:00	count.format1	{"message":"132: Wed Mar 11 09:29:18 UTC 2020"}
5. Clean Up
kubectl delete all --all
kubectl delete cm fluentd-config
6. Check whether the hostPath files were deleted (as the listing below shows, hostPath data survives Pod deletion)
# ssh machine03 'ls -l /tmp/count_output | wc -l'
127

Observability

Create a livenessProbe that runs echo test, with an initial delay of 5 seconds and a period of 10 seconds

// yaml
cat <<EOF > ./liveness.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: liveness-pod
  name: liveness-pod
spec:
  containers:
  - image: busybox
    name: liveness-pod
    command: ["sh", "-c", "sleep 3600"]
    livenessProbe:
      exec:
        command: ["echo", "test"]
      initialDelaySeconds: 5
      periodSeconds: 10
EOF

// create
kubectl create -f liveness.yaml

// verify
kubectl describe pod liveness-pod | grep Liveness

Create a readinessProbe doing an HTTP GET on path / and port 80, with an initial delay of 5 seconds and a period of 10 seconds

// yaml
cat <<EOF > ./readiness.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: readiness-pod
  name: readiness-pod
spec:
  containers:
  - image: nginx
    name: readiness-pod
    readinessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10
EOF

// create
kubectl create -f readiness.yaml

// verify
kubectl describe pod readiness-pod | grep Readiness

Create a busybox Pod that runs i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done, and view its logs

kubectl run busybox --image=busybox --generator=run-pod/v1 --command -- sh -c 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done'
kubectl logs -f busybox

Detect whether an application needs a restart, and whether it is ready

  • An application in a container can fail in ways that break the service without stopping or crashing the container (Kubernetes itself will not notice the fault), so configure a livenessProbe to decide whether the container needs a restart

  • While a container starts up, some user requests cannot be answered in time; use a readinessProbe to make sure the container is ready

Example
apiVersion: v1
kind: Pod
metadata:
  name: candy-service
spec:
  containers:
  - name: candy-service
    image: linuxacademycontent/candy-service:2
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8081
    readinessProbe:
      httpGet:
        path: /
        port: 80
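To confirm the probes are configured, assuming the example above is saved as candy-service.yaml (the filename is an assumption), a quick sketch:

// create & verify
kubectl create -f candy-service.yaml
kubectl describe pod candy-service | grep -E 'Liveness|Readiness'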

Install the Metrics Server

// install
git clone https://github.com/kubernetes-sigs/metrics-server.git
kubectl apply -f metrics-server/deploy/kubernetes/

// verify
kubectl get --raw /apis/metrics.k8s.io/
{"kind":"APIGroup","apiVersion":"v1","name":"metrics.k8s.io","versions":[{"groupVersion":"metrics.k8s.io/v1beta1","version":"v1beta1"}],"preferredVersion":{"groupVersion":"metrics.k8s.io/v1beta1","version":"v1beta1"}}

// export
kubectl get pod <POD_NAME> -o yaml --export > nginx-pod.yml

Common application monitoring commands

kubectl top pods
kubectl top pod resource-consumer-big
kubectl top pods -n kube-system
kubectl top nodes

Basic troubleshooting steps

// 1. describe
kubectl describe pod <POD_NAME>

// 2. logs
kubectl logs <OBJECT_NAME>
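A third step that often helps is checking recent cluster events (an addition to the original two steps):

// 3. events
kubectl get events --sort-by=.metadata.creationTimestamp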

Pod Design

Common label and selector operations

kubectl get pods -l app=my-app

kubectl get pods -l environment=production

kubectl get pods -l environment=development

kubectl get pods -l environment!=production

kubectl get pods -l 'environment in (development,production)'

kubectl get pods -l app=my-app,environment=production

Create three Pods whose labels all include app=v1

for i in 1 2 3 ; do kubectl run nginx-$i --image=nginx --port=80 --labels=app=v1 --generator=run-pod/v1 ; done

kubectl get pods -l app=v1

kubectl delete all -l app=v1

View and modify a Pod's labels

kubectl get pods --show-labels
kubectl label pod nginx-1 app=v2 --overwrite

Record the application's owner and git commit in annotations

apiVersion: v1
kind: Pod
metadata:
  name: my-annotation-pod
  annotations:
    owner: terry@linuxacademy.com
    git-commit: bdab0c6
spec:
  containers:
  - name: nginx
    image: nginx

Add the annotation "description='my description'" to a running Pod

kubectl annotate pod nginx-1 description='my description'
kubectl describe pod nginx-1

Remove an annotation from a running Pod

kubectl annotate pod nginx-1 description-

Common Deployment operations

kubectl set image deployment/rolling-deployment nginx=nginx:1.7.9 --record

kubectl rollout history deployment/rolling-deployment

kubectl rollout history deployment/rolling-deployment --revision=2

kubectl rollout undo deployment/rolling-deployment

Rolling upgrade and rollback

1. Deploy nginx
// yaml
cat <<EOF > ./deployment-nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx:1.7.8
        name: nginx
EOF

// create
kubectl create -f deployment-nginx.yaml
2. Upgrade
kubectl set image deployments/nginx nginx=nginx:1.7.9  --record
3. Watch the rollout status
kubectl rollout status deployments/nginx
4. View the rollout history
kubectl rollout history deployments/nginx
5. Roll back
kubectl rollout undo deployments/nginx

Deploy a StatefulSet using the nginx image (the manifest below uses replicas: 1; each additional replica needs its own PersistentVolume, and only one is provisioned here)

1. Create a PersistentVolume
// yaml
cat <<EOF > ./my-storage.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: statefulsets-volume
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: my-storage-class
  hostPath:
    path: /tmp/statefulsets
EOF

// create
kubectl create -f my-storage.yaml
2. Create a headless Service
// yaml
cat <<EOF > ./my-headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
EOF

// create
kubectl create -f my-headless-svc.yaml
3. Create the StatefulSet
// yaml
cat <<EOF > ./my-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: k8s.gcr.io/nginx-slim:0.8
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "my-storage-class"
      resources:
        requests:
          storage: 1Gi
EOF

// create
kubectl create -f my-statefulset.yaml
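A quick verification sketch (web-0 follows the StatefulSet <name>-<ordinal> naming convention):

// verify
kubectl get statefulset web
kubectl get pods -l app=nginx
kubectl get pvc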

Jobs vs. CronJobs

Both Jobs and CronJobs create Pods to carry out a task:

  • A Job is similar to a Pod, but exits once its work is done

  • A CronJob is similar to a Job, but performs the same work periodically

Create a Job that computes π to 2000 digits and writes it to the log

// yaml
cat <<EOF > ./pi.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
  backoffLimit: 4
EOF

// create
kubectl create -f pi.yaml

// verify (the Pod name suffix is generated; adjust to your run)
kubectl logs pi-r7kxf

Create a Job that computes π to 20 digits and writes it to the log, 5 times in total

// yaml
cat <<EOF > ./pi-completions.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi-completions
spec:
  completions: 5
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(20)"]
      restartPolicy: Never
  backoffLimit: 4
EOF

// create
kubectl create -f pi-completions.yaml

// verify
for p in $(kubectl get pods --no-headers | awk '{print $1}') ; do kubectl logs $p ; done

Create a Job that computes π to 20 digits and writes it to the log, 5 times in parallel

// yaml
cat <<EOF > ./pi-parallelism.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi-parallelism
spec:
  parallelism: 5
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(20)"]
      restartPolicy: Never
  backoffLimit: 4
EOF

// create
kubectl create -f pi-parallelism.yaml

// verify
for p in $(kubectl get pods --no-headers | awk '{print $1}') ; do kubectl logs $p ; done

Create a Job that runs busybox with echo and sleep commands

kubectl create job busybox --image=busybox -- /bin/sh -c 'echo hello;sleep 30;echo world'
kubectl logs -f busybox-f9bbz
kubectl logs job/busybox

Create a CronJob that prints the date and a log message every minute

// yaml
cat <<EOF > ./hello-cronjob.yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
EOF

// create
kubectl create -f hello-cronjob.yaml

// verify
kubectl get pods -w
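Once a run has fired, the CronJob and the Jobs it spawned can also be inspected (a small addition; Job and Pod name suffixes are generated):

kubectl get cronjob hello
kubectl get jobs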

pizza service deployment

Deploy the pizza service, meeting the following conditions:

  • All objects live in the pizza namespace

  • The Deployment object is named pizza-deployment

  • The Deployment has 3 replicas

  • The container image is linuxacademycontent/pizza-service:1.14.6

  • The container runs via the nginx command

  • The command takes the arguments "-g", "daemon off;"

  • The Pod exposes port 80

  • The Pod is checked for automatic restart via path /healthz on port 8081

  • The Pod's availability is checked via path / on port 80

  • The Service is named pizza-service

  • The Service forwards requests to the Pods on port 80

  • The Service is exposed externally on port 30080

1. Create the namespace
kubectl create ns pizza
2. Create the Deployment
// yaml
cat <<EOF > ./deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: pizza-deployment
  name: pizza-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: pizza-deployment
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: pizza-deployment
    spec:
      containers:
      - image: linuxacademycontent/pizza-service:1.14.6
        name: pizza-service
        ports:
        - containerPort: 80
        command: ["nginx"]
        args: ["-g", "daemon off;"]
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
        readinessProbe:
          httpGet:
            path: /
            port: 80
EOF

// create
kubectl create -f deployment.yaml -n pizza
3. Create the Service
// yaml
cat <<EOF > ./pizza-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: pizza-deployment
  name: pizza-service
  namespace: pizza
spec:
  ports:
  - nodePort: 30080
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: pizza-deployment
  type: NodePort
EOF

// create svc
kubectl create -f pizza-service.yaml -n pizza
4. Verify
kubectl get svc -n pizza
kubectl get ep -n pizza
kubectl get po -n pizza
5. Clean up
kubectl delete all --all -n pizza
kubectl delete ns pizza

Services and Networking

Create a Pod from the nginx image and expose it on port 80

kubectl run nginx --image=nginx --port=80 --generator=run-pod/v1
kubectl expose pod nginx --port=80 --name=nginx

kubectl get svc
kubectl get ep

// access the service from a test Pod (replace the IP with the ClusterIP from kubectl get svc)
kubectl run busybox --image=busybox --rm -it --generator=run-pod/v1 --command -- wget -O- 192.168.208.216:80

Create a Deployment from the nginx image and expose it on port 80

kubectl create deployment nginx --image=nginx
kubectl scale deployments/nginx --replicas=3
kubectl expose deployments/nginx --port=80 --name=nginx --type=NodePort
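A quick check of the resulting NodePort Service (sketch):

// verify
kubectl get svc nginx
kubectl get ep nginx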

Restricting access to Nginx

Define an access policy so the service provided by the Nginx container can only be accessed by containers carrying a specific label:

  • The Nginx Pod serves /hello to other services in the same namespace

  • Nginx only allows access from containers labeled allow-access: "true"

1. Deploy the nginx service
// yaml
cat <<EOF > ./hello.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: hello
  name: hello
spec:
  containers:
  - image: nginx
    name: hello
    ports:
    - containerPort: 80
    volumeMounts:
    - name: svc-root
      mountPath: "/usr/share/nginx/html"
    livenessProbe:
      exec:
        command: ["test", "-e", "/usr/share/nginx/html/hello"]
  initContainers:
  - image: busybox:1.28
    name: init-svc
    command: ["sh", "-c", "echo 'hello' > /usr/share/nginx/html/hello"]
    volumeMounts:
    - name: svc-root
      mountPath: "/usr/share/nginx/html"
  volumes:
  - name: svc-root
    emptyDir: {}
  restartPolicy: Always
EOF

// create service
kubectl create -f hello.yaml

// verify
# kubectl exec hello -- cat /usr/share/nginx/html/hello
hello
2. Create a client Pod and access hello
kubectl run client --image=radial/busyboxplus:curl --generator=run-pod/v1 --command -- sh -c "sleep 3600"

// Access service
# kubectl exec client -- curl http://$(kubectl get pod hello -o wide --no-headers | awk '{print $6}')/hello
hello
3. Apply the access policy
// yaml
cat <<EOF > ./policy.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: my-network-policy
spec:
  podSelector:
    matchLabels:
      run: hello
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          allow-access: "true"
    ports:
    - protocol: TCP
      port: 80
  egress:
  - to:
    - podSelector:
        matchLabels:
          allow-access: "true"
    ports:
    - protocol: TCP
      port: 80
EOF

// create
kubectl create -f policy.yaml
4. Access the hello service again after the policy is applied (access is now blocked)
kubectl exec client -- curl -m 3 http://$(kubectl get pod hello -o wide --no-headers | awk '{print $6}')/hello
5. Recreate the client Pod with the label, then access again
kubectl delete pod client
kubectl run client --image=radial/busyboxplus:curl --generator=run-pod/v1 --labels='allow-access=true' --command -- sh -c "sleep 3600"

// Access hello service again
# kubectl exec client -- curl http://$(kubectl get pod hello -o wide --no-headers | awk '{print $6}')/hello
hello
6. Clean up
kubectl delete pod hello client
kubectl delete networkpolicy my-network-policy

A gateway reaching different services under different policies

This part has two services and one gateway; each of the two services has its own access policy, and the gateway sits in front of both.

Object YAML

customer-data

apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: customer-data
  name: customer-data
spec:
  replicas: 2
  selector:
    matchLabels:
      app: customer-data
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: customer-data
    spec:
      containers:
      - image: nginx
        name: nginx

inventory

apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: inventory
  name: inventory
spec:
  replicas: 2
  selector:
    matchLabels:
      app: inventory
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: inventory
    spec:
      containers:
      - image: nginx
        name: nginx

customer-data-policy

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: customer-data-policy
spec:
  podSelector:
    matchLabels:
      app: customer-data
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          allow-access-customer-data: "true"
    ports:
    - protocol: TCP
      port: 80
  egress:
  - to:
    - podSelector:
        matchLabels:
          allow-access-customer-data: "true"
    ports:
    - protocol: TCP
      port: 80

inventory-policy

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: inventory-policy
spec:
  podSelector:
    matchLabels:
      app: inventory
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          allow-access-inventory: "true"
    ports:
    - protocol: TCP
      port: 80
  egress:
  - to:
    - podSelector:
        matchLabels:
          allow-access-inventory: "true"
    ports:
    - protocol: TCP
      port: 80

web-gateway

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: web-gateway
  name: web-gateway
spec:
  containers:
  - command:
    - sh
    - -c
    - sleep 3600
    image: radial/busyboxplus:curl
    name: web-gateway
1. Initialize
kubectl create -f customer-data.yaml
kubectl create -f inventory.yaml
kubectl create -f customer-data-policy.yaml
kubectl create -f inventory-policy.yaml
kubectl create -f web-gateway.yaml
kubectl expose deployments/customer-data --port=80 --name=customer-data-svc
kubectl expose deployments/inventory --port=80 --name=inventory-svc
2. Access test (blocked by the policies)
kubectl exec web-gateway -- curl -m 3 customer-data-svc
kubectl exec web-gateway -- curl -m 3 inventory-svc
3. Apply the labels
kubectl label pod web-gateway allow-access-customer-data=true
kubectl label pod web-gateway allow-access-inventory=true
4. Access again; this time both succeed
kubectl exec web-gateway -- curl -m 3 customer-data-svc
kubectl exec web-gateway -- curl -m 3 inventory-svc

State Persistence

Redis persistence

1. Create a PersistentVolume
// yaml
cat <<EOF > ./redis-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-pv
spec:
  storageClassName: ""
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/tmp/data"
EOF

// create
kubectl create -f redis-pv.yaml

// view
# kubectl get pv --no-headers
redis-pv   1Gi   RWO   Retain   Available                     5m53s
2. Create a PersistentVolumeClaim
// yaml
cat <<EOF > ./redis-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: redisdb-pvc
spec:
  storageClassName: ""
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF

// create
kubectl create -f redis-pvc.yaml

// view
# kubectl get pv --no-headers
redis-pv   1Gi   RWO   Retain   Bound   default/redisdb-pvc               6m40s

# kubectl get pvc --no-headers
redisdb-pvc   Bound   redis-pv   1Gi   RWO         34s
3. Create the redispod image, with a mounted volume to mount path /data
// yaml
cat <<EOF > ./redispod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: redispod
spec:
  containers:
  - image: redis
    name: redisdb
    volumeMounts:
    - name: redis-data
      mountPath: /data
    ports:
    - containerPort: 6379
      protocol: TCP
  volumes:
  - name: redis-data
    persistentVolumeClaim:
      claimName: redisdb-pvc
EOF

// create pod
kubectl create -f redispod.yaml

// view pod
# kubectl get pod redispod -o wide --no-headers
redispod   1/1   Running   0     3m31s   192.168.208.248   machine03.example.com   <none>   <none>
4. Connect to the container and write some data
kubectl exec -it redispod redis-cli
127.0.0.1:6379> SET server:name "redis server"
OK
127.0.0.1:6379> GET server:name
"redis server"
127.0.0.1:6379> QUIT
5. Delete pod and check persist files
// delete pod
kubectl delete pod redispod

// check persist files
# ssh machine03.example.com "ls /tmp/data"
dump.rdb
6. Create pod again
// create
kubectl create -f redispod.yaml

// get pod
# kubectl get pod redispod -o wide --no-headers
redispod   1/1   Running   0     62s   192.168.208.253   machine03.example.com   <none>   <none>
7. Verify data existing
kubectl exec -it redispod redis-cli
127.0.0.1:6379> GET server:name
"redis server"
127.0.0.1:6379> QUIT

MySQL persistence

MySQL PV
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv
spec:
  storageClassName: localdisk
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data"
MySQL PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec:
  storageClassName: localdisk
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Mi
MySQL Pod
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: mysql-pod
  name: mysql-pod
spec:
  containers:
  - image: mysql:5.6
    name: mysql-pod
    ports:
    - containerPort: 3306
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: password
    volumeMounts:
    - name: mysql-storage
      mountPath: "/var/lib/mysql"
    resources: {}
  volumes:
  - name: mysql-storage
    persistentVolumeClaim:
      claimName: mysql-pv-claim
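Unlike the Redis walkthrough above, this section stops at the manifests; a create/verify sketch, assuming they are saved as mysql-pv.yaml, mysql-pvc.yaml, and mysql-pod.yaml (the filenames are assumptions):

// create
kubectl create -f mysql-pv.yaml
kubectl create -f mysql-pvc.yaml
kubectl create -f mysql-pod.yaml

// verify the claim is bound and data lands under the mount
kubectl get pv,pvc
kubectl exec mysql-pod -- ls /var/lib/mysql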
