k8s 安装及配置笔记
1. 安装仓库服务
[root@registry ~]# yum makecache
[root@registry ~]# yum install -y docker-distribution
[root@registry ~]# systemctl enable --now docker-distribution
2、安装软件包(master)
[root@master ~]# yum makecache
[root@master ~]# yum install -y kubeadm kubelet kubectl docker-ce
[root@master ~]# mkdir -p /etc/docker
[root@master ~]# vim /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://hub-mirror.c.163.com"],
  "insecure-registries": ["192.168.1.100:5000", "registry:5000"]
}
[root@master ~]# systemctl enable --now docker kubelet
[root@master ~]# docker info |grep Cgroup
Cgroup Driver: systemd
3.镜像导入私有仓库
[root@master base-image]# for i in *.tar.gz;do docker load -i ${i};done
[root@master base-image]# docker images
[root@master base-image]# docker images |awk '$2!="TAG"{print $1,$2}'|while read _f _v;do
docker tag ${_f}:${_v} 192.168.1.100:5000/${_f##*/}:${_v};
docker push 192.168.1.100:5000/${_f##*/}:${_v};
docker rmi ${_f}:${_v};
done
4、Tab键设置
[root@master ~]# kubectl completion bash >/etc/bash_completion.d/kubectl
[root@master ~]# kubeadm completion bash >/etc/bash_completion.d/kubeadm
[root@master ~]# exit
5 安装IPVS代理软件包
[root@master ~]# yum install -y ipvsadm ipset
6 配置master主机环境
[root@master ~]# vim /etc/hosts
192.168.1.21 master
192.168.1.31 node-0001
192.168.1.32 node-0002
192.168.1.33 node-0003
192.168.1.100 registry
[root@master ~]# vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@master ~]# modprobe br_netfilter
[root@master ~]# sysctl --system
7、使用kubeadm部署
[root@master ~]# mkdir init;cd init
[root@master init]# kubeadm init --config=kubeadm-init.yaml |tee master-init.log
# 根据提示执行命令
[root@master init]# mkdir -p $HOME/.kube
[root@master init]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master init]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
8.验证安装结果
[root@master ~]# kubectl version
[root@master ~]# kubectl get componentstatuses
1、获取token
# 创建token
[root@master ~]# kubeadm token create --ttl=0 --print-join-command
[root@master ~]# kubeadm token list
# 获取token_hash
[root@master ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt |openssl rsa -pubin -outform der |openssl dgst -sha256 -hex
2、node安装
[root@ecs-proxy ~]# cd node-install/
[root@ecs-proxy node-install]# vim files/hosts
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
192.168.1.21 master
192.168.1.31 node-0001
192.168.1.32 node-0002
192.168.1.33 node-0003
192.168.1.100 registry
[root@ecs-proxy node-install]# vim node_install.yaml
... ...
vars:
master: '192.168.1.21:6443'
token: 'fm6kui.mp8rr3akn74a3nyn'
token_hash: 'sha256:f46dd7ee29faa3c096cad189b0f9aedf59421d8a881f7623a543065fa6b0088c'
... ...
[root@ecs-proxy node-install]# ansible-playbook node_install.yaml
3、验证安装
[root@master ~]# kubectl get nodes
1、上传镜像到私有仓库(插件)
2、修改配置文件并安装
[root@master flannel]# vim kube-flannel.yml
128: "Network": "10.244.0.0/16",
172: image: 192.168.1.100:5000/flannel:v0.12.0-amd64
186: image: 192.168.1.100:5000/flannel:v0.12.0-amd64
227-结尾: 删除
[root@master flannel]# kubectl apply -f kube-flannel.yml
3、验证结果
[root@master flannel]# kubectl get nodes

[root@master ~]# kubectl get nodes
[root@master ~]# kubectl get pod
[root@master ~]# kubectl get namespaces
[root@master ~]# kubectl -n kube-system get pod
[root@master ~]# kubectl -n kube-system describe pod kube-flannel-ds-amd64-rtl4l
[root@master ~]# kubectl run testweb --image=192.168.1.100:5000/myos:httpd
[root@master ~]# kubectl get pod -o wide
[root@master ~]# kubectl exec -it testos-79778b4895-s8mxl -- /bin/bash
[root@master ~]# kubectl attach -it testos-79778b4895-s8mxl
[root@master ~]# kubectl logs testweb-7bf98b9576-v566c
[root@master flannel]# kubectl apply -f kube-flannel.yml
[root@master ~]# kubectl delete pod testos-79778b4895-s8mxl
[root@master ~]# kubectl delete deployments testos
pod 资源文件
---
# Pod resource: a single interactive base-OS container.
# stdin/tty keep the shell process alive so the pod stays Running.
kind: Pod
apiVersion: v1
metadata:
  name: mypod
spec:
  containers:
  - name: mylinux
    image: 192.168.1.100:5000/myos:v1804
    stdin: true
    tty: true
[root@master ~]# kubectl get pod
deployment 资源文件
[root@master ~]# vim myapache.yaml
---
# Deployment: one httpd replica; change-cause annotation records this
# revision's description for `kubectl rollout history`.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: myapache
  annotations:
    kubernetes.io/change-cause: httpd.v1
spec:
  selector:
    matchLabels:
      myapp: httpd            # must match the pod template labels below
  replicas: 1
  template:
    metadata:
      labels:
        myapp: httpd
    spec:
      containers:
      - name: webcluster
        image: 192.168.1.100:5000/myos:httpd
        stdin: false
        tty: false
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always
[root@master ~]# kubectl get deployments.apps
[root@master ~]# kubectl scale deployment myapache --replicas=3 副本增加三个
集群更新与回滚
[root@master config]# kubectl rollout history deployment myapache
[root@master ~]# kubectl edit deployments.apps myapache
[root@master ~]# kubectl rollout undo deployment myapache --to-revision=1 回滚
节点标签选择器
---
# Deployment pinned to a specific node via spec.nodeName
# (bypasses the scheduler entirely).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: myapache
spec:
  selector:
    matchLabels:
      myapp: httpd
  replicas: 1
  template:
    metadata:
      labels:
        myapp: httpd
    spec:
      nodeName: node-0001     # added: pin every replica to this node
      containers:
      - name: webcluster
        image: 192.168.1.100:5000/myos:httpd
        stdin: false
        tty: false
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always
[root@master ~]# kubectl delete -f myapache.yaml
deployment.apps "myapache" deleted
[root@master ~]# kubectl apply -f myapache.yaml
deployment.apps/myapache created
[root@master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
myapache-xxx 1/1 Running 0 3m49s 10.244.3.9 node-0001
使用 标签 让容器运行在一些节点上
[root@master ~]# kubectl delete -f myapache.yaml
deployment.apps "myapache" deleted
[root@master ~]# kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
master Ready master 10h v1.17.6 kubernetes.io/hostname=master ... ...
node-0001 Ready <none> 10h v1.17.6 kubernetes.io/hostname=node-0001 ... ...
node-0002 Ready <none> 10h v1.17.6 kubernetes.io/hostname=node-0002 ... ...
node-0003 Ready <none> 10h v1.17.6 kubernetes.io/hostname=node-0003 ... ...
[root@master ~]# kubectl label nodes node-0002 node-0003 disktype=ssd
node/node-0002 labeled
node/node-0003 labeled
[root@master ~]# kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
master Ready master 10h v1.17.6 kubernetes.io/hostname=master ... ...
node-0001 Ready <none> 10h v1.17.6 kubernetes.io/hostname=node-0001 ... ...
node-0002 Ready <none> 10h v1.17.6 disktype=ssd ... ...
node-0003 Ready <none> 10h v1.17.6 disktype=ssd ... ...
---
# Deployment restricted by node label: pods schedule only onto nodes
# carrying disktype=ssd (set earlier with `kubectl label nodes ...`).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: myapache
spec:
  selector:
    matchLabels:
      myapp: httpd
  replicas: 1
  template:
    metadata:
      labels:
        myapp: httpd
    spec:
      nodeSelector:           # added
        disktype: ssd         # added: only nodes labeled disktype=ssd
      containers:
      - name: webcluster
        image: 192.168.1.100:5000/myos:httpd
        stdin: false
        tty: false
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always
[root@master ~]# kubectl scale deployment myapache --replicas=3
deployment.apps/myapache scaled
[root@master ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
myapache-xxx 1/1 Running 0 9s 10.244.1.7 node-0003 <none>
myapache-xxx 1/1 Running 0 9s 10.244.2.8 node-0002 <none>
myapache-xxx 1/1 Running 0 21s 10.244.2.7 node-0002 <none>
[root@master ~]# kubectl delete -f myapache.yaml
deployment.apps "myapache" deleted
[root@master ~]# kubectl label nodes node-0002 node-0003 disktype-
node/node-0002 labeled
node/node-0003 labeled
[root@master ~]# kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
master Ready master 10h v1.17.6 kubernetes.io/hostname=master ... ...
node-0001 Ready <none> 10h v1.17.6 kubernetes.io/hostname=node-0001 ... ...
node-0002 Ready <none> 10h v1.17.6 kubernetes.io/hostname=node-0002 ... ...
node-0003 Ready <none> 10h v1.17.6 kubernetes.io/hostname=node-0003 ... ...
---
# DaemonSet: runs exactly one nginx pod per eligible node
# (no replicas field — the node count determines the pod count).
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: mynginx
spec:
  selector:
    matchLabels:
      myapp: nginx
  template:
    metadata:
      labels:
        myapp: nginx
    spec:
      containers:
      - name: nginxcluster
        image: 192.168.1.100:5000/myos:nginx
        stdin: false
        tty: false
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always
污点与容忍
污点策略:NoSchedule、PreferNoSchedule、NoExecute
[root@master ~]# kubectl delete -f mynginx.yaml
daemonset.apps "mynginx" deleted
[root@master ~]# kubectl describe nodes |grep -P "^Taints"
Taints: node-role.kubernetes.io/master:NoSchedule
Taints: <none>
Taints: <none>
Taints: <none>
[root@master ~]# kubectl taint node node-0001 k1=v1:NoSchedule
node/node-0001 tainted
[root@master ~]# kubectl apply -f mynginx.yaml
daemonset.apps/mynginx created
[root@master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mynginx-f2rxh 1/1 Running 0 4s
mynginx-n7xsw 1/1 Running 0 4s
[root@master ~]# kubectl taint node node-0001 k1-
node/node-0001 untainted
[root@master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mynginx-f2rxh 1/1 Running 0 105s
mynginx-hp6f2 1/1 Running 0 2s
mynginx-n7xsw 1/1 Running 0 105s
# Job: one-shot batch task computing pi to 2000 digits with perl;
# OnFailure lets kubelet retry the container until it exits 0.
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  template:
    spec:
      containers:
      - name: pi
        image: 192.168.1.100:5000/myos:v1804
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: OnFailure
cronjob 资源文件
---
# CronJob: runs the pi Job template every minute (cron syntax in schedule).
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: cronjob-pi
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: pi
            image: 192.168.1.100:5000/myos:v1804
            command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
          restartPolicy: OnFailure
[root@master ~]# kubectl apply -f mycronjob.yaml
cronjob.batch/cronjob-pi created
[root@master ~]# kubectl get cronjobs.batch
NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
cronjob-pi */1 * * * * False 0 <none> 10s
[root@master ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
cronjob-pi-1595410620-vvztx 0/1 Completed 0 62s
集群服务
ClusterIP服务
会变化的资源
创建 ClusterIP 服务 服务只有在集群内部才可以访问,创建 Pod,在Pod 中访问服务
---
# ClusterIP Service: stable virtual IP for the myapache pods,
# reachable only from inside the cluster.
kind: Service
apiVersion: v1
metadata:
  name: myapache
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    myapp: httpd              # must match the labels in the Deployment
  type: ClusterIP
[root@master config]# kubectl get service
---
# NodePort Service: exposes the httpd pods on a port of every node,
# making them reachable from outside the cluster.
kind: Service
apiVersion: v1
metadata:
  name: mynodeport
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    myapp: httpd
  type: NodePort              # service type: node-level external access
---
# Headless Service: clusterIP: None allocates no virtual IP;
# DNS returns the individual pod IPs directly.
kind: Service
apiVersion: v1
metadata:
  name: myheadless
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    myapp: httpd
  type: ClusterIP
  clusterIP: None             # added: headless — no cluster IP assigned
[root@master ~]# vim ingress/mandatory.yaml
221: image: 192.168.1.100:5000/nginx-ingress-controller:0.30.0
[root@master ~]# kubectl apply -f ingress/mandatory.yaml
[root@master ~]# kubectl -n ingress-nginx get pod
NAME READY STATUS RESTARTS AGE
nginx-ingress-controller-fc6766d7-ptppp 1/1 Running 0 47s
发布服务
# Ingress: default backend routes all HTTP traffic from the
# nginx ingress controller to the myapache Service on port 80.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: my-web
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  backend:
    serviceName: myapache
    servicePort: 80
[root@master ingress]# kubectl get ingresses
创建 configmap
[root@master ~]# kubectl create configmap nginx-conf --from-file=/var/webconf/nginx.conf
configmap/nginx-conf created
[root@master ~]# kubectl get configmaps
配置 configmap
---
# Deployment mounting a ConfigMap as a single file:
# the nginx-conf ConfigMap's nginx.conf key overlays
# /usr/local/nginx/conf/nginx.conf inside the container (via subPath).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: webnginx
spec:
  selector:
    matchLabels:
      myapp: nginx
  replicas: 1
  template:
    metadata:
      labels:
        myapp: nginx
    spec:
      volumes:                        # added
      - name: nginx-php               # added (referenced by volumeMounts below)
        configMap:                    # added
          name: nginx-conf            # added: must equal the ConfigMap's name
      containers:
      - name: nginx
        image: 192.168.1.100:5000/myos:nginx
        volumeMounts:                 # added
        - name: nginx-php             # added: must match the volume name above
          subPath: nginx.conf         # added: mount one key as a single file
          mountPath: /usr/local/nginx/conf/nginx.conf   # added
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always
添加 php 容器
---
# Same webnginx Deployment with a php-fpm sidecar container added;
# both containers share the pod network namespace (nginx talks to
# php-fpm over localhost).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: webnginx
spec:
  selector:
    matchLabels:
      myapp: nginx
  replicas: 1
  template:
    metadata:
      labels:
        myapp: nginx
    spec:
      volumes:
      - name: nginx-php
        configMap:
          name: nginx-conf
      containers:
      - name: nginx
        image: 192.168.1.100:5000/myos:nginx
        volumeMounts:
        - name: nginx-php
          subPath: nginx.conf
          mountPath: /usr/local/nginx/conf/nginx.conf
        ports:
        - protocol: TCP
          containerPort: 80
      - name: php-backend                           # added
        image: 192.168.1.100:5000/myos:php-fpm      # added: FastCGI sidecar
      restartPolicy: Always
---
# Deployment with an emptyDir volume: scratch space created with the
# pod and deleted with it, mounted at /var/cache.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: webcache
spec:
  selector:
    matchLabels:
      myapp: cache
  replicas: 1
  template:
    metadata:
      labels:
        myapp: cache
    spec:
      volumes:                # added
      - name: empty-data      # added
        emptyDir: {}          # added: ephemeral, pod-lifetime storage
      containers:
      - name: apache
        image: 192.168.1.100:5000/myos:httpd
        stdin: false
        tty: false
        volumeMounts:         # added
        - name: empty-data    # added
          mountPath: /var/cache   # added
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always
---
# webcache Deployment extended with a hostPath volume: httpd logs are
# written to /var/weblog on the node so they survive pod restarts.
# DirectoryOrCreate makes kubelet create the host directory if missing.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: webcache
spec:
  selector:
    matchLabels:
      myapp: cache
  replicas: 1
  template:
    metadata:
      labels:
        myapp: cache
    spec:
      volumes:
      - name: empty-data
        emptyDir: {}
      - name: log-data                  # added
        hostPath:                       # added: node-local directory
          path: /var/weblog             # added
          type: DirectoryOrCreate       # added: create on the node if absent
      containers:
      - name: apache
        image: 192.168.1.100:5000/myos:httpd
        stdin: false
        tty: false
        volumeMounts:
        - name: empty-data
          mountPath: /var/cache
        - name: log-data                # added
          mountPath: /var/log/httpd    # added: httpd's log directory
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always
创建 pv
---
# PersistentVolume backed by the NFS export on the registry host.
# Retain keeps the data after the claim is released.
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-nfs
spec:
  volumeMode: Filesystem
  capacity:
    storage: 30Gi
  accessModes:
  - ReadWriteOnce
  - ReadOnlyMany
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.100
    path: /var/webroot
创建 pvc
---
# PersistentVolumeClaim: requests 25Gi ReadWriteMany storage;
# binds to pv-nfs above (30Gi satisfies the request).
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-nfs
spec:
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 25Gi
应用持久卷
---
# webnginx Deployment using the PVC: both the nginx and php-fpm
# containers mount the shared NFS-backed web root at
# /usr/local/nginx/html so they serve/execute the same files.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: webnginx
spec:
  selector:
    matchLabels:
      myapp: nginx
  replicas: 1
  template:
    metadata:
      labels:
        myapp: nginx
    spec:
      volumes:
      - name: nginx-php
        configMap:
          name: nginx-conf
      - name: website                   # added
        persistentVolumeClaim:          # added
          claimName: pvc-nfs            # added: must equal the PVC's name
      containers:
      - name: nginx
        image: 192.168.1.100:5000/myos:nginx
        volumeMounts:
        - name: nginx-php
          subPath: nginx.conf
          mountPath: /usr/local/nginx/conf/nginx.conf
        - name: website                 # added
          mountPath: /usr/local/nginx/html   # added
        ports:
        - protocol: TCP
          containerPort: 80
      - name: php-backend
        image: 192.168.1.100:5000/myos:php-fpm
        volumeMounts:                   # added
        - name: website                 # added: same volume as nginx
          mountPath: /usr/local/nginx/html   # added
      restartPolicy: Always