CentOS7 搭建 K8S 環境

前期準備

環境規劃

K8S 與Docker兼容問題

k8s v1.18.0 => Docker v18.x

k8s v1.19.0 => Docker v19.x

軟體 版本
Linux作業系統 CentOS 7.9.2009 (Core) x64
Kubernetes 1.18.0
Docker 18.06.3-ce
角色 IP 組件 推薦配置(最低)
master 192.168.137.101 kubelet
kubeadm
kubectl
docker
CPU 2 核 +
記憶體 2G +
node1 192.168.137.102 kubelet
kubeadm
kubectl
docker
CPU 2 核 +
記憶體 2G +
node2 192.168.137.103 kubelet
kubeadm
kubectl
docker
CPU 2 核 +
記憶體 2G +

修改HostName

# 修改hostname 
# vi /etc/hostname

# 192.168.137.101
hostnamectl set-hostname master
# 192.168.137.102
hostnamectl set-hostname node1
# 192.168.137.103
hostnamectl set-hostname node2

配置主機和IP映射

# 將本機IP指向hostname
vi /etc/hosts

192.168.137.101 master
192.168.137.102 node1
192.168.137.103 node2

reboot # 重啟(可以做完全部前期準備後再重啟)

放行需求埠(線上環境)

# Master節點埠放行

# Kubernetes API Server 6443
firewall-cmd --zone=public --add-port=6443/tcp --permanent
# etcd server client api 2379~2380
firewall-cmd --zone=public --add-port=2379-2380/tcp --permanent
# kubelet 10250, kube-scheduler 10251, kube-controller-manager 10252
firewall-cmd --zone=public --add-port=10250-10252/tcp --permanent

# Node節點埠放行

# kubelet API 10250
firewall-cmd --zone=public --add-port=10250/tcp --permanent
# NodePort Services 30000~32767
firewall-cmd --zone=public --add-port=30000-32767/tcp --permanent

firewall-cmd --reload
firewall-cmd --list-ports

直接關閉防火牆(不推薦)

systemctl disable firewalld
systemctl stop firewalld

安裝Docker

# 安裝 wget
yum install -y wget

# 下載 docker 鏡像源
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

# docker安裝版本查看
yum list docker-ce --showduplicates | sort -r

# 安裝 docker
yum -y install docker-ce
# 指定版本
yum -y install docker-ce-18.06.3.ce-3.el7

# 設置開機自啟動
systemctl enable docker && systemctl start docker

# 版本檢查
docker --version
Docker version 18.06.3-ce, build d7080c1

修改配置文件

vi /etc/docker/daemon.json

{
	"registry-mirrors": [
		"https://1nj0zren.mirror.aliyuncs.com",
		"https://docker.mirrors.ustc.edu.cn",
		"https://f1361db2.m.daocloud.io",
		"https://registry.docker-cn.com"
	],
	"exec-opts": [
		"native.cgroupdriver=systemd"
	],
	"log-driver": "json-file",
	"log-opts": {
		"max-size": "100m"
	},
	"storage-driver": "overlay2"
}

#重新載入配置文件 
systemctl daemon-reload
#重啟Docker 
systemctl restart docker

安裝Kubernetes工具

添加源

由於中國網路原因, 官方文檔中的地址不可用, 本文替換為阿里雲鏡像地址, 執行以下程式碼即可:

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF

# 注意:gpgkey 後面的兩個網址中間是空格,不是換行,複製後出現換行會導致安裝出錯

安裝

yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

# 指定版本
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0 --disableexcludes=kubernetes

# 如下出現錯誤 [Errno -1] repomd.xml signature could not be verified for kubernetes 則是 repo 的 gpg 驗證不通過導致的,可以修改 /etc/yum.repos.d/kubernetes.repo 中的 repo_gpgcheck=0 跳過驗證。

systemctl enable kubelet && systemctl start kubelet

修改網路配置

cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl --system

注意: 以上的全部操作, 在 Node 機器上也需要執行. 注意hostname等不能相同.

初始化Master

生成初始化文件

1. 配置文件方式

kubeadm config print init-defaults > kubeadm-init.yaml

vi kubeadm-init.yaml
#################################################################
localAPIEndpoint:
  #advertiseAddress: 1.2.3.4 
  advertiseAddress: 192.168.137.101  # 本機IP

nodeRegistration:
  #name: localhost.localdomain
  name: master

#imageRepository: k8s.gcr.io
imageRepository: registry.aliyuncs.com/google_containers # 鏡像倉庫

networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16 # 新增Pod子網路
#################################################################
:wq

修改完畢後文件如下:

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  #advertiseAddress: 1.2.3.4
  advertiseAddress: 192.168.137.101
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  #name: localhost.localdomain
  name: master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
#imageRepository: k8s.gcr.io
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.18.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}

2.直接傳參方式(推薦,老司機常用方式)

kubeadm init \
--apiserver-advertise-address=192.168.137.101 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.18.0 \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16

下載鏡像

kubeadm config images pull --config kubeadm-init.yaml

配置禁用Swap

# 注意不要重複執行
sed -i 's/KUBELET_EXTRA_ARGS=/KUBELET_EXTRA_ARGS="--fail-swap-on=false"/' /etc/sysconfig/kubelet

# 臨時關閉
swapoff -a

執行初始化

kubeadm init --config kubeadm-init.yaml

# 出現埠被佔用情況
kubeadm reset
kubeadm init --config kubeadm-init.yaml --ignore-preflight-errors=Swap

# reset後初始化提示文件已存在
rm -rf /etc/kubernetes/manifests
rm -rf /var/lib/etcd

驗證是否成功

# 出現下面文字表示初始化成功:

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.137.101:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:d126a8ec9cb47ac4bfae5a2d7501172da937d91b1ccf0eae093a9a3687c841f2 

配置環境, 讓當前用戶可以執行kubectl命令

# 配置kubectl執行命令環境
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

# 執行kubectl命令查看機器節點
kubectl get node
-----------------------------------------
NAME STATUS ROLES AGE VERSION
master NotReady master 48m v1.18.0

配置網路

使用以下命令安裝 Calico

wget https://docs.projectcalico.org/manifests/calico.yaml

# 獲取網路資訊
firewall-cmd --get-active-zones
public
  interfaces: eth0

vi calico.yaml # 大概從 3639 行開始,有些改動沒有則追加
#####################################################################
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
  value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
  value: "autodetect"
# IP automatic detection. 
- name: IP_AUTODETECTION_METHOD
  value: "interface=eth.*"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
  #value: "Always"
  value: "Never"
#####################################################################
# 構建calico網路
kubectl apply -f calico.yaml
# 檢查結果
kubectl get po -n kube-system -o wide | grep calico

檢查 master 的狀態是否已經成為 Ready

kubectl get node

NAME     STATUS     ROLES    AGE     VERSION
master   Ready   master   5m20s   v1.18.0

安裝Dashboard

安裝文檔: Web UI (Dashboard)

部署文檔:Web UI (Dashboard)

解決GitHub的raw.githubusercontent.com無法連接問題

1、進入網址 https://site.ip138.com/raw.githubusercontent.com/

2、輸入 raw.githubusercontent.com,查詢對應的IP地址:151.101.108.133

3、編輯/etc/hosts文件配置映射:151.101.108.133 raw.githubusercontent.com

# 下載配置文件
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml

# 創建 pod
kubectl apply -f recommended.yaml

# 查看 pods 狀態
kubectl get pods --all-namespaces | grep dashboard

# 使用 nodeport方式 將 dashboard服務 暴露在集群外,指定使用 30443 埠
kubectl patch svc kubernetes-dashboard -n kubernetes-dashboard \
-p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30443}]}}'

# 查看暴露的service,已修改為nodeport類型
kubectl -n kubernetes-dashboard get svc

# 此時我們可以訪問登錄面板: https://192.168.137.101:30443,但是暫時還無法登錄

修改 Service

# 刪除現有的dashboard服務
kubectl delete -f recommended.yaml
# 重命名 recommended.yaml
mv recommended.yaml dashboard-svc.yaml
# 修改配置項
vi dashboard-svc.yaml
#####################################################################
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort # 服務類型改為 NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30443 # 暴露埠 30443 
  selector:
    k8s-app: kubernetes-dashboard
#####################################################################
:wq

# 重新創建 pod
kubectl apply -f dashboard-svc.yaml

創建用戶

文檔地址: Creating sample user

vi dashboard-svc-account.yaml
#####################################################################
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kube-system
#####################################################################
:wq

# 執行
kubectl apply -f dashboard-svc-account.yaml

生成證書

官方文檔中提供了登錄 1.7.X 以上版本的登錄方式,而且步驟很不清晰,我們自己按下面步驟操作即可:

grep 'client-certificate-data' ~/.kube/config | head -n 1 | awk '{print $2}' | base64 -d >> kubecfg.crt

grep 'client-key-data' ~/.kube/config | head -n 1 | awk '{print $2}' | base64 -d >> kubecfg.key

# 生成證書時會提示輸入密碼, 可以直接兩次回車跳過.
openssl pkcs12 -export -clcerts -inkey kubecfg.key -in kubecfg.crt -out kubecfg.p12 -name "kubernetes-client"

# kubecfg.p12 即需要導入客戶端機器的證書. 將證書拷貝到客戶端機器上: 若生成證書時跳過了密碼, 導入時提示填寫密碼直接回車即可
scp [email protected]:/root/.kube/kubecfg.p12 ./

# 此時我們可以訪問登錄面板: https://192.168.137.101:30443 ,登錄時會提示選擇證書, 確認後會提示輸入當前用戶名密碼(注意是電腦的用戶名密碼).

登錄Dashboard(Token登錄)

文檔地址: Bearer Token

# 獲取 Token:
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}')

# 複製該Token到登錄頁, 點擊登錄即可

添加 Node 節點

# 關閉交換空間
swapoff -a

# 如果前面執行 kubeadm init 命令後沒有保留 kubeadm join 語句,需要執行如下命令重新生成:
kubeadm token create --print-join-command
kubeadm join 192.168.137.101:6443 --token ngqaor.ayhyq00qb3o0gxjk     --discovery-token-ca-cert-hash sha256:4c18ecc6e9bd4457308b028123cbd16b2d3cbdefb14ec1e61b43a15e05ab63b3

# 執行如下命令將 Node 加入集群:
kubeadm join 192.168.137.101:6443 --token ngqaor.ayhyq00qb3o0gxjk \
    --discovery-token-ca-cert-hash sha256:4c18ecc6e9bd4457308b028123cbd16b2d3cbdefb14ec1e61b43a15e05ab63b3 


添加完畢後, 在 master 上查看節點狀態:

# 查看所有節點狀態
kubectl get nodes               
NAME     STATUS   ROLES    AGE     VERSION
master   Ready    master   6h38m   v1.18.0
node1    Ready    <none>   32m     v1.18.0
node2    Ready    <none>   32m     v1.18.0

# 查看所有 pod 狀態
kubectl get po --all-namespaces
NAMESPACE              NAME                                         READY   STATUS            RESTARTS   AGE
kube-system            calico-kube-controllers-65d7476764-zgfp2     1/1     Running           0          5h44m
kube-system            calico-node-dk6v2                            0/1     Running           0          5h44m
kube-system            calico-node-rgt4x                            0/1     PodInitializing   0          9m19s
kube-system            calico-node-tzvn2                            0/1     Running           0          9m29s
kube-system            coredns-7ff77c879f-5hgb6                     1/1     Running           0          6h15m
kube-system            coredns-7ff77c879f-l7wpq                     1/1     Running           0          6h15m
kube-system            etcd-master                                  1/1     Running           0          6h15m
kube-system            kube-apiserver-master                        1/1     Running           0          6h15m
kube-system            kube-controller-manager-master               1/1     Running           0          6h15m
kube-system            kube-proxy-6jf4p                             1/1     Running           0          6h15m
kube-system            kube-proxy-nrsr2                             1/1     Running           0          9m19s
kube-system            kube-proxy-sfh7l                             1/1     Running           0          9m29s
kube-system            kube-scheduler-master                        1/1     Running           0          6h15m
kubernetes-dashboard   dashboard-metrics-scraper-6b4884c9d5-kh88n   1/1     Running           0          124m
kubernetes-dashboard   kubernetes-dashboard-7b544877d5-csfkz        1/1     Running           0          124m