90. k8s Installation Practice

1. Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

2. Disable SELinux
# Permanently disable
sed -i 's/enforcing/disabled/' /etc/selinux/config
# Temporarily disable
setenforce 0

3. Disable swap
# Temporarily disable
swapoff -a
# Permanently disable: comment out the swap entry in /etc/fstab
vim /etc/fstab
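
As a sketch of the permanent change (assuming a standard swap entry in /etc/fstab), the line can also be commented out non-interactively; double-check the file afterwards:
sed -ri 's/.*swap.*/#&/' /etc/fstab   # comment out every line that mentions swap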

4. Set the hostname
hostnamectl set-hostname <hostname>
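
For example, assuming the hostnames match the hosts entries added in the next step, each machine would be named as follows (illustrative values):
hostnamectl set-hostname master1   # on 192.168.2.60
hostnamectl set-hostname node1     # on 192.168.2.61
hostnamectl set-hostname node2     # on 192.168.2.62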

5. Add hosts entries on the master
cat >> /etc/hosts << EOF
192.168.2.60 master1
192.168.2.61 node1
192.168.2.62 node2
EOF

6. Pass bridged IPv4 traffic to the iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply the settings
sysctl --system
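
If sysctl reports that the net.bridge.* keys do not exist, the br_netfilter kernel module is usually not loaded yet; a minimal sketch to load it and confirm the values:
modprobe br_netfilter
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables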

7. Time synchronization
yum install ntpdate -y
ntpdate time.windows.com
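
To keep the clocks in sync after this one-off run, an hourly cron entry is one simple option (a sketch; /usr/sbin/ntpdate is assumed to be the installed path):
(crontab -l 2>/dev/null; echo "0 * * * * /usr/sbin/ntpdate time.windows.com") | crontab -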

8. Install Docker/kubeadm/kubelet on all nodes
yum -y install yum-utils
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce-20.10.24-3.el7 docker-ce-cli-20.10.24-3.el7 docker-ce-rootless-extras-20.10.24-3.el7 docker-compose-plugin
systemctl enable docker && systemctl start docker
docker --version

Alternatively, install an older Docker release:
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce-18.06.1.ce-3.el7

9. Configure the Docker registry mirror
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
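
Docker only re-reads daemon.json on restart, so restart the service and confirm the mirror is picked up:
systemctl restart docker
docker info | grep -A1 "Registry Mirrors"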

10. Add the Kubernetes yum repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

11. Install kubeadm, kubelet, and kubectl
yum install -y kubelet kubeadm kubectl
systemctl enable kubelet
If a version mismatch is reported, install a specific version instead, for example:
yum install -y kubeadm-1.23.6 kubelet-1.23.6 kubectl-1.23.6
yum install -y kubeadm-1.17.0-0 kubelet-1.17.0-0 kubectl-1.17.0-0
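
To confirm that all three components landed on the same version (1.23.6 is assumed here, matching the kubeadm init below), check:
kubeadm version
kubelet --version
kubectl version --client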

12. Deploy the Kubernetes master
① Run on 192.168.2.60 (master):
kubeadm init \
--apiserver-advertise-address=192.168.2.60 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.23.6 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16

Note: the Docker cgroup driver must be changed here, otherwise kubelet cannot start properly.
# Check the Docker cgroup driver
docker info | grep Cgroup
 Cgroup Driver: cgroupfs
 Cgroup Version: 1
WARNING: No swap limit support

# Check the kubelet cgroup driver
cat /var/lib/kubelet/config.yaml | grep cgroup
cgroupDriver: systemd

# Change the Docker cgroup driver: edit /etc/docker/daemon.json (create it if it does not exist) and add the content below
vim /etc/docker/daemon.json
# The "exec-opts" entry is the line to add
{
  "registry-mirrors": ["https://dpxn2pal.mirror.aliyuncs.com"],
  "exec-opts": [ "native.cgroupdriver=systemd" ]
}

# Restart Docker
systemctl daemon-reload
systemctl restart docker

# Restart kubelet
systemctl restart kubelet
# Reset the previous kubeadm init attempt, then run kubeadm init again
kubeadm reset

# The two commands below verify that the cgroup driver change took effect; both should report systemd
docker info -f '{{.CgroupDriver}}'
systemd
docker info | grep -i cgroup
 Cgroup Driver: systemd
 Cgroup Version: 1
WARNING: No swap limit support
Because the default image registry k8s.gcr.io is not reachable from mainland China, the Aliyun mirror registry is specified in the kubeadm init command above.
② Configure the kubectl tool
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes
③ Install the Pod network add-on (CNI)
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
The contents of kube-flannel.yml are reproduced below:
---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    k8s-app: flannel
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
    k8s-app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: docker.io/flannel/flannel-cni-plugin:v1.4.0-flannel1
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: docker.io/flannel/flannel:v0.24.3
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: docker.io/flannel/flannel:v0.24.3
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
Alternatively, save the manifest above to a local file with vim and apply it directly.
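
After applying the manifest, the flannel DaemonSet pods should reach Running and the nodes should switch from NotReady to Ready; a quick way to watch this:
kubectl get pods -n kube-flannel -w
kubectl get nodes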

13. Join the Kubernetes nodes
Run on 192.168.2.61/62 (the nodes).
Use the following command on the master to print the join command for new nodes:
kubeadm token create --print-join-command
To add a new node to the cluster, run the kubeadm join command printed by kubeadm init, for example:
kubeadm join 192.168.2.60:6443 --token esce21.q6hetwm8si29qxwn \
    --discovery-token-ca-cert-hash sha256:00603a05805807501d7181c3d60b478788408cfe6cedefedb1f97569708be9c5
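
Back on the master, the newly joined nodes should show up (they may stay NotReady briefly until the flannel pod starts on them):
kubectl get nodes -o wide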

14. Test the Kubernetes cluster
Create a Pod in the cluster and verify that it runs properly:
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
Access URL: http://NodeIP:Port
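
The NodePort is assigned from the 30000-32767 range; a sketch of looking it up and testing the service (192.168.2.61 and port 30080 are example values):
kubectl get svc nginx
# if the service shows 80:30080/TCP, then
curl http://192.168.2.61:30080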

15. Common commands
Check node resource usage
kubectl describe node node1

Remove a node
kubectl drain <node-name> --ignore-daemonsets
kubectl delete node <node-name>
The first command, kubectl drain, marks the node as unschedulable and evicts the Pods running on it; the --ignore-daemonsets flag skips DaemonSet-managed Pods so that critical system Pods keep running.
The second command, kubectl delete node, removes the node from the cluster.

List all Pods across namespaces and the nodes they are running on
kubectl get pods --all-namespaces -o wide
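
To narrow the listing down to a single node, filter the pod list by the spec.nodeName field (node1 is just an example):
kubectl get pods --all-namespaces -o wide --field-selector spec.nodeName=node1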
