Ubuntu 18.04 下部署k8s
Ubuntu 18.04 下部署k8s
一、更新Ubuntu源
mv /etc/apt/sources.list /etc/apt/sources.list.bak
cat /etc/apt/sources.list.bak |grep -v "#" |grep -v "^$" >sources.list
sed -i s/archive.ubuntu.com/mirrors.ustc.edu.cn/g /etc/apt/sources.list
sed -i s/security.ubuntu.com/mirrors.ustc.edu.cn/g /etc/apt/sources.list
apt -y update && apt -y upgrade
# 2
、timedatectlsed -i s/en_US/C/g /etc/default/localetimedatectl set-timezone Asia/Shanghai# 3 、bash-completionsed -i 97,99s/#//g /root/.bashrc# 4、sshecho "PermitRootLogin yes" >>/etc/ssh/sshd_configpasswd root << "EOF"passwordpasswordEOFsystemctl reload ssh# 5
、hosts
vim /etc/hosts
10.0.0.20 k8s-master00
10.0.0.21 k8s-master01
10.0.0.22 k8s-master02
10.0.0.23 k8s-node01
10.0.0.24 k8s-node02
10.0.0.25 k8s-bl-master
# 6 、ssh-keygen
ssh-keygen -t rsa
for i in `cat /root/*.txt`;do echo $i;ssh-copy-id -i .ssh/id_rsa.pub $i;done
# 7、swap
swapoff -a
sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab
# 8
、network
net=`cat /etc/netplan/00-installer-config.yaml |awk 'NR==4{print $1}'`
sed -i "s/${net}/eth0:/g" /etc/netplan/00-installer-config.yaml
sed -i '11s/""/"net.ifnames=0 biosdevname=0"/g' /etc/default/grub
update-grub
reboot
二 、安裝ipvs
apt -y install ipvsadm ipset sysstat conntrack libseccomp2 libseccomp-devcat >/etc/modules-load.d/ipvs.conf << EOFip_vsip_vs_lcip_vs_wlcip_vs_rrip_vs_wrrip_vs_lblcip_vs_lblcrip_vs_dhip_vs_ship_vs_foip_vs_nqip_vs_sedip_vs_ftpnf_conntrackip_tablesip_setxt_setipt_setipt_rpfilteript_REJECTipipEOFsystemctl restart systemd-modules-load.servicelsmod |grep -e ip_vs -e nf_conntrack_ipv4三、下載安裝containerd
wget https://github.com/containerd/containerd/releases/download/v1.6.1/cri-containerd-cni-1.6.1-linux-amd64.tar.gztar --no-overwrite-dir -C / -xzf cri-containerd-cni-1.6.1-linux-amd64.tar.gzsystemctl daemon-reloadsystemctl enable --now containerd修改 config.tomlcontainerd config default >/etc/containerd/config.toml---sed -i "s#k8s.gcr.io#registry.aliyuncs.com/google_containers#g" /etc/containerd/config.tomlsed -i "s#SystemdCgroup = false#SystemdCgroup = true#g" /etc/containerd/config.tomlsed -i '153a [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]' /etc/containerd/config.toml # 8個空格 # endpoint 10個空格sed -i '154a endpoint = ["https://registry.aliyuncs.com"]' /etc/containerd/config.toml修改crictl.yamlmv /etc/crictl.yaml /etc/crictl.yaml.bakcat >/etc/crictl.yaml << "EOF"runtime-endpoint: unix:///run/containerd/containerd.sockimage-endpoint: unix:///run/containerd/containerd.socktimeout: 0debug: falsepull-image-on-create: falsedisable-pull-on-run: falseEOF四、安裝nginx 做四層代理
apt -y install nginxcp /etc/nginx/nginx.conf /etc/nginx/nginx.conf.bakvim /etc/nginx/nginx.conf---......stream { log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent'; access_log /var/log/nginx/k8s-access.log main; upstream k8s-apiserver { server 10.0.0.20:6443; server 10.0.0.21:6443; server 10.0.0.22:6443; } server { listen 6444; proxy_pass k8s-apiserver; }}http { log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; ... ...}---systemctl enable --now nginx.servicesystemctl status nginx.service五、安裝keepalive 做高可用
apt -y install keepalived#keepalived configcat >/etc/keepalived/keepalived.conf << "EOF"global_defs { notification_email { acassen@firewall.loc failover@firewall.loc sysadmin@firewall.loc } notification_email_from Alexandre.Cassen@firewall.loc smtp_server 127.0.0.1 smtp_connect_timeout 30 router_id NGINX_MASTER}vrrp_script check_nginx { script "/etc/keepalived/check_nginx.sh" interval 5 weight -1 fall 2 rise 1}vrrp_instance VI_1 { state MASTER interface eth0 # 修改為實際網(wǎng)卡名 virtual_router_id 51 # VRRP 路由 ID 實例,每個實例是唯一的 priority 100 # 優(yōu)先級
,備服務(wù)器設(shè)置 90 advert_int 1 # 指定 VRRP 心跳包通告間隔時間,默認 1 秒 authentication { auth_type PASS auth_pass K8SHA_KA_AUTH } # 虛擬 IP virtual_ipaddress { 10.0.0.25/24 } track_script { check_nginx }}EOF#health configcat >/etc/keepalived/check_nginx.sh << "EOF"#!/bin/bash count=$(ps -ef |grep nginx | grep sbin | egrep -cv "grep|$") if [ "$count" -eq 0 ];then systemctl stop keepalived fiEOF---systemctl enable --now keepalived.servicesystemctl status keepalived.service六、master端部署cfssl、etcd 、ca certificate、etcd certificate
6.1 、下載cfssl
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64 -O /usr/local/bin/cfsslwget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64 -O /usr/local/bin/cfssljsonwget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64 -O /usr/local/bin/cfssl-certinfochmod +x /usr/local/bin/cfssl*chown -Rf root:root /usr/local/bin/cfssl*6.2 、etcd目錄規(guī)劃
# all Master# 1、etcd-sslmkdir -p /etc/etcd/ssl/# 2
、etcd-WorkingDirectorymkdir -p /var/lib/etcd/default.etcd# 3
、kubernetes-sslmkdir -p /etc/kubernetes/ssl# 4 、kubernetes-logmkdir -p /var/log/kubernetes6.3 、ca 證書生成
mkdir -p ~/workcd ~/work/---cat >ca-csr.json << "EOF"{ "CN": "kubernetes", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Shanghai", "L": "Shanghai", "O": "k8s", "OU": "system" } ]}EOF---cat >ca-config.json << "EOF"{ "signing": { "default": { "expiry": "87600h" }, "profiles": { "kubernetes": { "usages": [ "signing", "key encipherment", "server auth", "client auth" ], "expiry": "87600h" } } }}EOF---cfssl gencert -initca ca-csr.json | cfssljson -bare cacp ca*.pem /etc/etcd/ssl/---# send to other masterfor i in `cat ~/MasterNodes.txt`;do echo $i;scp /etc/etcd/ssl/ca*.pem $i:/etc/etcd/ssl;done6.4 配置etcd證書
cat >etcd-csr.json << "EOF"{ "CN": "etcd", "hosts": [ "127.0.0.1", "10.0.0.20", "10.0.0.21", "10.0.0.22", "10.0.0.25" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Shanghai", "L": "Shanghai", "O": "k8s", "OU": "system" } ]}EOF---cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcdcp etcd*.pem /etc/etcd/ssl/---# send to otherfor i in `cat ~/MasterNodes.txt`;do echo $i;scp /etc/etcd/ssl/etcd*.pem $i:/etc/etcd/ssl;done6.5 、下載及配置etcd
# download etcd
wget https://github.com/etcd-io/etcd/releases/download/v3.5.0/etcd-v3.5.0-linux-amd64.tar.gz
# tar etcd-*.tar.gz
tar -xf etcd-v3.5.0-linux-amd64.tar.gz --strip-components=1 -C ~/work/ etcd-v3.5.0-linux-amd64/etcd{,ctl}
chown -Rf root:root etcd*
cp -arp etcd* /usr/local/bin/
# send to other
for i in `cat ~/MasterNodes.txt`;do echo $i;scp /usr/local/bin/etcd{,ctl} $i:/usr/local/bin/;done
cat >/etc/etcd/etcd.conf << "EOF"
ETCD_NAME='etcd1'
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.0.20:2380" # change ip
ETCD_LISTEN_CLIENT_URLS="https://10.0.0.20:2379,http://127.0.0.1:2379" # change ip
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.0.20:2380" # change ip
ETCD_ADVERTISE_CLIENT_URLS="https://10.0.0.20:2379" # change ip
ETCD_INITIAL_CLUSTER="etcd1=https://10.0.0.20:2380,etcd2=https://10.0.0.21:2380,etcd3=https://10.0.0.22:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
6.6 、添加etcd systemd啟動
cat >/usr/lib/systemd/system/etcd.service << "EOF"[Unit]Description=Etcd ServiceAfter=network.targetAfter=network-online.targetWants=network-online.target[Service]Type=notifyEnvironmentFile=-/etc/etcd/etcd.confWorkingDirectory=/var/lib/etcd/ExecStart=/usr/local/bin/etcd --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --trusted-ca-file=/etc/etcd/ssl/ca.pem --peer-cert-file=/etc/etcd/ssl/etcd.pem --peer-key-file=/etc/etcd/ssl/etcd-key.pem --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem --peer-client-cert-auth --client-cert-authRestart=on-failureRestartSec=10LimitNOFILE=65536[Install]WantedBy=multi-user.targetEOF---# send to otherfor i in `cat ~/MasterNodes.txt`;do echo $i;scp /usr/lib/systemd/system/etcd.service $i:/usr/lib/systemd/system/;done啟動etcd
# 1、start etcdsystemctl daemon-reloadsystemctl enable --now etcd.servicesystemctl status etcd.service# 2
、check etcdETCDCTL_API=3etcdctl --endpoints=https://10.0.0.20:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health+----------------------------+--------+-------------+-------+| ENDPOINT | HEALTH | TOOK | ERROR |+----------------------------+--------+-------------+-------+| https://10.0.0.20:2379 | true | 16.188005ms | || https://10.0.0.21:2379 | true | 16.693314ms | || https://10.0.0.22:2379 | true | 16.089367ms | |+----------------------------+--------+-------------+-------+七 、安裝 k8s-master
# 1、download
wget https://dl.k8s.io/v1.23.5/kubernetes-server-linux-amd64.tar.gz
# 2、tar
tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C ~/work kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
scp kube{ctl,-apiserver,-controller-manager,-scheduler} /usr/local/bin/
# 3、kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
for i in `cat ~/MasterNodes.txt`;do echo $i;scp ~/work/kube{ctl,-apiserver,-controller-manager,-scheduler} $i:/usr/local/bin/;done
# 4
、kube{let,-proxy}
for i in `cat ~/WorkNodes.txt`;do echo $i;scp ~/work/kube{let,-proxy} $i:/usr/local/bin/;done
# 5
、send pem
cp /etc/etcd/ssl/ca*.pem /etc/kubernetes/ssl/
for i in `cat ~/WorkNodes.txt`;do echo $i;scp /etc/etcd/ssl/ca*.pem $i:/etc/kubernetes/ssl/;done
# 添加kube-apiserver token
# NOTE(review): the here-doc body was lost in extraction; the following is the
# conventional token.csv line used with kubelet bootstrap — confirm against the original post
cat >/etc/kubernetes/token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
7.2 、添加kube-apiserver 證書
cat >kube-apiserver-csr.json << "EOF"{ "CN": "kubernetes", "hosts": [ "127.0.0.1", "10.0.0.20", "10.0.0.21", "10.0.0.22", "10.0.0.23", "10.0.0.24", "10.0.0.25", "10.96.0.1", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Shanghai", "L": "Shanghai", "O": "k8s", "OU": "system" } ]}EOF---cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiservercp kube-apiserver*.pem /etc/kubernetes/ssl/for i in `cat ~/MasterNodes.txt`;do echo $i;scp ~/work/kube-apiserver*.pem $i:/etc/kubernetes/ssl/;done7.3、天kube-apiserver 配置文件
# change --bind-address= and --advertise-address=---cat >/etc/kubernetes/kube-apiserver.conf << "EOF"KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota --anonymous-auth=false --bind-address=10.0.0.20 --secure-port=6443 --advertise-address=10.0.0.20 --insecure-port=0 --authorization-mode=Node,RBAC --runtime-config=api/all=true --enable-bootstrap-token-auth --service-cluster-ip-range=10.96.0.0/16 --token-auth-file=/etc/kubernetes/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --client-ca-file=/etc/kubernetes/ssl/ca.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-issuer=https://kubernetes.default.svc.cluster.local --etcd-cafile=/etc/etcd/ssl/ca.pem --etcd-certfile=/etc/etcd/ssl/etcd.pem --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem --etcd-servers=https://10.0.0.20:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 --enable-swagger-ui=true --allow-privileged=true --apiserver-count=3 --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/log/kube-apiserver-audit.log --event-ttl=1h --alsologtostderr=true --logtostderr=false --log-dir=/var/log/kubernetes --v=4"EOF7.4 、添加kube-apiserver systemd啟動
cat >/usr/lib/systemd/system/kube-apiserver.service << "EOF"[Unit]Description=Kubernetes API ServerDocumentation=https://github.com/kubernetes/kubernetesAfter=etcd.serviceWants=etcd.service[Service]EnvironmentFile=-/etc/kubernetes/kube-apiserver.confExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTSRestart=on-failureRestartSec=5Type=notifyLimitNOFILE=65536[Install]WantedBy=multi-user.targetEOF---for i in `cat ~/MasterNodes.txt`;do echo $i;scp /usr/lib/systemd/system/kube-apiserver.service $i:/usr/lib/systemd/system/;done啟動kube-apiserver
systemctl daemon-reloadsystemctl enable --now kube-apiserver.servicesystemctl status kube-apiserver.service---# checkcurl --insecure https://10.0.0.20:6443---{ "kind": "Status", "apiVersion": "v1", "metadata": { }, "status": "Failure", "message": "Unauthorized", "reason": "Unauthorized", "code": 4017.5 、kubectl 安裝
# 添加 admin certificatecat >admin-csr.json << "EOF"{ "CN": "admin", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Shanghai", "L": "Shanghai", "O": "system:masters", "OU": "system" } ]}EOF---cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admincp admin*.pem /etc/kubernetes/ssl/---for i in `cat ~/MasterNodes.txt`;do echo $i;scp /etc/kubernetes/ssl/admin*.pem $i:/etc/kubernetes/ssl/;done # 添加 admin.config# 1、設(shè)置集群參數(shù)kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.0.0.25:6444 --kubeconfig=admin.config# 2、設(shè)置客戶端認證參數(shù)kubectl config set-credentials kubernetes-admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=admin.config# 3、設(shè)置上下文參數(shù)kubectl config set-context kubernetes --cluster=kubernetes --user=kubernetes-admin --kubeconfig=admin.config# 4
、設(shè)置當前上下文
kubectl config use-context kubernetes --kubeconfig=admin.config
# kubernetes-kubelet api
kubectl create clusterrolebinding kube-apiserver:kubelet-apiserver --clusterrole=system:kubelet-api-admin --user kubernetes
kubectl create clusterrolebinding kubernetes --clusterrole=cluster-admin --user=kubernetes
#其它節(jié)點
cp ~/work/admin.config /etc/kubernetes
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.config $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
for i in `cat ~/MasterNodes.txt`;do echo $i;scp /etc/kubernetes/admin.config $i:/etc/kubernetes/;done
echo "export KUBECONFIG=/etc/kubernetes/admin.config" >>/etc/profile
source /etc/profile
# kubectl(bash-completion)
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >>/etc/profile
source /etc/profile
kubectl cluster-info
---
Kubernetes control plane is running at https://10.0.0.20:6443
---
kubectl get componentstatuses
---
NAME                 STATUS      MESSAGE                                                                                       ERROR
scheduler            Unhealthy   Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused
controller-manager   Unhealthy   Get "https://127.0.0.1:10257/healthz": dial tcp 127.0.0.1:10257: connect: connection refused
etcd-0               Healthy     {"health":"true","reason":""}
etcd-1               Healthy     {"health":"true","reason":""}
etcd-2               Healthy     {"health":"true","reason":""}
---
kubectl get all --all-namespaces
---
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   56m
# verify kubectl
kubectl cluster-info
---
Kubernetes control plane is running at https://10.0.0.20:6443
---
kubectl get componentstatuses
---
NAME                 STATUS      MESSAGE                                                                                       ERROR
scheduler            Unhealthy   Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused
controller-manager   Unhealthy   Get "https://127.0.0.1:10257/healthz": dial tcp 127.0.0.1:10257: connect: connection refused
etcd-0               Healthy     {"health":"true","reason":""}
etcd-1               Healthy     {"health":"true","reason":""}
etcd-2               Healthy     {"health":"true","reason":""}
---
kubectl get all --all-namespaces
---
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   56m
八 、kube-controller-manager
# kube-controller-manager certificatecat >kube-controller-manager-csr.json << "EOF"{ "CN": "system:kube-controller-manager", "hosts": [ "127.0.0.1", "10.0.0.20", "10.0.0.21", "10.0.0.22", "10.0.0.25" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Shanghai", "L": "Shanghai", "O": "system:kube-controller-manager", "OU": "Kubernetes" } ]}EOF---cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-managercp kube-controller-manager*.pem /etc/kubernetes/ssl/for i in `cat ~/MasterNodes.txt`;do echo $i;scp /etc/kubernetes/ssl/kube-controller-manager*.pem $i:/etc/kubernetes/ssl/;done# kube-controller-manager.kubeconfig# 1
、設(shè)置集群參數(shù)kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.0.0.25:6444 --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig# 2、設(shè)置客戶端認證參數(shù)kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig# 3、設(shè)置上下文參數(shù)kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig# 4、設(shè)置當前上下文kubectl config use-context system:kube-controller-manager --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig# kube-controller-manager.confcat >/etc/kubernetes/kube-controller-manager.conf << "EOF"KUBE_CONTROLLER_MANAGER_OPTS="--v=2 --secure-port=10257 --bind-address=127.0.0.1 --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig --service-cluster-ip-range=10.96.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --allocate-node-cidrs=true --cluster-cidr=10.244.0.0/16 --experimental-cluster-signing-duration=87600h --root-ca-file=/etc/kubernetes/ssl/ca.pem --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem --leader-elect=true --feature-gates=RotateKubeletServerCertificate=true --controllers=*,bootstrapsigner,tokencleaner --horizontal-pod-autoscaler-sync-period=10s --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem --use-service-account-credentials=true"EOF ---for i in `cat ~/MasterNodes.txt`;do echo $i;scp /etc/kubernetes/kube-controller-manager* $i:/etc/kubernetes/;done# kube-controller-manager.service systemd 啟動cat >/usr/lib/systemd/system/kube-controller-manager.service << 
"EOF"[Unit]Description=Kubernetes Controller ManagerDocumentation=https://github.com/kubernetes/kubernetes[Service]EnvironmentFile=-/etc/kubernetes/kube-controller-manager.confExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTSRestart=on-failureRestartSec=5[Install]WantedBy=multi-user.targetEOF---for i in `cat ~/MasterNodes.txt`;do echo $i;scp /usr/lib/systemd/system/kube-controller-manager.service $i:/usr/lib/systemd/system/;done# start kube-controller-manager.servicesystemctl daemon-reloadsystemctl enable --now kube-controller-manager.servicesystemctl status kube-controller-manager.service九 、調(diào)度器kube-scheduler
# kube-scheduler certificatecat >kube-scheduler-csr.json << "EOF"{ "CN": "system:kube-scheduler", "hosts": [ "127.0.0.1", "10.0.0.20", "10.0.0.21", "10.0.0.22", "10.0.0.25" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Shanghai", "L": "Shanghai", "O": "system:kube-scheduler", "OU": "system" } ]}EOF---cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-schedulercp kube-scheduler*.pem /etc/kubernetes/ssl/for i in `cat ~/MasterNodes.txt`;do echo $i;scp /etc/kubernetes/ssl/kube-scheduler*.pem $i:/etc/kubernetes/ssl/;done# kube-scheduler.kubeconfig 1、設(shè)置集群參數(shù)kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.0.0.25:6444 --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig2 、設(shè)置客戶端認證參數(shù)kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig3、設(shè)置上下文參數(shù)kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig4、設(shè)置當前上下文kubectl config use-context system:kube-scheduler --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig# kube-scheduler.confcat >/etc/kubernetes/kube-scheduler.conf << "EOF"KUBE_SCHEDULER_OPTS="--address=127.0.0.1 --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig --leader-elect=true --alsologtostderr=true --logtostderr=false --log-dir=/var/log/kubernetes --v=2"EOF---for i in `cat ~/MasterNodes.txt`;do echo $i;scp /etc/kubernetes/kube-scheduler* $i:/etc/kubernetes/;done# kube-scheduler.servicecat >/usr/lib/systemd/system/kube-scheduler.service << "EOF"[Unit]Description=Kubernetes 
SchedulerDocumentation=https://github.com/kubernetes/kubernetes[Service]EnvironmentFile=-/etc/kubernetes/kube-scheduler.confExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTSRestart=on-failureRestartSec=5[Install]WantedBy=multi-user.targetEOF---for i in `cat ~/MasterNodes.txt`;do echo $i;scp /usr/lib/systemd/system/kube-scheduler.service $i:/usr/lib/systemd/system/;done#start kube-scheduler.servicesystemctl daemon-reloadsystemctl enable --now kube-scheduler.servicesystemctl status kube-scheduler.service十 、k8s node節(jié)點安裝
1、kubelet
#BOOTSTRAP_TOKENBOOTSTRAP_TOKEN=$(awk -F "," '{ print $1}' /etc/kubernetes/token.csv)1.2 kubelet-bootstrap.kubeconfig
# 1
、設(shè)置集群參數(shù)
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.0.0.25:6444 --kubeconfig=/root/work/kubelet-bootstrap.kubeconfig
# 2、設(shè)置客戶端認證參數(shù)
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=/root/work/kubelet-bootstrap.kubeconfig
# 3 、設(shè)置上下文參數(shù)
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=/root/work/kubelet-bootstrap.kubeconfig
# 4 、設(shè)置當前上下文
kubectl config use-context default --kubeconfig=/root/work/kubelet-bootstrap.kubeconfig
# 5
、創(chuàng)建clusterrolebindingkubectl delete clusterrolebinding kubelet-bootstrapkubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrapkubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap1.3 kubelet.json
cat >~/work/kubelet.json << "EOF"{ "kind": "KubeletConfiguration", "apiVersion": "kubelet.config.k8s.io/v1beta1", "authentication": { "x509": { "clientCAFile": "/etc/kubernetes/ssl/ca.pem" }, "webhook": { "enabled": true, "cacheTTL": "2m0s" }, "anonymous": { "enabled": false } }, "authorization": { "mode": "Webhook", "webhook": { "cacheAuthorizedTTL": "5m0s", "cacheUnauthorizedTTL": "30s" } }, "address": "10.0.0.23", "port": 10250, "readOnlyPort": 10255, "cgroupDriver": "systemd", "hairpinMode": "promiscuous-bridge", "serializeImagePulls": false, "clusterDomain": "cluster.local.", "clusterDNS": ["10.96.0.2"]}EOF1.4 kubelet.service
cat >~/work/kubelet.service <<"EOF"[Unit]Description=Kubernetes KubeletDocumentation=https://github.com/kubernetes/kubernetesAfter=containerd.serviceRequires=containerd.service[Service]WorkingDirectory=/var/lib/kubeletExecStart=/usr/local/bin/kubelet --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig --cert-dir=/etc/kubernetes/ssl --kubeconfig=/etc/kubernetes/kubelet.kubeconfig --config=/etc/kubernetes/kubelet.json --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 --v=2Restart=on-failureRestartSec=5[Install]WantedBy=multi-user.targetEOF---for i in `cat ~/WorkNodes.txt`;do echo $i;scp ~/work/kubelet.json ~/work/kubelet-bootstrap.kubeconfig $i:/etc/kubernetes;donefor i in `cat ~/WorkNodes.txt`;do echo $i;scp ~/work/kubelet.service $i:/usr/lib/systemd/system;donefor i in `cat ~/WorkNodes.txt`;do echo $i;scp /etc/kubernetes/ssl/ca.pem $i:/etc/kubernetes/ssl/;done1.5 start kubelet.services
mkdir -p /var/lib/kubeletsystemctl daemon-reloadsystemctl enable --now kubelet.servicesystemctl status kubelet.service1.6 Approve Nodes
kubectl get csr |grep node |awk '{print $1,$6}'
"+--- | ---+"
node-csr-BV7RZ1Mc1RFkWhH9jzJH8h8on_dRMB3an_7FgBUwWhk Pending
node-csr-wZOI__ACKylv7DlEPRK8iMg3_sYyBErjbGjxkMkRyPo Pending
"+--- | ---+"
kubectl certificate approve node-csr-BV7RZ1Mc1RFkWhH9jzJH8h8on_dRMB3an_7FgBUwWhk node-csr-wZOI__ACKylv7DlEPRK8iMg3_sYyBErjbGjxkMkRyPo
kubectl get nodes
NAME         STATUS   ROLES    AGE    VERSION
k8s-node01   Ready    <none>   118m   v1.23.5
k8s-node02   Ready    <none>   118m   v1.23.5
2 、kube-proxy
# kube-proxy certificatecat >kube-proxy-csr.json << "EOF"{ "CN": "system:kube-proxy", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Shanghai", "L": "Shanghai", "O": "k8s", "OU": "system" } ]}EOF---cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxycp kube-proxy*.pem /etc/kubernetes/ssl/for i in `cat ~/WorkNodes.txt`;do echo $i;scp ~/work/kube-proxy*.pem $i:/etc/kubernetes/ssl/;done2.3 kube-proxy.kubeconfig
# 1 、設(shè)置集群參數(shù)
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://10.0.0.25:6444 --kubeconfig=/root/work/kube-proxy.kubeconfig
# 2、設(shè)置客戶端認證參數(shù)
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=/root/work/kube-proxy.kubeconfig
# 3、設(shè)置上下文參數(shù)
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=/root/work/kube-proxy.kubeconfig
# 4、設(shè)置當前上下文
kubectl config use-context default --kubeconfig=/root/work/kube-proxy.kubeconfig
# kube-proxy.yaml
# 以下bindAddress均為宿主機ip,clusterCIDR為宿主機網(wǎng)段
---
cat >~/work/kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 10.0.0.23
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/24
healthzBindAddress: 10.0.0.23:10256
kind: KubeProxyConfiguration
metricsBindAddress: 10.0.0.23:10249
mode: "ipvs"
EOF
---
for i in `cat ~/WorkNodes.txt`;do echo $i;scp ~/work/kube-proxy.yaml ~/work/kube-proxy.kubeconfig $i:/etc/kubernetes/;done
2.4 kube-proxy.service
cat >~/work/kube-proxy.service << "EOF"[Unit]Description=Kubernetes Kube-Proxy ServerDocumentation=https://github.com/kubernetes/kubernetesAfter=network.target[Service]WorkingDirectory=/var/lib/kube-proxyExecStart=/usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.yaml --alsologtostderr=true --logtostderr=false --log-dir=/var/log/kubernetes --v=2Restart=on-failureRestartSec=5LimitNOFILE=65536[Install]WantedBy=multi-user.targetEOF---for i in `cat ~/WorkNodes.txt`;do echo $i;scp ~/work/kube-proxy.service $i:/usr/lib/systemd/system;done2.5 start kube-proxy.services
mkdir -p /var/lib/kube-proxy systemctl daemon-reload systemctl enable --now kube-proxy.service systemctl status kube-proxy.service
3 、網(wǎng)絡(luò) calico
wget https://docs.projectcalico.org/manifests/calico.yamlkubectl apply -f calico.yaml3.1 coredns
mv /etc/resolv.conf /etc/resolv.conf.bakln -s /run/systemd/resolve/resolv.conf /etc/systemctl restart systemd-resolved.service && systemctl enable systemd-resolved.servicecoredns.yaml
apiVersion: v1kind: ServiceAccountmetadata: name: coredns namespace: kube-system---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRolemetadata: labels: kubernetes.io/bootstrapping: rbac-defaults name: system:corednsrules: - apiGroups: - "" resources: - endpoints - services - pods - namespaces verbs: - list - watch - apiGroups: - discovery.k8s.io resources: - endpointslices verbs: - list - watch---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata: annotations: rbac.authorization.kubernetes.io/autoupdate: "true" labels: kubernetes.io/bootstrapping: rbac-defaults name: system:corednsroleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:corednssubjects:- kind: ServiceAccount name: coredns namespace: kube-system---apiVersion: v1kind: ConfigMapmetadata: name: coredns namespace: kube-systemdata: Corefile: | .:53 { errors health { lameduck 5s } ready kubernetes cluster.local in-addr.arpa ip6.arpa { fallthrough in-addr.arpa ip6.arpa } prometheus :9153 forward . /etc/resolv.conf { max_concurrent 1000 } cache 30 loop reload loadbalance }---apiVersion: apps/v1kind: Deploymentmetadata: name: coredns namespace: kube-system labels: k8s-app: kube-dns kubernetes.io/name: "CoreDNS"spec: # replicas: not specified here: # 1. Default is 1. # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on. 
strategy: type: RollingUpdate rollingUpdate: maxUnavailable: 1 selector: matchLabels: k8s-app: kube-dns template: metadata: labels: k8s-app: kube-dns spec: priorityClassName: system-cluster-critical serviceAccountName: coredns tolerations: - key: "CriticalAddonsOnly" operator: "Exists" nodeSelector: kubernetes.io/os: linux affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchExpressions: - key: k8s-app operator: In values: ["kube-dns"] topologyKey: kubernetes.io/hostname containers: - name: coredns image: coredns/coredns:1.8.4 imagePullPolicy: IfNotPresent resources: limits: memory: 170Mi requests: cpu: 100m memory: 70Mi args: [ "-conf", "/etc/coredns/Corefile" ] volumeMounts: - name: config-volume mountPath: /etc/coredns readOnly: true ports: - containerPort: 53 name: dns protocol: UDP - containerPort: 53 name: dns-tcp protocol: TCP - containerPort: 9153 name: metrics protocol: TCP securityContext: allowPrivilegeEscalation: false capabilities: add: - NET_BIND_SERVICE drop: - all readOnlyRootFilesystem: true livenessProbe: httpGet: path: /health port: 8080 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: httpGet: path: /ready port: 8181 scheme: HTTP dnsPolicy: Default volumes: - name: config-volume configMap: name: coredns items: - key: Corefile path: Corefile---apiVersion: v1kind: Servicemetadata: name: kube-dns namespace: kube-system annotations: prometheus.io/port: "9153" prometheus.io/scrape: "true" labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" kubernetes.io/name: "CoreDNS"spec: selector: k8s-app: kube-dns clusterIP: 10.96.0.2 ports: - name: dns port: 53 protocol: UDP - name: dns-tcp port: 53 protocol: TCP - name: metrics port: 9153 protocol: TCP安裝 coredns
kubectl apply -f coredns.yaml二進制安裝很繁瑣
,請耐心看完展開閱讀全文投稿時間 :2022-05-14 最后更新:2022-05-14
.jpg)
標簽:氣流干燥設(shè)備