OS: CentOS Linux release 7.7.1908 (Core)
Docker version: 18.09.1
Kubernetes version: v1.16.3
At least 2 GB of RAM and 2 CPUs per node
A working network connection
The required ports must be open (for the initial setup you can simply disable the firewall)
I. Configure the network
1. Use a static IP address on each host, for example:
vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static #use a static address
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=e4f81192-b2e7-407b-90ea-a36bd0cc230a
DEVICE=ens33
ONBOOT=yes
IPADDR="192.168.1.100" #this host's IP address
PREFIX="24"
GATEWAY="192.168.1.2" #default gateway
DNS1="8.8.8.8" #DNS server
2. Restart the network with systemctl restart network. Configure the other hosts in the same way.
II. Set hostnames
1. Set the hostname on each host. For example, run the following on the first host:
hostnamectl set-hostname airport-k8s-m1
2. Configure /etc/hosts
api.k8s.airport.com is the domain name we use for the cluster's API server. Real DNS resolution would also work; here we simulate it with /etc/hosts entries. (Note that the resolver only uses the first matching entry, so this is not real load balancing; in production use DNS or a load balancer.)
cat >> /etc/hosts <<EOF
192.168.1.100 airport-k8s-m1
192.168.1.101 airport-k8s-m2
192.168.1.102 airport-k8s-m3
192.168.1.100 api.k8s.airport.com
192.168.1.101 api.k8s.airport.com
192.168.1.102 api.k8s.airport.com
EOF
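You can quickly confirm that the new entries resolve, for example:
getent hosts api.k8s.airport.com
ping -c 1 airport-k8s-m2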
III. Generate SSH keys for passwordless login (optional)
ssh-keygen -f /root/.ssh/id_rsa -N ''
for i in airport-k8s-m1 airport-k8s-m2 airport-k8s-m3
do
ssh-copy-id $i
done
IV. Disable SELinux
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config && setenforce 0
V. Time synchronization
Install and start the chrony service:
yum install -y chrony
systemctl start chronyd
systemctl enable chronyd
VI. Disable the firewall
In production you should keep the firewall running and open only the required ports (an example follows the commands below); for this setup we simply disable it:
systemctl stop firewalld
systemctl disable firewalld
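If you would rather keep firewalld enabled, below is a sketch of the ports kubeadm needs for v1.16 (adjust to your own topology; if you use flannel with VXLAN, also open 8472/udp between nodes):
# on control-plane nodes
firewall-cmd --permanent --add-port=6443/tcp        # Kubernetes API server
firewall-cmd --permanent --add-port=2379-2380/tcp   # etcd server client API
firewall-cmd --permanent --add-port=10250/tcp       # kubelet API
firewall-cmd --permanent --add-port=10251/tcp       # kube-scheduler
firewall-cmd --permanent --add-port=10252/tcp       # kube-controller-manager
# on worker nodes
firewall-cmd --permanent --add-port=10250/tcp       # kubelet API
firewall-cmd --permanent --add-port=30000-32767/tcp # NodePort services
firewall-cmd --reload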
VII. Disable swap
sed -ri 's/.*swap.*/#&/' /etc/fstab #comment out the swap entry so it stays disabled after reboot
swapoff -a
VIII. Configure IPVS kernel modules
1. By default, kube-proxy runs in iptables mode in a cluster deployed with kubeadm; IPVS mode needs the kernel modules below. (CentOS 8 has already dropped iptables.) A sketch for actually switching kube-proxy to IPVS follows the module script.
yum install -y ipset ipvsadm
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
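To verify the modules are loaded:
lsmod | grep -e ip_vs -e nf_conntrack
If you also want kube-proxy to run in IPVS mode (optional), a minimal sketch: after the kubeadm-config.yaml file is created in section XI, append a KubeProxyConfiguration document to it before running kubeadm init:
cat >> kubeadm-config.yaml <<EOF
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF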
2. Configure kernel parameters
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
#net.ipv4.ip_nonlocal_bind = 1
#net.ipv4.ip_forward = 1
#vm.swappiness=0
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
3. Raise the open-file limits
echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
IX. Install Docker (see the official documentation)
sudo yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine
rm -rf /var/lib/docker
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# (optional) the nightly and test channels are not needed here; make sure only the stable repository is enabled
sudo yum-config-manager --disable docker-ce-nightly
sudo yum-config-manager --disable docker-ce-test
sudo yum install docker-ce docker-ce-cli containerd.io
To install a specific version, first list the available versions, then install the one you want:
yum list docker-ce --showduplicates | sort -r
sudo yum -y install docker-ce-18.09.1 docker-ce-cli-18.09.1 containerd.io
sudo systemctl start docker
sudo docker run hello-world
docker version
The Docker settings that Kubernetes depends on are:
1. Enable Docker to start on boot
systemctl enable docker && systemctl start docker
2. Set the cgroup driver to systemd and configure the storage driver. (The command below only works once the /etc/docker directory exists, so create it first.)
su root
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
3. Restart Docker so the new configuration takes effect.
systemctl restart docker
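To confirm the change took effect, for example:
docker info | grep -i "cgroup driver"   # should show: Cgroup Driver: systemd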
X. Install kubeadm
1. Install kubeadm, kubelet, and kubectl
As before, these must be installed on every node.
Add the repository:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
If the Google repository is unreachable, replace it with the Alibaba Cloud mirror:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
2. Install the packages (pinned to the version used in this guide)
sudo yum install -y kubelet-1.16.3 kubeadm-1.16.3 kubectl-1.16.3 --disableexcludes=kubernetes
sudo systemctl enable --now kubelet
3. Enable kubectl auto-completion
bash-completion is normally installed by default; install it just in case:
sudo yum install bash-completion -y
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
XI. Install the Kubernetes cluster
Set up airport-k8s-m1 first.
1. Prepare the configuration file
cat > kubeadm-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.16.3
apiServer:
  certSANs:
  - "api.k8s.airport.com"
controlPlaneEndpoint: "api.k8s.airport.com:6443"
imageRepository: registry.aliyuncs.com/google_containers
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
EOF
2. Pull the required images
kubeadm config images pull --config kubeadm-config.yaml
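If you want to preview which images kubeadm will pull, you can list them first:
kubeadm config images list --config kubeadm-config.yaml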
3. Initialize the cluster
kubeadm init --config kubeadm-config.yaml
The output looks like the following:
[init] Using Kubernetes version: v1.16.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [node1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local api.k8s.gy.com api.k8s.gy.com] and IPs [10.96.0.1 192.168.44.100]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [node1 localhost] and IPs [192.168.44.100 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [node1 localhost] and IPs [192.168.44.100 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 19.007860 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.16" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node node1 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node node1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: ks271v.t4pp7n7k0qd5danp
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join api.k8s.airport.com:6443 --token ks271v.t4pp7n7k0qd5danp \
--discovery-token-ca-cert-hash sha256:6ab48ebd0024ae79aff1a2fcfa63aa2b61e58083b4aa265ef8fc17d54e6dcca6 \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join api.k8s.airport.com:6443 --token ks271v.t4pp7n7k0qd5danp \
--discovery-token-ca-cert-hash sha256:6ab48ebd0024ae79aff1a2fcfa63aa2b61e58083b4aa265ef8fc17d54e6dcca6
4. On every node from which you want to manage the cluster, run:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
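A quick check that kubectl can now reach the control plane:
kubectl cluster-info
kubectl get nodes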
XII. Install a network plugin
1. Flannel is used as the example here.
Note: download exactly this revision of the manifest; do not fetch the latest version from the flannel project.
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml
2. Once the plugin is installed, check that the CoreDNS pods are running:
kubectl get pods --all-namespaces
XIII. Join the cluster as additional masters
1. A node that joins the cluster as a master needs the primary master's certificates and keys. The script below copies the local certificates to the other master nodes. (Alternatively, kubeadm can distribute the certificates itself; a sketch of that approach follows the script.)
Certificate copy script sync.master.ca.sh:
#!/bin/bash
vhost="airport-k8s-m2 airport-k8s-m3"
usr=root
who=`whoami`
if [[ "$who" != "$usr" ]];then
echo "请使用 root 用户执行或者 sudo ./sync.master.ca.sh"
exit 1
fi
echo $who
# CA files to copy from the primary master
caFiles=(
/etc/kubernetes/pki/ca.crt
/etc/kubernetes/pki/ca.key
/etc/kubernetes/pki/sa.key
/etc/kubernetes/pki/sa.pub
/etc/kubernetes/pki/front-proxy-ca.crt
/etc/kubernetes/pki/front-proxy-ca.key
/etc/kubernetes/pki/etcd/ca.crt
/etc/kubernetes/pki/etcd/ca.key
/etc/kubernetes/admin.conf
)
pkiDir=/etc/kubernetes/pki/etcd
for h in $vhost
do
    ssh ${usr}@$h "mkdir -p $pkiDir"
    echo "Dirs for ca scp created, start to scp..."
    # scp each file to the same path on the target host
    for f in "${caFiles[@]}"
    do
        scp $f ${usr}@$h:$f
    done
    echo "Ca files transferred to $h ... ok"
done
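As mentioned above, an alternative to copying the files by hand is to let kubeadm distribute the certificates itself. A minimal sketch (the token, hash, and certificate key below are placeholders; use the values from your own output):
# on airport-k8s-m1: upload the control-plane certificates as an encrypted Secret and print the certificate key
kubeadm init phase upload-certs --upload-certs
# on the joining master: pass that key so kubeadm downloads the certificates automatically
kubeadm join api.k8s.airport.com:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <certificate-key>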
2. Run the following on airport-k8s-m2 and airport-k8s-m3 to join the cluster. (Note: the token and sha256 hash are unique to your own kubeadm init output.)
# join as an additional master (control-plane node):
kubeadm join api.k8s.airport.com:6443 --token ks271v.t4pp7n7k0qd5danp \
--discovery-token-ca-cert-hash sha256:6ab48ebd0024ae79aff1a2fcfa63aa2b61e58083b4aa265ef8fc17d54e6dcca6 \
--control-plane
# join as a worker node:
kubeadm join api.k8s.airport.com:6443 --token ks271v.t4pp7n7k0qd5danp \
--discovery-token-ca-cert-hash sha256:6ab48ebd0024ae79aff1a2fcfa63aa2b61e58083b4aa265ef8fc17d54e6dcca6
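Tokens expire after 24 hours by default. If you no longer have the original token or hash, you can print a fresh worker join command on the primary master:
kubeadm token create --print-join-command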
3. Check the result
kubectl get nodes
NAME STATUS ROLES AGE VERSION
node1 Ready master 127m v1.16.3
node2 Ready master 106m v1.16.3
node3 Ready master 103m v1.16.3
kubectl get pods --all-namespaces #pulling images can take a while, be patient
4. Allow workloads on the masters (remove the master taint)
kubectl taint nodes --all node-role.kubernetes.io/master-
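To confirm the taint was removed, for example:
kubectl describe node airport-k8s-m1 | grep Taints   # should show: Taints: <none>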