Reference: https://www.kubernetes.org.cn/5273.html
To reset a previous kubelet/kubeadm installation:
kubeadm reset -f
rm -rf /etc/kubernetes/pki/
Installing k8s 1.14: one master, multiple workers
1. Add the k8s yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
On the master node: yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
On the worker nodes: yum install -y kubelet kubeadm --disableexcludes=kubernetes
2. Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum makecache
yum list docker-ce --showduplicates|sort -r
yum -y install docker-ce-<version>
3. System preparation
3.1 Disable SELinux
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
3.2 Enable IP forwarding and bridge filtering
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
You can also run sysctl --system to load every sysctl configuration file and confirm that k8s.conf was applied.
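If sysctl reports that the net.bridge.* keys do not exist, the br_netfilter kernel module is probably not loaded yet; load it and make it persistent:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf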
3.3 Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
3.4 Disable swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
4. Start Docker and enable it at boot
Set Docker's cgroup driver to systemd (if image pulls are slow, you can also point Docker at a different registry mirror; see the example after the snippet below):
vim /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
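If pulls from Docker Hub are slow as well, a registry mirror can be declared in the same file. A minimal sketch; the mirror URL is a placeholder, substitute one reachable from your network:
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": ["https://<your-mirror>"]
}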
systemctl start docker && systemctl enable docker
5. Create the cluster with kubeadm
5.1 Configure kubelet to tolerate swap
echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet
Enable kubelet at boot and start it:
systemctl enable kubelet && systemctl start kubelet
5.2 Initialize the master
List the images kubeadm will use: kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.14.1
k8s.gcr.io/kube-controller-manager:v1.14.1
k8s.gcr.io/kube-scheduler:v1.14.1
k8s.gcr.io/kube-proxy:v1.14.1
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.10
k8s.gcr.io/coredns:1.3.1
Pull the required images from the gcrxio mirror (k8s.gcr.io is unreachable from many networks):
docker pull gcrxio/kube-apiserver:v1.14.1
docker pull gcrxio/kube-controller-manager:v1.14.1
docker pull gcrxio/kube-scheduler:v1.14.1
docker pull gcrxio/pause:3.1
docker pull gcrxio/etcd:3.3.10
docker pull gcrxio/kube-proxy:v1.14.1
docker pull coredns/coredns:1.3.1
Retag the images to the names kubeadm expects:
docker tag gcrxio/kube-proxy:v1.14.1 k8s.gcr.io/kube-proxy:v1.14.1
docker tag gcrxio/kube-apiserver:v1.14.1 k8s.gcr.io/kube-apiserver:v1.14.1
docker tag gcrxio/kube-controller-manager:v1.14.1 k8s.gcr.io/kube-controller-manager:v1.14.1
docker tag gcrxio/kube-scheduler:v1.14.1 k8s.gcr.io/kube-scheduler:v1.14.1
docker tag gcrxio/etcd:3.3.10 k8s.gcr.io/etcd:3.3.10
docker tag gcrxio/pause:3.1 k8s.gcr.io/pause:3.1
docker tag coredns/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1
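The pull-and-retag steps above can also be written as one short loop; this sketch assumes the gcrxio mirror publishes every tag that kubeadm config images list reported:
for img in kube-apiserver:v1.14.1 kube-controller-manager:v1.14.1 \
           kube-scheduler:v1.14.1 kube-proxy:v1.14.1 pause:3.1 etcd:3.3.10; do
    docker pull gcrxio/$img
    docker tag gcrxio/$img k8s.gcr.io/$img
done
# coredns comes from its own Docker Hub repo
docker pull coredns/coredns:1.3.1
docker tag coredns/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1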
Initialize the master node (the advertise address is the master's own IP, which must match the address workers will join):
kubeadm init --apiserver-advertise-address 192.168.100.146 --kubernetes-version=v1.14.1 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
When initialization completes, output like the following appears:
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.100.146:6443 --token v1ngzc.wanoz3ld4fgisup9 \
--discovery-token-ca-cert-hash sha256:f8e4685cac03e990aa32120413a7ed539c24942bff19c097febeb86709dd21ef
Set up kubectl access for a regular account (these commands are shown in the master init output above):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Check the cluster resources:
[root@kube-master1 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
[root@kube-master1 ~]# kubectl get csr
NAME AGE REQUESTOR CONDITION
csr-t5cxt 2m system:node:kube-master1 Approved,Issued
[root@kube-master1 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
kube-master1 NotReady master 2m18s v1.14.1
6. Install flannel
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
If the flannel manifest above cannot be applied (e.g. the quay.io image cannot be pulled), switch the image to a mirror instead:
curl -o kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
sed -i "s/quay.io\/coreos\/flannel/quay-mirror.qiniu.com\/coreos\/flannel/g" kube-flannel.yml
kubectl apply -f kube-flannel.yml
rm -f kube-flannel.yml
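Confirm that the flannel pods come up and the master turns Ready:
kubectl -n kube-system get pods -o wide
kubectl get node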
7. Configure the worker nodes: run the join command (generated during master init) on each node:
kubeadm join 192.168.100.146:6443 --token v1ngzc.wanoz3ld4fgisup9 \
--discovery-token-ca-cert-hash sha256:f8e4685cac03e990aa32120413a7ed539c24942bff19c097febeb86709dd21ef
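If the token printed at init time has expired (the default TTL is 24 hours), a fresh join command can be generated on the master:
kubeadm token create --print-join-command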
8. Check node status on the master:
[root@kube-master1 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
kube-master1 Ready master 127m v1.14.1
kube-node-1 Ready <none> 121m v1.14.1
kube-node-2 Ready <none> 121m v1.14.1
9. Add shell auto-completion for kubectl
echo "source <(kubectl completion bash)" >> ~/.bashrc
That completes the one-master, multi-worker setup.
Next, the highly available (HA) setup.
This HA installation deploys a k8s cluster with three masters (three etcd members) and two workers, and needs haproxy plus keepalived as the load balancer. The haproxy sources can be downloaded from the link below (for easier testing, this guide installs haproxy and keepalived via yum):
https://src.fedoraproject.org/repo/pkgs/haproxy/
Host IP plan

| IP | Hostname | Role |
|---|---|---|
| 192.168.100.145 | master0 | k8s control plane, etcd |
| 192.168.100.146 | master1 | k8s control plane, etcd |
| 192.168.100.147 | master2 | k8s control plane, etcd |
| 192.168.100.254 | vip | virtual IP (keepalived) |
| 192.168.100.148 | node1 | k8s worker |
| 192.168.100.149 | node2 | k8s worker |
Be sure to configure keepalived and haproxy first.
Add the worker nodes only after the flannel network has been created.
The base k8s environment setup is not repeated here; refer to the one-master, single-etcd deployment above.
1. Set up the virtual IP with keepalived
First install keepalived and haproxy.
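Both packages ship in the standard CentOS repos, so yum works directly:
yum install -y keepalived haproxy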
On master0, add the keepalived configuration (/etc/keepalived/keepalived.conf) to create the virtual IP:
global_defs {
    router_id lb01
    vrrp_mcast_group4 224.0.0.18
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.100.254/24 dev ens33 label ens33:1
    }
}
systemctl start keepalived.service && systemctl enable keepalived.service
On master1 and master2, use the same configuration but change state to BACKUP and lower the priority; see the sketch below.
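A sketch of the master1 instance block, assuming the same interface name ens33 (master2 would use a still lower priority, e.g. 90; global_defs stays as on master0):
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.100.254/24 dev ens33 label ens33:1
    }
}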
With keepalived running on all three masters, the virtual IP is now highly available.
2. Set kernel parameters so haproxy can forward requests to the masters
Edit /etc/sysctl.conf and add the following (ip_nonlocal_bind lets haproxy bind to the VIP even on nodes that do not currently hold it):
vim /etc/sysctl.conf
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1
sysctl -p
3. Configure haproxy
vim /etc/haproxy/haproxy.cfg
global
    log 127.0.0.1 local2
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 20000
    user haproxy
    group haproxy
    daemon
    nbproc 2
    ulimit-n 65535
    stats socket /var/lib/haproxy/stats
defaults
    mode http
    log global
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor except 127.0.0.0/8
    option redispatch
    retries 3
    balance roundrobin
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 20000
frontend k8s-api
    mode tcp
    bind 192.168.100.254:8443
    default_backend k8s-api
backend k8s-api
    mode tcp
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server k8s-api-1 192.168.100.145:6443 check
    server k8s-api-2 192.168.100.146:6443 check
    server k8s-api-3 192.168.100.147:6443 check
Start it: systemctl enable haproxy && systemctl start haproxy
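A quick sanity check that haproxy is listening on the API port (run it on the node that currently holds the VIP):
ss -tlnp | grep 8443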
4. On master0, create a kubeadm-config.yaml with the initialization parameters:
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.1
controlPlaneEndpoint: "192.168.100.254:8443"
networking:
  podSubnet: 10.244.0.0/16
apiServer:
  certSANs:
  - 192.168.100.145
  - 192.168.100.146
  - 192.168.100.147
  - 192.168.100.254
Initialize with this config: kubeadm init --config kubeadm-config.yaml
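As an alternative to manually copying certificates (the step below), kubeadm v1.14 can upload the control-plane certificates to the cluster itself; init then prints a certificate key that each additional master passes to its join command. A hedged sketch (token, hash, and key are placeholders from the actual init output):
kubeadm init --config kubeadm-config.yaml --experimental-upload-certs
# on the other masters:
kubeadm join 192.168.100.254:8443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --experimental-control-plane --certificate-key <key>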
After master0 initializes, set up kubectl access (same commands as the single-master case):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
On master1 and master2, create the target directories first: mkdir -p /etc/kubernetes/pki/etcd/ ~/.kube
Then copy the certificates from master0 to master1 and master2:
################# copy to master1 ###########################
scp /etc/kubernetes/pki/ca.crt master1:/etc/kubernetes/pki/ca.crt
scp /etc/kubernetes/pki/ca.key master1:/etc/kubernetes/pki/ca.key
scp /etc/kubernetes/pki/sa.key master1:/etc/kubernetes/pki/sa.key
scp /etc/kubernetes/pki/sa.pub master1:/etc/kubernetes/pki/sa.pub
scp /etc/kubernetes/pki/front-proxy-ca.crt master1:/etc/kubernetes/pki/front-proxy-ca.crt
scp /etc/kubernetes/pki/front-proxy-ca.key master1:/etc/kubernetes/pki/front-proxy-ca.key
scp /etc/kubernetes/pki/etcd/ca.crt master1:/etc/kubernetes/pki/etcd/ca.crt
scp /etc/kubernetes/pki/etcd/ca.key master1:/etc/kubernetes/pki/etcd/ca.key
scp /etc/kubernetes/admin.conf master1:/etc/kubernetes/admin.conf
scp /etc/kubernetes/admin.conf master1:~/.kube/config
################# copy to master2 ###########################
scp /etc/kubernetes/pki/ca.crt master2:/etc/kubernetes/pki/ca.crt
scp /etc/kubernetes/pki/ca.key master2:/etc/kubernetes/pki/ca.key
scp /etc/kubernetes/pki/sa.key master2:/etc/kubernetes/pki/sa.key
scp /etc/kubernetes/pki/sa.pub master2:/etc/kubernetes/pki/sa.pub
scp /etc/kubernetes/pki/front-proxy-ca.crt master2:/etc/kubernetes/pki/front-proxy-ca.crt
scp /etc/kubernetes/pki/front-proxy-ca.key master2:/etc/kubernetes/pki/front-proxy-ca.key
scp /etc/kubernetes/pki/etcd/ca.crt master2:/etc/kubernetes/pki/etcd/ca.crt
scp /etc/kubernetes/pki/etcd/ca.key master2:/etc/kubernetes/pki/etcd/ca.key
scp /etc/kubernetes/admin.conf master2:/etc/kubernetes/admin.conf
scp /etc/kubernetes/admin.conf master2:~/.kube/config
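The two scp blocks above can be condensed into a loop; this sketch assumes passwordless SSH from master0 to the other masters:
for host in master1 master2; do
    ssh $host "mkdir -p /etc/kubernetes/pki/etcd ~/.kube"
    for f in pki/ca.crt pki/ca.key pki/sa.key pki/sa.pub \
             pki/front-proxy-ca.crt pki/front-proxy-ca.key \
             pki/etcd/ca.crt pki/etcd/ca.key admin.conf; do
        scp /etc/kubernetes/$f $host:/etc/kubernetes/$f
    done
    scp /etc/kubernetes/admin.conf $host:~/.kube/config
done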
Join the other master nodes to the cluster (note: through the VIP on the haproxy port 8443):
kubeadm join 192.168.100.254:8443 --token hn6oep.zxb1nk5wb117aedc \
--discovery-token-ca-cert-hash sha256:36588d7cdf4b1b91726629dd8ff8006257e73a0d2e175779e12e7242a6e5c0bf \
--experimental-control-plane
Install flannel on a master:
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Join the worker nodes to the cluster:
kubeadm join 192.168.100.254:8443 --token hn6oep.zxb1nk5wb117aedc \
--discovery-token-ca-cert-hash sha256:36588d7cdf4b1b91726629dd8ff8006257e73a0d2e175779e12e7242a6e5c0bf
That completes the HA k8s cluster setup.