1. Planning Information
1.1 Architecture Diagram
1.2 Server Plan
Role | IP | Description |
---|---|---|
K8S Master ×2 | 192.168.7.101/102 | Kubernetes control plane; active/standby HA via a single VIP |
etcd ×3 (minimum) | 192.168.7.105/106/107 | Stores the Kubernetes cluster state |
Node ×2..N | 192.168.7.110/111 | Servers that actually run the containers; at least two for HA |
Harbor ×2 | 192.168.7.103/104 | Highly available image registry |
Haproxy ×2 | 192.168.7.108/109 | Highly available reverse proxy in front of the Kubernetes API server (VIP:6443) |
1.3 Hostname Plan
Role | IP | Hostname | VIP |
---|---|---|---|
K8S Master1 | 192.168.7.101 | k8s-master1.exmple.demo | 192.168.7.248 |
K8S Master2 | 192.168.7.102 | k8s-master2.exmple.demo | 192.168.7.248 |
Harbor1 | 192.168.7.103 | k8s-harbor1.exmple.demo | N/A |
Harbor2 | 192.168.7.104 | k8s-harbor2.exmple.demo | N/A |
etcd node 1 | 192.168.7.105 | k8s-etcd1.exmple.demo | N/A |
etcd node 2 | 192.168.7.106 | k8s-etcd2.exmple.demo | N/A |
etcd node 3 | 192.168.7.107 | k8s-etcd3.exmple.demo | N/A |
Haproxy1 | 192.168.7.108 | k8s-ha1.exmple.demo | N/A |
Haproxy2 | 192.168.7.109 | k8s-ha2.exmple.demo | N/A |
Node 1 | 192.168.7.110 | k8s-node1.exmple.demo | N/A |
Node 2 | 192.168.7.111 | k8s-node2.exmple.demo | N/A |
1.4 Software Versions
OS: Ubuntu Server 18.04 LTS
Kubernetes: v1.13.5
etcd: v3.4.7
Calico: v3.4.4
Docker CE: v19.03.6
CoreDNS: v1.2.6
Harbor: v1.2.2
2. Base Environment Preparation
2.1 System Configuration
Hostname, IP addressing, and other basic OS configuration are omitted here.
Disable the firewall (Ubuntu 18.04 ships with ufw rather than firewalld; run ufw disable instead if firewalld is not present):
systemctl stop firewalld
systemctl disable firewalld
Disable swap (required by the kubelet):
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
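To confirm swap is really off before installing any Kubernetes components:
# Prints nothing once all swap devices are disabled
swapon --show
# The Swap line should read 0 everywhere
free -m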
2.2 HA Load Balancing
Configure keepalived:
apt-get install keepalived
cp /usr/share/doc/keepalived/samples/keepalived.conf.sample /etc/keepalived/keepalived.conf
vi /etc/keepalived/keepalived.conf
# Edit the configuration file (master node, 192.168.7.108):
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 1
    priority 100
    advert_int 3
    unicast_src_ip 192.168.7.108
    unicast_peer {
        192.168.7.109
    }
    authentication {
        auth_type PASS
        auth_pass 123abc
    }
    virtual_ipaddress {
        192.168.7.248 dev ens33 label ens33:1
    }
}
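On k8s-ha2 (192.168.7.109) the same file is used with state BACKUP, a lower priority (e.g. 80), and the unicast_src_ip/unicast_peer addresses swapped. Then restart keepalived and confirm the VIP comes up on the master; the commands below are standard systemd/iproute2:
systemctl restart keepalived && systemctl enable keepalived
# On the MASTER node, the VIP 192.168.7.248 should now be attached to ens33 as ens33:1
ip addr show ens33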
Configure haproxy (both master nodes are added behind the VIP):
vi /etc/haproxy/haproxy.cfg
listen k8s_api_nodes_6443
    bind 192.168.7.248:6443
    mode tcp
    #balance leastconn
    server 192.168.7.101 192.168.7.101:6443 check inter 2000 fall 3 rise 5
    server 192.168.7.102 192.168.7.102:6443 check inter 2000 fall 3 rise 5
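Restart haproxy and check the listener. On the BACKUP node the VIP is not locally assigned, so haproxy can only bind 192.168.7.248:6443 if non-local binds are allowed; enabling net.ipv4.ip_nonlocal_bind is the usual fix:
# Allow binding to the VIP even when this node does not currently hold it
echo "net.ipv4.ip_nonlocal_bind = 1" >> /etc/sysctl.conf
sysctl -p
systemctl restart haproxy
# 192.168.7.248:6443 should show up as LISTEN
ss -tnlp | grep 6443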
2.3 Harbor with a Self-Signed HTTPS Certificate
Images will be kept on the internal Harbor registry instead of being downloaded from the Internet.
Download and unpack Harbor:
wget https://github.com/vmware/harbor/releases/download/v1.2.2/harbor-offline-installer-v1.2.2.tgz
tar xvf harbor-offline-installer-v1.2.2.tgz -C /usr/local/src/
cd /usr/local/src/harbor
mkdir certs
# Generate the private key
openssl genrsa -out /usr/local/src/harbor/certs/harbor-ca.key
# Issue the self-signed certificate; the CN must match the registry hostname configured below
openssl req -x509 -new -nodes -key /usr/local/src/harbor/certs/harbor-ca.key -subj "/CN=harbor.exmple.demo" -days 7120 -out /usr/local/src/harbor/certs/harbor-ca.crt
vim harbor.cfg
hostname = harbor.exmple.demo
ui_url_protocol = https
ssl_cert = /usr/local/src/harbor/certs/harbor-ca.crt
ssl_cert_key = /usr/local/src/harbor/certs/harbor-ca.key
harbor_admin_password = 123456
./install.sh
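Harbor 1.x's install.sh brings the stack up with docker-compose, so a quick status check looks like this (run from /usr/local/src/harbor; the curl assumes harbor.exmple.demo already resolves, e.g. via /etc/hosts):
# All harbor containers should show Up
docker-compose ps
# The UI should answer over HTTPS (-k because the certificate is self-signed)
curl -k https://harbor.exmple.demo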
Sync the CA certificate to the k8s masters so docker trusts the registry:
# on 192.168.7.101: create the certificate directory
mkdir -p /etc/docker/certs.d/harbor.exmple.demo
# on the harbor server: copy the certificate over
scp /usr/local/src/harbor/certs/harbor-ca.crt 192.168.7.101:/etc/docker/certs.d/harbor.exmple.demo
# on 192.168.7.101: make the registry hostname resolvable, then restart docker and log in
vim /etc/hosts
192.168.7.103 harbor.exmple.demo
systemctl restart docker
docker login harbor.exmple.demo
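To confirm the registry actually accepts pushes, a minimal smoke test; the baseimages project is assumed to already exist in Harbor (create it in the UI first):
docker pull alpine
docker tag alpine harbor.exmple.demo/baseimages/alpine:latest
docker push harbor.exmple.demo/baseimages/alpine:latest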
2.4 Deploy ansible
apt-get install python2.7
ln -s /usr/bin/python2.7 /usr/bin/python
apt-get install git ansible -y
ssh-keygen    # generate the key pair
apt-get install sshpass
# Public-key distribution script:
#!/bin/bash
# Target host list
IP="
192.168.7.101
192.168.7.102
192.168.7.103
192.168.7.104
192.168.7.105
192.168.7.106
192.168.7.107
192.168.7.108
192.168.7.109
192.168.7.110
192.168.7.111
"
for node in ${IP};do
    sshpass -p 123456 ssh-copy-id ${node} -o StrictHostKeyChecking=no
    if [ $? -eq 0 ];then
        echo "${node} key copied"
    else
        echo "${node} key copy failed"
    fi
done
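Save the script and run it once from the ansible node (the file name key-copy.sh is hypothetical; use whatever you saved it as):
bash key-copy.sh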
# docker certificate sync script:
#!/bin/bash
# Target host list
IP="
192.168.7.101
192.168.7.102
192.168.7.103
192.168.7.104
192.168.7.105
192.168.7.106
192.168.7.107
192.168.7.108
192.168.7.109
192.168.7.110
192.168.7.111
"
for node in ${IP};do
    sshpass -p 123456 ssh-copy-id ${node} -o StrictHostKeyChecking=no
    if [ $? -eq 0 ];then
        echo "${node} key copied, starting environment initialization..."
        ssh ${node} "mkdir -p /etc/docker/certs.d/harbor.exmple.demo"
        echo "Harbor certificate directory created!"
        scp /etc/docker/certs.d/harbor.exmple.demo/harbor-ca.crt ${node}:/etc/docker/certs.d/harbor.exmple.demo/harbor-ca.crt
        echo "Harbor certificate copied!"
        scp /etc/hosts ${node}:/etc/hosts
        echo "hosts file copied"
        scp -r /root/.docker ${node}:/root/
        echo "Harbor auth file copied!"
        scp -r /etc/resolv.conf ${node}:/etc/
    else
        echo "${node} key copy failed"
    fi
done
# Run the script to sync everything:
bash scp.sh
Set up the kubeasz ansible project:
git clone -b 0.6.1 https://github.com/easzlab/kubeasz.git
mkdir -p /bak && mv /etc/ansible/* /bak/
mv kubeasz/* /etc/ansible/
cd /etc/ansible/
cp example/hosts.m-masters.example ./hosts
vi hosts
# Deployment node: normally the node that runs the ansible playbooks
# Variable NTP_ENABLED (=yes/no) controls whether chrony time sync is installed cluster-wide
[deploy]
192.168.7.101 NTP_ENABLED=no
# Provide NODE_NAME for the etcd cluster as below; the etcd member count must be odd (1, 3, 5, 7...)
[etcd]
192.168.7.105 NODE_NAME=etcd1
192.168.7.106 NODE_NAME=etcd2
192.168.7.107 NODE_NAME=etcd3
[new-etcd] # reserved group for adding etcd nodes later
#192.168.7.x NODE_NAME=etcdx
[kube-master]
192.168.7.101
[new-master] # reserved group for adding master nodes later
#192.168.7.5
[kube-node]
192.168.7.110
[new-node] # reserved group for adding nodes later
#192.168.7.xx
# Parameter NEW_INSTALL: yes = install a new harbor, no = use an existing harbor server
# If no domain name is used, set HARBOR_DOMAIN=""
[harbor]
#192.168.7.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
# Load balancers running haproxy+keepalived (more than 2 nodes are supported; 2 is usually enough)
[lb]
192.168.7.109 LB_ROLE=backup
192.168.7.108 LB_ROLE=master
# [optional] external load balancer, for forwarding NodePort-exposed services etc. in your own environment
[ex-lb]
#192.168.7.6 LB_ROLE=backup EX_VIP=192.168.7.250
#192.168.7.7 LB_ROLE=master EX_VIP=192.168.7.250
[all:vars]
# --------- main cluster parameters ---------------
# Deployment mode: allinone, single-master, multi-master
DEPLOY_MODE=multi-master
# Major cluster version; currently supported: v1.8, v1.9, v1.10, v1.11, v1.12, v1.13
K8S_VER="v1.13"
# Cluster MASTER IP, i.e. the VIP on the LB nodes; the VIP listens on 6443 here, matching the haproxy configuration above
# On public clouds, use the internal address and listener port of the cloud load balancer instead
MASTER_IP="192.168.7.248"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
# Cluster network plugin: calico, flannel, kube-router or cilium
CLUSTER_NETWORK="calico"
# Service CIDR; must not overlap any existing internal network
SERVICE_CIDR="10.20.0.0/16"
# Pod CIDR (Cluster CIDR); must not overlap any existing internal network
CLUSTER_CIDR="172.31.0.0/16"
# NodePort range
NODE_PORT_RANGE="20000-60000"
# kubernetes service IP (pre-allocated; normally the first IP of SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"
# Cluster DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP="10.20.254.254"
# Cluster DNS domain
CLUSTER_DNS_DOMAIN="exmple.local."
# Username and password for cluster basic auth
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="123456"
# --------- additional parameters --------------------
# Default directory for binaries
bin_dir="/usr/bin"
# Certificate directory
ca_dir="/etc/kubernetes/ssl"
# Deployment directory, i.e. the ansible working directory; changing it is not recommended
base_dir="/etc/ansible"
Prepare the k8s binaries:
cd /etc/ansible/bin
tar xvf k8s.1-13-5.tar.gz
mv bin/* .
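A quick sanity check that the expected binaries landed in /etc/ansible/bin:
/etc/ansible/bin/kube-apiserver --version
/etc/ansible/bin/kubectl version --client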
Initialize the environment and deploy the HA k8s cluster with the ansible playbooks:
cd /etc/ansible/
# Environment initialization
ansible-playbook 01.prepare.yml
# Deploy the etcd cluster
ansible-playbook 02.etcd.yml
# Deploy docker
ansible-playbook 03.docker.yml
# Deploy the masters
ansible-playbook 04.kube-master.yml
# Deploy the nodes (every node must have docker installed); first point the pause image at the local harbor and push it there, as sketched below
vim roles/kube-node/defaults/main.yml
# base/pause container image
SANDBOX_IMAGE: "harbor.exmple.demo/baseimages/pause-amd64:3.1"
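The pause image referenced above must already exist in the local harbor before the node playbook runs. A minimal sketch to seed it, assuming the host can pull mirrorgooglecontainers/pause-amd64:3.1 (a public Docker Hub mirror of the gcr.io image) or already has the image loaded offline:
docker pull mirrorgooglecontainers/pause-amd64:3.1
docker tag mirrorgooglecontainers/pause-amd64:3.1 harbor.exmple.demo/baseimages/pause-amd64:3.1
docker push harbor.exmple.demo/baseimages/pause-amd64:3.1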
ansible-playbook 05.kube-node.yml
# Deploy the network plugin (calico)
ansible-playbook 06.network.yml
# Verify calico's running state
calicoctl node status
# Create test pods to check that cross-host pod networking works
kubectl run net-test1 --image=alpine --replicas=4 sleep 360000
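With four replicas spread across the nodes, cross-host connectivity can be verified by pinging one test pod from another. The pod name and IP below are illustrative; take the real ones from the -o wide output:
# Show pod IPs and the nodes they were scheduled on
kubectl get pods -o wide
# From pod A, ping pod B's IP (replace the name and address with your own)
kubectl exec -it net-test1-5fcc69db59-2zs2w -- ping -c 3 172.31.1.2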
# Add a node
vi /etc/ansible/hosts
[new-node] # reserved group for adding nodes later
192.168.7.111
ansible-playbook 20.addnode.yml
# Add a master
vi /etc/ansible/hosts
[new-master] # reserved group for adding master nodes later
192.168.7.102
ansible-playbook 21.addmaster.yml
Verify the current cluster state:
calicoctl node status
kubectl get nodes
3. Application Environment on k8s
Dashboard
# 1. Load the dashboard image and push it to the local harbor
tar xvf dashboard-yaml_image-1.10.1.tar.gz
docker load -i kubernetes-dashboard-amd64-v1.10.1.tar.gz
docker tag gcr.io/google-containers/kubernetes-dashboard-amd64:v1.10.1 harbor.exmple.demo/baseimages/kubernetes-dashboard-amd64:v1.10.1
docker push harbor.exmple.demo/baseimages/kubernetes-dashboard-amd64:v1.10.1
# 2. In the yaml files, change the dashboard image to the local harbor address:
image: harbor.exmple.demo/baseimages/kubernetes-dashboard-amd64:v1.10.1
# 3. Create the services
kubectl apply -f .
# 4. Verify the dashboard has started
kubectl get pods -n kube-system
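If the bundled yaml files define an admin ServiceAccount (common in these offline packages; the admin-user name below is an assumption, check the yaml for the actual account), the dashboard login token can be read like this:
# Find the ServiceAccount's secret and print its bearer token (account name assumed)
kubectl -n kube-system get secret | grep admin-user
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')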
4. DNS Service
Deploy CoreDNS (the gcr.io image must already be present locally, e.g. loaded from an offline tarball):
docker tag gcr.io/google-containers/coredns:1.2.6 harbor.exmple.demo/baseimages/coredns:1.2.6
docker push harbor.exmple.demo/baseimages/coredns:1.2.6
vim coredns.yaml
- image: harbor.exmple.demo/baseimages/coredns:1.2.6
kubectl apply -f coredns.yaml
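A quick resolution check from one of the net-test pods created earlier (the pod name is illustrative; the cluster DNS domain was set to exmple.local. above):
# Resolve the kubernetes service through CoreDNS from inside a pod
kubectl exec -it net-test1-5fcc69db59-2zs2w -- nslookup kubernetes.default.svc.exmple.local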