1. Environment preparation
Operating system: CentOS 7.6
Container engine: Docker CE 19.03
Kubernetes: 1.20.4
2. Overall server plan
k8s-master1 192.168.145.100 kube-apiserver,kube-controller-manager,kube-scheduler,kubelet,kube-proxy,docker
k8s-node1 192.168.145.101 kubelet,kube-proxy,docker
k8s-node2 192.168.145.102 kubelet,kube-proxy,docker
k8s-etcd-1 192.168.145.103 etcd
k8s-etcd-2 192.168.145.104 etcd
k8s-etcd-3 192.168.145.105 etcd
3. System initialization
3.1. Disable the firewall (all nodes)
systemctl stop firewalld
systemctl disable firewalld
3.2. Disable SELinux (all nodes)
setenforce 0 # temporary
sed -i 's/enforcing/disabled/' /etc/selinux/config # permanent
3.3. Disable swap (all nodes)
swapoff -a # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent
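Optional sanity check that swap is really off (free and grep are standard on a stock CentOS 7 install):
free -h # the Swap line should show 0B
grep swap /etc/fstab # the swap entry should now be commented out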
3.4. Set the hostname according to the plan (all nodes)
hostnamectl set-hostname <hostname>
3.5. Add hosts entries (all nodes)
cat >> /etc/hosts << EOF
192.168.145.100 k8s-master1
192.168.145.101 k8s-node1
192.168.145.102 k8s-node2
192.168.145.103 k8s-etcd-1
192.168.145.104 k8s-etcd-2
192.168.145.105 k8s-etcd-3
EOF
3.6. Pass bridged IPv4 traffic to iptables chains (all nodes)
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system # apply the settings
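If sysctl --system complains that the net.bridge.bridge-nf-call-* keys do not exist, the br_netfilter kernel module is probably not loaded yet. A minimal fix, loading the module now and on every boot, then re-applying:
modprobe br_netfilter # provides the bridge-nf-call sysctls
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
EOF
sysctl --system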
3.7. Time synchronization (all nodes)
yum install ntpdate -y
ntpdate time.windows.com
4. Certificate overview
5. Deploy the etcd cluster
Etcd is a distributed key-value store and Kubernetes uses it as its backing datastore, so prepare the etcd database first. To avoid a single point of failure, etcd should be deployed as a cluster; this guide uses three members, which tolerates the failure of one machine.
5.1. Prepare the cfssl certificate tooling
5.2. Operate on the master node
Download the binaries
mkdir /root/liu/cfssl && cd /root/liu/cfssl/
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
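Optionally confirm the tools are installed and on the PATH:
cfssl version # prints the cfssl release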
5.3. Generate etcd certificates
5.3.1. Create working directories
mkdir -p /root/liu/{etcd,k8s} && cd /root/liu/etcd
5.3.2. Self-signed certificate authority (CA)
cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"www": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
cat > ca-csr.json << EOF
{
"CN": "etcd CA",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing"
}
]
}
EOF
Generate the CA certificate; this produces ca.pem and ca-key.pem:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
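A quick check that the CA files landed in the working directory:
ls ca*pem # expect ca-key.pem and ca.pem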
5.3.3. Issue the etcd HTTPS certificate with the self-signed CA
Create the certificate signing request file
cat > server-csr.json << EOF
{
"CN": "etcd",
"hosts": [
"192.168.145.103",
"192.168.145.104",
"192.168.145.105"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing"
}
]
}
EOF
The IPs in the hosts field are the internal cluster-communication IPs of all etcd nodes.
Generate the certificate; this produces server.pem and server-key.pem:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
5.4. Deploy the etcd cluster
5.4.1. Download the etcd binaries
mkdir /root/liu/package && cd /root/liu/package
wget https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
5.4.2. Create working directories and unpack the binary package
mkdir /opt/etcd/{bin,cfg,ssl} -p
tar zxvf /root/liu/package/etcd-v3.4.9-linux-amd64.tar.gz
cp /root/liu/package/etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/
5.4.3. Create the etcd configuration file
cat > /opt/etcd/cfg/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.145.103:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.145.103:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.145.103:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.145.103:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.145.103:2380,etcd-2=https://192.168.145.104:2380,etcd-3=https://192.168.145.105:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
5.4.4. Configuration file notes:
ETCD_NAME: node name, unique within the cluster
ETCD_DATA_DIR: data directory
ETCD_LISTEN_PEER_URLS: listen address for peer (cluster) communication
ETCD_LISTEN_CLIENT_URLS: listen address for client access
ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster
ETCD_ADVERTISE_CLIENT_URLS: client address advertised to the cluster
ETCD_INITIAL_CLUSTER: addresses of all cluster members
ETCD_INITIAL_CLUSTER_TOKEN: cluster token
ETCD_INITIAL_CLUSTER_STATE: state when joining the cluster; new for a brand-new cluster, existing to join an existing one
5.4.5. Manage etcd with systemd
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
--cert-file=/opt/etcd/ssl/server.pem \
--key-file=/opt/etcd/ssl/server-key.pem \
--peer-cert-file=/opt/etcd/ssl/server.pem \
--peer-key-file=/opt/etcd/ssl/server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \
--logger=zap
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
5.4.6. Copy the generated certificates to the location referenced by the service file
cp /root/liu/etcd/ca*pem /root/liu/etcd/server*pem /opt/etcd/ssl/
5.4.7. Start etcd and enable it at boot
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
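Note: when etcd is started on node 1 alone, the start command may appear to hang or even time out because the other members are not reachable yet; this is expected and resolves itself once nodes 2 and 3 are started. To watch progress in another terminal:
journalctl -u etcd -f # follow the etcd log while the remaining members come up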
5.4.8. Copy everything generated on node 1 to nodes 2 and 3 (this also creates the working directories on those nodes)
scp -r /opt/etcd/ k8s-etcd-2:/opt/
scp /usr/lib/systemd/system/etcd.service k8s-etcd-2:/usr/lib/systemd/system/
scp -r /opt/etcd/ k8s-etcd-3:/opt/
scp /usr/lib/systemd/system/etcd.service k8s-etcd-3:/usr/lib/systemd/system/
5.4.9. On nodes 2 and 3, change the node name and the server IPs in etcd.conf
Node 2
vim /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-2" # 修改此处,节点2改为etcd-2,节点3改为etcd-3
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="192.168.145.104:2380" # 修改此处为当前服务器IP
ETCD_LISTEN_CLIENT_URLS="https://192.168.145.104:2379" # 修改此处为当前服务器IP
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.145.104:2380" # 修改此处为当前服务器IP
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.145.104:2379" # 修改此处为当前服务器IP
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.145.103:2380,etcd-2=https://192.168.145.104:2380,etcd-3=https://192.168.145.105:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
Node 3
vim /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-3" # 修改此处,节点2改为etcd-2,节点3改为etcd-3
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="192.168.145.105:2380" # 修改此处为当前服务器IP
ETCD_LISTEN_CLIENT_URLS="https://192.168.145.105:2379" # 修改此处为当前服务器IP
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.145.105:2380" # 修改此处为当前服务器IP
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.145.105:2379" # 修改此处为当前服务器IP
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.145.103:2380,etcd-2=https://10.0.0.72192.168.145.104:2380,etcd-3=https://192.168.145.105:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
5.4.10. Start etcd and enable it at boot
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
5.4.11. Check cluster status
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.145.103:2379,https://192.168.145.104:2379,https://192.168.145.105:2379" endpoint health --write-out=table
+------------------------------+--------+------------+-------+
|           ENDPOINT           | HEALTH |    TOOK    | ERROR |
+------------------------------+--------+------------+-------+
| https://192.168.145.103:2379 |   true | 9.054571ms |       |
| https://192.168.145.104:2379 |   true | 9.054571ms |       |
| https://192.168.145.105:2379 |   true | 9.054571ms |       |
+------------------------------+--------+------------+-------+
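Besides endpoint health, member list and endpoint status are handy for confirming membership and which member is the leader; a sketch using the same certificates:
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.145.103:2379,https://192.168.145.104:2379,https://192.168.145.105:2379" member list --write-out=table
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.145.103:2379,https://192.168.145.104:2379,https://192.168.145.105:2379" endpoint status --write-out=table # the IS LEADER column shows the current leader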
5.4.12. Troubleshooting when errors occur
journalctl -u etcd -l
systemctl status etcd
cat /var/log/messages
6. Install Docker (binary installation chosen here) (master and node machines)
6.1. Download and install
cd /root/liu/package
wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.9.tgz
Unpack the binary package
tar zxvf docker-19.03.9.tgz
cp docker/* /usr/bin
docker version
6.2. Manage Docker with systemd
cat > /usr/lib/systemd/system/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF
6.3. Start Docker and enable it at boot
systemctl daemon-reload
systemctl start docker
systemctl enable docker
6.4. Configure a registry mirror
mkdir -p /etc/docker # the directory may not exist yet after a binary install
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
systemctl restart docker
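Optional check that the mirror was picked up after the restart:
docker info | grep -A 1 "Registry Mirrors" # should list https://b9pmyelo.mirror.aliyuncs.com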
7. Deploy kube-apiserver
7.1. Self-signed certificate authority (CA)
cd /root/liu/k8s
cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
Generate the CA certificate; this produces ca.pem and ca-key.pem:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
7.2. Issue the kube-apiserver HTTPS certificate with the self-signed CA
cat > server-csr.json << EOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"192.168.145.100",
"192.168.145.101",
"192.168.145.102",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
Note: the IPs in the hosts field of the file above are the IPs of all nodes.
Generate the certificate; this produces server.pem and server-key.pem:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
7.3. Download and unpack the binary package
cd /root/liu/package
wget https://dl.k8s.io/v1.20.4/kubernetes-server-linux-amd64.tar.gz
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
tar zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin
cp kubectl /usr/bin/
7.4. Create the configuration file
cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--etcd-servers=https://192.168.145.103:2379,https://192.168.145.104:2379,https://192.168.145.105:2379 \\
--bind-address=192.168.145.100 \\
--secure-port=6443 \\
--advertise-address=192.168.145.100 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-32767 \\
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-issuer=api \\
--service-account-signing-key-file=/opt/kubernetes/ssl/server-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
--requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--proxy-client-cert-file=/opt/kubernetes/ssl/server.pem \\
--proxy-client-key-file=/opt/kubernetes/ssl/server-key.pem \\
--requestheader-allowed-names=kubernetes \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF
7.5. Note: in the double backslash \\ above, the first backslash is an escape character and the second is the line-continuation character; the escape is needed so that the heredoc (EOF) writes a literal \ into the file and the line breaks are preserved.
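A small, self-contained demonstration of that escaping rule (the file name /tmp/demo.conf and the DEMO_OPTS variable are made up for illustration):
# inside an unquoted heredoc, \\ is written to the file as a single \,
# which then acts as a line-continuation character when the file is later read
# (for example by systemd's EnvironmentFile); a single \ would be consumed by the
# heredoc itself and the lines would be joined.
cat > /tmp/demo.conf << EOF
DEMO_OPTS="--flag-one=1 \\
--flag-two=2"
EOF
cat /tmp/demo.conf # the written file keeps the trailing \ and the line break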
7.6. Parameter notes
--logtostderr: log to stderr (false sends logs to --log-dir instead)
--v: log verbosity level
--log-dir: log directory
--etcd-servers: etcd cluster endpoints
--bind-address: listen address
--secure-port: HTTPS secure port
--advertise-address: address advertised to the cluster
--allow-privileged: allow privileged containers
--enable-admission-plugins: admission control plugins
--service-cluster-ip-range: Service virtual IP range
--authorization-mode: authorization modes; enables RBAC and Node authorization
--enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
--token-auth-file: bootstrap token file
--service-node-port-range: port range allocated to NodePort Services
--kubelet-client-xxx: client certificate the apiserver uses to access kubelets
--tls-xxx-file: apiserver HTTPS certificates
Parameters required from version 1.20 on: --service-account-issuer, --service-account-signing-key-file
--etcd-xxxfile: certificates for connecting to the etcd cluster
--audit-log-xxx: audit logging
Aggregation layer settings: --requestheader-client-ca-file, --proxy-client-cert-file, --proxy-client-key-file, --requestheader-allowed-names, --requestheader-extra-headers-prefix, --requestheader-group-headers, --requestheader-username-headers, --enable-aggregator-routing
Copy the certificates generated earlier to the paths referenced in the configuration file:
cp /root/liu/k8s/ca*pem /root/liu/k8s/server*pem /opt/kubernetes/ssl/
7.7. Enable the TLS Bootstrapping mechanism
TLS Bootstrapping: once the apiserver has TLS authentication enabled, the kubelet and kube-proxy on every node must present a valid CA-signed certificate to communicate with kube-apiserver. With many nodes, issuing these client certificates by hand is a lot of work and complicates scaling the cluster. To simplify this, Kubernetes introduced TLS bootstrapping to issue client certificates automatically: the kubelet authenticates as a low-privilege user and requests a certificate from the apiserver, which signs it dynamically. This approach is strongly recommended on worker nodes; it is currently used mainly for the kubelet, while kube-proxy still uses a certificate we issue centrally.
7.8. TLS bootstrapping workflow
7.9. Create the token file
7.9.1. Generate a token yourself
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
cat > /opt/kubernetes/cfg/token.csv << EOF
5ec291f50943b0b8d4e5d879b656328a,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
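The token value above is only an example. A sketch that generates a fresh token and writes it into token.csv in one step; the same token must be reused for the --token flag when creating bootstrap.kubeconfig in section 11.5:
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/cfg/token.csv << EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
echo ${BOOTSTRAP_TOKEN} # keep this value for section 11.5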
7.10. Manage kube-apiserver with systemd
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
7.11. Start kube-apiserver and enable it at boot
systemctl daemon-reload
systemctl start kube-apiserver
systemctl enable kube-apiserver
7.12. Note: if startup fails, check whether the apiserver can reach etcd and check the configuration file for stray characters
journalctl -u kube-apiserver -l # view detailed error messages
8. Deploy kube-controller-manager
8.1. Create the configuration file
cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--cluster-signing-duration=87600h0m0s"
EOF
8.2. Configuration notes
--kubeconfig: kubeconfig used to connect to the apiserver
--leader-elect: automatic leader election when multiple instances run (HA)
--cluster-signing-cert-file / --cluster-signing-key-file: CA that automatically signs certificates for kubelets; must match the apiserver's CA
8.3. Generate the kubeconfig file
Generate the kube-controller-manager certificate
cd /root/liu/k8s
# create the certificate signing request file
cat > kube-controller-manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
8.4. Generate the kubeconfig file (the following are shell commands; run them directly in a terminal)
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server="https://192.168.145.100:6443" \
--kubeconfig="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
kubectl config set-credentials kube-controller-manager \
--client-certificate=./kube-controller-manager.pem \
--client-key=./kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-controller-manager \
--kubeconfig="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
kubectl config use-context default --kubeconfig="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
8.5. Manage kube-controller-manager with systemd
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
8.6. Start kube-controller-manager and enable it at boot
systemctl daemon-reload
systemctl start kube-controller-manager
systemctl enable kube-controller-manager
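Once kubectl is set up (section 10), leader election can be verified; assuming the default leases resource lock used by 1.20, the controller manager appears as a Lease holder in kube-system:
kubectl -n kube-system get leases # kube-controller-manager should list a holder once it is running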
9. Deploy kube-scheduler
9.1. Create the configuration file
cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect \\
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \\
--bind-address=127.0.0.1"
EOF
9.2. Generate the kube-scheduler certificate
cd /root/liu/k8s
# create the certificate signing request file
cat > kube-scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
# generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
9.3. Generate the kubeconfig file (the following are shell commands; run them directly in a terminal)
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server="https://192.168.145.100:6443" \
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig
kubectl config set-credentials kube-scheduler \
--client-certificate=./kube-scheduler.pem \
--client-key=./kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-scheduler \
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig
kubectl config use-context default --kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig
9.4. Manage kube-scheduler with systemd
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
9.5. Start kube-scheduler and enable it at boot
systemctl daemon-reload
systemctl start kube-scheduler
systemctl enable kube-scheduler
10. Check cluster status
10.1. Generate the certificate kubectl uses to connect to the cluster
cd /root/liu/k8s
cat > admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
10.2. Generate the kubeconfig file
mkdir /root/.kube
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server="https://192.168.145.100:6443" \
--kubeconfig="/root/.kube/config"
kubectl config set-credentials cluster-admin \
--client-certificate=./admin.pem \
--client-key=./admin-key.pem \
--embed-certs=true \
--kubeconfig="/root/.kube/config"
kubectl config set-context default \
--cluster=kubernetes \
--user=cluster-admin \
--kubeconfig="/root/.kube/config"
kubectl config use-context default --kubeconfig="/root/.kube/config"
10.3. Check the status of the cluster components with kubectl
kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
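Since kubectl get cs is deprecated, the apiserver's own readiness checks give an alternative view of health; this uses the admin kubeconfig created above:
kubectl get --raw='/readyz?verbose' # every check should report ok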
10.4. Authorize the kubelet-bootstrap user to request certificates
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
11. Deploy the Worker Nodes (still operating on the master)
11.1. Create working directories and copy binaries
# create the working directories on every worker node (already present on the master; newly added nodes need them)
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
# copy files from the unpacked kubernetes server package
cd /root/liu/package/kubernetes/server/bin
cp kubelet kube-proxy /opt/kubernetes/bin
11.2. Deploy kubelet: create the configuration file
cat > /opt/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--hostname-override=k8s-master1 \\
--network-plugin=cni \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"
EOF
11.3. Parameter notes
--hostname-override: node display name, unique within the cluster
--network-plugin: enable CNI
--kubeconfig: empty path; the file is generated automatically and later used to connect to the apiserver
--bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
--config: configuration parameter file
--cert-dir: directory where kubelet certificates are generated
--pod-infra-container-image: image of the pause container that manages the Pod network
11.4. Configuration parameter file
cat > /opt/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
11.5. Generate the bootstrap kubeconfig used when the kubelet first joins the cluster
# generate the kubelet bootstrap kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server="https://192.168.145.100:6443" \
--kubeconfig="/opt/kubernetes/cfg/bootstrap.kubeconfig"
kubectl config set-credentials "kubelet-bootstrap" \
--token="5ec291f50943b0b8d4e5d879b656328a" \
--kubeconfig="/opt/kubernetes/cfg/bootstrap.kubeconfig"
kubectl config set-context default \
--cluster=kubernetes \
--user="kubelet-bootstrap" \
--kubeconfig="/opt/kubernetes/cfg/bootstrap.kubeconfig"
kubectl config use-context default --kubeconfig="/opt/kubernetes/cfg/bootstrap.kubeconfig"
11.6. Manage kubelet with systemd
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
11.7. Start kubelet and enable it at boot
systemctl daemon-reload
systemctl start kubelet
systemctl enable kubelet
11.8. Approve the kubelet certificate request so the node joins the cluster
View kubelet certificate requests
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-9c-gV7fhdyBFpybXgaZXXEj8ZUr6z9UwmY5TT49jCJ8 62s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
Approve the request
kubectl certificate approve node-csr-9c-gV7fhdyBFpybXgaZXXEj8ZUr6z9UwmY5TT49jCJ8
View nodes (the node shows NotReady because the network plugin has not been deployed yet)
kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master1 NotReady <none> 10s v1.20.4
11.9. Deploy kube-proxy: create the configuration file
cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--config=/opt/kubernetes/cfg/kube-proxy-config.yml"
EOF
11.10. Configuration parameter file
cat > /opt/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-master1
clusterCIDR: 10.0.0.0/24
EOF
11.11. Generate the kube-proxy.kubeconfig file
Generate the kube-proxy certificate
cd /root/liu/k8s
Create the certificate signing request file
cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
11.12. Generate the kubeconfig file
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server="https://192.168.145.100:6443" \
--kubeconfig="/opt/kubernetes/cfg/kube-proxy.kubeconfig"
kubectl config set-credentials kube-proxy \
--client-certificate=./kube-proxy.pem \
--client-key=./kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig="/opt/kubernetes/cfg/kube-proxy.kubeconfig"
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig="/opt/kubernetes/cfg/kube-proxy.kubeconfig"
kubectl config use-context default --kubeconfig="/opt/kubernetes/cfg/kube-proxy.kubeconfig"
11.13. Manage kube-proxy with systemd
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
11.14. Start kube-proxy and enable it at boot
systemctl daemon-reload
systemctl start kube-proxy
systemctl enable kube-proxy
12. Deploy the Calico network component
Download: https://docs.projectcalico.org/
[root@master yaml]# kubectl apply -f calico.yaml
[root@master yaml]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-756dd4db79-2k24z 1/1 Running 1 2d9h
calico-node-6zh89 1/1 Running 0 41h
calico-node-7f7zh 1/1 Running 0 41h
calico-node-c6ss8 1/1 Running 1 2d9h
13. Authorize the apiserver to access kubelets
cat > apiserver-to-kubelet-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF
kubectl apply -f apiserver-to-kubelet-rbac.yaml
14. Copy the already-deployed node files to the new nodes (node1, node2)
scp -r /opt/kubernetes k8s-node1:/opt/
scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service k8s-node1:/usr/lib/systemd/system
scp /opt/kubernetes/ssl/ca.pem k8s-node1:/opt/kubernetes/ssl
scp -r /opt/kubernetes k8s-node2:/opt/
scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service k8s-node2:/usr/lib/systemd/system
scp /opt/kubernetes/ssl/ca.pem k8s-node2:/opt/kubernetes/ssl
14.1. Delete the kubelet certificate and kubeconfig files (node1, node2); they were generated when the master's kubelet bootstrapped and are unique per node, so each new node must regenerate its own
rm -f /opt/kubernetes/cfg/kubelet.kubeconfig
rm -f /opt/kubernetes/ssl/kubelet*
14.2. Change the hostname in the configuration files (node1, node2)
node1
vim /opt/kubernetes/cfg/kubelet.conf
--hostname-override=k8s-node1
vim /opt/kubernetes/cfg/kube-proxy-config.yml
hostnameOverride: k8s-node1
node2
vim /opt/kubernetes/cfg/kubelet.conf
--hostname-override=k8s-node2
vim /opt/kubernetes/cfg/kube-proxy-config.yml
hostnameOverride: k8s-node2
14.3. Start and enable at boot (node1, node2)
systemctl daemon-reload
systemctl start kubelet kube-proxy
systemctl enable kubelet kube-proxy
14.4. On the master, approve the new nodes' kubelet certificate requests
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-9c-g2f5guyBFpybXgaZXXEj8ZUr6z9UwmYHT835TT49jAJ9 62s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
node-csr-XoaRNKKNZAPfkzhVF93gtog9Hj1342XqqWcngsWa0ZnW0dQ 62s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
Approve the requests
kubectl certificate approve node-csr-9c-g2f5guyBFpybXgaZXXEj8ZUr6z9UwmYHT835TT49jAJ9
kubectl certificate approve node-csr-XoaRNKKNZAPfkzhVF93gtog9Hj1342XqqWcngsWa0ZnW0dQ
[root@k8s-master1 ~]# kubectl get no
NAME STATUS ROLES AGE VERSION
k8s-master1 Ready <none> 2d10h v1.20.4
k8s-node1 Ready <none> 41h v1.20.4
k8s-node2 Ready <none> 41h v1.20.4
15. Deploy CoreDNS and the Dashboard
15.1. Deploy CoreDNS: prepare the coredns.yaml file
# Warning: This is a file generated from the base underscore template file: coredns.yaml.base
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
.:53 {
log
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
containers:
- name: coredns
image: lizhenliang/coredns:1.6.7
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 512Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.0.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
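Apply the manifest saved above as coredns.yaml (the apply step is implied by the pod listing below):
kubectl apply -f coredns.yaml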
[root@k8s-master1 yaml]# kubectl get po -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-756dd4db79-2k24z 1/1 Running 1 2d10h
calico-node-6zh89 1/1 Running 0 41h
calico-node-7f7zh 1/1 Running 0 41h
calico-node-c6ss8 1/1 Running 1 2d10h
coredns-6cc56c94bd-vbdg7 1/1 Running 0 17m
15.2. Deploy the Dashboard: prepare the dashboard.yaml file
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
nodePort: 30001
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.3
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
kubectl apply -f dashboard.yaml
kubectl get pod -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system calico-kube-controllers-756dd4db79-2k24z 1/1 Running 1 2d10h 172.16.159.130 k8s-master1 <none> <none>
kube-system calico-node-6zh89 1/1 Running 0 42h 192.168.145.101 k8s-node1 <none> <none>
kube-system calico-node-7f7zh 1/1 Running 0 42h 192.168.145.102 k8s-node2 <none> <none>
kube-system calico-node-c6ss8 1/1 Running 1 2d10h 192.168.145.100 k8s-master1 <none> <none>
kube-system coredns-6cc56c94bd-vbdg7 1/1 Running 0 39m 172.16.36.65 k8s-node1 <none> <none>
kubernetes-dashboard dashboard-metrics-scraper-7b59f7d4df-bmgw6 1/1 Running 0 18m 172.16.36.66 k8s-node1 <none> <none>
kubernetes-dashboard kubernetes-dashboard-5dbf55bd9d-fcpbl 1/1 Running 0 18m 172.16.169.129 k8s-node2 <none> <none>
kubectl get svc -A
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 2d11h
kube-system kube-dns ClusterIP 10.0.0.2 <none> 53/UDP,53/TCP,9153/TCP 41m
kubernetes-dashboard dashboard-metrics-scraper ClusterIP 10.0.0.229 <none> 8000/TCP 19m
kubernetes-dashboard kubernetes-dashboard NodePort 10.0.0.242 <none> 443:30001/TCP 19m
15.3. Access URL: https://NodeIP:30001
15.4. Create a service account and bind it to the default cluster-admin cluster role
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
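To print just the login token (equivalent to reading it from the describe output above), a one-liner sketch:
kubectl -n kube-system get secret $(kubectl -n kube-system get secret | awk '/dashboard-admin-token/{print $1}') -o jsonpath='{.data.token}' | base64 -d
Paste the token into the token login option at https://NodeIP:30001 (the browser will warn about the self-signed certificate).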