#!/bin/bash
set -e
# During testing the IPs were hard-coded; the finished script passes them in as arguments
#MASTER_IP="172.16.32.112"
#NODE1_IP="172.16.32.80"
#NODE2_IP="172.16.32.111"
#./k8sClusterInstallAll.sh 172.16.18.106 172.16.17.52 172.16.20.203 localIp
#Unresolved etcd.service issue: how to keep ${...} unexpanded while still stripping tabs (<<-EOF vs <<"EOF"); for now echo crudely replaces the heredoc
#The SSL certs are generated once and copied to the other nodes: on the master, scp /root/ssl/* to the nodes; on a node, skip cert creation and copying
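# Illustration of the heredoc issue noted above (kept as comments so it does not run):
# an unquoted delimiter expands ${...}, a quoted delimiter keeps it literal, and the
# leading "-" in <<- only strips leading TABs; the two behaviours are independent.
#   cat <<-'EOF' > demo.service     # TABs stripped, ${ETCD_NAME} written literally
#   	ExecStart=/usr/local/bin/etcd --name="${ETCD_NAME}"
#   EOF
# So etcd.service below could use <<-'EOF' instead of the echo workaround.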
MASTER_IP="$1"
NODE1_IP="$2"
NODE2_IP="$3"
LOCAL_IP="$4"
ETCD_VERSION="3.2.18"
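# Minimal argument check (an addition, not in the original script): all four IPs are required.
if [ "$#" -ne 4 ]; then
echo -e "\033[31mERROR: usage: $0 MASTER_IP NODE1_IP NODE2_IP LOCAL_IP\033[0m"
exit 1
fi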
#Install Docker
function dockerInstall(){
#Remove old versions
echo -e "\033[32mINFO: Remove Docker...\033[0m"
yum remove -y docker docker-common container-selinux docker-selinux docker-engine
#Set up the repository
#Install yum-utils, which provides yum-config-manager for configuring repos:
echo -e "\033[32mINFO: Install yum-utils...\033[0m"
yum install -y yum-utils
#Add the stable repository with the following command:
echo -e "\033[32mINFO: Add repo...\033[0m"
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
#Refresh the yum package index
echo -e "\033[32mINFO: Makecache...\033[0m"
yum makecache fast
#Install the latest Docker version
echo -e "\033[32mINFO: Install docker-ce...\033[0m"
yum install -y docker-ce
#Start Docker
echo -e "\033[32mINFO: Start docker...\033[0m"
systemctl start docker
}
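# Optional checks (not in the original script); uncomment to verify the install and
# to enable Docker on boot:
#docker version
#systemctl enable docker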
#Install cfssl
function cfsslInstall(){
echo -e "\033[32mINFO: Install cfssl...\033[0m"
wget https://mritdftp.b0.upaiyun.com/cfssl/cfssl.tar.gz
tar -zxvf cfssl.tar.gz
mv cfssl cfssljson /usr/local/bin
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
rm -f cfssl.tar.gz
##If the CDN above is unavailable, use the official downloads below
#wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
#chmod +x cfssl_linux-amd64
#mv cfssl_linux-amd64 /usr/local/bin/cfssl
#
#wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
#chmod +x cfssljson_linux-amd64
#mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
}
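# Optional check (not in the original script): confirm both binaries landed on PATH.
#cfssl version
#command -v cfssljson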
#Generate etcd certificates --- create the JSON config files
function createEtcdCa(){
if [ ! -d "/root/etcd/conf/ssl" ]; then
mkdir /root/etcd/conf/ssl -p
fi
cd /root/etcd/conf/ssl
echo -e "\033[32mINFO: Create etcd-csr.json...\033[0m"
cat > etcd-csr.json <<-EOF
{
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"L": "Beijing",
"ST": "Beijing",
"C": "CN"
}
],
"CN": "etcd",
"hosts": [
"127.0.0.1",
"localhost",
"${MASTER_IP}",
"${NODE1_IP}",
"${NODE2_IP}"
]
}
EOF
echo -e "\033[32mINFO: Create etcd-gencert.json...\033[0m"
cat > etcd-gencert.json <<-EOF
{
"signing": {
"default": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
EOF
echo -e "\033[32mINFO: Create etcd-root-ca-csr.json...\033[0m"
cat >etcd-root-ca-csr.json <<-EOF
{
"key": {
"algo": "rsa",
"size": 4096
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"L": "Beijing",
"ST": "Beijing",
"C": "CN"
}
],
"CN": "etcd-root-ca"
}
EOF
#Generate the certificates
echo -e "\033[32mINFO: Create ca for Etcd...\033[0m"
cfssl gencert --initca=true etcd-root-ca-csr.json | cfssljson --bare etcd-root-ca
cfssl gencert --ca etcd-root-ca.pem --ca-key etcd-root-ca-key.pem --config etcd-gencert.json etcd-csr.json | cfssljson --bare etcd
}
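# Optional verification (not in the original script): confirm the issued cert carries the
# expected SANs before distributing it; assumes openssl is installed.
#openssl x509 -in /root/etcd/conf/ssl/etcd.pem -noout -text | grep -A1 "Subject Alternative Name"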
# end of function createEtcdCa
#Install etcd
function createEtcdConf(){
if [ ! -d "/root/etcd/conf" ]; then
mkdir /root/etcd/conf -p
fi
cd /root/etcd
echo -e "\033[32mINFO: Create etcd.service...\033[0m"
echo '[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/local/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\""
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target'>etcd.service
sed -i 's/^[ \t]*//g' etcd.service
cd /root/etcd/conf
echo -e "\033[32mINFO: Create etcd.conf...\033[0m"
cat > etcd.conf <<-EOF
# [member]
ETCD_NAME=etcd1
ETCD_DATA_DIR="/var/lib/etcd/etcd.etcd"
ETCD_WAL_DIR="/var/lib/etcd/wal"
ETCD_SNAPSHOT_COUNT="100"
ETCD_HEARTBEAT_INTERVAL="100"
ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://${LOCAL_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${LOCAL_IP}:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_MAX_WALS="5"
#ETCD_CORS=""
# [cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${LOCAL_IP}:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd1=https://${MASTER_IP}:2380,etcd2=https://${NODE1_IP}:2380,etcd3=https://${NODE2_IP}:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://${LOCAL_IP}:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_STRICT_RECONFIG_CHECK="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
# [proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
# [security]
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem"
ETCD_AUTO_TLS="true"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem"
ETCD_PEER_AUTO_TLS="true"
# [logging]
#ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
#ETCD_LOG_PACKAGE_LEVELS=""
EOF
if [ "${LOCAL_IP}" = "${NODE1_IP}" ]; then
sed -i 's/ETCD_NAME=etcd1/ETCD_NAME=etcd2/g' /root/etcd/conf/etcd.conf
elif [ "${LOCAL_IP}" = "${NODE2_IP}" ]; then
sed -i 's/ETCD_NAME=etcd1/ETCD_NAME=etcd3/g' /root/etcd/conf/etcd.conf
fi
}
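# Optional check (not in the original script): confirm the per-node ETCD_NAME rename took effect.
#grep ^ETCD_NAME /root/etcd/conf/etcd.conf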
function downloadEtcd(){
if [ ! -d "/root/etcd" ]; then
mkdir /root/etcd
fi
cd /root/etcd
if [ ! -f "etcd-v${ETCD_VERSION}-linux-amd64.tar.gz" ]; then
wget https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz
fi
}
function userGroupEtcd(){
echo -e "\033[32mINFO: Add user group etcd...\033[0m"
getent group etcd >/dev/null || groupadd -r etcd
getent passwd etcd >/dev/null || useradd -r -g etcd -d /var/lib/etcd -s /sbin/nologin -c "etcd user" etcd
}
function createEtcdDataDir(){
if [ ! -d "/var/lib/etcd" ]; then
mkdir /var/lib/etcd
chown -R etcd:etcd /var/lib/etcd
fi
}
function installEtcd(){
if [ ! -d "/root/etcd" ]; then
mkdir /root/etcd
fi
cd /root/etcd
echo -e "\033[32mINFO: Copy etcd...\033[0m"
tar -zxvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz
cp etcd-v${ETCD_VERSION}-linux-amd64/etcd* /usr/local/bin
rm -rf etcd-v${ETCD_VERSION}-linux-amd64
echo -e "\033[32mINFO: Copy etcd config...\033[0m"
if [ ! -d "/etc/etcd" ]; then
cp -r /root/etcd/conf /etc/etcd
chown -R etcd:etcd /etc/etcd
chmod -R 755 /etc/etcd/ssl
else
mv -f /etc/etcd /etc/etcd.bak
cp -r conf /etc/etcd
chown -R etcd:etcd /etc/etcd
chmod -R 755 /etc/etcd/ssl
fi
echo -e "\033[32mINFO: Copy etcd systemd config...\033[0m"
cp /root/etcd/etcd.service /lib/systemd/system
systemctl daemon-reload
systemctl start etcd
}
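# Optional health check (not in the original script) for etcd 3.2 with the certs generated
# above; run it once etcd is up on all three nodes.
#ETCDCTL_API=3 etcdctl --endpoints=https://${LOCAL_IP}:2379 \
#  --cacert=/etc/etcd/ssl/etcd-root-ca.pem \
#  --cert=/etc/etcd/ssl/etcd.pem \
#  --key=/etc/etcd/ssl/etcd-key.pem endpoint health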
dockerInstall
if [ "${LOCAL_IP}" = "${MASTER_IP}" ]; then
cfsslInstall
createEtcdCa
fi
if [ "${LOCAL_IP}" != "${MASTER_IP}" ]; then
if [ ! -d "/root/etcd/conf/ssl" ]; then
mkdir /root/etcd/conf/ssl -p
fi
scp "${MASTER_IP}:/root/etcd/conf/ssl/*" /root/etcd/conf/ssl/
fi
createEtcdConf
downloadEtcd
userGroupEtcd
createEtcdDataDir
installEtcd
#!/bin/bash
set -e
MASTER_IP="172.16.32.112"
NODE1_IP="172.16.32.80"
NODE2_IP="172.16.32.111"
LOCAL_IP="172.16.32.112"
HYPERKUBE_VERSION="1.10.1"
KUBE_APISERVER="https://127.0.0.1:6443"
#Install cfssl
function cfsslInstall(){
echo -e "\033[32mINFO: Install cfssl...\033[0m"
wget https://mritdftp.b0.upaiyun.com/cfssl/cfssl.tar.gz
tar -zxvf cfssl.tar.gz
mv cfssl cfssljson /usr/local/bin
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
rm -f cfssl.tar.gz
##If the CDN above is unavailable, use the official downloads below
#wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
#chmod +x cfssl_linux-amd64
#mv cfssl_linux-amd64 /usr/local/bin/cfssl
#
#wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
#chmod +x cfssljson_linux-amd64
#mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
}
# The kubeconfig files used by kubelet and kube-proxy must be generated with kubectl
# Install hyperkube
function InstallHyperKube(){
if [ ! -f "hyperkube_${HYPERKUBE_VERSION}" ]; then
echo -e "\033[32mINFO: Download hyperkube...\033[0m"
wget https://storage.googleapis.com/kubernetes-release/release/v${HYPERKUBE_VERSION}/bin/linux/amd64/hyperkube -O hyperkube_${HYPERKUBE_VERSION}
fi
chmod +x hyperkube_${HYPERKUBE_VERSION}
echo -e "\033[32mINFO: Copy hyperkube...\033[0m"
cp hyperkube_${HYPERKUBE_VERSION} /usr/local/bin/hyperkube
if [ ! -f "/usr/local/bin/kubectl" ]; then
echo -e "\033[32mINFO: Create symbolic link...\033[0m"
ln -s /usr/local/bin/hyperkube /usr/local/bin/kubectl
fi
}
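# Optional check (not in the original script): the symlinked kubectl should report its client version.
#kubectl version --client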
# Create the kube user and group
function userGroupKube(){
echo -e "\033[32mINFO: Add user group kube...\033[0m"
getent group kube >/dev/null || groupadd -r kube
getent passwd kube >/dev/null || useradd -r -g kube -d / -s /sbin/nologin -c "Kubernetes user" kube
}
function createAllFile(){
# Generate the Kubernetes certificates
# admin-csr.json
echo -e "\033[32mINFO: Create admin-csr.json...\033[0m"
cat > admin-csr.json <<-EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
# k8s-gencert.json
echo -e "\033[32mINFO: Create k8s-gencert.json...\033[0m"
cat > k8s-gencert.json <<-EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
EOF
# k8s-root-ca-csr.json
echo -e "\033[32mINFO: Create k8s-root-ca-csr.json...\033[0m"
cat >k8s-root-ca-csr.json <<-EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 4096
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# kube-apiserver-csr.json
echo -e "\033[32mINFO: Create kube-apiserver-csr.json...\033[0m"
cat >kube-apiserver-csr.json <<-EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"10.254.0.1",
"${MASTER_IP}",
"${NODE1_IP}",
"${NODE2_IP}",
"*.kubernetes.master",
"localhost",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# kube-proxy-csr.json
echo -e "\033[32mINFO: Create kube-proxy-csr.json...\033[0m"
cat >kube-proxy-csr.json <<-EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# Generate the root CA
echo -e "\033[32mINFO: Create root-ca...\033[0m"
cfssl gencert --initca=true k8s-root-ca-csr.json | cfssljson --bare k8s-root-ca
# Generate the certificates for the remaining components
echo -e "\033[32mINFO: Create admin proxy apiserver ca...\033[0m"
for targetName in kube-apiserver admin kube-proxy; do
cfssl gencert --ca k8s-root-ca.pem --ca-key k8s-root-ca-key.pem --config k8s-gencert.json --profile kubernetes $targetName-csr.json | cfssljson --bare $targetName
done
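# Optional verification (not in the original script): inspect the apiserver cert SANs;
# assumes openssl is installed.
#openssl x509 -in kube-apiserver.pem -noout -text | grep -A1 "Subject Alternative Name"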
# Generate the bootstrap token
# token.csv
echo -e "\033[32mINFO: Create token...\033[0m"
# The apiserver address defaults to 127.0.0.1:6443.
# If kubelet is enabled on the master, change this address in the generated kubeconfig
# to the current MASTER_IP:6443.
# KUBE_APISERVER="https://127.0.0.1:6443" is defined at the top of the script.
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo -e "\033[32mINFO: Create tokne: ${BOOTSTRAP_TOKEN}...\033[0m"
不要质疑 system:bootstrappers 用户组是否写错了,有疑问请参考官方文档
https://kubernetes.io/docs/admin/kubelet-tls-bootstrapping/
cat > token.csv <<-EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:bootstrappers"
EOF
# Generate the kubeconfig files
# bootstrap.kubeconfig
echo -e "\033[32mINFO: Create kubelet bootstrapping kubeconfig...\033[0m"
# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=k8s-root-ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
# Set client credentials
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
# Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# kube-proxy.kubeconfig
echo -e "\033[32mINFO: Create kube-proxy kubeconfig...\033[0m"
# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=k8s-root-ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
# Set client credentials
kubectl config set-credentials kube-proxy \
--client-certificate=kube-proxy.pem \
--client-key=kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
# Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
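# Optional check (not in the original script): inspect the generated kubeconfig files
# (credentials are shown redacted unless --raw is passed).
#kubectl config view --kubeconfig=bootstrap.kubeconfig
#kubectl config view --kubeconfig=kube-proxy.kubeconfig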
# Create the advanced audit configuration
# audit-policy.yaml
echo -e "\033[32mINFO: Create audit-policy.yaml...\033[0m"
if [ ! -d "/root/k8s/conf" ]; then
mkdir /root/k8s/conf -p
fi
cd /root/k8s/conf
cat > audit-policy.yaml <<-EOF
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
- level: Metadata
EOF
# systemd unit files
# kube-apiserver.service
echo -e "\033[32mINFO: Create kube-apiserver.service...\033[0m"
echo '[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
User=kube
ExecStart=/usr/local/bin/hyperkube apiserver \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_ETCD_SERVERS \
$KUBE_API_ADDRESS \
$KUBE_API_PORT \
$KUBELET_PORT \
$KUBE_ALLOW_PRIV \
$KUBE_SERVICE_ADDRESSES \
$KUBE_ADMISSION_CONTROL \
$KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target'>kube-apiserver.service
sed -i 's/^[\t]*//g' kube-apiserver.service
# kube-controller-manager.service
echo -e "\033[32mINFO: Create kube-controller-manager.service...\033[0m"
echo '[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
User=kube
ExecStart=/usr/local/bin/hyperkube controller-manager \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target'>kube-controller-manager.service
sed -i 's/^[\t]*//g' kube-controller-manager.service
# kubelet.service
echo -e "\033[32mINFO: Create kubelet.service...\033[0m"
echo '[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/bin/hyperkube kubelet \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBELET_API_SERVER \
$KUBELET_ADDRESS \
$KUBELET_PORT \
$KUBELET_HOSTNAME \
$KUBE_ALLOW_PRIV \
$KUBELET_ARGS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target'>kubelet.service
sed -i 's/^[\t]*//g' kubelet.service
# kube-proxy.service
echo -e "\033[32mINFO: Create kube-proxy.service...\033[0m"
echo '[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/local/bin/hyperkube proxy \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target'>kube-proxy.service
sed -i 's/^[\t]*//g' kube-proxy.service
# kube-scheduler.service
echo -e "\033[32mINFO: Create kube-scheduler.service...\033[0m"
echo '[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
User=kube
ExecStart=/usr/local/bin/hyperkube scheduler \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target'>kube-scheduler.service
sed -i 's/^[\t]*//g' kube-scheduler.service
# config, apiserver, controller-manager, scheduler, kubelet, proxy
# config
echo -e "\033[32mINFO: Create config...\033[0m"
cat > config <<-EOF
# kubernetes system config
# The following values are used to configure various aspects of all
# kubernetes services, including
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=2"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"
# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://127.0.0.1:8080"
EOF
# apiserver
echo -e "\033[32mINFO: Create apiserver...\033[0m"
cat > apiserver <<-EOF
# kubernetes system config
# The following values are used to configure the kube-apiserver
# The address on the local server to listen to.
KUBE_API_ADDRESS="--advertise-address=${LOCAL_IP} --bind-address=${LOCAL_IP}"
# The port on the local server to listen on.
KUBE_API_PORT="--secure-port=6443"
# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"
# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=https://${MASTER_IP}:2379,https://${NODE1_IP}:2379,https://${NODE2_IP}:2379"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
# default admission control policies
KUBE_ADMISSION_CONTROL="--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction"
# Add your own!
KUBE_API_ARGS=" --anonymous-auth=false \\
--apiserver-count=3 \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kube-audit/audit.log \\
--audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
--authorization-mode=Node,RBAC \\
--client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \\
--enable-bootstrap-token-auth \\
--enable-garbage-collector \\
--enable-logs-handler \\
--enable-swagger-ui \\
--etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
--etcd-compaction-interval=5m0s \\
--etcd-count-metric-poll-period=1m0s \\
--event-ttl=48h0m0s \\
--kubelet-https=true \\
--kubelet-timeout=3s \\
--log-flush-frequency=5s \\
--token-auth-file=/etc/kubernetes/token.csv \\
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \\
--service-node-port-range=30000-50000 \\
--service-account-key-file=/etc/kubernetes/ssl/k8s-root-ca.pem \\
--storage-backend=etcd3 \\
--enable-swagger-ui=true"
EOF
# controller-manager
echo -e "\033[32mINFO: Create controller-manager...\033[0m"
cat > controller-manager <<-EOF
# The following values are used to configure the kubernetes controller-manager
# defaults from config and apiserver should be adequate
# Add your own!
KUBE_CONTROLLER_MANAGER_ARGS=" --bind-address=0.0.0.0 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/k8s-root-ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \\
--controllers=*,bootstrapsigner,tokencleaner \\
--deployment-controller-sync-period=10s \\
--experimental-cluster-signing-duration=86700h0m0s \\
--leader-elect=true \\
--node-monitor-grace-period=40s \\
--node-monitor-period=5s \\
--pod-eviction-timeout=5m0s \\
--terminated-pod-gc-threshold=50 \\
--root-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \\
--feature-gates=RotateKubeletServerCertificate=true"
EOF
# scheduler
echo -e "\033[32mINFO: Create scheduler...\033[0m"
cat > scheduler <<-EOF
# kubernetes scheduler config
# default config should be adequate
# Add your own!
KUBE_SCHEDULER_ARGS=" --address=0.0.0.0 \\
--leader-elect=true \\
--algorithm-provider=DefaultProvider"
EOF
# kubelet
echo -e "\033[32mINFO: Create kubelet...\033[0m"
cat > kubelet <<-EOF
# kubernetes kubelet (minion) config
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--node-ip=192.168.1.61"
# The port for the info server to serve on
KUBELET_PORT="--port=10250"
# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=k1.node"
# location of the api-server
KUBELET_API_SERVER=""
# If kubelet runs on the master, change node-role.kubernetes.io/k8s-node=true to node-role.kubernetes.io/k8s-master=true
# Add your own!
KUBELET_ARGS=" --bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \\
--cert-dir=/etc/kubernetes/ssl \\
--cgroup-driver=cgroupfs \\
--cluster-dns=10.254.0.2 \\
--cluster-domain=cluster.local. \\
--fail-swap-on=false \\
--feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \\
--node-labels=node-role.kubernetes.io/k8s-node=true \\
--image-gc-high-threshold=70 \\
--image-gc-low-threshold=50 \\
--kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \\
--serialize-image-pulls=false \\
--sync-frequency=30s \\
--pod-infra-container-image=k8s.gcr.io/pause-amd64:3.0 \\
--resolv-conf=/etc/resolv.conf \\
--rotate-certificates"
EOF
# proxy
echo -e "\033[32mINFO: Create proxy...\033[0m"
cat > proxy <<-EOF
# kubernetes proxy config
# default config should be adequate
# Add your own!
KUBE_PROXY_ARGS="--bind-address=0.0.0.0 \\
--hostname-override=k1.node \\
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \\
--cluster-cidr=10.254.0.0/16"
EOF
}
# Create the kube data directories
function createKubeDataDir(){
echo -e "\033[32mINFO: Create kube data dir...\033[0m"
if [ ! -d "/var/log/kube-audit" ]; then
mkdir /var/log/kube-audit
fi
if [ ! -d "/var/lib/kubelet" ]; then
mkdir /var/lib/kubelet
fi
if [ ! -d "/usr/libexec" ]; then
mkdir /usr/libexec
fi
chown -R kube:kube /var/log/kube-audit /var/lib/kubelet /usr/libexec
}
# nginx-proxy [nodes only]
function nginxProxy(){
# nginx-proxy.service
echo -e "\033[32mINFO: Create nginx-proxy.service...\033[0m"
cat > nginx-proxy.service <<-EOF
[Unit]
Description=kubernetes apiserver docker wrapper
Wants=docker.socket
After=docker.service
[Service]
User=root
PermissionsStartOnly=true
ExecStart=/usr/bin/docker run -p 127.0.0.1:6443:6443 \\
-v /etc/nginx:/etc/nginx \\
--name nginx-proxy \\
--net=host \\
--restart=on-failure:5 \\
--memory=512M \\
nginx:1.13.12-alpine
ExecStartPre=-/usr/bin/docker rm -f nginx-proxy
ExecStop=/usr/bin/docker stop nginx-proxy
Restart=always
RestartSec=15s
TimeoutStartSec=30s
[Install]
WantedBy=multi-user.target
EOF
# nginx.conf
echo -e "\033[32mINFO: Create nginx.conf...\033[0m"
cat > nginx.conf <<-EOF
error_log stderr notice;
worker_processes auto;
events {
multi_accept on;
use epoll;
worker_connections 1024;
}
stream {
upstream kube_apiserver {
least_conn;
server 172.16.18.106:6443;
server 172.16.17.52:6443;
server 172.16.20.203:6443;
}
server {
listen 0.0.0.0:6443;
proxy_pass kube_apiserver;
proxy_timeout 10m;
proxy_connect_timeout 1s;
}
}
EOF
if [ ! -d "/etc/nginx" ]; then
mkdir /etc/nginx
fi
echo -e "\033[32mINFO: Copy nginx.conf nginx-proxy.service...\033[0m"
cp nginx.conf /etc/nginx
cp nginx-proxy.service /lib/systemd/system
echo -e "\033[32mINFO: daemon-reload...\033[0m"
systemctl daemon-reload
echo -e "\033[32mINFO: start nginx-proxy...\033[0m"
systemctl start nginx-proxy
systemctl enable nginx-proxy
}
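# Optional checks (not in the original script) once nginx-proxy is running: the container
# should be up, and the local 6443 endpoint should answer TLS (a 401 from the apiserver is
# expected here because the request is anonymous).
#docker ps --filter name=nginx-proxy
#curl -k https://127.0.0.1:6443/healthz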
Appendix - deployment scripts:
node.sh, master.sh, k8s.sh, etcdinstall.sh
References:
https://mritd.me/2018/04/19/set-up-kubernetes-1.10.1-cluster-by-hyperkube/
https://blog.qikqiak.com/post/manual-install-high-available-kubernetes-cluster/