google-k8s部署试验

版本

k8s 1.5

docker version
Client:
Version:        1.12.6
API version:    1.24
Package version: docker-1.12.6-32.git88a4867.el7.centos.x86_64
Go version:      go1.7.4
Git commit:      88a4867/1.12.6
Built:          Mon Jul  3 16:02:02 2017
OS/Arch:        linux/amd64

Server:
Version:        1.12.6
API version:    1.24
Package version: docker-1.12.6-32.git88a4867.el7.centos.x86_64
Go version:      go1.7.4
Git commit:      88a4867/1.12.6
Built:          Mon Jul  3 16:02:02 2017
OS/Arch:        linux/amd64

部署etcd

-bash-4.1$ more start.sh 
# start.sh — bootstrap one member of a static 3-node etcd cluster.
# Edit THIS_NAME / THIS_IP per host before running.
TOKEN=zhangqi-etcd
CLUSTER_STATE=new
NAME_1=zhangqi-151
NAME_2=zhangqi-26
NAME_3=zhangqi-150
HOST_1=192.168.6.151
HOST_2=192.168.6.26
HOST_3=192.168.6.150
# Static bootstrap list: name=peer-URL pairs for all three members.
CLUSTER="${NAME_1}=http://${HOST_1}:2380,${NAME_2}=http://${HOST_2}:2380,${NAME_3}=http://${HOST_3}:2380"

# Identity of the member started on THIS host (here: node 2).
THIS_NAME=${NAME_2}
THIS_IP=${HOST_2}
./etcd --data-dir=data.etcd --name "${THIS_NAME}" \
        --initial-advertise-peer-urls "http://${THIS_IP}:2380" --listen-peer-urls "http://${THIS_IP}:2380" \
        --advertise-client-urls "http://${THIS_IP}:2379" --listen-client-urls "http://${THIS_IP}:2379" \
        --initial-cluster "${CLUSTER}" \
        --initial-cluster-state "${CLUSTER_STATE}" --initial-cluster-token "${TOKEN}"
-bash-4.1$ more monitor.sh
# monitor.sh — inspect etcd cluster status via the v3 API.
export ETCDCTL_API=3
HOST_1=192.168.6.151
HOST_2=192.168.6.26
HOST_3=192.168.6.150

# Client endpoints (port 2379) of all three members.
ENDPOINTS="$HOST_1:2379,$HOST_2:2379,$HOST_3:2379"
##./etcdctl --endpoints=$ENDPOINTS member list
./etcdctl --write-out=table --endpoints="$ENDPOINTS" endpoint status
#./etcdctl --endpoints=$ENDPOINTS endpoint health

curl -s http://192.168.6.26:2379/v2/keys | /slview/test/zhangqi/jq-linux64 
{
  "action": "get",
  "node": {
    "dir": true,
    "nodes": [
      {
        "key": "/syspara",
        "dir": true,
        "modifiedIndex": 52,
        "createdIndex": 52
      },
      {
        "key": "/shconfig",
        "dir": true,
        "modifiedIndex": 17,
        "createdIndex": 17
      },
      {
        "key": "/test",
        "dir": true,
        "modifiedIndex": 20,
        "createdIndex": 20
      },
      {
        "key": "/zy",
        "dir": true,
        "modifiedIndex": 30,
        "createdIndex": 30
      }
    ]
  }
}

部署docker搭建私服

# Private Docker registry.
# NOTE(review): these two commands are alternatives — both use
# --name="docker-image", so the second fails if the first container exists.
# The second variant mounts the host dir at registry v2's default storage
# path (/var/lib/registry) instead of overriding it via env var.
docker run -d -p 5000:5000 --restart=always --name="docker-image" --hostname="docker-image" -v /root/test/docker/image:/tmp/registry -e REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/registry registry
docker run -d -p 5000:5000 --restart=always --name="docker-image" --hostname="docker-image" -v /root/test/docker/image:/var/lib/registry registry

# Infra ("pause") image — used by kubelet as the pod's network-namespace holder.
docker pull docker.io/kubernetes/pause

# Store the pause image in the local private registry.
# First attempt tags with the hostname "docker-image:5000", then removes it;
# the working tag uses the registry's IP:port.
docker tag f9d5de079539 docker-image:5000/pause-2017:3.0
docker rmi docker-image:5000/pause-2017:3.0
docker tag f9d5de079539 192.168.6.50:5000/pause-2017:3.0
修改私有库ip地址重启docker
# (Line above: configure the private registry address — e.g. as an insecure
# registry — and restart docker before pushing.)
docker push 192.168.6.50:5000/pause-2017:3.0


# Inspect registry contents.
# NOTE(review): `docker search` against a private v2 registry is generally
# unsupported; the curl /v2/_catalog and /v2/<repo>/tags/list calls shown
# below are the reliable way to list repositories/tags.
docker search 192.168.6.50:5000

[root@localhost image]# curl -XGET http://192.168.6.50:5000/v2/pause-2017/tags/list
{"name":"pause-2017","tags":["3.0"]}
[root@localhost image]# curl -XGET http://192.168.6.50:5000/v2/_catalog
{"repositories":["pause-2017"]}

systemctl status docker.service

[root@localhost k8s]# docker images 
REPOSITORY                              TAG                IMAGE ID            CREATED            SIZE
docker.io/registry                      latest              751f286bc25e        6 weeks ago        33.19 MB
192.168.6.50:5000/kubernetes-bootcamp  v1                  8fafd8af70e9        13 months ago      211.3 MB
192.168.6.50:5000/pause-2017            3.0                f9d5de079539        3 years ago        239.8 kB
[root@localhost k8s]# 

部署k8s的master

## master components (k8s 1.5, insecure local setup — no TLS/auth).
# FIX: the original had a trailing space after the backslash on the
# --logtostderr line, which terminated the command there and made the
# --etcd_servers / --allow_privileged line run as a separate (bogus) command.
./kube-apiserver --address=0.0.0.0 \
                --insecure-port=8080 \
                --service-cluster-ip-range='10.254.0.0/16' \
                --log_dir=/var/log/kube \
                --kubelet_port=10250 \
                --v=0 \
                --logtostderr=true \
                --etcd_servers=http://192.168.6.151:2379 \
                --allow_privileged=false
                
                
# Controller manager, pointed at the local insecure apiserver port.
# NOTE(review): --machines looks like a long-deprecated flag by k8s 1.5 —
# confirm it is still accepted/honored in this build.
./kube-controller-manager  --v=0 \
                          --logtostderr=true \
                          --log_dir=/var/log/kube  \
                          --machines=192.168.6.51  \
                          --master=127.0.0.1:8080
                          
                          
# Scheduler, pointed at the local insecure apiserver port.
./kube-scheduler  --master='127.0.0.1:8080'  \
                  --v=0  \
                  --log_dir=/var/log/kube  \
                  --logtostderr=true 
                  
                  
# One-line background variants of the three master components, logging under
# /root/test/k8s/. --logtostderr=false sends logs to --log_dir; the >> redirect
# captures anything still written to stdout/stderr.
kube-apiserver --address=0.0.0.0  --insecure-port=8080 --service-cluster-ip-range='10.254.0.0/16' --log_dir=/root/test/k8s/kube --kubelet_port=10250 --v=0 --logtostderr=false --etcd_servers=http://192.168.6.26:2379 --allow_privileged=false  >> /root/test/k8s/kube-apiserver.log 2>&1 &

# FIX: original redirected to ".../kube-controller-manager" without the .log
# suffix, inconsistent with the apiserver/scheduler log files.
kube-controller-manager  --v=0 --logtostderr=false --log_dir=/root/test/k8s/kube --master='127.0.0.1:8080' >> /root/test/k8s/kube-controller-manager.log 2>&1 &

kube-scheduler  --master='127.0.0.1:8080' --v=0  --log_dir=/root/test/k8s/kube  >> /root/test/k8s/kube-scheduler.log 2>&1 &

https://192.168.6.50:6443/healthz

kubectl get componentstatuses
NAME                STATUS    MESSAGE              ERROR
scheduler            Healthy  ok                  
controller-manager  Healthy  ok                  
etcd-0              Healthy  {"health": "true"}  

#etcd
-bash-4.1$ curl -s http://192.168.6.26:2379/v2/keys | /slview/test/zhangqi/jq-linux64 
curl: /usr/local/lib/libidn.so.11: no version information available (required by /usr/local/lib/libcurl.so.4)
{
  "action": "get",
  "node": {
    "dir": true,
    "nodes": [
      {
        "key": "/registry",
        "dir": true,
        "modifiedIndex": 24587,
        "createdIndex": 24587
      },
      {
        "key": "/shconfig",
        "dir": true,
        "modifiedIndex": 17,
        "createdIndex": 17
      },
      {
        "key": "/test",
        "dir": true,
        "modifiedIndex": 20,
        "createdIndex": 20
      },
      {
        "key": "/zy",
        "dir": true,
        "modifiedIndex": 30,
        "createdIndex": 30
      },
      {
        "key": "/syspara",
        "dir": true,
        "modifiedIndex": 52,
        "createdIndex": 52
      }
    ]
  }
}
-bash-4.1$ curl -s http://192.168.6.26:2379/v2/keys/registry | /slview/test/zhangqi/jq-linux64 
curl: /usr/local/lib/libidn.so.11: no version information available (required by /usr/local/lib/libcurl.so.4)
{
  "action": "get",
  "node": {
    "key": "/registry",
    "dir": true,
    "nodes": [
      {
        "key": "/registry/services",
        "dir": true,
        "modifiedIndex": 24596,
        "createdIndex": 24596
      },
      {
        "key": "/registry/clusterrolebindings",
        "dir": true,
        "modifiedIndex": 24603,
        "createdIndex": 24603
      },
      {
        "key": "/registry/events",
        "dir": true,
        "modifiedIndex": 24610,
        "createdIndex": 24610
      },
      {
        "key": "/registry/serviceaccounts",
        "dir": true,
        "modifiedIndex": 24611,
        "createdIndex": 24611
      },
      {
        "key": "/registry/ranges",
        "dir": true,
        "modifiedIndex": 24587,
        "createdIndex": 24587
      },
      {
        "key": "/registry/clusterroles",
        "dir": true,
        "modifiedIndex": 24588,
        "createdIndex": 24588
      },
      {
        "key": "/registry/namespaces",
        "dir": true,
        "modifiedIndex": 24592,
        "createdIndex": 24592
      }
    ],
    "modifiedIndex": 24587,
    "createdIndex": 24587
  }
}
-bash-4.1$  curl -s http://192.168.6.26:2379/v2/keys/registry | ./jq-linux64  
curl: /usr/local/lib/libidn.so.11: no version information available (required by /usr/local/lib/libcurl.so.4)
{
  "action": "get",
  "node": {
    "key": "/registry",
    "dir": true,
    "nodes": [
      {
        "key": "/registry/ranges",
        "dir": true,
        "modifiedIndex": 24587,
        "createdIndex": 24587
      },
      {
        "key": "/registry/clusterrolebindings",
        "dir": true,
        "modifiedIndex": 24603,
        "createdIndex": 24603
      },
      {
        "key": "/registry/replicasets",
        "dir": true,
        "modifiedIndex": 27098,
        "createdIndex": 27098
      },
      {
        "key": "/registry/pods",
        "dir": true,
        "modifiedIndex": 27099,
        "createdIndex": 27099
      },
      {
        "key": "/registry/clusterroles",
        "dir": true,
        "modifiedIndex": 24588,
        "createdIndex": 24588
      },
      {
        "key": "/registry/namespaces",
        "dir": true,
        "modifiedIndex": 24592,
        "createdIndex": 24592
      },
      {
        "key": "/registry/services",
        "dir": true,
        "modifiedIndex": 24596,
        "createdIndex": 24596
      },
      {
        "key": "/registry/events",
        "dir": true,
        "modifiedIndex": 24610,
        "createdIndex": 24610
      },
      {
        "key": "/registry/serviceaccounts",
        "dir": true,
        "modifiedIndex": 24611,
        "createdIndex": 24611
      },
      {
        "key": "/registry/minions",
        "dir": true,
        "modifiedIndex": 25942,
        "createdIndex": 25942
      },
      {
        "key": "/registry/deployments",
        "dir": true,
        "modifiedIndex": 27097,
        "createdIndex": 27097
      }
    ],
    "modifiedIndex": 24587,
    "createdIndex": 24587
  }
}


# slave (node) components
# kube-proxy: programs service forwarding rules; talks to the master's
# insecure port.
./kube-proxy  --logtostderr=false  \
              --v=0  \
              --master=http://192.168.6.50:8080 
              
              
# kubelet: registers this host with the apiserver under the overridden
# node name 192.168.6.51.
./kubelet  --logtostderr=false  \
          --v=0  \
          --allow-privileged=false  \
          --log_dir=/var/log/kube  \
          --address=0.0.0.0  \
          --port=10250  \
          --hostname_override=192.168.6.51  \
          --api_servers=http://192.168.6.50:8080
          
          
# One-line background variants of the node components. The kubelet additionally
# pulls the pod-infra ("pause") image from the private registry via
# --pod-infra-container-image.
kube-proxy  --logtostderr=false --v=0 --master=http://192.168.6.50:8080  >>  /root/test/k8s/kube-proxy.log 2>&1 &

kubelet  --logtostderr=false --v=0 --allow-privileged=false  --log_dir=/root/test/k8s/kube  --address=0.0.0.0  --port=10250  --hostname_override=192.168.6.51  --api_servers=http://192.168.6.50:8080 --pod-infra-container-image=192.168.6.50:5000/pause-2017:3.0 >>/root/test/k8s/kube-kubelet.log 2>&1 &

使用k8s做服务部署

# Fetch bootcamp images: v1 from the private registry, v2 from Docker Hub,
# then re-tag v2 (image id b6556396ebd4) for the private registry.
docker pull 192.168.6.50:5000/kubernetes-bootcamp:v1
docker pull jocatalin/kubernetes-bootcamp:v2
docker tag b6556396ebd4 192.168.6.50:5000/kubernetes-bootcamp:v2

# Start the service. The three commands are alternatives (same deployment
# name, different image source) — run only one.
kubectl run kubernetes-bootcamp --image=192.168.6.50:5000/kubernetes-bootcamp:v1 --port=8080
kubectl run kubernetes-bootcamp --image=docker.io/jocatalin/kubernetes-bootcamp:v1 --port=8080
kubectl run kubernetes-bootcamp --image=jocatalin/kubernetes-bootcamp:v1 --port=8080
# Images are pulled from docker.io by default when no registry is given.
[root@hadoop1 k8s]# docker images 
REPOSITORY                                TAG                IMAGE ID            CREATED            SIZE
docker.io/jocatalin/kubernetes-bootcamp  v2                  b6556396ebd4        13 months ago      211.3 MB
192.168.6.50:5000/kubernetes-bootcamp    v1                  8fafd8af70e9        13 months ago      211.3 MB
docker.io/jocatalin/kubernetes-bootcamp  v1                  8fafd8af70e9        13 months ago      211.3 MB
192.168.6.50:5000/pause-2017              3.0                f9d5de079539        3 years ago        239.8 kB
[root@hadoop1 k8s]# 

[root@localhost k8s]# kubectl get pods
NAME                                  READY    STATUS        RESTARTS  AGE
kubernetes-bootcamp-264884655-wfcfd  1/1      Terminating  0          8m

# Delete the deployment.
kubectl delete deployment kubernetes-bootcamp 

# Open a shell inside the pod.
kubectl exec -ti kubernetes-bootcamp-264884655-wfcfd bash

# Expose the deployment as a NodePort service on port 8080.
kubectl expose deployment/kubernetes-bootcamp --type="NodePort" --port 8080

# Rolling upgrade to the v2 image.
kubectl set image deployments/kubernetes-bootcamp kubernetes-bootcamp=192.168.6.50:5000/kubernetes-bootcamp:v2

# Scale out to 3 replicas.
kubectl scale deployments/kubernetes-bootcamp --replicas=3

# Roll back to revision 2.
# NOTE(review): no deployment name is given — this probably should be
# `deployments/kubernetes-bootcamp`; confirm.
kubectl rollout undo deployments --to-revision=2

# List endpoints — with multiple pods, kube-proxy load-balances across them.
kubectl get ep
NAME                  ENDPOINTS          AGE
kubernetes            192.168.6.50:6443  21h
kubernetes-bootcamp  172.17.0.2:8080    13m
©著作权归作者所有,转载或内容合作请联系作者
  • 序言:七十年代末,一起剥皮案震惊了整个滨河市,随后出现的几起案子,更是在滨河造成了极大的恐慌,老刑警刘岩,带你破解...
    沈念sama阅读 204,921评论 6 478
  • 序言:滨河连续发生了三起死亡事件,死亡现场离奇诡异,居然都是意外死亡,警方通过查阅死者的电脑和手机,发现死者居然都...
    沈念sama阅读 87,635评论 2 381
  • 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
    开封第一讲书人阅读 151,393评论 0 338
  • 文/不坏的土叔 我叫张陵,是天一观的道长。 经常有香客问我,道长,这世上最难降的妖魔是什么? 我笑而不...
    开封第一讲书人阅读 54,836评论 1 277
  • 正文 为了忘掉前任,我火速办了婚礼,结果婚礼上,老公的妹妹穿的比我还像新娘。我一直安慰自己,他们只是感情好,可当我...
    茶点故事阅读 63,833评论 5 368
  • 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
    开封第一讲书人阅读 48,685评论 1 281
  • 那天,我揣着相机与录音,去河边找鬼。 笑死,一个胖子当着我的面吹牛,可吹牛的内容都是我干的。 我是一名探鬼主播,决...
    沈念sama阅读 38,043评论 3 399
  • 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
    开封第一讲书人阅读 36,694评论 0 258
  • 序言:老挝万荣一对情侣失踪,失踪者是张志新(化名)和其女友刘颖,没想到半个月后,有当地人在树林里发现了一具尸体,经...
    沈念sama阅读 42,671评论 1 300
  • 正文 独居荒郊野岭守林人离奇死亡,尸身上长有42处带血的脓包…… 初始之章·张勋 以下内容为张勋视角 年9月15日...
    茶点故事阅读 35,670评论 2 321
  • 正文 我和宋清朗相恋三年,在试婚纱的时候发现自己被绿了。 大学时的朋友给我发了我未婚夫和他白月光在一起吃饭的照片。...
    茶点故事阅读 37,779评论 1 332
  • 序言:一个原本活蹦乱跳的男人离奇死亡,死状恐怖,灵堂内的尸体忽然破棺而出,到底是诈尸还是另有隐情,我是刑警宁泽,带...
    沈念sama阅读 33,424评论 4 321
  • 正文 年R本政府宣布,位于F岛的核电站,受9级特大地震影响,放射性物质发生泄漏。R本人自食恶果不足惜,却给世界环境...
    茶点故事阅读 39,027评论 3 307
  • 文/蒙蒙 一、第九天 我趴在偏房一处隐蔽的房顶上张望。 院中可真热闹,春花似锦、人声如沸。这庄子的主人今日做“春日...
    开封第一讲书人阅读 29,984评论 0 19
  • 文/苍兰香墨 我抬头看了看天上的太阳。三九已至,却和暖如春,着一层夹袄步出监牢的瞬间,已是汗流浃背。 一阵脚步声响...
    开封第一讲书人阅读 31,214评论 1 260
  • 我被黑心中介骗来泰国打工, 没想到刚下飞机就差点儿被人妖公主榨干…… 1. 我叫王不留,地道东北人。 一个月前我还...
    沈念sama阅读 45,108评论 2 351
  • 正文 我出身青楼,却偏偏与公主长得像,于是被迫代替她去往敌国和亲。 传闻我的和亲对象是个残疾皇子,可洞房花烛夜当晚...
    茶点故事阅读 42,517评论 2 343

推荐阅读更多精彩内容