[Docker Networking] Cross-host Docker networking with OVS GRE: test 1

1. Preparation

1.1 Installing OVS

Refer to this article for installation steps: https://blog.csdn.net/wodeamd1/article/details/81282437
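
The linked article builds OVS from source. As a quicker alternative, a minimal sketch, assuming a CentOS 7 host with the CentOS OpenStack repository available (the repo release package name is an assumption and varies by OpenStack version):

# install OVS from the CentOS OpenStack repo instead of building from source
yum install -y centos-release-openstack-queens
yum install -y openvswitch
systemctl enable --now openvswitch
# verify the daemon responds
ovs-vsctl show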

1.2 A basic understanding of Docker network types

See [mydocker]---docker的四种网络模型与原理实现(1) and [mydocker]---docker的四种网络模型与原理实现(2).

1.3 Two machines

vm1

[root@vm1 ~]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.19.0.12  netmask 255.255.240.0  broadcast 172.19.15.255
        inet6 fe80::5054:ff:fe4b:71f8  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:4b:71:f8  txqueuelen 1000  (Ethernet)
        RX packets 456800  bytes 531196208 (506.5 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 133968  bytes 149845102 (142.9 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
[root@vm1 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         172.19.0.1      0.0.0.0         UG    0      0        0 eth0
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
172.19.0.0      0.0.0.0         255.255.240.0   U     0      0        0 eth0

vm2

[root@vm2 ~]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.19.0.8  netmask 255.255.240.0  broadcast 172.19.15.255
        inet6 fe80::5054:ff:fe14:eae  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:14:0e:ae  txqueuelen 1000  (Ethernet)
        RX packets 278496  bytes 394401800 (376.1 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 36312  bytes 5027752 (4.7 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@vm2 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         172.19.0.1      0.0.0.0         UG    0      0        0 eth0
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
172.19.0.0      0.0.0.0         255.255.240.0   U     0      0        0 eth0
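
GRE runs over plain IP (protocol 47), so before building the tunnel it is worth confirming that the two hosts can already reach each other over the underlay and that nothing between them filters GRE. A minimal check from vm1:

ping -c 1 172.19.0.8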

2. Configuration

(figure: gre.png)

2.1 Configuration on vm1

[root@vm1 ~]# ovs-vsctl show
91e815a1-1021-4c97-a21c-893ab8c28e37
    ovs_version: "2.5.1"
[root@vm1 ~]# 
[root@vm1 ~]# 
[root@vm1 ~]# ovs-vsctl add-br br0
[root@vm1 ~]# ovs-vsctl add-port br0 tep0 -- set interface tep0 type=internal
[root@vm1 ~]# ifconfig tep0 192.168.0.200 netmask 255.255.255.0
[root@vm1 ~]# ovs-vsctl add-port br0 gre10 -- set interface gre10 type=gre options:remote_ip=172.19.0.8
[root@vm1 ~]# ovs-vsctl show
91e815a1-1021-4c97-a21c-893ab8c28e37
    Bridge "br0"
        Port "gre10"
            Interface "gre10"
                type: gre
                options: {remote_ip="172.19.0.8"}
        Port "tep0"
            Interface "tep0"
                type: internal
        Port "br0"
            Interface "br0"
                type: internal
    ovs_version: "2.5.1"
[root@vm1 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         172.19.0.1      0.0.0.0         UG    0      0        0 eth0
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
172.19.0.0      0.0.0.0         255.255.240.0   U     0      0        0 eth0
192.168.0.0     0.0.0.0         255.255.255.0   U     0      0        0 tep0
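
Here tep0 is an OVS internal port that gives the host an IP on the overlay network, while gre10 encapsulates whatever crosses the bridge toward the remote endpoint. As a quick sanity check (not part of the original transcript), the new ports can be inspected before moving on:

ovs-vsctl --columns=name,type,options list interface gre10
ip addr show tep0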

2.2 Configuration on vm2

[root@vm2 ~]# ovs-vsctl add-br br0
[root@vm2 ~]# ovs-vsctl add-port br0 tep0 -- set interface tep0 type=internal
[root@vm2 ~]# ifconfig tep0 192.168.0.201 netmask 255.255.255.0
[root@vm2 ~]# ovs-vsctl add-port br0 gre10 -- set interface gre10 type=gre options:remote_ip=172.19.0.12
[root@vm2 ~]# ovs-vsctl show
533800d4-246f-4099-a776-8254610db91f
    Bridge "br0"
        Port "gre10"
            Interface "gre10"
                type: gre
                options: {remote_ip="172.19.0.12"}
        Port "tep0"
            Interface "tep0"
                type: internal
        Port "br0"
            Interface "br0"
                type: internal
    ovs_version: "2.5.1"
[root@vm2 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         172.19.0.1      0.0.0.0         UG    0      0        0 eth0
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
172.19.0.0      0.0.0.0         255.255.240.0   U     0      0        0 eth0
192.168.0.0     0.0.0.0         255.255.255.0   U     0      0        0 tep0

2.3 Testing

-------------------------------vm1---------------------------------------
[root@vm1 ~]# ping -c 1 192.168.0.200
PING 192.168.0.200 (192.168.0.200) 56(84) bytes of data.
64 bytes from 192.168.0.200: icmp_seq=1 ttl=64 time=0.027 ms

--- 192.168.0.200 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.027/0.027/0.027/0.000 ms
[root@vm1 ~]# ping -c 1 192.168.0.201
PING 192.168.0.201 (192.168.0.201) 56(84) bytes of data.
64 bytes from 192.168.0.201: icmp_seq=1 ttl=64 time=4.20 ms

--- 192.168.0.201 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 4.204/4.204/4.204/0.000 ms
[root@vm1 ~]# 

-------------------------------vm2---------------------------------------
[root@vm2 ~]# ping -c 1 192.168.0.201
PING 192.168.0.201 (192.168.0.201) 56(84) bytes of data.
64 bytes from 192.168.0.201: icmp_seq=1 ttl=64 time=0.018 ms

--- 192.168.0.201 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.018/0.018/0.018/0.000 ms
[root@vm2 ~]# ping -c 1 192.168.0.200
PING 192.168.0.200 (192.168.0.200) 56(84) bytes of data.
64 bytes from 192.168.0.200: icmp_seq=1 ttl=64 time=0.684 ms

--- 192.168.0.200 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.684/0.684/0.684/0.000 ms
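
To confirm that these pings really ride the GRE tunnel rather than some other path, the encapsulated traffic can be captured on the underlay interface (a sketch; run the ping from a second terminal):

# 47 is the IP protocol number for GRE
tcpdump -ni eth0 ip proto 47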

3. Simulated Docker test (containers on the same subnet)


Install bridge-utils on both machines; it provides the brctl command used below:

yum install bridge-utils
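
If bridge-utils is not available, the brctl addif step below can be replaced with plain iproute2, which does the same thing:

# equivalent to: brctl addif docker0 veth0
ip link set veth0 master docker0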

3.1 Configuration on vm1

[root@vm1 ~]# ip link add docker0 type bridge
[root@vm1 ~]# ifconfig docker0 172.17.42.1/16
[root@vm1 ~]# ip link set docker0 up
[root@vm1 ~]# ip netns list
[root@vm1 ~]# ip netns add ns1 
[root@vm1 ~]# ip link add veth0 type veth peer name veth1 
[root@vm1 ~]# brctl addif docker0 veth0
[root@vm1 ~]# ip link set veth1 netns ns1
[root@vm1 ~]# ip link set veth0 up
[root@vm1 ~]# ip netns exec ns1 sh
sh-4.2# ip link set veth1 up
sh-4.2# ip link set lo up
sh-4.2# ip addr add 172.17.1.2/16 dev veth1
sh-4.2# ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 2  bytes 168 (168.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 2  bytes 168 (168.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

veth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.17.1.2  netmask 255.255.0.0  broadcast 0.0.0.0
        inet6 fe80::78b0:1eff:fe83:58c7  prefixlen 64  scopeid 0x20<link>
        ether 7a:b0:1e:83:58:c7  txqueuelen 1000  (Ethernet)
        RX packets 31  bytes 2206 (2.1 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 21  bytes 1474 (1.4 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
sh-4.2# exit
exit
[root@vm1 ~]# 
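
The interactive sh session is convenient for exploring, but the same namespace-side steps can be run non-interactively through ip netns exec, which is easier to script:

ip netns exec ns1 ip link set lo up
ip netns exec ns1 ip link set veth1 up
ip netns exec ns1 ip addr add 172.17.1.2/16 dev veth1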

3.2 Configuration on vm2

[root@vm2 ~]# ip link add docker0 type bridge
[root@vm2 ~]# ip addr add 172.17.43.1/16 dev docker0
[root@vm2 ~]# ip link set docker0 up
[root@vm2 ~]# ip netns list
[root@vm2 ~]# ip netns add ns1 
[root@vm2 ~]# ip link add veth0 type veth peer name veth1 
[root@vm2 ~]# brctl addif docker0 veth0
[root@vm2 ~]# ip link set veth1 netns ns1
[root@vm2 ~]# ip link set veth0 up
[root@vm2 ~]# ip netns exec ns1 sh
sh-4.2# ip link set veth1 up
sh-4.2# ip link set lo up
sh-4.2# ip addr add 172.17.2.2/16 dev veth1
sh-4.2# exit
exit
[root@vm2 ~]# 
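
One step is easy to overlook here: nothing in the transcripts above connects docker0 to the OVS bridge br0, yet the cross-host pings below depend on that link. Following the recipe this setup is based on (described in Docker 容器与容器云, see the references), the Linux bridge and the OVS bridge are joined by adding br0's internal port into docker0; this appears to be missing from the original capture:

# run on both vm1 and vm2
brctl addif docker0 br0
ip link set br0 up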

3.3 Testing

From ns1 on vm1, access ns1 on vm2; this is equivalent to container-to-container access.

[root@vm1 ~]# ip netns exec ns1 sh
sh-4.2# ifconfig veth1
veth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.17.1.2  netmask 255.255.0.0  broadcast 0.0.0.0
        inet6 fe80::78b0:1eff:fe83:58c7  prefixlen 64  scopeid 0x20<link>
        ether 7a:b0:1e:83:58:c7  txqueuelen 1000  (Ethernet)
        RX packets 34  bytes 2388 (2.3 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 24  bytes 1656 (1.6 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
// Ping ns1 on vm2
sh-4.2# ping -c 1 172.17.2.2
PING 172.17.2.2 (172.17.2.2) 56(84) bytes of data.
64 bytes from 172.17.2.2: icmp_seq=1 ttl=64 time=0.779 ms

--- 172.17.2.2 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.779/0.779/0.779/0.000 ms
// Ping the local docker0
sh-4.2# ping -c 1 172.17.42.1
PING 172.17.42.1 (172.17.42.1) 56(84) bytes of data.
64 bytes from 172.17.42.1: icmp_seq=1 ttl=64 time=0.063 ms

--- 172.17.42.1 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.063/0.063/0.063/0.000 ms
// Ping docker0 on vm2
sh-4.2# ping -c 1 172.17.43.1
PING 172.17.43.1 (172.17.43.1) 56(84) bytes of data.
64 bytes from 172.17.43.1: icmp_seq=1 ttl=64 time=1.17 ms

--- 172.17.43.1 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.170/1.170/1.170/0.000 ms
sh-4.2# exit
exit
[root@vm1 ~]# 

Similarly, ns1 on vm2 can reach ns1 on vm1:

[root@vm2 ~]# ip netns exec ns1 sh
sh-4.2# ping -c 1  172.17.2.2
PING 172.17.2.2 (172.17.2.2) 56(84) bytes of data.
64 bytes from 172.17.2.2: icmp_seq=1 ttl=64 time=0.034 ms

--- 172.17.2.2 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.034/0.034/0.034/0.000 ms
sh-4.2# ping -c 1  172.17.1.2
PING 172.17.1.2 (172.17.1.2) 56(84) bytes of data.
64 bytes from 172.17.1.2: icmp_seq=1 ttl=64 time=0.769 ms

--- 172.17.1.2 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.769/0.769/0.769/0.000 ms
sh-4.2# ping -c 1  172.17.42.1
PING 172.17.42.1 (172.17.42.1) 56(84) bytes of data.
64 bytes from 172.17.42.1: icmp_seq=1 ttl=64 time=0.724 ms

--- 172.17.42.1 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.724/0.724/0.724/0.000 ms
sh-4.2# ping -c 1  172.17.43.1
PING 172.17.43.1 (172.17.43.1) 56(84) bytes of data.
64 bytes from 172.17.43.1: icmp_seq=1 ttl=64 time=0.034 ms

--- 172.17.43.1 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.034/0.034/0.034/0.000 ms
sh-4.2# exit
exit
[root@vm2 ~]# 

Containers can now reach each other. Next, test the following cases; testing from ns1 on vm1 is sufficient.

[root@vm1 ~]# ip netns exec ns1 sh
// Pinging the host's private IP fails: ns1 has no default gateway
sh-4.2# ping -c 1 172.19.0.12
PING 172.19.0.12 (172.19.0.12) 56(84) bytes of data.
// Pinging vm2's IP fails: no iptables rule is set and ip_forward is not enabled
sh-4.2# ping -c 1 172.19.0.8
PING 172.19.0.8 (172.19.0.8) 56(84) bytes of data.
// Pinging tep0's network on vm2 fails: ns1 is not part of that network
sh-4.2# ping -c 1 192.168.1.200
PING 192.168.1.200 (192.168.1.200) 56(84) bytes of data.
// Internet access fails: no iptables rule is set and ip_forward is not enabled
sh-4.2# ping -c 1 www.baidu.com

Apply the following settings on vm1:

[root@vm1 ~]# echo 1 >  /proc/sys/net/ipv4/ip_forward
[root@vm1 ~]# iptables -t nat -A POSTROUTING -s 172.17.1.0/16 -o eth0 -j MASQUERADE
[root@vm1 ~]# ip netns exec ns1 sh
// Set docker0 as the default gateway inside ns1
sh-4.2# route add default gw 172.17.42.1
sh-4.2# ping -c 1 172.19.0.12
PING 172.19.0.12 (172.19.0.12) 56(84) bytes of data.
64 bytes from 172.19.0.12: icmp_seq=1 ttl=64 time=0.038 ms

--- 172.19.0.12 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.038/0.038/0.038/0.000 ms
sh-4.2# ping -c 1 172.19.0.8
PING 172.19.0.8 (172.19.0.8) 56(84) bytes of data.
64 bytes from 172.19.0.8: icmp_seq=1 ttl=63 time=0.327 ms

--- 172.19.0.8 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.327/0.327/0.327/0.000 ms
sh-4.2# ping -c 1 www.baidu.com
PING www.wshifen.com (119.63.197.151) 56(84) bytes of data.
64 bytes from 119.63.197.151 (119.63.197.151): icmp_seq=1 ttl=49 time=51.4 ms

--- www.wshifen.com ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 51.474/51.474/51.474/0.000 ms
// tep0 on vm2 is still unreachable: it is not part of the same network over GRE
sh-4.2# ping -c 1 192.168.1.200
PING 192.168.1.200 (192.168.1.200) 56(84) bytes of data.

--- 192.168.1.200 ping statistics ---
1 packets transmitted, 0 received, 100% packet loss, time 0ms
sh-4.2# exit
exit
[root@vm1 ~]# 
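
The NAT rule and default gateway were only set up on vm1. For ns1 on vm2 to pass the same tests, the mirrored configuration would be applied there (a sketch, not part of the original transcript):

# on vm2
echo 1 > /proc/sys/net/ipv4/ip_forward
iptables -t nat -A POSTROUTING -s 172.17.0.0/16 -o eth0 -j MASQUERADE
ip netns exec ns1 route add default gw 172.17.43.1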

4. Cleanup

vm1

[root@vm1 ~]# ovs-vsctl show
91e815a1-1021-4c97-a21c-893ab8c28e37
    Bridge "br0"
        Port "gre10"
            Interface "gre10"
                type: gre
                options: {remote_ip="172.19.0.8"}
        Port "tep0"
            Interface "tep0"
                type: internal
        Port "br0"
            Interface "br0"
                type: internal
    ovs_version: "2.5.1"
[root@vm1 ~]# ovs-vsctl del-br br0
[root@vm1 ~]# ip link delete docker0 type bridge
[root@vm1 ~]# ip link delete veth0 type veth
[root@vm1 ~]# ip netns delete ns1
[root@vm1 ~]# iptables -t nat -F
[root@vm1 ~]# ovs-vsctl show
91e815a1-1021-4c97-a21c-893ab8c28e37
    ovs_version: "2.5.1"
[root@vm1 ~]# 

vm2

[root@vm2 ~]# ovs-vsctl show
533800d4-246f-4099-a776-8254610db91f
    Bridge "br0"
        Port "gre10"
            Interface "gre10"
                type: gre
                options: {remote_ip="172.19.0.12"}
        Port "tep0"
            Interface "tep0"
                type: internal
        Port "br0"
            Interface "br0"
                type: internal
    ovs_version: "2.5.1"
[root@vm2 ~]# ovs-vsctl del-br br0
[root@vm2 ~]# ip link delete docker0 type bridge
[root@vm2 ~]# ip link delete veth0 type veth
[root@vm2 ~]# ip netns delete ns1
[root@vm2 ~]# iptables -t nat -F
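
Note that iptables -t nat -F flushes every NAT rule, not just the MASQUERADE rule added above, and the steps above leave ip_forward enabled. On a host that should be fully reverted:

echo 0 > /proc/sys/net/ipv4/ip_forward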

5. References

1. https://blog.csdn.net/wodeamd1/article/details/81282437
2. https://blog.csdn.net/song7999/article/details/80403527
3. Docker 容器与容器云 (book)
4. https://blog.csdn.net/qq_27366789/article/details/83348366
