05 - Docker Container Networking
Published: 2020-09-06 18:14:46  Editor: 雪饮
Adding network namespaces and listing them
[root@localhost ~]# ip netns add r1
[root@localhost ~]# ip netns add r2
[root@localhost ~]# ip netns list
r2
r1
Running a command inside a network namespace from outside it
[root@localhost ~]# ip netns exec r1 ifconfig -a
lo: flags=8<LOOPBACK> mtu 65536
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
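The same inspection can also be done with iproute2 commands instead of ifconfig; a minimal sketch (output omitted):
[root@localhost ~]# ip netns exec r1 ip link show
[root@localhost ~]# ip netns exec r1 ip addr show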
Adding a link (a veth pair) and listing links
[root@localhost ~]# ip link add name veth1.1 type veth peer name veth1.2
[root@localhost ~]# ip link sh
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 00:0c:29:73:73:85 brd ff:ff:ff:ff:ff:ff
3: veth1.2@veth1.1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether b6:2d:75:32:8c:74 brd ff:ff:ff:ff:ff:ff
4: veth1.1@veth1.2: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 4e:80:5e:47:c7:4c brd ff:ff:ff:ff:ff:ff
Two names are given when adding the link above because what is being created is a pair of links (a veth pair); the two names are the names of the two ends of the pair.
In the link listing above, sh is an abbreviation of show.
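If you want to confirm that the two interfaces really are the two ends of one veth pair, the detailed link view should report the type as veth; a sketch:
[root@localhost ~]# ip -d link show veth1.1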
Moving a link into a network namespace
[root@localhost ~]# ip link set dev veth1.2 netns r1
[root@localhost ~]# ip link show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 00:0c:29:73:73:85 brd ff:ff:ff:ff:ff:ff
4: veth1.1@if3: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
link/ether 4e:80:5e:47:c7:4c brd ff:ff:ff:ff:ff:ff link-netnsid 0
Here the veth1.2 link has been moved into the r1 network namespace, so veth1.2 no longer exists on the host.
Inside the r1 namespace, however, we can see it:
[root@localhost ~]# ip netns exec r1 ifconfig -a
lo: flags=8<LOOPBACK> mtu 65536
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
veth1.2: flags=4098<BROADCAST,MULTICAST> mtu 1500
ether b6:2d:75:32:8c:74 txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Renaming an interface inside a network namespace
[root@localhost ~]# ip netns exec r1 ip link set dev veth1.2 name eth0
[root@localhost ~]# ip netns exec r1 ifconfig -a
eth0: flags=4098<BROADCAST,MULTICAST> mtu 1500
ether b6:2d:75:32:8c:74 txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=8<LOOPBACK> mtu 65536
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Activating a link (network interface)
We have already moved one end of the pair into the r1 namespace, and the host still has the other end, veth1.1. However, ifconfig does not show a veth1.1 interface yet, because a link is not activated automatically by default, so we have to activate it manually:
[root@localhost ~]# ifconfig veth1.1 10.1.0.1/24 up
[root@localhost ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.108.128 netmask 255.255.255.0 broadcast 192.168.108.255
inet6 fe80::e96f:43c6:938b:d1a6 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:73:73:85 txqueuelen 1000 (Ethernet)
RX packets 1246 bytes 103902 (101.4 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 609 bytes 70215 (68.5 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 68 bytes 5920 (5.7 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 68 bytes 5920 (5.7 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
veth1.1: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 10.1.0.1 netmask 255.255.255.0 broadcast 10.1.0.255
ether 4e:80:5e:47:c7:4c txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Note: when activating the link here we manually assign an IP address and subnet mask, as in "10.1.0.1/24" above.
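The same activation can be written with iproute2 instead of ifconfig; a rough equivalent of the command above:
[root@localhost ~]# ip addr add 10.1.0.1/24 dev veth1.1
[root@localhost ~]# ip link set veth1.1 up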
In the same way, we can also activate the link that was just moved into the r1 network namespace:
[root@localhost ~]# ip netns exec r1 ifconfig eth0 10.1.0.2/24 up
[root@localhost ~]# ip netns exec r1 ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.1.0.2 netmask 255.255.255.0 broadcast 10.1.0.255
inet6 fe80::b42d:75ff:fe32:8c74 prefixlen 64 scopeid 0x20<link>
ether b6:2d:75:32:8c:74 txqueuelen 1000 (Ethernet)
RX packets 8 bytes 656 (656.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 8 bytes 656 (656.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
At this point the host can also ping the 10.1.0.2 interface inside the r1 namespace:
[root@localhost ~]# ping 10.1.0.2
PING 10.1.0.2 (10.1.0.2) 56(84) bytes of data.
64 bytes from 10.1.0.2: icmp_seq=1 ttl=64 time=0.142 ms
64 bytes from 10.1.0.2: icmp_seq=2 ttl=64 time=0.036 ms
64 bytes from 10.1.0.2: icmp_seq=3 ttl=64 time=0.056 ms
64 bytes from 10.1.0.2: icmp_seq=4 ttl=64 time=0.036 ms
64 bytes from 10.1.0.2: icmp_seq=5 ttl=64 time=0.049 ms
^C
--- 10.1.0.2 ping statistics ---
5 packets transmitted, 5 received, 0% packet loss, time 4000ms
rtt min/avg/max/mdev = 0.036/0.063/0.142/0.041 ms
Moving an activated link into a network namespace
A link that is already activated can also be moved into a network namespace. Here we move veth1.1, the end remaining on the host and currently up, into the r2 network namespace:
[root@localhost ~]# ip link set dev veth1.1 netns r2
[root@localhost ~]# ip netns exec r2 ifconfig -a
lo: flags=8<LOOPBACK> mtu 65536
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
veth1.1: flags=4098<BROADCAST,MULTICAST> mtu 1500
ether 4e:80:5e:47:c7:4c txqueuelen 1000 (Ethernet)
RX packets 15 bytes 1230 (1.2 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 15 bytes 1230 (1.2 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
After the move we find that the link's IP address is gone inside r2, so it has to be activated again there; once activated, r2 can likewise ping r1:
[root@localhost ~]# ip netns exec r2 ifconfig veth1.1 10.1.0.3/24 up
[root@localhost ~]# ip netns exec r2 ifconfig
veth1.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.1.0.3 netmask 255.255.255.0 broadcast 10.1.0.255
inet6 fe80::4c80:5eff:fe47:c74c prefixlen 64 scopeid 0x20<link>
ether 4e:80:5e:47:c7:4c txqueuelen 1000 (Ethernet)
RX packets 15 bytes 1230 (1.2 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 23 bytes 1886 (1.8 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@localhost ~]# ip netns exec r2 ping 10.1.0.2
PING 10.1.0.2 (10.1.0.2) 56(84) bytes of data.
64 bytes from 10.1.0.2: icmp_seq=1 ttl=64 time=0.163 ms
64 bytes from 10.1.0.2: icmp_seq=2 ttl=64 time=0.033 ms
64 bytes from 10.1.0.2: icmp_seq=3 ttl=64 time=0.034 ms
64 bytes from 10.1.0.2: icmp_seq=4 ttl=64 time=0.034 ms
^C
--- 10.1.0.2 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3000ms
rtt min/avg/max/mdev = 0.033/0.066/0.163/0.056 ms
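If you want to clean up after experimenting, deleting the namespaces also destroys the veth ends that were moved into them; a sketch:
[root@localhost ~]# ip netns delete r1
[root@localhost ~]# ip netns delete r2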
Running a container that is removed when it exits
[root@localhost ~]# docker run --name t1 -it --rm busybox:latest
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02
inet addr:172.17.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
inet6 addr: fe80::42:acff:fe11:2/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:7 errors:0 dropped:0 overruns:0 frame:0
TX packets:7 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:586 (586.0 B) TX bytes:586 (586.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
/ # exit
[root@localhost ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
54430c82f146 foe/httpd:v0.1-1 "sh" 23 hours ago Exited (137) 21 hours ago t3
1b767a128e0b towards/httpd:v0.2 "/bin/httpd -f -h ..." 23 hours ago Exited (137) 21 hours ago towards3
a77522172c4a foe/httpd:v0.2 "/bin/httpd -f -h ..." 6 days ago Exited (137) 6 days ago foe1
8bf289b6017f towards/httpd:v0.2 "/bin/httpd -f -h ..." 6 days ago Exited (137) 6 days ago towards1
44fd298d3df4 foe/httpd:v0.2 "/bin/httpd -f -h ..." 6 days ago Exited (137) 6 days ago t2
a12aee4c7924 busybox "sh" 6 days ago Exited (137) 6 days ago bl
6e2f3d9d2545 redis:4-alpine "docker-entrypoint..." 7 days ago Exited (0) 6 days ago kvstorl
9f1ed90b19a9 nginx:1.14-alpine "nginx -g 'daemon ..." 7 days ago Exited (0) 6 days ago web1
36374b410be3 busybox:latest "sh" 7 days ago Exited (130) 7 days ago bl6
05f9881a6708 busybox:latest "sh" 7 days ago Exited (130) 7 days ago bl5
b3e2e29e598b busybox:latest "sh" 7 days ago Exited (127) 7 days ago bl4
58a64a5fd709 busybox:latest "sh" 7 days ago Created bl3
5c4f03f15e56 busybox:latest "sh" 7 days ago Created bl2
As we can see, after typing exit in the container's console, running docker ps -a again no longer shows the t1 container we just ran. That is because we started a remove-on-exit container (--rm).
A container without a --network parameter defaults to bridge
[root@localhost ~]# docker run --name t1 -it --network bridge --rm busybox:latest
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02
inet addr:172.17.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
inet6 addr: fe80::42:acff:fe11:2/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:6 errors:0 dropped:0 overruns:0 frame:0
TX packets:6 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:516 (516.0 B) TX bytes:516 (516.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
/ #
Compare this with the remove-on-exit example above: the default ifconfig output there is identical to the result here, where the --network parameter is explicitly set to bridge.
With --network none, only the lo interface exists
[root@localhost ~]# docker run --name t1 -it --network none --rm busybox:latest
/ # ifconfig -a
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
/ #
The hostname is the container ID
We run a container and check its hostname:
[root@localhost ~]# docker run --name t1 -it --network bridge --rm busybox:latest
/ # hostname
5c838e034922
/ #
Then open another session and check that container's ID:
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
5c838e034922 busybox:latest "sh" 42 seconds ago Up 41 seconds t1
We can see that the container ID is the container's default hostname.
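The same value can be read without entering the container, using a docker inspect format template (the Config.Hostname field also appears in the full inspect output shown later); a sketch:
[root@localhost ~]# docker inspect -f '{{.Config.Hostname}}' t1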
Injecting a hostname
[root@localhost ~]# docker run --name t1 -it --network bridge -h t1.magedu.com --rm busybox:latest
/ # hostname
t1.magedu.com
/ # cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.2 t1.magedu.com
/ # cat /etc/resolv.conf
# Generated by NetworkManager
search localdomain
nameserver 192.168.108.2
/ # nslookup -type=A www.baidu.com
Server: 192.168.108.2
Address: 192.168.108.2:53
Non-authoritative answer:
www.baidu.com canonical name = www.a.shifen.com
Name: www.a.shifen.com
Address: 14.215.177.38
Name: www.a.shifen.com
Address: 14.215.177.39
/ #
Specifying DNS
Above, I deliberately checked the DNS configuration and found it was an internal IP address; in fact the DNS server can also be specified:
[root@localhost ~]# docker run --name t1 -it --network bridge -h t1.magedu.com --dns 114.114.114.114 --rm busybox:latest
/ # cat /etc/resolv.conf
search localdomain
nameserver 114.114.114.114
/ #
Specifying the DNS search domain
Likewise, earlier I checked the DNS configuration and saw that search was localdomain. This search list can also be injected, but before injecting it let's first understand what search means, illustrated with the following example:
search google.com baidu.com
When the nameserver fails to resolve a name, the entries of the search list are appended to the name and resolution is retried. For example, if the name is www.abc and resolution fails, www.abc.google.com and then www.abc.baidu.com are tried, continuing until the search list is exhausted. This option can list several domains, separated by spaces or tabs.
Now that we know what search does, how do we inject it? For example:
[root@localhost ~]# docker run --name t1 -it --network bridge -h t1.magedu.com --dns 114.114.114.114 --dns-search linux.io --rm busybox:latest
/ # cat /etc/resolv.conf
search linux.io
nameserver 114.114.114.114
/ #
Injecting a hosts entry
Similarly, earlier I checked /etc/hosts and saw that after injecting a hostname an entry is automatically added to hosts. What if, besides that automatically injected entry, I want to inject another one? For example:
[root@localhost ~]# docker run --name t1 -it --network bridge -h t1.magedu.com --dns 114.114.114.114 --dns-search linux.io --add-host www.magedu.com:1.1.1.1 --rm busybox:latest
/ # cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
1.1.1.1 www.magedu.com
172.17.0.2 t1.magedu.com
/ #
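The --add-host option can be repeated to inject several entries at once; a sketch (the second hostname and address are made up for illustration):
[root@localhost ~]# docker run --name t1 -it --network bridge -h t1.magedu.com --add-host www.magedu.com:1.1.1.1 --add-host db.magedu.com:1.1.1.2 --rm busybox:latest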
Limitations of container services across hosts
In the instance above we can see its IP address is 172.17.0.2:
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02
inet addr:172.17.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
inet6 addr: fe80::42:acff:fe11:2/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:8 errors:0 dropped:0 overruns:0 frame:0
TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:656 (656.0 B) TX bytes:656 (656.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
/ #
This address can be pinged from our host:
[root@localhost ~]# ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.086 ms
64 bytes from 172.17.0.2: icmp_seq=2 ttl=64 time=0.042 ms
^C
--- 172.17.0.2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.042/0.064/0.086/0.022 ms
But another host on the same network segment as our host cannot ping it:
[root@localhost ~]# ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
^C
--- 172.17.0.2 ping statistics ---
10 packets transmitted, 0 received, 100% packet loss, time 9001ms
Exposing Docker services
We run a Docker container with an httpd service and expose port 80:
[root@localhost ~]# docker run --name myweb --rm -p 80 towards/httpd:v0.2
At this point it can be accessed successfully from within the host itself (we use docker inspect to find the container's address first):
[root@localhost ~]# docker inspect myweb
[
{
"Id": "6d4d6311f0328e57cf4afd4141007059ad40cb69e224213d32a0dabf922c1b1d",
"Created": "2020-09-06T08:56:10.454993831Z",
"Path": "/bin/httpd",
"Args": [
"-f",
"-h",
"/data/html"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 2504,
"ExitCode": 0,
"Error": "",
"StartedAt": "2020-09-06T08:56:11.243739461Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:023aaf0a80ce889a9b50f45efcff0f9adccd6784c4dde1f7cdea00f5451f1100",
"ResolvConfPath": "/var/lib/docker/containers/6d4d6311f0328e57cf4afd4141007059ad40cb69e224213d32a0dabf922c1b1d/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/6d4d6311f0328e57cf4afd4141007059ad40cb69e224213d32a0dabf922c1b1d/hostname",
"HostsPath": "/var/lib/docker/containers/6d4d6311f0328e57cf4afd4141007059ad40cb69e224213d32a0dabf922c1b1d/hosts",
"LogPath": "",
"Name": "/myweb",
"RestartCount": 0,
"Driver": "overlay2",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "",
"ExecIDs": null,
"HostConfig": {
"Binds": null,
"ContainerIDFile": "",
"LogConfig": {
"Type": "journald",
"Config": {}
},
"NetworkMode": "default",
"PortBindings": {
"80/tcp": [
{
"HostIp": "",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": true,
"VolumeDriver": "",
"VolumesFrom": null,
"CapAdd": null,
"CapDrop": null,
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": false,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": null,
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "docker-runc",
"ConsoleSize": [
0,
0
],
"Isolation": "",
"CpuShares": 0,
"Memory": 0,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": null,
"BlkioDeviceReadBps": null,
"BlkioDeviceWriteBps": null,
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DiskQuota": 0,
"KernelMemory": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": -1,
"OomKillDisable": false,
"PidsLimit": 0,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0
},
"GraphDriver": {
"Name": "overlay2",
"Data": {
"LowerDir": "/var/lib/docker/overlay2/e9229ad2695a5a19bcc621e4442e3a7b6a502b703a371a8cd03bb05a7e4334bf-init/diff:/var/lib/docker/overlay2/dffa8ea04d68f47b94906eaafd095420060e61865bdf0ea667f0ce70de791b38/diff:/var/lib/docker/overlay2/c85fc5ddc2b05c93c6b2682475a635ef62a7d615bafe9b740e81b969d6e6167d/diff",
"MergedDir": "/var/lib/docker/overlay2/e9229ad2695a5a19bcc621e4442e3a7b6a502b703a371a8cd03bb05a7e4334bf/merged",
"UpperDir": "/var/lib/docker/overlay2/e9229ad2695a5a19bcc621e4442e3a7b6a502b703a371a8cd03bb05a7e4334bf/diff",
"WorkDir": "/var/lib/docker/overlay2/e9229ad2695a5a19bcc621e4442e3a7b6a502b703a371a8cd03bb05a7e4334bf/work"
}
},
"Mounts": [],
"Config": {
"Hostname": "6d4d6311f032",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": true,
"AttachStderr": true,
"ExposedPorts": {
"80/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"/bin/httpd",
"-f",
"-h",
"/data/html"
],
"ArgsEscaped": true,
"Image": "towards/httpd:v0.2",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {}
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "098b6c565853507a653bfd6aeeaa427d18c23f01d58dead11b9859b7cfe241ec",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"80/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "32768"
}
]
},
"SandboxKey": "/var/run/docker/netns/098b6c565853",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "513474afbcbfcd86e31527236208f24154257a74f313990e12f4a9d001aa3bd2",
"Gateway": "172.17.0.1",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "172.17.0.3",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"MacAddress": "02:42:ac:11:00:03",
"Networks": {
"bridge": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "76fe8dd5a327416cecc376897444f5436cef13a495dc6a7d955a38c666e1b52b",
"EndpointID": "513474afbcbfcd86e31527236208f24154257a74f313990e12f4a9d001aa3bd2",
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.3",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:11:00:03"
}
}
}
}
]
[root@localhost ~]# curl 172.17.0.3
this is index
But if we want to access it from another host on the same network segment as this host, we need to know which host port the container's exposed port 80 has been mapped to:
[root@localhost ~]# iptables -t nat -vnL
Chain PREROUTING (policy ACCEPT 2 packets, 473 bytes)
pkts bytes target prot opt in out source destination
53 8367 PREROUTING_direct all -- * * 0.0.0.0/0 0.0.0.0/0
53 8367 PREROUTING_ZONES_SOURCE all -- * * 0.0.0.0/0 0.0.0.0/0
53 8367 PREROUTING_ZONES all -- * * 0.0.0.0/0 0.0.0.0/0
2 104 DOCKER all -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL
Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
Chain OUTPUT (policy ACCEPT 4 packets, 540 bytes)
pkts bytes target prot opt in out source destination
185 16759 OUTPUT_direct all -- * * 0.0.0.0/0 0.0.0.0/0
0 0 DOCKER all -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL
Chain POSTROUTING (policy ACCEPT 4 packets, 540 bytes)
pkts bytes target prot opt in out source destination
1 59 MASQUERADE all -- * !docker0 172.17.0.0/16 0.0.0.0/0
185 16759 POSTROUTING_direct all -- * * 0.0.0.0/0 0.0.0.0/0
185 16759 POSTROUTING_ZONES_SOURCE all -- * * 0.0.0.0/0 0.0.0.0/0
185 16759 POSTROUTING_ZONES all -- * * 0.0.0.0/0 0.0.0.0/0
0 0 MASQUERADE tcp -- * * 172.17.0.3 172.17.0.3 tcp dpt:80
Chain DOCKER (2 references)
pkts bytes target prot opt in out source destination
0 0 RETURN all -- docker0 * 0.0.0.0/0 0.0.0.0/0
0 0 DNAT tcp -- !docker0 * 0.0.0.0/0 0.0.0.0/0 tcp dpt:32768 to:172.17.0.3:80
Chain OUTPUT_direct (1 references)
pkts bytes target prot opt in out source destination
Chain POSTROUTING_ZONES (1 references)
pkts bytes target prot opt in out source destination
180 16383 POST_public all -- * ens33 0.0.0.0/0 0.0.0.0/0 [goto]
5 376 POST_public all -- * + 0.0.0.0/0 0.0.0.0/0 [goto]
Chain POSTROUTING_ZONES_SOURCE (1 references)
pkts bytes target prot opt in out source destination
Chain POSTROUTING_direct (1 references)
pkts bytes target prot opt in out source destination
Chain POST_public (2 references)
pkts bytes target prot opt in out source destination
185 16759 POST_public_log all -- * * 0.0.0.0/0 0.0.0.0/0
185 16759 POST_public_deny all -- * * 0.0.0.0/0 0.0.0.0/0
185 16759 POST_public_allow all -- * * 0.0.0.0/0 0.0.0.0/0
Chain POST_public_allow (1 references)
pkts bytes target prot opt in out source destination
Chain POST_public_deny (1 references)
pkts bytes target prot opt in out source destination
Chain POST_public_log (1 references)
pkts bytes target prot opt in out source destination
Chain PREROUTING_ZONES (1 references)
pkts bytes target prot opt in out source destination
52 8308 PRE_public all -- ens33 * 0.0.0.0/0 0.0.0.0/0 [goto]
1 59 PRE_public all -- + * 0.0.0.0/0 0.0.0.0/0 [goto]
Chain PREROUTING_ZONES_SOURCE (1 references)
pkts bytes target prot opt in out source destination
Chain PREROUTING_direct (1 references)
pkts bytes target prot opt in out source destination
Chain PRE_public (2 references)
pkts bytes target prot opt in out source destination
53 8367 PRE_public_log all -- * * 0.0.0.0/0 0.0.0.0/0
53 8367 PRE_public_deny all -- * * 0.0.0.0/0 0.0.0.0/0
53 8367 PRE_public_allow all -- * * 0.0.0.0/0 0.0.0.0/0
Chain PRE_public_allow (1 references)
pkts bytes target prot opt in out source destination
Chain PRE_public_deny (1 references)
pkts bytes target prot opt in out source destination
Chain PRE_public_log (1 references)
pkts bytes target prot opt in out source destination
So we learn that the associated dynamic port is 32768. With that, not only another host on the same segment but even the physical machine hosting host No. 8 and host No. 9 can access the service, as long as the request uses host No. 8's IP address and the port number just discovered (from here on, the first host is simply called No. 8 and the second host No. 9).
Because this way of exposing a service is directly reachable from the container's own host, while a host such as No. 9 first has to find out which port on host No. 8 the container was mapped to, it is called dynamic port exposure.
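For example, assuming No. 8's address is 192.168.108.128 as shown earlier and the dynamic port is 32768, No. 9 (or the physical machine) could reach the service roughly like this, and should get back the same "this is index" page:
[root@othermachine ~]# curl http://192.168.108.128:32768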
Once the container above is killed, the NAT rule that was just created for this mapping is also removed, for example:
[root@localhost ~]# docker kill myweb
myweb
[root@localhost ~]# iptables -t nat -vnL
Chain PREROUTING (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
56 8700 PREROUTING_direct all -- * * 0.0.0.0/0 0.0.0.0/0
56 8700 PREROUTING_ZONES_SOURCE all -- * * 0.0.0.0/0 0.0.0.0/0
56 8700 PREROUTING_ZONES all -- * * 0.0.0.0/0 0.0.0.0/0
4 208 DOCKER all -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL
Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
Chain OUTPUT (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
188 17239 OUTPUT_direct all -- * * 0.0.0.0/0 0.0.0.0/0
0 0 DOCKER all -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL
Chain POSTROUTING (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
1 59 MASQUERADE all -- * !docker0 172.17.0.0/16 0.0.0.0/0
190 17343 POSTROUTING_direct all -- * * 0.0.0.0/0 0.0.0.0/0
190 17343 POSTROUTING_ZONES_SOURCE all -- * * 0.0.0.0/0 0.0.0.0/0
190 17343 POSTROUTING_ZONES all -- * * 0.0.0.0/0 0.0.0.0/0
Chain DOCKER (2 references)
pkts bytes target prot opt in out source destination
0 0 RETURN all -- docker0 * 0.0.0.0/0 0.0.0.0/0
Chain OUTPUT_direct (1 references)
pkts bytes target prot opt in out source destination
Chain POSTROUTING_ZONES (1 references)
pkts bytes target prot opt in out source destination
183 16863 POST_public all -- * ens33 0.0.0.0/0 0.0.0.0/0 [goto]
7 480 POST_public all -- * + 0.0.0.0/0 0.0.0.0/0 [goto]
Chain POSTROUTING_ZONES_SOURCE (1 references)
pkts bytes target prot opt in out source destination
Chain POSTROUTING_direct (1 references)
pkts bytes target prot opt in out source destination
Chain POST_public (2 references)
pkts bytes target prot opt in out source destination
190 17343 POST_public_log all -- * * 0.0.0.0/0 0.0.0.0/0
190 17343 POST_public_deny all -- * * 0.0.0.0/0 0.0.0.0/0
190 17343 POST_public_allow all -- * * 0.0.0.0/0 0.0.0.0/0
Chain POST_public_allow (1 references)
pkts bytes target prot opt in out source destination
Chain POST_public_deny (1 references)
pkts bytes target prot opt in out source destination
Chain POST_public_log (1 references)
pkts bytes target prot opt in out source destination
Chain PREROUTING_ZONES (1 references)
pkts bytes target prot opt in out source destination
55 8641 PRE_public all -- ens33 * 0.0.0.0/0 0.0.0.0/0 [goto]
1 59 PRE_public all -- + * 0.0.0.0/0 0.0.0.0/0 [goto]
Chain PREROUTING_ZONES_SOURCE (1 references)
pkts bytes target prot opt in out source destination
Chain PREROUTING_direct (1 references)
pkts bytes target prot opt in out source destination
Chain PRE_public (2 references)
pkts bytes target prot opt in out source destination
56 8700 PRE_public_log all -- * * 0.0.0.0/0 0.0.0.0/0
56 8700 PRE_public_deny all -- * * 0.0.0.0/0 0.0.0.0/0
56 8700 PRE_public_allow all -- * * 0.0.0.0/0 0.0.0.0/0
Chain PRE_public_allow (1 references)
pkts bytes target prot opt in out source destination
Chain PRE_public_deny (1 references)
pkts bytes target prot opt in out source destination
Chain PRE_public_log (1 references)
pkts bytes target prot opt in out source destination
An easier way to find the dynamic port
In the example above, after exposing the service we found the dynamic port with iptables -t nat -vnL, but that output is long and cluttered and hard to search by eye. There is a better way. Start a new instance identical to the one above, and then look up its dynamic port:
[root@localhost ~]# docker run --name myweb --rm -p 80 towards/httpd:v0.2
Then open another session to check the dynamic port:
[root@localhost ~]# docker port myweb
80/tcp -> 0.0.0.0:32769
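docker port can also be asked about a single container port, in which case it prints only that mapping (here it should print something like 0.0.0.0:32769):
[root@localhost ~]# docker port myweb 80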
Exposing on a specific IP address of the host
In the example above the exposed service is bound to 0.0.0.0, i.e. every IP address of the host. What if I only want to expose it on the host's externally facing IP address?
Once we know the host's IP address, we run the container like this:
[root@localhost ~]# docker run --name myweb --rm -p 192.168.108.128::80 towards/httpd:v0.2
Open another session to check the dynamic port mapping:
[root@localhost ~]# docker port myweb
80/tcp -> 192.168.108.128:32768
Exposing on a specific port of the host
[root@localhost ~]# docker run --name myweb --rm -p 80:80 towards/httpd:v0.2
[root@localhost ~]# docker port myweb
80/tcp -> 0.0.0.0:80
Exposing with both a specific port and a specific IP address
[root@localhost ~]# docker run --name myweb --rm -p 192.168.108.128:80:80 towards/httpd:v0.2
[root@localhost ~]# docker port myweb
80/tcp -> 192.168.108.128:80
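As a related note, since the image exposes 80/tcp (see the ExposedPorts field in the inspect output above), the -P flag would publish every exposed port to a random host port; a sketch:
[root@localhost ~]# docker run --name myweb --rm -P towards/httpd:v0.2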
Sharing a network namespace
Start a container normally:
[root@localhost ~]# docker run --name bl -it --rm busybox
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02
inet addr:172.17.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
inet6 addr: fe80::42:acff:fe11:2/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:8 errors:0 dropped:0 overruns:0 frame:0
TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:656 (656.0 B) TX bytes:656 (656.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
/ #
Start a second container normally:
[root@localhost ~]# docker run --name bl2 -it --rm busybox
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:03
inet addr:172.17.0.3 Bcast:0.0.0.0 Mask:255.255.0.0
inet6 addr: fe80::42:acff:fe11:3/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:6 errors:0 dropped:0 overruns:0 frame:0
TX packets:6 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:516 (516.0 B) TX bytes:516 (516.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
/ #
We can see that their IP addresses are different. So how do we make them share the same network namespace (and thus the same IP)?
[root@localhost ~]# docker run --name bl2 --network container:bl -it --rm busybox
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02
inet addr:172.17.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
inet6 addr: fe80::42:acff:fe11:2/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:16 errors:0 dropped:0 overruns:0 frame:0
TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:1312 (1.2 KiB) TX bytes:656 (656.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
/ #
Here a new container was started sharing bl's network namespace, so its IP address is the same as bl's.
Create something in the first instance:
/ # mkdir /tmp/testdir
/ #
Then, in the second instance, we find it is still empty:
/ # ls /tmp
/ #
This shows that the filesystem is not shared.
Now start the httpd service in the second instance:
/ # echo "hello world" > /tmp/index.html
/ # httpd -h /tmp/
/ # netstat -tnl
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State
tcp        0      0 :::80                   :::*                    LISTEN
/ #
Then, from the first instance, access the httpd service on its own localhost:
/ # wget -O - -q 127.0.0.1
hello world
/ #
This shows that the httpd service is shared as well (the two containers share one network namespace, and therefore its ports).
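Besides joining another container's namespace, a container can also share the host's own network namespace with --network host, in which case ifconfig inside it shows the host's interfaces such as ens33 and docker0; a minimal sketch (the name bh is arbitrary):
[root@localhost ~]# docker run --name bh -it --network host --rm busybox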
Configuring Docker's default IP address
We can see that docker0's IP address defaults to 172.17.0.1:
[root@localhost ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.17.0.1 netmask 255.255.0.0 broadcast 0.0.0.0
inet6 fe80::42:afff:feac:4678 prefixlen 64 scopeid 0x20<link>
ether 02:42:af:ac:46:78 txqueuelen 0 (Ethernet)
RX packets 161 bytes 11481 (11.2 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 55 bytes 5547 (5.4 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.108.128 netmask 255.255.255.0 broadcast 192.168.108.255
inet6 fe80::e96f:43c6:938b:d1a6 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:73:73:85 txqueuelen 1000 (Ethernet)
RX packets 8704 bytes 674335 (658.5 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 3537 bytes 389116 (379.9 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 80 bytes 6837 (6.6 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 80 bytes 6837 (6.6 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Let's change this default network segment:
[root@localhost ~]# vi /etc/docker/daemon.json
[root@localhost ~]# cat /etc/docker/daemon.json
{
"registry-mirrors":["https://7bezldxe.mirror.aliyuncs.com","https://iktw4ld3.mirror.aliyuncs.com"],
"bip":"10.0.0.1/16"
}
[root@localhost ~]# service docker restart
Redirecting to /bin/systemctl restart docker.service
[root@localhost ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 10.0.0.1 netmask 255.255.0.0 broadcast 0.0.0.0
inet6 fe80::42:afff:feac:4678 prefixlen 64 scopeid 0x20<link>
ether 02:42:af:ac:46:78 txqueuelen 0 (Ethernet)
RX packets 161 bytes 11481 (11.2 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 55 bytes 5547 (5.4 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.108.128 netmask 255.255.255.0 broadcast 192.168.108.255
inet6 fe80::e96f:43c6:938b:d1a6 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:73:73:85 txqueuelen 1000 (Ethernet)
RX packets 9026 bytes 699999 (683.5 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 3713 bytes 407548 (397.9 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 80 bytes 6837 (6.6 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 80 bytes 6837 (6.6 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Creating your own Docker network
[root@localhost ~]# docker network create -d bridge --subnet "172.26.0.0/16" --gateway "172.26.0.1" mybr0
ae29568f4cf2a695b80c946ce879c9cc346911530ac40112e75ccd7717b6d54a
[root@localhost ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
98b4e3c50c56 bridge bridge local
a2ce16500b13 host host local
ae29568f4cf2 mybr0 bridge local
850a070c90de none null local
[root@localhost ~]# ifconfig
br-ae29568f4cf2: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.26.0.1 netmask 255.255.0.0 broadcast 0.0.0.0
ether 02:42:7c:8e:1c:a4 txqueuelen 0 (Ethernet)
RX packets 10121 bytes 786499 (768.0 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 4240 bytes 463238 (452.3 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 10.0.0.1 netmask 255.255.0.0 broadcast 0.0.0.0
inet6 fe80::42:afff:feac:4678 prefixlen 64 scopeid 0x20<link>
ether 02:42:af:ac:46:78 txqueuelen 0 (Ethernet)
RX packets 161 bytes 11481 (11.2 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 55 bytes 5547 (5.4 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.108.128 netmask 255.255.255.0 broadcast 192.168.108.255
inet6 fe80::e96f:43c6:938b:d1a6 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:73:73:85 txqueuelen 1000 (Ethernet)
RX packets 10121 bytes 786499 (768.0 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 4240 bytes 463238 (452.3 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 80 bytes 6837 (6.6 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 80 bytes 6837 (6.6 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Then run a container, specifying the custom network we just created:
[root@localhost ~]# docker run --name t1 -it --net mybr0 busybox:latest
/ # ifconfig
eth0 Link encap:Ethernet HWaddr 02:42:AC:1A:00:02
inet addr:172.26.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
inet6 addr: fe80::42:acff:fe1a:2/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:12 errors:0 dropped:0 overruns:0 frame:0
TX packets:6 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:1032 (1.0 KiB) TX bytes:516 (516.0 B)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
/ #
Note that an abbreviation is used here: net is short for network.
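To check the subnet and gateway recorded for mybr0, or to attach the running t1 container to the default bridge as well so that it has an interface on both networks, the following commands should work (output omitted); a sketch:
[root@localhost ~]# docker network inspect mybr0
[root@localhost ~]# docker network connect bridge t1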
Keywords: docker, network namespace, link, interface, activation, container