Weave sub-networks in a Docker swarm are no longer isolated from each other after running olsrd

This is my workflow:

## 1. Setting up a Docker swarm cluster consisting of 3 hosts

 // Create a kv-store on host1
 sudo docker-machine create --driver generic --generic-ip-address=172.20.0.1 \
   --generic-ssh-key /home/ubuntu/.ssh/id_rsa --generic-ssh-user ubuntu kv-store
 eval $(sudo docker-machine env kv-store) && \
   sudo docker run --name consul --restart=always -p 8400:8400 -p 8500:8500 -p 53:53/udp \
     -d progrium/consul -server -bootstrap-expect 1 -ui-dir /ui
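 // Sanity check (my addition, not from the guide): Consul's standard
 // HTTP API should answer once the container is up
 curl http://$(docker-machine ip kv-store):8500/v1/status/leader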

 // Create swarm master on host1
 sudo docker-machine create --driver generic --generic-ip-address=172.20.0.1 \
   --generic-ssh-key /home/ubuntu/.ssh/id_rsa --generic-ssh-user ubuntu \
   --swarm --swarm-master \
   --swarm-discovery="consul://$(docker-machine ip kv-store):8500" \
   --engine-opt="cluster-store=consul://$(docker-machine ip kv-store):8500" \
   --engine-opt="cluster-advertise=eth1:2376" swarm

 // Create weave-1 (worker1 on host2)
 sudo docker-machine create --driver generic --generic-ip-address=172.20.0.2 \
   --generic-ssh-key /home/ubuntu/.ssh/id_rsa --generic-ssh-user ubuntu \
   --swarm \
   --swarm-discovery="consul://$(docker-machine ip kv-store):8500" \
   --engine-opt="cluster-store=consul://$(docker-machine ip kv-store):8500" \
   --engine-opt="cluster-advertise=eth1:2376" weave-1

 // Create weave-2 (worker2 on host3)
 sudo docker-machine create --driver generic --generic-ip-address=172.20.0.3 \
   --generic-ssh-key /home/ubuntu/.ssh/id_rsa --generic-ssh-user ubuntu \
   --swarm \
   --swarm-discovery="consul://$(docker-machine ip kv-store):8500" \
   --engine-opt="cluster-store=consul://$(docker-machine ip kv-store):8500" \
   --engine-opt="cluster-advertise=eth1:2376" weave-2
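At this point all three engines should be registered. As a quick sanity check (my addition, not part of the guide), docker-machine should list all machines as Running:

 // kv-store, swarm (master), weave-1 and weave-2 should all show up
 sudo docker-machine ls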

## 2. Setting up Weave networks

I followed the instructions at https://github.com/weaveworks-guides/old-guides/blob/master/docker-legacy/part-2.md

// Connecting the Cluster with Weave Net: Initializing Peers
eval $(docker-machine env swarm) && weave launch --no-dns --ipalloc-init consensus=3
eval $(docker-machine env weave-1) && weave launch --ipalloc-init consensus=3 && weave connect $(docker-machine ip swarm)
eval $(docker-machine env weave-2) && weave launch --ipalloc-init consensus=3 && weave connect $(docker-machine ip swarm)
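// Sanity check (my addition): run on each host; expect 3 peers with established connections
eval $(docker-machine env weave-1) && weave status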

// Setting up Swarm agents to use the Weave Docker API proxy
DOCKER_CLIENT_ARGS="$(docker-machine config swarm)" && weave_proxy_endpoint="$(docker-machine ip swarm):12375" && \
  docker ${DOCKER_CLIENT_ARGS} rm -f swarm-agent && \
  docker ${DOCKER_CLIENT_ARGS} run -d --restart=always --name=swarm-agent swarm join \
    --advertise ${weave_proxy_endpoint} consul://$(docker-machine ip kv-store):8500

DOCKER_CLIENT_ARGS="$(docker-machine config swarm)" && weave_proxy_endpoint="$(docker-machine ip swarm):12375" && \
  docker ${DOCKER_CLIENT_ARGS} rm -f swarm-agent-master && \
  docker ${DOCKER_CLIENT_ARGS} run -d --restart=always --name=swarm-agent-master -p 3376:3376 \
    -v /etc/docker:/etc/docker swarm manage --tlsverify --tlscacert=/etc/docker/ca.pem \
    --tlscert=/etc/docker/server.pem --tlskey=/etc/docker/server-key.pem -H tcp://0.0.0.0:3376 \
    --strategy spread --advertise ${weave_proxy_endpoint} consul://$(docker-machine ip kv-store):8500

DOCKER_CLIENT_ARGS="$(docker-machine config weave-1)" && weave_proxy_endpoint="$(docker-machine ip weave-1):12375" && \
  docker ${DOCKER_CLIENT_ARGS} rm -f swarm-agent && \
  docker ${DOCKER_CLIENT_ARGS} run -d --restart=always --name=swarm-agent swarm join \
    --advertise ${weave_proxy_endpoint} consul://$(docker-machine ip kv-store):8500

DOCKER_CLIENT_ARGS="$(docker-machine config weave-2)" && weave_proxy_endpoint="$(docker-machine ip weave-2):12375" && \
  docker ${DOCKER_CLIENT_ARGS} rm -f swarm-agent && \
  docker ${DOCKER_CLIENT_ARGS} run -d --restart=always --name=swarm-agent swarm join \
    --advertise ${weave_proxy_endpoint} consul://$(docker-machine ip kv-store):8500
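To confirm that Swarm now sees every engine through its Weave proxy endpoint, a sanity check of my own (not from the guide) is to point the client at the Swarm master and inspect the cluster; all 3 nodes should be advertised on port 12375:

// Uses the TLS material that docker-machine generated for the swarm master
eval $(docker-machine env --swarm swarm) && docker info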

## 3. Deploying 6 containers that form a tree topology

This is my Compose file:

version: "2"
services:
  node0:
    image: trongnhanuit/iottestbedv2_tmp:1.0
    container_name: node0
    hostname: node0
    privileged: true
    networks:
      iotnet0:
        ipv4_address: 10.10.0.2
      iotnet1:
        ipv4_address: 10.10.0.18
  node1:
    image: trongnhanuit/iottestbedv2_tmp:1.0
    container_name: node1
    hostname: node1
    privileged: true
    networks:
      iotnet0:
        ipv4_address: 10.10.0.3
      iotnet2:
        ipv4_address: 10.10.0.34
      iotnet4:
        ipv4_address: 10.10.0.66
  node2:
    image: trongnhanuit/iottestbedv2_tmp:1.0
    container_name: node2
    hostname: node2
    privileged: true
    networks:
      iotnet1:
        ipv4_address: 10.10.0.19
      iotnet3:
        ipv4_address: 10.10.0.50
  node3:
    image: trongnhanuit/iottestbedv2_tmp:1.0
    container_name: node3
    hostname: node3
    privileged: true
    networks:
      iotnet2:
        ipv4_address: 10.10.0.35
  node4:
    image: trongnhanuit/iottestbedv2_tmp:1.0
    container_name: node4
    hostname: node4
    privileged: true
    networks:
      iotnet3:
        ipv4_address: 10.10.0.51
  node5:
    image: trongnhanuit/iottestbedv2_tmp:1.0
    container_name: node5
    hostname: node5
    privileged: true
    networks:
      iotnet4:
        ipv4_address: 10.10.0.67
networks:
  iotnet0:
    driver: weave
    ipam:
      config:
        - subnet: 10.10.0.0/28
  iotnet1:
    driver: weave
    ipam:
      config:
        - subnet: 10.10.0.16/28
  iotnet2:
    driver: weave
    ipam:
      config:
        - subnet: 10.10.0.32/28
  iotnet3:
    driver: weave
    ipam:
      config:
        - subnet: 10.10.0.48/28
  iotnet4:
    driver: weave
    ipam:
      config:
        - subnet: 10.10.0.64/28
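For completeness, a Compose file like this is deployed against the Swarm endpoint roughly as follows. The dockerscenario_ prefix that shows up in the tcpdump output later is the Compose project name, which by default is the name of the directory holding the file:

// Bring up the 6-node tree across the cluster
eval $(docker-machine env --swarm swarm) && docker-compose up -d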

## 4. Test #1: The sub-networks worked well before running routing

node1 had 4 network interfaces:

- eth0: 172.18.0.4/16
- ethwe0: 10.10.0.3/28
- ethwe1: 10.10.0.66/28
- ethwe2: 10.10.0.34/28
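(For reference, such a listing can be reproduced with a standard one-liner against the cluster:)

 docker exec node1 ip -o -4 addr show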

node1 successfully pinged 10.10.0.2 (owned by node0) through ethwe0. Meanwhile, node4 (10.10.0.51/28) was unreachable from node1, which is exactly what I expected: the sub-networks were isolated from each other. This is the routing table in node1:

Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         172.18.0.1      0.0.0.0         UG    0      0        0 eth0
10.10.0.0       0.0.0.0         255.255.255.240 U     0      0        0 ethwe0
10.10.0.32      0.0.0.0         255.255.255.240 U     0      0        0 ethwe2
10.10.0.64      0.0.0.0         255.255.255.240 U     0      0        0 ethwe1
172.18.0.0      0.0.0.0         255.255.0.0     U     0      0        0 eth0
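For reference, the reachability tests above were along these lines (run against the cluster; exact ping options may vary):

 // Same subnet (iotnet0): succeeds
 docker exec node1 ping -c 3 10.10.0.2
 // Different subnet (iotnet3): times out while no routing daemon is running
 docker exec node1 ping -c 3 -W 2 10.10.0.51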

## 5. Test #2: After running olsrd, the sub-networks were no longer isolated from each other

To enable multi-hop connections (e.g. node1 -> node0 -> node2 -> node4), I ran olsrd in each container. This is the relevant part of olsrd.conf in node1:

...
Interface "ethwe0" "ethwe1" "ethwe2"
{
    # Emission intervals in seconds.
    # If not defined, Freifunk network defaults are used
    # (default is 2.0/20.0 for Hello and 5.0/300.0 for Tc/Mid/Hna)
    Ip4Broadcast        255.255.255.255

    # HelloInterval       2.0
    # HelloValidityTime   6.0
    # TcInterval          5.0
    # TcValidityTime     30.0
    # MidInterval         5.0
    # MidValidityTime    30.0
    # HnaInterval         5.0
    # HnaValidityTime    30.0
}
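olsrd itself was then started inside each container; the invocation below is a typical example rather than a verbatim copy of my setup (the config path is an assumption):

 // -f selects the config file; debug level 0 makes olsrd fork to the background
 olsrd -f /etc/olsrd.conf -d 0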

Then, node1 was able to ping node4. However, the problem was that node1 could ping node4 directly, without going through node0 and node2. This is the routing table in node1:

Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         172.18.0.1      0.0.0.0         UG    0      0        0 eth0
10.10.0.0       0.0.0.0         255.255.255.240 U     0      0        0 ethwe0
10.10.0.2       10.10.0.18      255.255.255.255 UGH   2      0        0 ethwe0
10.10.0.16      10.10.0.2       255.255.255.240 UG    20     0        0 ethwe0
10.10.0.18      10.10.0.18      255.255.255.255 UGH   2      0        0 ethwe0
10.10.0.19      10.10.0.50      255.255.255.255 UGH   2      0        0 ethwe0
10.10.0.32      0.0.0.0         255.255.255.240 U     0      0        0 ethwe2
10.10.0.35      10.10.0.35      255.255.255.255 UGH   2      0        0 ethwe0
10.10.0.48      10.10.0.2       255.255.255.240 UG    20     0        0 ethwe0
10.10.0.50      10.10.0.50      255.255.255.255 UGH   2      0        0 ethwe0
10.10.0.51      10.10.0.51      255.255.255.255 UGH   2      0        0 ethwe0
10.10.0.64      0.0.0.0         255.255.255.240 U     0      0        0 ethwe1
10.10.0.67      10.10.0.67      255.255.255.255 UGH   2      0        0 ethwe0
172.18.0.0      0.0.0.0         255.255.0.0     U     0      0        0 eth0

This is the output when I ran traceroute 10.10.0.51 in node1. The packets from node1 (10.10.0.3/28) reached node4 (10.10.0.51/28) directly in a single hop, even though they were not in the same sub-network.

traceroute to 10.10.0.51 (10.10.0.51), 30 hops max, 46 byte packets
 1  10.10.0.51 (10.10.0.51)  38.650 ms  13.291 ms  13.351 ms

In addition, when I ran tcpdump to log all traffic received on ethwe0 in node1, I saw that it received the broadcast packets of all the other sub-networks. This is the tcpdump log:

...
11:45:20.835653 IP node5.dockerscenario_iotnet4.698 > 255.255.255.255.698: OLSRv4, seq 0xd66c, length 60
11:45:20.877292 IP node1.698 > 255.255.255.255.698: OLSRv4, seq 0x3872, length 88
11:45:20.919044 IP 10.10.0.51.698 > 255.255.255.255.698: OLSRv4, seq 0xe003, length 96
11:45:21.048157 IP 10.10.0.19.698 > 255.255.255.255.698: OLSRv4, seq 0x6fa3, length 92
11:45:21.097715 IP 10.10.0.50.698 > 255.255.255.255.698: OLSRv4, seq 0xdecb, length 56
11:45:21.736235 IP node1.698 > 255.255.255.255.698: OLSRv4, seq 0x12a3, length 72
11:45:21.766085 IP node3.dockerscenario_iotnet2.698 > 255.255.255.255.698: OLSRv4, seq 0x8f05, length 60
11:45:21.886903 IP node1.698 > 255.255.255.255.698: OLSRv4, seq 0x76a5, length 88
11:45:22.288091 IP node0.dockerscenario_iotnet0.698 > 255.255.255.255.698: OLSRv4, seq 0x0b4e, length 56
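(The capture was taken with a command along these lines; OLSR signalling uses UDP port 698, and without -n tcpdump resolves some addresses to the container and network names seen above:)

 docker exec node1 tcpdump -i ethwe0 udp port 698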

In conclusion, my problem is that the sub-networks are no longer isolated from each other after I run routing software (olsrd in this case). Does anyone have an idea how to deal with this issue? Thanks in advance!