• 手动模拟 calico 网络


    机器1:
    ip 192.168.1.11
    网段 10.42.11.0/24

    # Pull the BIRD binaries out of the calico/node image without running it.
    docker create --name calico-node calico/node:v3.26.1
    
    docker cp calico-node:/usr/bin/bird ./
    docker cp calico-node:/usr/bin/bird6 ./
    docker cp calico-node:/usr/bin/birdcl ./
    
    docker rm calico-node
    # Fix: the binaries were copied into the current directory (and are later
    # installed to /usr/local/bin/), so mark the local copies executable —
    # the original chmod on /usr/local/sbin/bird* touched nothing.
    chmod +x ./bird*
    
    
    mkdir -p /etc/bird-cfg/
    
    
    # Write the BIRD config for machine 1 (router id 192.168.1.11, pod block
    # 10.42.11.0/24): blackhole the local block, aggregate it when exporting to
    # BGP peers, program 10.42.0.0/16 routes into the kernel via tunl0, and
    # peer with machine 2 (192.168.1.10) in AS 64512.
    # NOTE(review): the here-doc delimiter is unquoted, so the shell expands
    # $ and backticks inside it — safe here only because the config contains none.
    cat > /etc/bird-cfg/bird.cfg << EOL
    protocol static {
       # IP blocks for this host.
       route 10.42.11.0/24 blackhole;
    }
    
    # Aggregation of routes on this host; export the block, nothing beneath it.
    function calico_aggr ()
    {
          # Block 10.42.11.0/24 is confirmed
          if ( net = 10.42.11.0/24 ) then { accept; }
          if ( net ~ 10.42.11.0/24 ) then { reject; }
    }
    
    
    filter calico_export_to_bgp_peers {
      calico_aggr();
      if ( net ~ 10.42.0.0/16 ) then {
        accept;
      }
      reject;
    }
    
    filter calico_kernel_programming {
      if ( net ~ 10.42.0.0/16 ) then {
        krt_tunnel = "tunl0";
        accept;
      }
      accept;
    }
    
    router id 192.168.1.11;
    
    # Configure synchronization between routing tables and kernel.
    protocol kernel {
      learn;             # Learn all alien routes from the kernel
      persist;           # Don't remove routes on bird shutdown
      scan time 2;       # Scan kernel routing table every 2 seconds
      import all;
      export filter calico_kernel_programming; # Default is export none
      graceful restart;  # Turn on graceful restart to reduce potential flaps in
                         # routes when reloading BIRD configuration.  With a full
                         # automatic mesh, there is no way to prevent BGP from
                         # flapping since multiple nodes update their BGP
                         # configuration at the same time, GR is not guaranteed to
                         # work correctly in this scenario.
    }
    
    # Watch interface up/down events.
    protocol device {
      debug all;
      scan time 2;    # Scan interfaces every 2 seconds
    }
    
    protocol direct {
      debug all;
      interface -"tap*", "*"; # Exclude tap* but include everything else.
    }
    
    # Template for all BGP clients
    template bgp bgp_template {
      debug all;
      description "Connection to BGP peer";
      local as 64512;
      multihop;
      gateway recursive; # This should be the default, but just in case.
      import all;        # Import all routes, since we don't know what the upstream
                         # topology is and therefore have to trust the ToR/RR.
      export filter calico_export_to_bgp_peers;  # Only want to export routes for workloads.
      source address 192.168.1.11;  # The local address we use for the TCP connection
      add paths on;
      graceful restart;  # See comment in kernel section about graceful restart.
      connect delay time 2;
      connect retry time 5;
      error wait time 5,30;
    }
    
    protocol bgp Mesh_192_168_1_10 from bgp_template {
      neighbor 192.168.1.10 as 64512;
      #passive on; # Mesh is unidirectional, peer will connect to us. 
    }
    EOL
    
    
    
    
    # Install the BIRD binaries and start the daemon.
    cp bird* /usr/local/bin/
    # Fix: '-d' keeps BIRD in the foreground (debug mode) and would block every
    # command after this one; without it BIRD parses the config and daemonizes.
    bird -R -s /var/run/bird.ctl -c /etc/bird-cfg/bird.cfg
    
    # Interactive control shell; run 'show protocols all' there to check peering.
    birdcl -s /var/run/bird.ctl
    
    # Enable IPv4/IPv6 forwarding so this host can route pod traffic.
    cat > /etc/sysctl.d/30-ipforward.conf<<EOL
    net.ipv4.ip_forward=1
    net.ipv6.conf.default.forwarding=1
    net.ipv6.conf.all.forwarding=1
    EOL
    sysctl -p /etc/sysctl.d/30-ipforward.conf
    
    # Create three "workload" namespaces (ns1..ns3), each wired to the host
    # through a veth pair: host-side tapN <-> veth1 inside nsN.
    # Workload addresses are 10.42.11.11 .. 10.42.11.13 out of this host's block.
    for i in 1 2 3; do
        ip netns add "ns${i}"
        ip link add "tap${i}" type veth peer name veth1 netns "ns${i}"
    
        # Calico-style fixed MAC on the host side; ARP for the gateway is
        # answered via proxy_arp on the tap device.
        ip link set address ee:ee:ee:ee:ee:ee dev "tap${i}"
        echo 1 > "/proc/sys/net/ipv4/conf/tap${i}/proxy_arp"
        ip link set "tap${i}" up
    
        # Host-side /32 route to the workload, and the workload's own address.
        ip route add "10.42.11.$((10 + i))" dev "tap${i}"
        ip netns exec "ns${i}" ip addr add "10.42.11.$((10 + i))/32" dev veth1
    
        ip netns exec "ns${i}" ip link set veth1 up
        ip netns exec "ns${i}" ip link set lo up
    
        # Default route via the link-local gateway 169.254.1.1, which only
        # exists as a proxy-ARP answer on the host side.
        ip netns exec "ns${i}" ip route add 169.254.1.1 dev veth1
        ip netns exec "ns${i}" ip route add default via 169.254.1.1 dev veth1
    done
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
    • 35
    • 36
    • 37
    • 38
    • 39
    • 40
    • 41
    • 42
    • 43
    • 44
    • 45
    • 46
    • 47
    • 48
    • 49
    • 50
    • 51
    • 52
    • 53
    • 54
    • 55
    • 56
    • 57
    • 58
    • 59
    • 60
    • 61
    • 62
    • 63
    • 64
    • 65
    • 66
    • 67
    • 68
    • 69
    • 70
    • 71
    • 72
    • 73
    • 74
    • 75
    • 76
    • 77
    • 78
    • 79
    • 80
    • 81
    • 82
    • 83
    • 84
    • 85
    • 86
    • 87
    • 88
    • 89
    • 90
    • 91
    • 92
    • 93
    • 94
    • 95
    • 96
    • 97
    • 98
    • 99
    • 100
    • 101
    • 102
    • 103
    • 104
    • 105
    • 106
    • 107
    • 108
    • 109
    • 110
    • 111
    • 112
    • 113
    • 114
    • 115
    • 116
    • 117
    • 118
    • 119
    • 120
    • 121
    • 122
    • 123
    • 124
    • 125
    • 126
    • 127
    • 128
    • 129
    • 130
    • 131
    • 132
    • 133
    • 134
    • 135
    • 136
    • 137
    • 138
    • 139
    • 140
    • 141
    • 142
    • 143
    • 144
    • 145
    • 146
    • 147
    • 148
    • 149
    • 150
    • 151
    • 152
    • 153
    • 154
    • 155
    • 156
    • 157
    • 158
    • 159
    • 160
    • 161
    • 162

    可选:使用ipip

    # Load the ipip module; the kernel auto-creates the fallback device tunl0,
    # which accepts ipip packets from any remote peer.
    modprobe ipip
    # Fix: this host's pod block is 10.42.11.0/24, so tunl0 must carry
    # 10.42.11.0/32 — the original 10.42.1.0/32 was copied from machine 2.
    ip a a 10.42.11.0/32 brd 10.42.11.0 dev tunl0
    ip link set tunl0 up
    
    # Start from a clean filter table so FORWARD does not drop pod traffic.
    iptables -F
    
    • 1
    • 2
    • 3
    • 4
    • 5

    可选:不使用BGP,手动添加网关

    # Manually program the tunnel route (alternative to learning it over BGP):
    # reach machine 2's pod block 10.42.1.0/24 through tunl0, with machine 2
    # (192.168.1.10) as the gateway; 'onlink' skips the on-subnet check.
    ip route add 10.42.1.0/24 via 192.168.1.10 dev tunl0 onlink
    
    • 1
    • 2
    • 3

    机器2
    ip 192.168.1.10
    网段 10.42.1.0/24

    
    
    # Pull the BIRD binaries out of the calico/node image without running it.
    docker create --name calico-node calico/node:v3.26.1
    
    docker cp calico-node:/usr/bin/bird ./
    docker cp calico-node:/usr/bin/bird6 ./
    docker cp calico-node:/usr/bin/birdcl ./
    
    docker rm calico-node
    # Fix: the binaries were copied into the current directory (and are later
    # installed to /usr/local/bin/), so mark the local copies executable —
    # the original chmod on /usr/local/sbin/bird* touched nothing.
    chmod +x ./bird*
    
    
    mkdir -p /etc/bird-cfg/
    
    # Write the BIRD config for machine 2 (router id 192.168.1.10, pod block
    # 10.42.1.0/24): blackhole the local block, aggregate it when exporting to
    # BGP peers, program 10.42.0.0/16 routes into the kernel via tunl0, and
    # peer with machine 1 (192.168.1.11) in AS 64512.
    # NOTE(review): the here-doc delimiter is unquoted, so the shell expands
    # $ and backticks inside it — safe here only because the config contains none.
    cat > /etc/bird-cfg/bird.cfg << EOL
    protocol static {
       # IP blocks for this host.
       route 10.42.1.0/24 blackhole;
    }
    
    # Aggregation of routes on this host; export the block, nothing beneath it.
    function calico_aggr ()
    {
          # Block 10.42.1.0/24 is confirmed
          if ( net = 10.42.1.0/24 ) then { accept; }
          if ( net ~ 10.42.1.0/24 ) then { reject; }
    }
    
    
    filter calico_export_to_bgp_peers {
      calico_aggr();
      if ( net ~ 10.42.0.0/16 ) then {
        accept;
      }
      reject;
    }
    
    filter calico_kernel_programming {
      if ( net ~ 10.42.0.0/16 ) then {
        krt_tunnel = "tunl0";
        accept;
      }
      accept;
    }
    
    router id 192.168.1.10;
    
    # Configure synchronization between routing tables and kernel.
    protocol kernel {
      learn;             # Learn all alien routes from the kernel
      persist;           # Don't remove routes on bird shutdown
      scan time 2;       # Scan kernel routing table every 2 seconds
      import all;
      export filter calico_kernel_programming; # Default is export none
      graceful restart;  # Turn on graceful restart to reduce potential flaps in
                         # routes when reloading BIRD configuration.  With a full
                         # automatic mesh, there is no way to prevent BGP from
                         # flapping since multiple nodes update their BGP
                         # configuration at the same time, GR is not guaranteed to
                         # work correctly in this scenario.
    }
    
    # Watch interface up/down events.
    protocol device {
      debug all;
      scan time 2;    # Scan interfaces every 2 seconds
    }
    
    protocol direct {
      debug all;
      interface -"tap*", "*"; # Exclude tap* but include everything else.
    }
    
    # Template for all BGP clients
    template bgp bgp_template {
      debug all;
      description "Connection to BGP peer";
      local as 64512;
      multihop;
      gateway recursive; # This should be the default, but just in case.
      import all;        # Import all routes, since we don't know what the upstream
                         # topology is and therefore have to trust the ToR/RR.
      export filter calico_export_to_bgp_peers;  # Only want to export routes for workloads.
      source address 192.168.1.10;  # The local address we use for the TCP connection
      add paths on;
      graceful restart;  # See comment in kernel section about graceful restart.
      connect delay time 2;
      connect retry time 5;
      error wait time 5,30;
    }
    
    protocol bgp Mesh_192_168_1_11 from bgp_template {
      neighbor 192.168.1.11 as 64512;
      #passive on; # Mesh is unidirectional, peer will connect to us. 
    }
    EOL
    
    # Install the BIRD binaries and start the daemon.
    cp bird* /usr/local/bin/
    # Fix: '-d' keeps BIRD in the foreground (debug mode) and would block every
    # command after this one; without it BIRD parses the config and daemonizes.
    bird -R -s /var/run/bird.ctl -c /etc/bird-cfg/bird.cfg
    
    # Interactive control shell; run 'show protocols all' there to check peering.
    birdcl -s /var/run/bird.ctl
    
    # Enable IPv4/IPv6 forwarding so this host can route pod traffic.
    cat > /etc/sysctl.d/30-ipforward.conf<<EOL
    net.ipv4.ip_forward=1
    net.ipv6.conf.default.forwarding=1
    net.ipv6.conf.all.forwarding=1
    EOL
    sysctl -p /etc/sysctl.d/30-ipforward.conf
    
    
    
    
    # Create three "workload" namespaces (ns1..ns3), each wired to the host
    # through a veth pair: host-side tapN <-> veth1 inside nsN.
    # Workload addresses are 10.42.1.11 .. 10.42.1.13 out of this host's block.
    for i in 1 2 3; do
        ip netns add "ns${i}"
        ip link add "tap${i}" type veth peer name veth1 netns "ns${i}"
    
        # Calico-style fixed MAC on the host side; ARP for the gateway is
        # answered via proxy_arp on the tap device.
        ip link set address ee:ee:ee:ee:ee:ee dev "tap${i}"
        echo 1 > "/proc/sys/net/ipv4/conf/tap${i}/proxy_arp"
        ip link set "tap${i}" up
    
        # Host-side /32 route to the workload, and the workload's own address.
        ip route add "10.42.1.$((10 + i))" dev "tap${i}"
        ip netns exec "ns${i}" ip addr add "10.42.1.$((10 + i))/32" dev veth1
    
        ip netns exec "ns${i}" ip link set veth1 up
        ip netns exec "ns${i}" ip link set lo up
    
        # Default route via the link-local gateway 169.254.1.1, which only
        # exists as a proxy-ARP answer on the host side.
        ip netns exec "ns${i}" ip route add 169.254.1.1 dev veth1
        ip netns exec "ns${i}" ip route add default via 169.254.1.1 dev veth1
    done
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
    • 35
    • 36
    • 37
    • 38
    • 39
    • 40
    • 41
    • 42
    • 43
    • 44
    • 45
    • 46
    • 47
    • 48
    • 49
    • 50
    • 51
    • 52
    • 53
    • 54
    • 55
    • 56
    • 57
    • 58
    • 59
    • 60
    • 61
    • 62
    • 63
    • 64
    • 65
    • 66
    • 67
    • 68
    • 69
    • 70
    • 71
    • 72
    • 73
    • 74
    • 75
    • 76
    • 77
    • 78
    • 79
    • 80
    • 81
    • 82
    • 83
    • 84
    • 85
    • 86
    • 87
    • 88
    • 89
    • 90
    • 91
    • 92
    • 93
    • 94
    • 95
    • 96
    • 97
    • 98
    • 99
    • 100
    • 101
    • 102
    • 103
    • 104
    • 105
    • 106
    • 107
    • 108
    • 109
    • 110
    • 111
    • 112
    • 113
    • 114
    • 115
    • 116
    • 117
    • 118
    • 119
    • 120
    • 121
    • 122
    • 123
    • 124
    • 125
    • 126
    • 127
    • 128
    • 129
    • 130
    • 131
    • 132
    • 133
    • 134
    • 135
    • 136
    • 137
    • 138
    • 139
    • 140
    • 141
    • 142
    • 143
    • 144
    • 145
    • 146
    • 147
    • 148
    • 149
    • 150
    • 151
    • 152
    • 153
    • 154
    • 155
    • 156
    • 157
    • 158
    • 159
    • 160
    • 161

    可选:使用ipip

    # 添加tun0隧道设备,指定本地地址为192.168.154.11,远端地址为0.0.0.0(即远端是一个广播的地址,具体某个数据包往哪里转发,由路由表决定)
    # Load the ipip module; the kernel auto-creates the fallback device tunl0,
    # which accepts ipip packets from any remote peer (the routing table decides
    # where each outgoing packet is tunneled to).
    modprobe ipip
    # Fix: the BIRD config programs tunnel routes onto "tunl0"
    # (krt_tunnel = "tunl0") and machine 1 uses tunl0 as well, so configure
    # tunl0 here instead of creating a separate tun0 device.  10.42.1.0/32 is
    # the first address of this host's pod block; it serves as the source
    # address when forwarding over the tunnel.
    ip a a 10.42.1.0/32 dev tunl0
    ip link set tunl0 up
    
    # Start from a clean filter table so FORWARD does not drop pod traffic.
    iptables -F
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8

    可选:不使用BGP,手动添加网关

    # Manually program the tunnel route (alternative to learning it over BGP):
    # reach machine 1's pod block 10.42.11.0/24 with machine 1 (192.168.1.11)
    # as the gateway; 'onlink' skips the on-subnet check.
    # Fix: use tunl0 — the device auto-created by 'modprobe ipip' and the one
    # the BIRD config references — rather than the separate tun0 device.
    ip route add 10.42.11.0/24 via 192.168.1.11 dev tunl0 onlink
    
    
    • 1
    • 2
    • 3
    • 4
  • 相关阅读:
    互联网摸鱼日报(2023-11-08)
    C# Winform代码
    live555 rtsp服务器实战之createNewStreamSource
    动捕设备推动舞蹈表演动作捕捉动画制作突破边界
    使用 gopkg.in/yaml.v3 解析 YAML 数据
    java--自增自减运算符
    Xxl-Job 初次体验
    拥抱jsx,开启vue3用法的另一种选择
    基于Java的旅游管理系统设计与实现(源码+lw+部署文档+讲解等)
    HashMap底层原理
  • 原文地址:https://blog.csdn.net/kunyus/article/details/132959261