  +--------------------+              +--------------------+
  |   +------------+   |              |   +------------+   |
  |   |            |   |              |   |            |   |
  |   |    ConA    |   |              |   |    ConB    |   |
  |   |            |   |              |   |            |   |
  |   +-----+------+   |              |   +-----+------+   |
  |         |veth      |              |         |veth      |
  |       wl-A         |              |       wl-B         |
  |         |          |              |         |          |
  +-------node-A-------+              +-------node-B-------+
           |    |                              |    |
           |    |  type 1: in the same LAN     |    |
           |    +------------------------------+    |
           |                                        |
           |      type 2: in different networks     |
           |             +-------------+            |
           |             |             |            |
           +-------------+   Routers   +------------+
                         |             |
                         +-------------+
A packet sent from ConA to ConB is received on node-A's wl-A interface. Based on node-A's routing table, and after passing through the various iptables chains, it is forwarded towards node-B.
If node-A and node-B are in the same layer-2 segment, the next hop is node-B itself and the packet only has to cross a layer-2 switch.
If node-A and node-B are in different subnets, the packet is routed to the next hop and travels hop by hop through layer-3 switches or routers until it reaches node-B.
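
To make the two cases concrete, here is a rough sketch of the route node-A could carry for node-B's pod CIDR (the addresses are hypothetical: 10.100.2.0/24 as node-B's per-node pod subnet, 192.168.10.203 as node-B's address on the shared LAN, 192.168.10.1 as the local gateway in the routed case):

# case 1: node-A and node-B share a layer-2 segment -> the next hop is node-B itself
ip route add 10.100.2.0/24 via 192.168.10.203 dev eth0
# case 2: node-B sits in another subnet -> hand the packet to the local gateway,
# which forwards it hop by hop towards node-B
ip route add 10.100.2.0/24 via 192.168.10.1 dev eth0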

(figure: aliyun_vos_vpc)

# The test setup and commands are as follows

k8s-node2 IP address: 192.168.10.202
Internal DNS IP address: 10.96.0.10
Pod CIDR: 10.100.0.0/16, Service CIDR: 10.96.0.0/16
Office network: 192.168.10.0/23
# Taint the k8s-node2 node so that k8s does not schedule ordinary pods onto it and eat its resources:
kubectl taint nodes k8s-node2 forward=k8s-node2:NoSchedule
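# To double-check that the taint is in place (the output should echo what was set above):
kubectl describe node k8s-node2 | grep -i taints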
# Enable IP forwarding on k8s-node2
# vim /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
# sysctl -p /etc/sysctl.d/k8s.conf
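
# Quick check that forwarding really is on (should print net.ipv4.ip_forward = 1):
sysctl net.ipv4.ip_forward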

# SNAT traffic coming from the office network towards the pod and service CIDRs
iptables -t nat -A POSTROUTING -s 192.168.10.0/23 -d 10.100.0.0/16 -j MASQUERADE
iptables -t nat -A POSTROUTING -s 192.168.10.0/23 -d 10.96.0.0/16 -j MASQUERADE

# To remove the rules again
iptables -t nat -D POSTROUTING -s 192.168.10.0/23 -d 10.100.0.0/16 -j MASQUERADE
iptables -t nat -D POSTROUTING -s 192.168.10.0/23 -d 10.96.0.0/16 -j MASQUERADE
# or list the rules with their numbers and delete by number (here rule 4):
iptables -t nat -nvL --line-numbers
iptables -t nat -D POSTROUTING 4

# On the office edge router, add static routes that send the k8s pod and service CIDRs to k8s-node2
ip route 10.100.0.0 255.255.0.0   192.168.10.202
ip route 10.96.0.0  255.240.0.0   192.168.10.202
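
# If the office gateway happens to be a Linux box rather than a router with Cisco-style
# syntax, the equivalent routes would be (255.255.0.0 = /16, 255.240.0.0 = /12, same next hop):
ip route add 10.100.0.0/16 via 192.168.10.202
ip route add 10.96.0.0/12 via 192.168.10.202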


# Option 1: the simplest approach is to run the internal DNS on node-30 (192.168.10.202) itself, so it can always reach kube-dns at 10.96.0.10
# kubectl get svc -n kube-system | grep kube-dns
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP   20d
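
# A quick sanity check from that node: query kube-dns directly for a name that exists in every cluster
nslookup -q=A kubernetes.default.svc.cluster.local 10.96.0.10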

# Option 2: in our lab the internal DNS (IP 10.60.20.1) is not on node-30, so we only need to open a path from 10.60.20.1 to the svc CIDR 10.96.0.0/12
# On the internal DNS host (IP 10.60.20.1), add a static route
route add -net 10.96.0.0/12 gw 192.168.10.202
# On node-30 (IP 192.168.10.202), SNAT the DNS host's traffic into the svc CIDR
iptables -t nat -A POSTROUTING -s 10.60.20.1/32 -d 10.96.0.0/12 -j MASQUERADE
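
# While the DNS host sends a test query, confirm on node-30 that the MASQUERADE rule
# above is actually being hit by watching its packet counters:
iptables -t nat -nvL POSTROUTING | grep 10.96.0.0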


# nginx L4 proxy (forwards port 53 on the node to kube-dns via ingress-nginx)
kubectl create namespace dns-l4
# Label the node so the proxy is scheduled onto it
kubectl label nodes k8s-node2 node=dns-l4
kubectl apply -f dns-l4.yaml
# After the deployment is up, verify from your workstation that it works:
nslookup -q=A kube-dns.kube-system.svc.cluster.local  192.168.10.202
nslookup -q=A my-app-ip.default.svc.cluster.local  192.168.10.202

# Here we use the lightweight dnsmasq as the internal DNS configuration example, forwarding *.cluster.local lookups from the office network to the cluster DNS exposed by the L4 proxy on 192.168.10.202:
# vim /etc/dnsmasq.conf
strict-order
listen-address=10.96.0.10
bogus-nxdomain=223.5.5.5
server=/cluster.local/192.168.10.202
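
# After restarting dnsmasq, any office machine that uses this internal DNS as its resolver
# should resolve cluster names transparently. Assuming the listen address above (10.96.0.10)
# is reachable from the office network, a direct test looks like:
nslookup -q=A my-app-ip.default.svc.cluster.local 10.96.0.10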

Another approach is to use shadowsocks to tunnel into the k8s network; see the demo below.

kubectl label nodes k8s-node2 some-condition=true
kubectl apply -f ss.yaml

ss.yaml

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: shadowsocks-deployment
  namespace: default
  labels:
    app: ssserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ssserver
  template:
    metadata:
      labels:
        app: ssserver
    spec:
      containers:
        - name: ssserver-container
          image: shadowsocks/shadowsocks-libev:v3.2.0
          command: [
            # "-d" sets the DNS servers ss-server resolves with; point it at the cluster's kube-dns IP
            "ss-server", "-p", "8388", "-t", "300", "-k", "xiaofafn@1", "-m", "aes-256-cfb", "--fast-open", "-d", "10.96.0.10,8.8.8.8", "-u"
          ]
          ports:
            - containerPort: 8388
              protocol: TCP
              name: tcp
            - containerPort: 8388
              protocol: UDP
              name: udp
      nodeSelector:
        "some-condition": "true"
---
apiVersion: v1
kind: Service
metadata:
  name: socks-proxy-svc
  namespace: default
spec:
  type: NodePort
  ports:
    - port: 8388
      targetPort: 8388
      nodePort: 32088
      protocol: TCP
      name: tcp
    - port: 8388
      targetPort: 8388
      nodePort: 32088
      protocol: UDP
      name: udp
  selector:
    app: ssserver
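
Once the ss-server pod and the NodePort Service above are running, a shadowsocks client on the office network can connect through the node. A minimal client-side sketch, assuming the node address 192.168.10.202, the NodePort 32088 and the password/cipher from ss.yaml (the file name and local port are just examples):

# write an ss-local config pointing at the NodePort
cat > ss-local.json <<'EOF'
{
    "server": "192.168.10.202",
    "server_port": 32088,
    "local_address": "127.0.0.1",
    "local_port": 1080,
    "password": "xiaofafn@1",
    "method": "aes-256-cfb",
    "mode": "tcp_and_udp"
}
EOF
ss-local -c ss-local.json
# then point applications at the SOCKS5 proxy 127.0.0.1:1080; clients that resolve names
# through the proxy (remote DNS) get *.cluster.local answered via kube-dns thanks to the
# "-d 10.96.0.10" flag passed to ss-server above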

vi dns-l4.yaml

kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: dns-l4
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: dns-l4
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
data:
  53: "kube-system/kube-dns:53"

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: dns-l4
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
data:
  53: "kube-system/kube-dns:53"
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: dns-l4
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses/status
    verbs:
      - update

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: dns-l4
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: dns-l4
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: dns-l4

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: dns-l4

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: dns-l4
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      nodeSelector:
        node: dns-l4
      hostNetwork: true
      serviceAccountName: nginx-ingress-serviceaccount
      tolerations:
      - key: "forward"
        operator: "Exists"
        effect: "NoSchedule"
      containers:
        - name: nginx-ingress-controller
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            runAsUser: 33
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
            - name: https
              containerPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
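
# Once the controller pod is running (it uses hostNetwork), nginx should be listening on
# port 53 of k8s-node2; a quick check on the node:
ss -lntup | grep ':53 '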