Service discovery inside a k8s cluster is implemented via DNS, so the cluster depends on a DNS service.

1. Create the coredns.yaml file:

[root@linux-node1 ~]# cat > coredns.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors                     # log errors to stdout
        health                     # liveness endpoint on :8080/health
        # answer queries for the cluster.local zone and the reverse zones;
        # fall through for reverse lookups CoreDNS cannot answer itself
        kubernetes cluster.local. in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153           # expose Prometheus metrics on :9153
        proxy . /etc/resolv.conf   # forward out-of-cluster names to the node's resolvers
        cache 30                   # cache responses for 30 seconds
    }
---
apiVersion: extensions/v1beta1  # deprecated; use apps/v1 on Kubernetes 1.16+
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
      - name: coredns
        image: coredns/coredns:1.0.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 10.1.0.2  #!!!!!!! Key point !!!!!!!
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

EOF
# Note: the clusterIP must sit inside the cluster's service IP range, and it must match the --cluster-dns address configured on every kubelet.
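
What ties this VIP to the Pods' resolv.conf is kubelet: kubelet injects its --cluster-dns address into every Pod it starts. A minimal sketch of the relevant kubelet options, assuming this tutorial's service range (the flag names are kubelet's own; where they are set varies by installation):

--cluster-dns=10.1.0.2          # must equal the coredns Service clusterIP above
--cluster-domain=cluster.local  # must match the zone in the Corefile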

2. Create the resources:

[root@linux-node1 ~]# kubectl create -f coredns.yaml
deployment.extensions "coredns" created
Error from server (AlreadyExists): error when creating "coredns.yaml": serviceaccounts "coredns" already exists
Error from server (AlreadyExists): error when creating "coredns.yaml": clusterroles.rbac.authorization.k8s.io "system:coredns" already exists
Error from server (AlreadyExists): error when creating "coredns.yaml": clusterrolebindings.rbac.authorization.k8s.io "system:coredns" already exists
Error from server (AlreadyExists): error when creating "coredns.yaml": configmaps "coredns" already exists
Error from server (Invalid): error when creating "coredns.yaml": Service "coredns" is invalid: spec.clusterIP: Invalid value: "10.1.0.2": provided IP is already allocated

# Ignore the errors above; they only appear because I had already installed CoreDNS.
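
If you expect to re-run the manifest, kubectl apply is the idempotent alternative to kubectl create: it creates missing objects and updates existing ones instead of failing with AlreadyExists:

[root@linux-node1 ~]# kubectl apply -f coredns.yaml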

3. Check the results:

# Note: components and services that belong to k8s itself run in the kube-system namespace, which is why -n kube-system is given here.
[root@linux-node1 ~]# kubectl get deployment -n kube-system
NAME                   DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
coredns                2         2         2            2           12m


# Check the Service
[root@linux-node1 ~]# kubectl get service -n kube-system
NAME                   TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
coredns                ClusterIP   10.1.0.2      <none>        53/UDP,53/TCP   32m

# Inspect this cluster IP through LVS (kube-proxy is running in IPVS mode)
[root@linux-node2 ~]# ipvsadm -Ln | grep -v ":80"
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.1.0.1:443 rr persistent 10800
  -> 192.168.56.11:6443           Masq    1      2          0  
TCP  10.1.0.2:53 rr
  -> 10.2.70.216:53               Masq    1      0          0
  -> 10.2.97.209:53               Masq    1      0          0
UDP  10.1.0.2:53 rr
  -> 10.2.70.216:53               Masq    1      0          0
  -> 10.2.97.209:53               Masq    1      0          0
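
To look at just the DNS virtual service instead of grepping the whole table, ipvsadm can list a single entry (purely a convenience):

[root@linux-node2 ~]# ipvsadm -Ln -t 10.1.0.2:53   # the TCP entry
[root@linux-node2 ~]# ipvsadm -Ln -u 10.1.0.2:53   # the UDP entry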

# LVS balances the VIP across two Pods; list them:
[root@linux-node1 ~]# kubectl get pod -n kube-system -o wide
NAME                                    READY     STATUS    RESTARTS   AGE       IP            NODE
coredns-77c989547b-55kjb                1/1       Running   0          19m       10.2.97.209   192.168.56.13
coredns-77c989547b-qvfw4                1/1       Running   0          19m       10.2.70.216   192.168.56.12

4. Verify the result:

Start a shell in a running container:

# The container has been given the DNS service's VIP as its nameserver
[root@linux-node1 ~]# kubectl exec -it nginx-deployment-75d56bb955-b4z4k /bin/sh
# cat /etc/resolv.conf
nameserver 10.1.0.2
search default.svc.cluster.local. svc.cluster.local. cluster.local. example.com
options ndots:5
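
A quick functional test is to resolve a Service name from inside the Pod; this assumes the image ships nslookup (busybox does, a slim nginx image may not):

# nslookup kubernetes.default   # should resolve to the API Service IP, 10.1.0.1 here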

The main points, in summary:

  • In a k8s cluster, services run inside Pods, so we face two problems: discovering those Pods and load-balancing across their replicas.
  • A Service solves both, but reaching the Service still requires its IP, which in turn introduces the problem of Service discovery.
  • Thanks to the coredns add-on, Services inside the cluster can be reached by domain name, which solves Service discovery.
  • So that containers in a Pod can use coredns to resolve names, k8s rewrites each container's /etc/resolv.conf.

With these mechanisms in place, a Pod can conveniently reach any service by its Service name and namespace, in the name forms sketched below.
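
For a hypothetical Service myservice in namespace myns (placeholder names, not objects from this tutorial), the resolvable forms are:

myservice                          # from Pods in the same namespace, via the search list
myservice.myns                     # from Pods in any namespace
myservice.myns.svc.cluster.local   # the fully qualified name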

Dashboard

Deploy the dashboard:

[root@linux-node1 ~]# kubectl create -f dashboard/
# View cluster details
[root@linux-node1 ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.56.11:6443
CoreDNS is running at https://192.168.56.11:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy
kubernetes-dashboard is running at https://192.168.56.11:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.


[root@linux-node1 src]# kubectl get services kubernetes-dashboard -n kube-system
NAME                   TYPE       CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.1.27.148   <none>        443:28966/TCP   3d
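
Because the Service type is NodePort (the 443:28966/TCP mapping above), the dashboard is reachable over HTTPS on that port of any node; expect a self-signed-certificate warning in the browser:

https://192.168.56.12:28966/   # or any other node's IP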

Log in:

Get a login token:

[root@linux-node1 src]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
# Copy the token value from the output and paste it into the dashboard login page.
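
The command above assumes an admin-user ServiceAccount already exists. If it does not, the usual recipe from the dashboard documentation is a ServiceAccount bound to the cluster-admin ClusterRole; a minimal sketch:

[root@linux-node1 ~]# cat > admin-user.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
[root@linux-node1 ~]# kubectl create -f admin-user.yaml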