1. Create a test deployment

[root@linux-node1 ~]# kubectl run net-test --image=alpine --replicas=3 sleep 360000
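
Note: on newer Kubernetes releases, kubectl run only creates a bare Pod and no longer accepts --replicas. A rough equivalent on such clusters (a sketch, not part of the original session) would be:

[root@linux-node1 ~]# kubectl create deployment net-test --image=alpine -- sleep 360000
[root@linux-node1 ~]# kubectl scale deployment net-test --replicas=3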

2. Check the result

[root@linux-node1 ~]# kubectl get pods -o wide
NAME                        READY     STATUS    RESTARTS   AGE       IP            NODE
net-test-5767cb94df-6crmr   1/1       Running   0          17s       10.2.97.176   192.168.56.13
net-test-5767cb94df-9mpmb   1/1       Running   0          17s       10.2.70.187   192.168.56.12
net-test-5767cb94df-x8l44   1/1       Running   0          17s       10.2.97.175   192.168.56.13

# kubectl has created a deployment named net-test from the alpine image, with 3 replicas
[root@linux-node1 ~]# kubectl get deployments
NAME       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
net-test   3         3         3            3           7m

3. Test connectivity

[root@linux-node1 ~]# ping 10.2.97.176
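
Pinging from the master only proves host-to-pod reachability. To check pod-to-pod connectivity across nodes, you can exec into one of the test pods (names taken from the listing above) and ping a pod running on the other node:

[root@linux-node1 ~]# kubectl exec net-test-5767cb94df-6crmr -- ping -c 3 10.2.70.187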

Deploying an application from a YAML file

1. Write an nginx-deployment.yaml file:

[root@linux-node1 ~]# cat >  nginx-deployment.yaml  << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.10.3
        ports:
        - containerPort: 80
EOF
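
Optionally, the manifest can be checked before anything is created; in kubectl of this era a client-side dry run just prints the object that would be submitted:

[root@linux-node1 ~]# kubectl create -f nginx-deployment.yaml --dry-run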

2. Create it:

[root@linux-node1 ~]# kubectl create -f nginx-deployment.yaml
deployment.apps "nginx-deployment" created

# View the deployment. Note: the manifest requested 3 replicas; the output below shows 10 because the deployment was scaled up afterwards (see the ScalingReplicaSet events in the describe output)
[root@linux-node1 ~]# kubectl get deployment/nginx-deployment
NAME               DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   10        10        10           10          1m

# View deployment details
[root@linux-node1 ~]# kubectl describe deployment/nginx-deployment
Name:                   nginx-deployment
Namespace:              default
CreationTimestamp:      Thu, 31 May 2018 20:20:17 +0800
Labels:                 app=nginx
Annotations:            deployment.kubernetes.io/revision=1
Selector:               app=nginx
Replicas:               10 desired | 10 updated | 10 total | 10 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=nginx
  Containers:
   nginx:
    Image:        nginx:1.10.3
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   nginx-deployment-75d56bb955 (10/10 replicas created)
Events:
  Type    Reason             Age               From                   Message
  ----    ------             ----              ----                   -------
  Normal  ScalingReplicaSet  5m                deployment-controller  Scaled up replica set nginx-deployment-75d56bb955 to 3
  Normal  ScalingReplicaSet  1m (x2 over 3m)   deployment-controller  Scaled down replica set nginx-deployment-75d56bb955 to 3
  Normal  ScalingReplicaSet  57s (x3 over 5m)  deployment-controller  Scaled up replica set nginx-deployment-75d56bb955 to 10

# View the pods: the 10 replicas (after the scale-up noted above) have been scheduled across the different nodes
[root@linux-node1 ~]# kubectl get pod -o wide
NAME                                READY     STATUS    RESTARTS   AGE       IP            NODE
net-test-5767cb94df-6crmr           1/1       Running   0          40m       10.2.97.178   192.168.56.13
net-test-5767cb94df-9mpmb           1/1       Running   0          40m       10.2.70.187   192.168.56.12
net-test-5767cb94df-x8l44           1/1       Running   0          40m       10.2.97.177   192.168.56.13
nginx-deployment-75d56bb955-255w2   1/1       Running   0          3m        10.2.70.196   192.168.56.12
nginx-deployment-75d56bb955-5ckvs   1/1       Running   0          8m        10.2.70.188   192.168.56.12
nginx-deployment-75d56bb955-7dq87   1/1       Running   0          3m        10.2.70.198   192.168.56.12
nginx-deployment-75d56bb955-8vl7p   1/1       Running   0          3m        10.2.97.191   192.168.56.13
nginx-deployment-75d56bb955-9f9ms   1/1       Running   0          3m        10.2.97.189   192.168.56.13
nginx-deployment-75d56bb955-9g9bk   1/1       Running   0          3m        10.2.97.188   192.168.56.13
nginx-deployment-75d56bb955-9kz2r   1/1       Running   0          3m        10.2.70.197   192.168.56.12
nginx-deployment-75d56bb955-s78gs   1/1       Running   0          8m        10.2.70.189   192.168.56.12
nginx-deployment-75d56bb955-v8n5v   1/1       Running   0          3m        10.2.97.190   192.168.56.13
nginx-deployment-75d56bb955-zfb4m   1/1       Running   0          8m        10.2.97.179   192.168.56.13
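
Since the net-test pods are mixed into this listing, the label from the manifest can be used to filter for the nginx pods only:

[root@linux-node1 ~]# kubectl get pods -l app=nginx -o wide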

# View details of a single pod
[root@linux-node1 ~]# kubectl describe pod/nginx-deployment-75d56bb955-255w2
Name:           nginx-deployment-75d56bb955-255w2
Namespace:      default
Node:           192.168.56.12/192.168.56.12
Start Time:     Thu, 31 May 2018 20:24:31 +0800
Labels:         app=nginx
                pod-template-hash=3181266511
Annotations:    <none>
Status:         Running
IP:             10.2.70.196
Controlled By:  ReplicaSet/nginx-deployment-75d56bb955
Containers:
  nginx:
    Container ID:   docker://48ad80730efd6d744ac6d31b34778eb8e75b0f4c6698e1c1fc8f6046a6a77b2b
    Image:          nginx:1.10.3
    Image ID:       docker-pullable://nginx@sha256:6202beb06ea61f44179e02ca965e8e13b961d12640101fca213efbfd145d7575
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Thu, 31 May 2018 20:24:33 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-5htws (ro)
Conditions:
  Type           Status
  Initialized    True
  Ready          True
  PodScheduled   True
Volumes:
  default-token-5htws:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-5htws
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     <none>
Events:
  Type    Reason                 Age   From                    Message
  ----    ------                 ----  ----                    -------
  Normal  SuccessfulMountVolume  3m    kubelet, 192.168.56.12  MountVolume.SetUp succeeded for volume "default-token-5htws"
  Normal  Pulled                 3m    kubelet, 192.168.56.12  Container image "nginx:1.10.3" already present on machine
  Normal  Created                3m    kubelet, 192.168.56.12  Created container
  Normal  Started                3m    kubelet, 192.168.56.12  Started container
  Normal  Scheduled              2m    default-scheduler       Successfully assigned nginx-deployment-75d56bb955-255w2 to 192.168.56.12


# Access the Nginx pod
[root@linux-node1 ~]# curl -I http://10.2.70.196
HTTP/1.1 200 OK
Server: nginx/1.10.3
Date: Thu, 31 May 2018 12:30:45 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
Connection: keep-alive
ETag: "5890a6b7-264"
Accept-Ranges: bytes

# Update the deployment
[root@linux-node1 ~]# kubectl set image  deployment/nginx-deployment nginx=nginx:1.12.2 --record
deployment.apps "nginx-deployment" image updated

# The (rolling) update has completed successfully
[root@linux-node1 ~]# kubectl get deployment/nginx-deployment -o wide
NAME               DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE       CONTAINERS   IMAGES         SELECTOR
nginx-deployment   10        10        10           10          17m       nginx        nginx:1.12.2   app=nginx
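
To watch a rolling update while it is still in progress, kubectl can block until the rollout completes (or fails):

[root@linux-node1 ~]# kubectl rollout status deployment/nginx-deployment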

[root@linux-node1 ~]# curl -I 10.2.97.192
HTTP/1.1 200 OK
Server: nginx/1.12.2   # the version has been updated successfully
Date: Thu, 31 May 2018 12:35:54 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 11 Jul 2017 13:29:18 GMT
Connection: keep-alive
ETag: "5964d2ae-264"
Accept-Ranges: bytes

# View the deployment rollout history
[root@linux-node1 ~]# kubectl rollout history deployment/nginx-deployment
deployments "nginx-deployment"
REVISION  CHANGE-CAUSE
1         <none> # <none> because --record was not passed when the nginx deployment was originally created
2         kubectl set image deployment/nginx-deployment nginx=nginx:1.12.2 --record=true

# View the details of a revision (--revision=REVISION_NUM)
[root@linux-node1 ~]# kubectl rollout history deployment/nginx-deployment --revision=1
deployments "nginx-deployment" with revision #1
Pod Template:
  Labels:   app=nginx
    pod-template-hash=3181266511
  Containers:
   nginx:
    Image:  nginx:1.10.3
    Port:   80/TCP
    Host Port:  0/TCP
    Environment:    <none>
    Mounts: <none>
  Volumes:  <none>

# Roll back to the previous revision:
[root@linux-node1 ~]# kubectl rollout undo deployment/nginx-deployment
deployment.apps "nginx-deployment"

[root@linux-node1 ~]# kubectl get deployment/nginx-deployment -o wide
NAME               DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE       CONTAINERS   IMAGES         SELECTOR
nginx-deployment   10        10        10           10          26m       nginx        nginx:1.10.3   app=nginx
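
By default, kubectl rollout undo steps back one revision. To jump to a specific revision from the history shown above, use --to-revision, e.g. to return to the nginx:1.12.2 revision:

[root@linux-node1 ~]# kubectl rollout undo deployment/nginx-deployment --to-revision=2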

# Scale up the number of pods
[root@linux-node1 ~]# kubectl scale --replicas=20 deployment nginx-deployment
deployment.extensions "nginx-deployment" scaled

[root@linux-node1 ~]# kubectl get deployment/nginx-deployment -o wide
NAME               DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE       CONTAINERS   IMAGES         SELECTOR
nginx-deployment   20        20        20           20          28m       nginx        nginx:1.10.3   app=nginx
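
Besides manual scaling, a HorizontalPodAutoscaler can adjust the replica count automatically. A sketch only: this requires a metrics source in the cluster (heapster on clusters of this vintage, metrics-server on newer ones):

[root@linux-node1 ~]# kubectl autoscale deployment nginx-deployment --min=10 --max=30 --cpu-percent=80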

That raises a few questions:

  1. Manually looking up the pod IP every time is unworkable; we can't be expected to change the program or its configuration whenever we want to reach the service, so how do we know the pod IP in advance?
  2. A pod may be recreated while running and get a new IP; how do we deal with that?
  3. How do we load-balance across multiple pods?

These problems are exactly what a Kubernetes Service solves.

A Service wraps the pod IPs behind a stable virtual IP: even if a pod is rebuilt, the application can still be reached through the Service. A Service also load-balances across the pods; you can hit the Service several times and then check the nginx pods' access logs with kubectl logs to confirm this (see the sketch after the VIP test below).

Create a Service for the nginx deployment

1. Write an nginx-service.yaml file

[root@linux-node1 ~]# cat > nginx-service.yaml << EOF
kind: Service 
apiVersion: v1
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
EOF

2. Create it:

[root@linux-node1 ~]# kubectl create -f nginx-service.yaml
service "nginx-service" created

[root@linux-node1 ~]# kubectl get service -o wide
NAME            TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE       SELECTOR
kubernetes      ClusterIP   10.1.0.1       <none>        443/TCP   3d        <none>
nginx-service   ClusterIP   10.1.146.223   <none>        80/TCP    30s       app=nginx
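
The Service selects its backends through the app=nginx label. To confirm which pod IPs currently back the VIP, list the Service's Endpoints:

[root@linux-node1 ~]# kubectl get endpoints nginx-service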

# Access the VIP
[root@linux-node1 ~]# curl -I http://10.1.146.223
HTTP/1.1 200 OK
Server: nginx/1.10.3
Date: Thu, 31 May 2018 12:54:54 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 31 Jan 2017 15:01:11 GMT
Connection: keep-alive
ETag: "5890a6b7-264"
Accept-Ranges: bytes
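
To confirm the load balancing described earlier, hit the VIP a number of times and then inspect the access log of an individual pod. <nginx-pod-name> is a placeholder; substitute any name from kubectl get pods, since the pods have been recreated by the update/rollback/scale steps:

[root@linux-node1 ~]# for i in $(seq 1 20); do curl -s -o /dev/null http://10.1.146.223; done
[root@linux-node1 ~]# kubectl logs <nginx-pod-name> | tail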

# Inspect LVS on node 2: Kubernetes implements the Service load balancing by programming LVS (kube-proxy in IPVS mode)
[root@linux-node2 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.1.0.1:443 rr persistent 10800
  -> 192.168.56.11:6443           Masq    1      1          0
TCP  10.1.146.223:80 rr
  -> 10.2.70.204:80               Masq    1      0          0
  -> 10.2.70.205:80               Masq    1      0          1
  -> 10.2.70.206:80               Masq    1      0          0
  -> 10.2.70.207:80               Masq    1      0          0
  -> 10.2.70.208:80               Masq    1      0          0
  -> 10.2.70.209:80               Masq    1      0          0
  -> 10.2.70.210:80               Masq    1      0          0
  -> 10.2.70.211:80               Masq    1      0          0
  -> 10.2.70.212:80               Masq    1      0          0
  -> 10.2.70.213:80               Masq    1      0          0
  -> 10.2.97.197:80               Masq    1      0          0
  -> 10.2.97.198:80               Masq    1      0          0
  -> 10.2.97.199:80               Masq    1      0          1
  -> 10.2.97.200:80               Masq    1      0          0
  -> 10.2.97.201:80               Masq    1      0          0
  -> 10.2.97.202:80               Masq    1      0          1
  -> 10.2.97.203:80               Masq    1      0          1
  -> 10.2.97.204:80               Masq    1      0          0
  -> 10.2.97.205:80               Masq    1      0          0
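
Finally, if cluster DNS (kube-dns/CoreDNS) is deployed, pods can reach the Service by name instead of by VIP. A quick check from one of the net-test pods (busybox wget ships in the alpine image); this assumes DNS is installed, which this walkthrough has not covered:

[root@linux-node1 ~]# kubectl exec net-test-5767cb94df-6crmr -- wget -qO- http://nginx-service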