Deploy the Kubernetes Master node from source:

k8s-master     10.0.0.10      k8s-master    etcd, kube-apiserver, kube-controller-manager, kube-scheduler
k8s-node-01    10.0.0.20      k8s-node      kubelet, docker, kube-proxy
k8s-node-02    10.0.0.30      k8s-node      kubelet, docker, kube-proxy
k8s-manager    10.0.0.40      k8s-manager   harbor, nfs

Deploy the Docker environment: (k8s-master node)

[root@one ~]# hostnamectl set-hostname k8s-master
Install the docker-1.12.6 packages: (k8s-master node)
yum -y install epel-release
mkdir /home/tools && cd /home/tools
wget https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.6-1.el7.centos.x86_64.rpm
wget https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-selinux-1.12.6-1.el7.centos.noarch.rpm
yum -y localinstall docker-engine-*
Enable kernel IP forwarding and raise the file-descriptor limit: (k8s-master node)
# Without global IP forwarding enabled in the kernel, the flannel network breaks after a network restart and containers cannot communicate
echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
sysctl -p /etc/sysctl.conf
# Make the new limit permanent, and also raise it for the current session
echo '* soft nofile  65535' >> /etc/security/limits.conf
echo '* hard nofile 65535' >> /etc/security/limits.conf
ulimit -SHn 65535
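An optional check that both settings are in effect for the current session:
sysctl net.ipv4.ip_forward    # should print net.ipv4.ip_forward = 1
ulimit -n                     # should print 65535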
Configure the Docker daemon file: (k8s-master node)
# Line 1 moves Docker's data directory to /data/docker
# Line 2 selects the overlay storage driver
# Line 3 lists the insecure (private) registries that may be used without trusted TLS
# Line 4 sets the container bridge address/subnet (bip)
# Line 5 hands container cgroup management to systemd
# Line 6 (live-restore) keeps containers running while the daemon itself is down or restarting
mkdir -p /data/docker
mkdir -p /etc/docker
vim /etc/docker/daemon.json

Note: the container subnet must be different on every host; here the subnets are derived from the physical host addresses 10/20/30/40 (172.16.10.0/24 on 10.0.0.10, and so on).

{
  "graph": "/data/docker",
  "storage-driver": "overlay",
  "insecure-registries": ["registry.access.redhat.com","quay.io","10.0.0.40"],
  "bip": "172.16.10.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
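Before (re)starting Docker it is worth making sure the JSON parses cleanly; a minimal sketch using the json.tool module from the system Python:
python -m json.tool /etc/docker/daemon.json    # prints the parsed JSON, or an error position if the file is malformed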
Start the Docker service: (k8s-master node)
systemctl restart docker.service
systemctl enable docker.service
ll -sh /data/docker/
total 0
0 drwx------ 2 root root  6 May 18 02:42 containers
0 drwx------ 3 root root 21 May 18 02:42 image
0 drwxr-x--- 3 root root 19 May 18 02:42 network
0 drwx------ 2 root root  6 May 18 02:42 overlay
0 drwx------ 2 root root  6 May 18 02:42 swarm
0 drwx------ 2 root root  6 May 18 02:42 tmp
0 drwx------ 2 root root  6 May 18 02:42 trust
0 drwx------ 2 root root 25 May 18 02:42 volumes
Set the hostname: (k8s-manager node)
[root@four ~]# hostnamectl set-hostname k8s-manager
Set up passwordless SSH to the k8s cluster nodes: (k8s-manager node)
[root@k8s-manager ~]# ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa -C "olda"
[root@k8s-manager ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub root@10.0.0.10
[root@k8s-manager ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub root@10.0.0.20
[root@k8s-manager ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub root@10.0.0.30
Download the cfssl tools: (k8s-manager node)
[root@k8s-manager ~]# mkdir -p /home/tools && cd /home/tools
[root@k8s-manager tools]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-manager tools]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@k8s-manager tools]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@k8s-manager tools]# chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
[root@k8s-manager tools]# mv cfssl_linux-amd64 /usr/local/bin/cfssl
[root@k8s-manager tools]# mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@k8s-manager tools]# mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
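A quick check that the binaries are on PATH and executable:
[root@k8s-manager tools]# cfssl version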
Prepare the CA root certificate configs: (k8s-manager node)
[root@k8s-manager ~]# mkdir -p /application/certs
[root@k8s-manager ~]# vim /application/certs/ca-config.json
{
    "signing": {
        "default": {
            "expiry": "175200h"
        },
        "profiles": {
            "server": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
[root@k8s-manager ~]# vim /application/certs/ca-csr.json 
{
    "CN": "kubernetes-ca",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shanghai",
            "O": "olda",
            "OU": "ops"
        }
    ],
    "ca": {
        "expiry": "175200h"
    }
}
Generate the CA certificate: (k8s-manager node)
[root@k8s-manager ~]# cd /application/certs
[root@k8s-manager certs]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
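This writes ca.pem, ca-key.pem, and ca.csr into /application/certs; cfssl-certinfo (installed above) can confirm the CN and the 20-year (175200h) expiry:
[root@k8s-manager certs]# cfssl-certinfo -cert ca.pem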

Deploy the Etcd service:

Write the etcd certificate request file: (k8s-manager node)
[root@k8s-manager certs]# vim /application/certs/etcd-peer-csr.json 
{
    "CN": "etcd-peer",
    "hosts": [
        "10.0.0.10",
        "127.0.0.1"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shanghai",
            "O": "olda",
            "OU": "ops"
        }
    ]
}
[root@k8s-manager certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json | cfssljson -bare etcd-peer
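Before shipping the certificate to the master, it can be worth confirming the host list; the JSON printed by cfssl-certinfo includes a "sans" field that should list 10.0.0.10 and 127.0.0.1:
[root@k8s-manager certs]# cfssl-certinfo -cert etcd-peer.pem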
Download the etcd package: (k8s-master node)
[root@k8s-master ~]# cd /home/tools
[root@k8s-master tools]# wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz
Unpack the etcd package: (k8s-master node)
[root@k8s-master tools]# mkdir -p /application
[root@k8s-master tools]# tar xf etcd-v3.3.10-linux-amd64.tar.gz -C /application/
[root@k8s-master tools]# mv /application/etcd-v3.3.10-linux-amd64/ /application/etcd-3.3.10
[root@k8s-master tools]# ln -s /application/etcd-3.3.10/ /application/etcd
Create the etcd user and directories: (k8s-master node)
[root@k8s-master ~]# mkdir /application/etcd/{data,certs,log}
[root@k8s-master ~]# useradd -s /sbin/nologin -M etcd
Copy the etcd certificates into place: (k8s-manager node)
[root@k8s-manager ~]# cd /application/certs/
[root@k8s-manager certs]# scp ca.pem etcd-peer.pem etcd-peer-key.pem root@10.0.0.10:/application/etcd/certs/
Write the etcd startup script: (k8s-master node)
[root@k8s-master ~]# vim /application/etcd/etcd-server.sh
#!/bin/sh
./etcd --name etcd-server \
       --data-dir ./data \
       --listen-peer-urls https://10.0.0.10:2380 \
       --listen-client-urls https://10.0.0.10:2379,http://127.0.0.1:2379 \
       --quota-backend-bytes 8000000000 \
       --initial-advertise-peer-urls https://10.0.0.10:2380 \
       --advertise-client-urls https://10.0.0.10:2379,http://127.0.0.1:2379 \
       --initial-cluster  etcd-server=https://10.0.0.10:2380 \
       --ca-file ./certs/ca.pem \
       --cert-file ./certs/etcd-peer.pem \
       --key-file ./certs/etcd-peer-key.pem \
       --client-cert-auth  \
       --trusted-ca-file ./certs/ca.pem \
       --peer-ca-file ./certs/ca.pem \
       --peer-cert-file ./certs/etcd-peer.pem \
       --peer-key-file ./certs/etcd-peer-key.pem \
       --peer-client-cert-auth \
       --peer-trusted-ca-file ./certs/ca.pem \
       --log-output stdout
[root@k8s-master ~]# chmod 700 /application/etcd/etcd-server.sh
[root@k8s-master ~]# chown -R etcd.etcd /application/etcd/
Install supervisor: (k8s-master node)
[root@k8s-master ~]# yum -y install supervisor
[root@k8s-master ~]# systemctl start supervisord.service
[root@k8s-master ~]# systemctl enable supervisord.service
Create the supervisor program file: (k8s-master node)
[root@k8s-master ~]# vim /etc/supervisord.d/etcd-server.ini
[program:etcd-server]
command=/application/etcd/etcd-server.sh                        ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/application/etcd                                     ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; restart at unexpected quit (default: true)
startsecs=22                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                                       ; setuid to this UNIX account to run the program
redirect_stderr=false                                           ; redirect proc stderr to stdout (default false)
stdout_logfile=/application/etcd/log/etcd.stdout.log            ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
stderr_logfile=/application/etcd/log/etcd.stderr.log            ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                        ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false                                     ; emit events on stderr writes (default false)
Start etcd and check its status: (k8s-master node)
[root@k8s-master ~]# supervisorctl update
#etcd-server: added process group
[root@k8s-master ~]# supervisorctl status
#etcd-server                      RUNNING   pid 2426, uptime 0:00:31
[root@k8s-master ~]# cd /application/etcd && ./etcdctl  cluster-health
#member 3b17aaa147134dd is healthy: got healthy result from http://127.0.0.1:2379
#cluster is healthy
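This etcdctl defaults to the v2 API, so a simple set/get round-trip makes a reasonable smoke test (a sketch; /test is an arbitrary throwaway key):
[root@k8s-master etcd]# ./etcdctl set /test ok
[root@k8s-master etcd]# ./etcdctl get /test
[root@k8s-master etcd]# ./etcdctl rm /test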

Deploy the Apiserver component:

Download the Kubernetes server binary package: (k8s-master node)
[root@k8s-master ~]#  cd /home/tools/
[root@k8s-master tools]# wget https://dl.k8s.io/v1.13.5/kubernetes-server-linux-amd64.tar.gz
Unpack the server binary package: (k8s-master node)
[root@k8s-master tools]# tar xf kubernetes-server-linux-amd64.tar.gz -C /application/
[root@k8s-master tools]# mv /application/kubernetes/ /application/kubernetes-1.13.5
[root@k8s-master tools]# ln -s /application/kubernetes-1.13.5/ /application/kubernetes
[root@k8s-master tools]# mkdir /application/kubernetes/server/{conf,log,certs}
Clean up and link the executables: (k8s-master node)
[root@k8s-master ~]# rm -f /application/kubernetes/kubernetes-src.tar.gz
[root@k8s-master ~]# rm -f /application/kubernetes/server/bin/*.tar
[root@k8s-master ~]# rm -f /application/kubernetes/server/bin/*tag
[root@k8s-master ~]# ln -s /application/kubernetes/server/bin/kubectl /usr/local/bin/kubectl
Issue the client certificate: (k8s-manager node)
[root@k8s-manager ~]# cd /application/certs/
[root@k8s-manager certs]# vim /application/certs/client-csr.json
{
    "CN": "k8s-node",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shanghai",
            "O": "olda",
            "OU": "ops"
        }
    ]
}
[root@k8s-manager certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json | cfssljson -bare client
Issue the apiserver certificate: (k8s-manager node)
[root@k8s-manager certs]# vim /application/certs/apiserver-csr.json
# Host list for the apiserver's HTTPS certificate; reserve spare addresses to make scaling out nodes easier later
{
    "CN": "apiserver",
    "hosts": [
        "127.0.0.1",
        "192.168.0.1",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "10.0.0.10",
        "10.0.0.20",
        "10.0.0.30",
        "10.0.0.40"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shanghai",
            "O": "olda",
            "OU": "ops"
        }
    ]
}
[root@k8s-manager certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json | cfssljson -bare apiserver
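The same cfssl-certinfo check works here; the "sans" field should cover the service IP 192.168.0.1, the kubernetes.default* names, and the four host addresses:
[root@k8s-manager certs]# cfssl-certinfo -cert apiserver.pem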
Copy the certificates to the apiserver directory on the master: (k8s-manager node)
[root@k8s-manager ~]# cd /application/certs/
[root@k8s-manager certs]# scp ca.pem ca-key.pem client.pem client-key.pem apiserver.pem apiserver-key.pem root@10.0.0.10:/application/kubernetes/server/certs/
Write the apiserver audit policy file: (k8s-master node)
[root@k8s-master ~]# vi /application/kubernetes/server/conf/audit.yaml
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"
Write the apiserver startup script: (k8s-master node)
[root@k8s-master ~]# vim /application/kubernetes/server/bin/kube-apiserver.sh
#!/bin/bash
./kube-apiserver \
  --apiserver-count 1 \
  --secure-port=6443 \
  --allow-privileged=true \
  --audit-log-path ../log/audit-log \
  --audit-policy-file ../conf/audit.yaml \
  --authorization-mode RBAC \
  --client-ca-file ../certs/ca.pem \
  --requestheader-client-ca-file ../certs/ca.pem \
  --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  --etcd-cafile ../certs/ca.pem \
  --etcd-certfile ../certs/client.pem \
  --etcd-keyfile ../certs/client-key.pem \
  --etcd-servers https://10.0.0.10:2379 \
  --service-account-key-file ../certs/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --service-node-port-range 3000-29999 \
  --target-ram-mb=1024 \
  --kubelet-client-certificate ../certs/client.pem \
  --kubelet-client-key ../certs/client-key.pem \
  --log-dir  ../log/ \
  --tls-cert-file ../certs/apiserver.pem \
  --tls-private-key-file ../certs/apiserver-key.pem \
  --v 2
[root@k8s-master ~]# chmod 700 /application/kubernetes/server/bin/kube-apiserver.sh
Create the supervisor config: (k8s-master node)
[root@k8s-master ~]# vim /etc/supervisord.d/kube-apiserver.ini
[program:kube-apiserver]
command=/application/kubernetes/server/bin/kube-apiserver.sh    ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/application/kubernetes/server/bin                    ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; restart at unexpected quit (default: true)
startsecs=22                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                       ; setuid to this UNIX account to run the program
redirect_stderr=false                                           ; redirect proc stderr to stdout (default false)
stdout_logfile=/application/kubernetes/server/log/apiserver.stdout.log        ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
stderr_logfile=/application/kubernetes/server/log/apiserver.stderr.log        ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                        ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false                                     ; emit events on stderr writes (default false)
Start the apiserver and check its status: (k8s-master node)
[root@k8s-master ~]# supervisorctl update
# kube-apiserver: added process group
[root@k8s-master ~]# supervisorctl status
# etcd-server                      RUNNING   pid 2426, uptime 0:22:45
# kube-apiserver                   RUNNING   pid 5962, uptime 0:00:33
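kube-apiserver 1.13 still serves an insecure localhost port, 8080 by default (the controller-manager and scheduler below rely on it), so a quick health probe from the master is:
[root@k8s-master ~]# curl http://127.0.0.1:8080/healthz
# expected response: ok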

Deploy the Controller-manager component:

Write the controller-manager startup script: (k8s-master node)
[root@k8s-master ~]# vim /application/kubernetes/server/bin/kube-controller-manager.sh
#!/bin/sh
./kube-controller-manager \
  --cluster-cidr 172.16.0.0/16 \
  --leader-elect true \
  --log-dir ../log/ \
  --master http://127.0.0.1:8080 \
  --service-account-private-key-file ../certs/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --root-ca-file ../certs/ca.pem \
  --v 2
[root@k8s-master ~]# chmod 700 /application/kubernetes/server/bin/kube-controller-manager.sh
Create the supervisor config: (k8s-master node)
[root@k8s-master ~]# vim /etc/supervisord.d/kube-controller-manager.ini
[program:kube-controller-manager]
command=/application/kubernetes/server/bin/kube-controller-manager.sh             ; the program (relative uses PATH, can take args)
numprocs=1                                                                        ; number of processes copies to start (def 1)
directory=/application/kubernetes/server/bin                                      ; directory to cwd to before exec (def no cwd)
autostart=true                                                                    ; start at supervisord start (default: true)
autorestart=true                                                                  ; restart at unexpected quit (default: true)
startsecs=22                                                                      ; number of secs prog must stay running (def. 1)
startretries=3                                                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                         ; setuid to this UNIX account to run the program
redirect_stderr=false                                                             ; redirect proc stderr to stdout (default false)
stdout_logfile=/application/kubernetes/server/log/controller.stdout.log           ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                                       ; emit events on stdout writes (default false)
stderr_logfile=/application/kubernetes/server/log/controller.stderr.log           ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                                          ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false                                                       ; emit events on stderr writes (default false)
Start the controller-manager and check its status: (k8s-master node)
[root@k8s-master ~]# supervisorctl update
#kube-controller-manager: added process group
[root@k8s-master ~]# supervisorctl status
#etcd-server                      RUNNING   pid 2426, uptime 0:32:35
#kube-apiserver                   RUNNING   pid 5962, uptime 0:10:23
#kube-controller-manager          RUNNING   pid 6038, uptime 0:00:51

Deploy the Scheduler component:

Write the startup script: (k8s-master node)
[root@k8s-master ~]# vim /application/kubernetes/server/bin/kube-scheduler.sh
#!/bin/bash
./kube-scheduler \
  --leader-elect  \
  --log-dir ../log/ \
  --master http://127.0.0.1:8080 \
  --v 2
[root@k8s-master ~]# chmod 700 /application/kubernetes/server/bin/kube-scheduler.sh
Create the supervisor config: (k8s-master node)
[root@k8s-master ~]# vim /etc/supervisord.d/kube-scheduler.ini
[program:kube-scheduler]
command=/application/kubernetes/server/bin/kube-scheduler.sh             ; the program (relative uses PATH, can take args)
numprocs=1                                                               ; number of processes copies to start (def 1)
directory=/application/kubernetes/server/bin                             ; directory to cwd to before exec (def no cwd)
autostart=true                                                           ; start at supervisord start (default: true)
autorestart=true                                                         ; restart at unexpected quit (default: true)
startsecs=22                                                             ; number of secs prog must stay running (def. 1)
startretries=3                                                           ; max # of serial start failures (default 3)
exitcodes=0,2                                                            ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                          ; signal used to kill process (default TERM)
stopwaitsecs=10                                                          ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                ; setuid to this UNIX account to run the program
redirect_stderr=false                                                    ; redirect proc stderr to stdout (default false)
stdout_logfile=/application/kubernetes/server/log/scheduler.stdout.log   ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                              ; emit events on stdout writes (default false)
stderr_logfile=/application/kubernetes/server/log/scheduler.stderr.log   ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                                 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false                                              ; emit events on stderr writes (default false)
Start the scheduler and check its status: (k8s-master node)
[root@k8s-master ~]# supervisorctl update
# kube-scheduler: added process group
[root@k8s-master ~]# supervisorctl status
# etcd-server                      RUNNING   pid 2426, uptime 0:42:06
# kube-apiserver                   RUNNING   pid 5962, uptime 0:19:54
# kube-controller-manager          RUNNING   pid 6038, uptime 0:10:22
# kube-scheduler                   RUNNING   pid 6131, uptime 0:00:43
Check component status: (k8s-master node)
[root@k8s-master tools]# kubectl get cs
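With all three control-plane components up, the output should look roughly like this (a sketch, not captured output):
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health": "true"}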

Deploy the Kubernetes Node machines from source:

k8s-master     10.0.0.10      k8s-master    etcd, kube-apiserver, kube-controller-manager, kube-scheduler
k8s-node-01    10.0.0.20      k8s-node      kubelet, docker, kube-proxy
k8s-node-02    10.0.0.30      k8s-node      kubelet, docker, kube-proxy
k8s-manager    10.0.0.40      k8s-manager   harbor, nfs

Deploy the Docker environment: (k8s-node-01 and k8s-node-02 nodes)

[root@two ~]# hostnamectl set-hostname k8s-node-01
[root@three ~]# hostnamectl set-hostname k8s-node-02
Install the docker-1.12.6 packages: (k8s-node-01 and k8s-node-02 nodes)
yum -y install epel-release
mkdir /home/tools && cd /home/tools
wget https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.6-1.el7.centos.x86_64.rpm
wget https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-selinux-1.12.6-1.el7.centos.noarch.rpm
yum -y localinstall docker-engine-*
Enable kernel IP forwarding and raise the file-descriptor limit: (k8s-node-01 and k8s-node-02 nodes)
# Without global IP forwarding enabled in the kernel, the flannel network breaks after a network restart and containers cannot communicate
echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
sysctl -p /etc/sysctl.conf
# Make the new limit permanent, and also raise it for the current session
echo '* soft nofile  65535' >> /etc/security/limits.conf
echo '* hard nofile 65535' >> /etc/security/limits.conf
ulimit -SHn 65535
Configure the Docker daemon file: (k8s-node-01 and k8s-node-02 nodes)
# Line 1 moves Docker's data directory to /data/docker
# Line 2 selects the overlay storage driver
# Line 3 lists the insecure (private) registries that may be used without trusted TLS
# Line 4 sets the container bridge address/subnet (bip)
# Line 5 hands container cgroup management to systemd
# Line 6 (live-restore) keeps containers running while the daemon itself is down or restarting
mkdir -p /data/docker
mkdir -p /etc/docker
vim /etc/docker/daemon.json

Note: the container subnet must be different on every host; here the subnets are derived from the physical host addresses 10/20/30/40 (172.16.20.1/24 on k8s-node-01, 172.16.30.1/24 on k8s-node-02).

{
  "graph": "/data/docker",
  "storage-driver": "overlay",
  "insecure-registries": ["registry.access.redhat.com","quay.io","10.0.0.40"],
  "bip": "172.16.20.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
Start the Docker service: (k8s-node-01 and k8s-node-02 nodes)
systemctl start docker.service
systemctl enable docker.service
ll -sh /data/docker/
total 0
0 drwx------ 2 root root  6 May 18 02:42 containers
0 drwx------ 3 root root 21 May 18 02:42 image
0 drwxr-x--- 3 root root 19 May 18 02:42 network
0 drwx------ 2 root root  6 May 18 02:42 overlay
0 drwx------ 2 root root  6 May 18 02:42 swarm
0 drwx------ 2 root root  6 May 18 02:42 tmp
0 drwx------ 2 root root  6 May 18 02:42 trust
0 drwx------ 2 root root 25 May 18 02:42 volumes

Prepare the environment and issue certificates:

Download the Kubernetes server binary package: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# mkdir /application
[root@k8s-node-01 ~]# cd /home/tools/
[root@k8s-node-01 tools]# wget https://dl.k8s.io/v1.13.5/kubernetes-server-linux-amd64.tar.gz
Unpack the server binary package: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 tools]# tar xf kubernetes-server-linux-amd64.tar.gz -C /application/
[root@k8s-node-01 tools]# mv /application/kubernetes/ /application/kubernetes-1.13.5
[root@k8s-node-01 tools]# ln -s /application/kubernetes-1.13.5/ /application/kubernetes
[root@k8s-node-01 tools]# ln -s /application/kubernetes/server/bin/kubectl /usr/local/bin/kubectl
Clean up and link the executables: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# rm -f /application/kubernetes/kubernetes-src.tar.gz
[root@k8s-node-01 ~]# rm -f /application/kubernetes/server/bin/*.tar
[root@k8s-node-01 ~]# rm -f /application/kubernetes/server/bin/*tag
[root@k8s-node-01 ~]# ln -s /application/kubernetes/server/bin/kubelet /usr/local/bin/kubelet
[root@k8s-node-01 ~]# mkdir /application/kubernetes/server/{conf,log,certs}
Issue the server certificate: (k8s-manager node)
[root@k8s-manager ~]# cd /application/certs/
[root@k8s-manager certs]# vim /application/certs/kubelet-csr.json 
# Reserve a few extra IP addresses so nodes can be added later without reissuing the certificate
{
    "CN": "kubelet-node",
    "hosts": [
    "127.0.0.1",
    "10.0.0.10",
    "10.0.0.20",
    "10.0.0.30",
    "10.0.0.40"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shanghai",
            "O": "olda",
            "OU": "ops"
        }
    ]
}
[root@k8s-manager certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssljson -bare kubelet
Copy the certificates to the node machines: (k8s-manager node)
[root@k8s-manager ~]# cd /application/certs/
[root@k8s-manager certs]# scp ca.pem client.pem client-key.pem kubelet.pem kubelet-key.pem root@10.0.0.20:/application/kubernetes/server/certs/
[root@k8s-manager certs]# scp ca.pem client.pem client-key.pem kubelet.pem kubelet-key.pem root@10.0.0.30:/application/kubernetes/server/certs/

Deploy the kubelet component:

Create the cluster config, set-cluster: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# cd /application/kubernetes/server/conf/
[root@k8s-node-01 conf]# kubectl config set-cluster myk8s \
  --certificate-authority=../certs/ca.pem \
  --embed-certs=true \
  --server=https://10.0.0.10:6443 \
  --kubeconfig=kubelet.kubeconfig

#Cluster "myk8s" set.
Create the cluster config, set-credentials: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# cd /application/kubernetes/server/conf/
[root@k8s-node-01 conf]# kubectl config set-credentials k8s-node --client-certificate=../certs/client.pem \
  --client-key=../certs/client-key.pem --embed-certs=true --kubeconfig=kubelet.kubeconfig

#User "k8s-node" set.
Create the cluster config, set-context: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# cd /application/kubernetes/server/conf/
[root@k8s-node-01 conf]# kubectl config set-context myk8s-context \
  --cluster=myk8s \
  --user=k8s-node \
  --kubeconfig=kubelet.kubeconfig

#Context "myk8s-context" created.
Create the cluster config, use-context: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# cd /application/kubernetes/server/conf/
[root@k8s-node-01 conf]# kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig

#Switched to context "myk8s-context".
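The four config steps above only write entries into kubelet.kubeconfig; to review the merged result (embedded certificate data is elided in the output):
[root@k8s-node-01 conf]# kubectl config view --kubeconfig=kubelet.kubeconfig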
Create the resource config file: (k8s-master node)
[root@k8s-master ~]# vim /application/kubernetes/server/conf/k8s-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node
[root@k8s-master ~]# kubectl apply -f /application/kubernetes/server/conf/k8s-node.yaml

#clusterrolebinding.rbac.authorization.k8s.io/k8s-node created
Check the node binding: (k8s-master node)
[root@k8s-master ~]# kubectl get clusterrolebinding k8s-node
NAME       AGE
k8s-node   20s
[root@k8s-master ~]# kubectl get clusterrolebinding k8s-node -o yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRoleBinding","metadata":{"annotations":{},"name":"k8s-node"},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"ClusterRole","name":"system:node"},"subjects":[{"apiGroup":"rbac.authorization.k8s.io","kind":"User","name":"k8s-node"}]}
  creationTimestamp: "2019-05-31T14:11:40Z"
  name: k8s-node
  resourceVersion: "3336"
  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/k8s-node
  uid: 02ec7821-83ae-11e9-9783-000c295ac6a7
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node
Create the kubelet startup script: (k8s-node-01 and k8s-node-02 nodes)
# Note the IP addresses in the script below (on k8s-node-02, --hostname-override must be 10.0.0.30)
[root@k8s-node-01 ~]# mkdir /data/kubelet
[root@k8s-node-01 ~]# vim /application/kubernetes/server/bin/kube-kubelet.sh
#!/bin/bash
./kubelet \
  --anonymous-auth=false \
  --cgroup-driver systemd \
  --cluster-dns 192.168.0.2 \
  --cluster-domain cluster.local \
  --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice \
  --fail-swap-on="false" \
  --client-ca-file ../certs/ca.pem \
  --tls-cert-file ../certs/kubelet.pem \
  --tls-private-key-file ../certs/kubelet-key.pem \
  --hostname-override 10.0.0.20 \
  --image-gc-high-threshold 20 \
  --image-gc-low-threshold 10 \
  --kubeconfig ../conf/kubelet.kubeconfig \
  --log-dir ../log/ \
  --pod-infra-container-image 10.0.0.40/kubernetes/pod:v3.4 \
  --root-dir /data/kubelet \
  --v 2
[root@k8s-node-01 ~]# chmod 700 /application/kubernetes/server/bin/kube-kubelet.sh
Install supervisor: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# yum -y install supervisor
[root@k8s-node-01 ~]# systemctl start supervisord.service
[root@k8s-node-01 ~]# systemctl enable supervisord.service
Create the supervisor config: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# vim /etc/supervisord.d/kube-kubelet.ini
[program:kube-kubelet]
command=/application/kubernetes/server/bin/kube-kubelet.sh        ; the program (relative uses PATH, can take args)
numprocs=1                                                        ; number of processes copies to start (def 1)
directory=/application/kubernetes/server/bin                      ; directory to cwd to before exec (def no cwd)
autostart=true                                                    ; start at supervisord start (default: true)
autorestart=true                                                  ; restart at unexpected quit (default: true)
startsecs=22                                                      ; number of secs prog must stay running (def. 1)
startretries=3                                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                         ; setuid to this UNIX account to run the program
redirect_stderr=false                                             ; redirect proc stderr to stdout (default false)
stdout_logfile=/application/kubernetes/server/log/kubelet.stdout.log   ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                       ; emit events on stdout writes (default false)
stderr_logfile=/application/kubernetes/server/log/kubelet.stderr.log   ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                      ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                          ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                       ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false                                       ; emit events on stderr writes (default false)
Start the kubelet and check its status: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# supervisorctl update
kube-kubelet: added process group
[root@k8s-node-01 ~]# supervisorctl status
#kube-kubelet                     RUNNING   pid 12175, uptime 0:00:26

Deploy the kube-proxy component:

Issue the client certificate: (k8s-manager node)
[root@k8s-manager ~]# cd /application/certs/
[root@k8s-manager certs]# vim /application/certs/kube-proxy-csr.json
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shanghai",
            "O": "olda",
            "OU": "ops"
        }
    ]
}
[root@k8s-manager certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json | cfssljson -bare kube-proxy-client
Copy the client certificates to the node machines: (k8s-manager node)
[root@k8s-manager ~]# cd /application/certs/
[root@k8s-manager certs]# scp kube-proxy-client.pem kube-proxy-client-key.pem root@10.0.0.20:/application/kubernetes/server/certs/
[root@k8s-manager certs]# scp kube-proxy-client.pem kube-proxy-client-key.pem root@10.0.0.30:/application/kubernetes/server/certs/
Create the cluster config, set-cluster: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# cd /application/kubernetes/server/conf/
[root@k8s-node-01 conf]# kubectl config set-cluster myk8s \
  --certificate-authority=../certs/ca.pem \
  --embed-certs=true \
  --server=https://10.0.0.10:6443 \
  --kubeconfig=kube-proxy.kubeconfig

#Cluster "myk8s" set.
Create the cluster config, set-credentials: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# cd /application/kubernetes/server/conf/
[root@k8s-node-01 conf]# kubectl config set-credentials kube-proxy \
  --client-certificate=../certs/kube-proxy-client.pem \
  --client-key=../certs/kube-proxy-client-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

#User "kube-proxy" set.
Create the cluster config, set-context: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# cd /application/kubernetes/server/conf/
[root@k8s-node-01 conf]# kubectl config set-context myk8s-context \
  --cluster=myk8s \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

#Context "myk8s-context" created.
Create the cluster config, use-context: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# cd /application/kubernetes/server/conf/
[root@k8s-node-01 conf]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig

#Switched to context "myk8s-context".
Create the kube-proxy startup script: (k8s-node-01 and k8s-node-02 nodes)
# Note the IP addresses in the script below (on k8s-node-02, --hostname-override must be 10.0.0.30)
[root@k8s-node-01 ~]# vim /application/kubernetes/server/bin/kube-proxy.sh
#!/bin/bash
./kube-proxy \
  --cluster-cidr 172.16.0.0/16 \
  --hostname-override 10.0.0.20 \
  --kubeconfig ../conf/kube-proxy.kubeconfig
[root@k8s-node-01 ~]# chmod 700 /application/kubernetes/server/bin/kube-proxy.sh
Create the supervisor config: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# vim /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy]
command=/application/kubernetes/server/bin/kube-proxy.sh             ; the program (relative uses PATH, can take args)
numprocs=1                                                           ; number of processes copies to start (def 1)
directory=/application/kubernetes/server/bin                         ; directory to cwd to before exec (def no cwd)
autostart=true                                                       ; start at supervisord start (default: true)
autorestart=true                                                     ; restart at unexpected quit (default: true)
startsecs=22                                                         ; number of secs prog must stay running (def. 1)
startretries=3                                                       ; max # of serial start failures (default 3)
exitcodes=0,2                                                        ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                      ; signal used to kill process (default TERM)
stopwaitsecs=10                                                      ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                            ; setuid to this UNIX account to run the program
redirect_stderr=false                                                ; redirect proc stderr to stdout (default false)
stdout_logfile=/application/kubernetes/server/log/proxy.stdout.log   ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                         ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                             ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                          ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                          ; emit events on stdout writes (default false)
stderr_logfile=/application/kubernetes/server/log/proxy.stderr.log   ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                         ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                             ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                          ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false                                          ; emit events on stderr writes (default false)
Start kube-proxy and check its status: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# supervisorctl update
#kube-proxy: added process group
[root@k8s-node-01 ~]# supervisorctl status
kube-kubelet                     RUNNING   pid 12175, uptime 0:15:00
kube-proxy                       RUNNING   pid 12518, uptime 0:00:38
Check node registration status: (k8s-master node)
[root@k8s-master ~]# kubectl get node -o wide
NAME          STATUS   ROLES    AGE     VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION          CONTAINER-RUNTIME
10.0.0.20     Ready    <none>   4m48s   v1.13.5   10.0.0.20     <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64   docker://1.12.6
10.0.0.30     Ready    <none>   4m47s   v1.13.5   10.0.0.30     <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64   docker://1.12.6

Deploy the Kubernetes Manager node from source:

k8s-master     10.0.0.10      k8s-master    etcd, kube-apiserver, kube-controller-manager, kube-scheduler
k8s-node-01    10.0.0.20      k8s-node      kubelet, docker, kube-proxy
k8s-node-02    10.0.0.30      k8s-node      kubelet, docker, kube-proxy
k8s-manager    10.0.0.40      k8s-manager   harbor, nfs

Environment preparation: (all nodes)

cat >> /etc/hosts <<EOF
10.0.0.10 k8s-master
10.0.0.20 k8s-node-01
10.0.0.30 k8s-node-02
10.0.0.40 k8s-manager
EOF
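A quick resolution check on any node:
getent hosts k8s-master k8s-node-01 k8s-node-02 k8s-manager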

Install the Harbor registry:

[root@four ~]# hostnamectl set-hostname k8s-manager
Install the docker-1.12.6 packages: (k8s-manager node)
yum -y install epel-release
mkdir -p /home/tools && cd /home/tools
wget https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.6-1.el7.centos.x86_64.rpm
wget https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-selinux-1.12.6-1.el7.centos.noarch.rpm
yum -y localinstall docker-engine-*
Configure the Docker daemon file: (k8s-manager node)
# Line 1 moves Docker's data directory to /data/docker
# Line 2 selects the overlay storage driver
# Line 3 lists the insecure (private) registries that may be used without trusted TLS
# Line 4 sets the container bridge address/subnet (bip)
# Line 5 hands container cgroup management to systemd
# Line 6 (live-restore) keeps containers running while the daemon itself is down or restarting
mkdir -p /data/docker
mkdir -p /etc/docker
vim /etc/docker/daemon.json

Note: the container subnet must be different on every host; here the subnets are derived from the physical host addresses 10/20/30/40 (the manager uses 172.16.40.1/24).

{
  "graph": "/data/docker",
  "storage-driver": "overlay",
  "insecure-registries": ["registry.access.redhat.com","quay.io","10.0.0.40"],
  "bip": "172.16.40.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
Start the Docker service: (k8s-manager node)
systemctl start docker.service
systemctl enable docker.service
ll -sh /data/docker/
total 0
0 drwx------ 2 root root  6 May 18 02:42 containers
0 drwx------ 3 root root 21 May 18 02:42 image
0 drwxr-x--- 3 root root 19 May 18 02:42 network
0 drwx------ 2 root root  6 May 18 02:42 overlay
0 drwx------ 2 root root  6 May 18 02:42 swarm
0 drwx------ 2 root root  6 May 18 02:42 tmp
0 drwx------ 2 root root  6 May 18 02:42 trust
0 drwx------ 2 root root 25 May 18 02:42 volumes
Install the docker-compose orchestration tool: (k8s-manager node)
[root@k8s-manager ~]# yum -y install docker-compose
Download the Harbor offline installer: (k8s-manager node)
[root@k8s-manager ~]# cd /home/tools/
[root@k8s-manager tools]# wget https://storage.googleapis.com/harbor-releases/release-1.7.0/harbor-offline-installer-v1.7.0.tgz
Unpack the package: (k8s-manager node)
[root@k8s-manager tools]# tar xf harbor-offline-installer-v1.7.0.tgz -C /application/ && cd /application/harbor/
Edit the configuration: (k8s-manager node)
# Set the access address: an IP address or a host domain name
sed -i 's#hostname = reg.mydomain.com#hostname = 10.0.0.40#g' /application/harbor/harbor.cfg

# Change the Harbor admin password to redhat
sed -i 's#harbor_admin_password = Harbor12345#harbor_admin_password = redhat#g' /application/harbor/harbor.cfg

# Move off the host's ports 80 and 443
sed -i 's#80:80#180:80#g' /application/harbor/docker-compose.yml
sed -i 's#443:443#1443:443#g' /application/harbor/docker-compose.yml
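Confirm the edits landed before running the installer:
grep -E '^hostname|^harbor_admin_password' /application/harbor/harbor.cfg
grep -E '180:80|1443:443' /application/harbor/docker-compose.yml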
Install the Harbor registry: (k8s-manager node)
[root@k8s-manager ~]# cd /application/harbor/ && ./install.sh
Check Harbor status: (k8s-manager node)
[root@k8s-manager ~]# cd /application/harbor/
[root@k8s-manager harbor]# docker-compose ps
       Name                     Command               State                                 Ports                               
--------------------------------------------------------------------------------------------------------------------------------
harbor-adminserver   /harbor/start.sh                 Up                                                                        
harbor-core          /harbor/start.sh                 Up                                                                        
harbor-db            /entrypoint.sh postgres          Up      5432/tcp                                                          
harbor-jobservice    /harbor/start.sh                 Up                                                                        
harbor-log           /bin/sh -c /usr/local/bin/ ...   Up      127.0.0.1:1514->10514/tcp                                         
harbor-portal        nginx -g daemon off;             Up      80/tcp                                                            
nginx                nginx -g daemon off;             Up      0.0.0.0:1443->443/tcp, 0.0.0.0:4443->4443/tcp, 0.0.0.0:180->80/tcp
redis                docker-entrypoint.sh redis ...   Up      6379/tcp                                                          
registry             /entrypoint.sh /etc/regist ...   Up      5000/tcp                                                          
registryctl          /harbor/start.sh                 Up
Install the Nginx reverse proxy: (k8s-manager node)
[root@k8s-manager ~]# yum localinstall -y http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm
[root@k8s-manager ~]# yum -y install nginx
Configure the Nginx virtual hosts: (k8s-manager node)
[root@k8s-manager ~]# rm -f /etc/nginx/conf.d/default.conf
[root@k8s-manager ~]# vim /etc/nginx/conf.d/harbor.conf
server {
  listen 80;
  server_name 10.0.0.40;

  client_max_body_size 1000m;

  location / {
    proxy_pass http://127.0.0.1:180;
  }
}

server {
    listen       443 ssl;
    server_name  10.0.0.40;

    ssl_certificate "certs/harbor.pem";
    ssl_certificate_key "certs/harbor-key.pem";
    ssl_session_cache shared:SSL:1m;
    ssl_session_timeout  10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;
    client_max_body_size 1000m;

    location / {
        proxy_pass http://127.0.0.1:180;
    }
}
Self-sign the Harbor certificate: (k8s-manager node)
[root@k8s-manager ~]# mkdir /etc/nginx/certs
[root@k8s-manager ~]# cd /etc/nginx/certs
[root@k8s-manager certs]# cp -a /application/certs/ca.pem .
[root@k8s-manager certs]# cp -a /application/certs/ca-key.pem .
[root@k8s-manager certs]# (umask 077;openssl genrsa -out harbor-key.pem 2048)
Generating RSA private key, 2048 bit long modulus
.......................+++
...................................+++
e is 65537 (0x10001)


[root@k8s-manager certs]# openssl req -new -key harbor-key.pem -out harbor.csr -subj "/CN=harbor/ST=shanghai/L=shanghai/O=olda/OU=ops"

[root@k8s-manager certs]# openssl x509 -req -in harbor.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out harbor.pem -days 7300
Signature ok
subject=/CN=harbor/ST=shanghai/L=shanghai/O=olda/OU=ops
Getting CA Private Key
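Optionally inspect the signed certificate's subject and validity window:
[root@k8s-manager certs]# openssl x509 -in harbor.pem -noout -subject -issuer -dates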
Start the Nginx service: (k8s-manager node)
[root@k8s-manager ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful

[root@k8s-manager ~]# systemctl start nginx
[root@k8s-manager ~]# systemctl enable nginx
Created symlink from /etc/systemd/system/multi-user.target.wants/nginx.service to /usr/lib/systemd/system/nginx.service.

Username: admin
Password: redhat

Deploy the Kubernetes Flannel network plugin from source:

k8s-master     10.0.0.10      k8s-master    etcd, kube-apiserver, kube-controller-manager, kube-scheduler
k8s-node-01    10.0.0.20      k8s-node      kubelet, docker, kube-proxy
k8s-node-02    10.0.0.30      k8s-node      kubelet, docker, kube-proxy
k8s-manager    10.0.0.40      k8s-manager   harbor, nfs
Inspect the iptables rules: (all k8s-master and k8s-node nodes)
[root@k8s-master ~]# iptables-save |grep -i 'postrouting -s'
#-A POSTROUTING -s 172.16.10.0/24 ! -o docker0 -j MASQUERADE

[root@k8s-node-01 ~]# iptables-save |grep -i 'postrouting -s'
#-A POSTROUTING -s 172.16.20.0/24 ! -o docker0 -j MASQUERADE
Modify the iptables rules: (all k8s-master and k8s-node nodes; substitute each host's own container subnet, e.g. 172.16.30.0/24 on k8s-node-02)
[root@k8s-master ~]# iptables -t nat -D POSTROUTING -s 172.16.10.0/24 ! -o docker0 -j MASQUERADE
[root@k8s-master ~]# iptables -t nat -I POSTROUTING -s 172.16.10.0/24 ! -d 172.16.0.0/16 ! -o docker0 -j MASQUERADE
[root@k8s-master ~]# iptables-save >/etc/sysconfig/iptables

[root@k8s-node-01 ~]# iptables -t nat -D POSTROUTING -s 172.16.20.0/24 ! -o docker0 -j MASQUERADE
[root@k8s-node-01 ~]# iptables -t nat -I POSTROUTING -s 172.16.20.0/24 ! -d 172.16.0.0/16 ! -o docker0 -j MASQUERADE
[root@k8s-node-01 ~]# iptables-save >/etc/sysconfig/iptables

Deploy the flannel component:

Download the flannel package: (all k8s-master and k8s-node nodes)
cd /home/tools/
wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
mkdir /application/flannel-0.10.0
tar xf flannel-v0.10.0-linux-amd64.tar.gz -C /application/flannel-0.10.0
ln -s /application/flannel-0.10.0/ /application/flannel
# flannel also needs these three certificate files; in this environment they were already copied over when kubelet and kube-proxy were deployed
ca.pem
client-key.pem
client.pem
Write the flannel network config into etcd, adding host-gw: (k8s-master node)
[root@k8s-master ~]# cd /application/etcd
[root@k8s-master etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.16.0.0/16", "Backend": {"Type": "host-gw"}}'
#{"Network": "172.16.0.0/16", "Backend": {"Type": "host-gw"}}

# vxlan is recommended instead; OpenStack cloud hosts must use vxlan
[root@k8s-master etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.16.0.0/16", "Backend": {"Type": "vxlan"}}'
#{"Network": "172.16.0.0/16", "Backend": {"Type": "vxlan"}}
Create the flannel environment file: (all k8s-master and k8s-node nodes)
# Note the IP addresses in the file below; FLANNEL_SUBNET must match each host's docker bip (172.16.30.1/24 on k8s-node-02)
[root@k8s-master ~]# vim /application/flannel/subnet.env
FLANNEL_NETWORK=172.16.0.0/16
FLANNEL_SUBNET=172.16.10.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false

[root@k8s-node-01 ~]# vim /application/flannel/subnet.env
FLANNEL_NETWORK=172.16.0.0/16
FLANNEL_SUBNET=172.16.20.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
Create the flannel startup script: (all k8s-master and k8s-node nodes; --public-ip is each host's own address, e.g. 10.0.0.30 on k8s-node-02)
[root@k8s-master ~]# vim /application/flannel/flanneld.sh
#!/bin/bash
./flanneld \
  --public-ip=10.0.0.10 \
  --etcd-endpoints=https://10.0.0.10:2379 \
  --etcd-keyfile=./certs/client-key.pem \
  --etcd-certfile=./certs/client.pem \
  --etcd-cafile=./certs/ca.pem \
  --iface=eth0 \
  --subnet-file=./subnet.env \
  --healthz-port=2401
[root@k8s-master ~]# chmod 700 /application/flannel/flanneld.sh
[root@k8s-node-01 ~]# vim /application/flannel/flanneld.sh
#!/bin/bash
./flanneld \
  --public-ip=10.0.0.20 \
  --etcd-endpoints=https://10.0.0.10:2379 \
  --etcd-keyfile=./certs/client-key.pem \
  --etcd-certfile=./certs/client.pem \
  --etcd-cafile=./certs/ca.pem \
  --iface=eth0 \
  --subnet-file=./subnet.env \
  --healthz-port=2401
[root@k8s-node-01 ~]# chmod 700 /application/flannel/flanneld.sh
Copy the client certificates: (all k8s-master and k8s-node nodes)
mkdir /application/flannel/{log,certs}
cp -a /application/kubernetes/server/certs/ca.pem /application/flannel/certs/
cp -a /application/kubernetes/server/certs/client-key.pem /application/flannel/certs/
cp -a /application/kubernetes/server/certs/client.pem /application/flannel/certs/
Create the supervisor config: (all k8s-master and k8s-node nodes)
[root@k8s-master ~]# vim /etc/supervisord.d/flanneld.ini
[program:flanneld]
command=/application/flannel/flanneld.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                   ; number of processes copies to start (def 1)
directory=/application/flannel                               ; directory to cwd to before exec (def no cwd)
autostart=true                                               ; start at supervisord start (default: true)
autorestart=true                                             ; restart at unexpected quit (default: true)
startsecs=22                                                 ; number of secs prog must stay running (def. 1)
startretries=3                                               ; max # of serial start failures (default 3)
exitcodes=0,2                                                ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                              ; signal used to kill process (default TERM)
stopwaitsecs=10                                              ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                    ; setuid to this UNIX account to run the program
redirect_stderr=false                                        ; redirect proc stderr to stdout (default false)
stdout_logfile=/application/flannel/log/flanneld.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                 ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                     ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                  ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                  ; emit events on stdout writes (default false)
stderr_logfile=/application/flannel/log/flanneld.stderr.log  ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                 ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                     ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                  ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false                                  ; emit events on stderr writes (default false)
[root@k8s-node-01 ~]# vim /etc/supervisord.d/flanneld.ini
[program:flanneld]
command=/application/flannel/flanneld.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                   ; number of processes copies to start (def 1)
directory=/application/flannel                               ; directory to cwd to before exec (def no cwd)
autostart=true                                               ; start at supervisord start (default: true)
autorestart=true                                             ; restart at unexpected quit (default: true)
startsecs=22                                                 ; number of secs prog must stay running (def. 1)
startretries=3                                               ; max # of serial start failures (default 3)
exitcodes=0,2                                                ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                              ; signal used to kill process (default TERM)
stopwaitsecs=10                                              ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                    ; setuid to this UNIX account to run the program
redirect_stderr=false                                        ; redirect proc stderr to stdout (default false)
stdout_logfile=/application/flannel/log/flanneld.stdout.log       ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                 ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                     ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                  ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                  ; emit events on stdout writes (default false)
stderr_logfile=/application/flannel/log/flanneld.stderr.log       ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                 ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                     ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                  ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false                                  ; emit events on stderr writes (default false)
Start flannel and check its status: (all k8s-master and k8s-node nodes)
[root@k8s-master ~]# supervisorctl update
#flanneld: added process group

[root@k8s-node-01 ~]# supervisorctl update
#flanneld: added process group
[root@k8s-master ~]# supervisorctl status
#etcd-server                      RUNNING   pid 2426, uptime 4:05:35
#flanneld                         RUNNING   pid 6914, uptime 0:00:34
#kube-apiserver                   RUNNING   pid 5962, uptime 3:43:23
#kube-controller-manager          RUNNING   pid 6722, uptime 0:11:17
#kube-scheduler                   RUNNING   pid 6715, uptime 0:11:17

[root@k8s-node-01 ~]# supervisorctl status
#flanneld                         RUNNING   pid 15711, uptime 0:00:30
#kube-kubelet                     RUNNING   pid 12175, uptime 1:22:43
#kube-proxy                       RUNNING   pid 12518, uptime 1:08:21
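If flannel uses the host-gw backend (which the per-host 172.16.x.0/24 docker bip scheme suggests), each host should now hold routes to the other hosts' container subnets; a quick check (a sketch, exact columns vary):

[root@k8s-master ~]# route -n | grep 172.16
# expect entries such as 172.16.20.0/255.255.255.0 via 10.0.0.20 and 172.16.30.0/255.255.255.0 via 10.0.0.30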
Test the flannel network: (k8s-master node)

Create a pod that gets scheduled to a node, then curl it from the master.

Meanwhile, on the k8s-manager node, pull the nginx 1.15.12 image, tag it, and push it to the Harbor registry.

[root@k8s-manager tools]# docker login 10.0.0.40
# Username: admin
# Password: redhat
# Login Succeeded

[root@k8s-manager ~]# docker pull xplenty/rhel7-pod-infrastructure:v3.4
[root@k8s-manager ~]# docker tag xplenty/rhel7-pod-infrastructure:v3.4 10.0.0.40/kubernetes/pod:v3.4
[root@k8s-manager ~]# docker push 10.0.0.40/kubernetes/pod:v3.4

[root@k8s-manager ~]# docker pull nginx:1.15.12
[root@k8s-manager ~]# docker tag nginx:1.15.12 10.0.0.40/kubernetes/nginx:1.15.12
[root@k8s-manager ~]# docker push 10.0.0.40/kubernetes/nginx:1.15.12 
[root@k8s-master ~]# vim nginx-ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: 10.0.0.40/kubernetes/nginx:1.15.12
        ports:
        - containerPort: 80
[root@k8s-master ~]# kubectl apply -f nginx-ds.yaml
# daemonset.extensions/nginx-ds created
[root@k8s-master ~]# kubectl get pods -o wide
NAME             READY   STATUS    RESTARTS   AGE     IP            NODE        NOMINATED NODE   READINESS GATES
nginx-ds-n95k5   1/1     Running   0          2m38s   172.16.20.2   10.0.0.20   <none>           <none>
nginx-ds-n45g1   1/1     Running   0          2m38s   172.16.30.2   10.0.0.30   <none>           <none>
[root@k8s-master ~]# curl -I 172.16.20.2
HTTP/1.1 200 OK
Server: nginx/1.15.12
Date: Sat, 01 Jun 2019 00:57:13 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 16 Apr 2019 13:08:19 GMT
Connection: keep-alive
ETag: "5cb5d3c3-264"
Accept-Ranges: bytes
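Since the point of flannel is cross-host pod networking, it is worth curling the pod on the other node as well; the same nginx headers are expected:

[root@k8s-master ~]# curl -I 172.16.30.2
# HTTP/1.1 200 OK
# Server: nginx/1.15.12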

Deploy the CoreDNS component:

Download the CoreDNS image: (k8s-manager node)

(See the CoreDNS image page on Docker Hub for all available versions.)

[root@k8s-manager ~]# docker pull coredns/coredns:1.3.1
[root@k8s-manager ~]# docker tag coredns/coredns:1.3.1 10.0.0.40/kubernetes/coredns:v1.3.1
[root@k8s-manager ~]# docker push 10.0.0.40/kubernetes/coredns:v1.3.1

#The push refers to a repository [10.0.0.40/kubernetes/coredns]
#c6a5fc8a3f01: Pushed 
#fb61a074724d: Pushed 
#v1.3.1: digest: sha256:e077b9680c32be06fc9652d57f64aa54770dd6554eb87e7a00b97cf8e9431fda size: 739
Create the resource manifests: (k8s-master node)

Note the image address inside the yaml files.

[root@k8s-master ~]# mkdir -p addons/coredns && cd addons/coredns
[root@k8s-master coredns]# vi coredns-configmap.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        kubernetes cluster.local 192.168.0.0/16
        proxy . /etc/resolv.conf
        cache 30
    }
[root@k8s-master coredns]# vi coredns-service.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system

---

apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 192.168.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
[root@k8s-master coredns]# vi coredns-deployment.yaml
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      serviceAccountName: coredns
      containers:
      - name: coredns
        image: 10.0.0.40/kubernetes/coredns:v1.3.1
        args:
        - -conf
        - /etc/coredns/Corefile
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      imagePullSecrets:
      - name: harbor
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
Apply the CoreDNS resource configuration: (k8s-master node)
[root@k8s-master coredns]# kubectl apply -f coredns-configmap.yaml 
#configmap/coredns created

[root@k8s-master coredns]# kubectl apply -f coredns-service.yaml 
#serviceaccount/coredns created
#clusterrole.rbac.authorization.k8s.io/system:coredns created
#clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
#service/coredns created

[root@k8s-master coredns]# kubectl apply -f coredns-deployment.yaml 
#deployment.extensions/coredns created
Check CoreDNS status: (k8s-master node)
[root@k8s-master coredns]# kubectl get pods -n kube-system -o wide
NAME                                    READY   STATUS    RESTARTS   AGE   IP            NODE        NOMINATED NODE   READINESS GATES
coredns-85c95bb878-9tqn6                1/1     Running   0          69s   172.16.30.2   10.0.0.30   <none>           <none>
Test CoreDNS resolution: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# yum -y install bind-utils
[root@k8s-node-01 ~]# dig -t A kubernetes.default.svc.cluster.local. @192.168.0.2 +short
192.168.0.1
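To confirm that resolution also works from inside a pod (kubelet's --cluster-dns must point at 192.168.0.2 for this), a throwaway busybox pod can be used; a sketch, assuming the nodes can pull busybox:1.28 from Docker Hub:

[root@k8s-master ~]# kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default.svc.cluster.local
# Name:      kubernetes.default.svc.cluster.local
# Address 1: 192.168.0.1 kubernetes.default.svc.cluster.local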

Deploy the Traefik reverse proxy:

Download the image: (k8s-manager node)
[root@k8s-manager ~]# docker pull traefik:v1.7-alpine
[root@k8s-manager ~]# docker tag traefik:v1.7-alpine 10.0.0.40/kubernetes/traefik:v1.7
[root@k8s-manager ~]# docker push 10.0.0.40/kubernetes/traefik:v1.7

#The push refers to a repository [10.0.0.40/kubernetes/traefik]
#7bea31916728: Pushed 
#1392bc088803: Pushed 
#04870cd09f47: Pushed 
#f1b5933fe4b5: Pushed 
#v1.7: digest: sha256:bf78749c5d15172e3ff060d6dad7f8fc4938e6b476465e254b66e2ebf0cda692 size: 1157
Create the resource manifests: (k8s-master node)
[root@k8s-master ~]# mkdir -p addons/traefik && cd addons/traefik
[root@k8s-master traefik]# vi traefik-rbac.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
[root@k8s-master traefik]# vi traefik-daemonset.yaml
# Note the traefik image address, the port used to access the traefik frontend, and the apiserver address and port
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: 10.0.0.40/kubernetes/traefik:v1.7
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          # hostPort is the port used to reach traefik when it is proxied via ingress
          hostPort: 80
        - name: admin
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
        - --insecureskipverify=true
        - --kubernetes.endpoint=https://10.0.0.10:6443
        - --accesslog
        - --accesslog.filepath=/var/log/traefik_access.log
        - --traefiklog
        - --traefiklog.filepath=/var/log/traefik.log
        - --metrics.prometheus
      imagePullSecrets:
      - name: harbor
[root@k8s-master traefik]# vi traefik-service.yaml 
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
    - protocol: TCP
      port: 80
      name: web
    - protocol: TCP
      port: 8080
      name: admin
[root@k8s-master traefik]# vi traefik-ingress.yaml 
# Note the domain used to access traefik; this deployment method requires a domain name (because ingress is used to proxy the traefik dashboard)
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: traefik.opstands.com
    http:
      paths:
      - backend:
          serviceName: traefik-ingress-service
          servicePort: 8080
Apply the traefik resource configuration: (k8s-master node)
[root@k8s-master traefik]# kubectl apply -f traefik-rbac.yaml 
#serviceaccount/traefik-ingress-controller created
#clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
#clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created

[root@k8s-master traefik]# kubectl apply -f traefik-daemonset.yaml 
#daemonset.extensions/traefik-ingress-controller created

[root@k8s-master traefik]# kubectl apply -f traefik-service.yaml 
#service/traefik-ingress-service created

[root@k8s-master traefik]# kubectl apply -f traefik-ingress.yaml 
#ingress.extensions/traefik-web-ui created
Check traefik status: (k8s-master node)
[root@k8s-master traefik]# kubectl get pods -o wide -n kube-system
#NAME                                    READY   STATUS    RESTARTS   AGE     IP            NODE        NOMINATED NODE   READINESS GATES
#coredns-85c95bb878-2ds88                1/1     Running   0          62m     172.16.30.2   10.0.0.30   <none>           <none>
#traefik-ingress-controller-fw6xf        1/1     Running   0          2m48s   172.16.20.6   10.0.0.20   <none>           <none>
#traefik-ingress-controller-lpmqx        1/1     Running   0          2m48s   172.16.30.5   10.0.0.30   <none>           <none>
Browser access: (any browser)

Locally resolve the traefik.opstands.com domain to the IP of any node running a traefik pod.

http://traefik.opstands.com
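If you prefer not to edit the hosts file, the same check works with curl by setting the Host header (assuming a traefik pod runs on 10.0.0.20; traefik 1.7 redirects / to /dashboard/):

[root@k8s-master ~]# curl -I -H 'Host: traefik.opstands.com' http://10.0.0.20/
# HTTP/1.1 302 Found
# Location: /dashboard/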

NodePort access: (choose between the ingress proxy and NodePort access according to your needs)

Stop the ingress forwarding that proxies traefik, and add NodePort parameters to the service file (a sketch follows the delete step below).

[root@k8s-master traefik]# kubectl delete -f traefik-ingress.yaml
#ingress.extensions "traefik-web-ui" deleted
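A sketch of the modified traefik-service.yaml, exposing the admin port on NodePort 9999 to match the URL below (assumes the apiserver's --service-node-port-range permits it, as with the other low node ports in this document):

[root@k8s-master traefik]# vi traefik-service.yaml
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  type: NodePort
  selector:
    k8s-app: traefik-ingress-lb
  ports:
    - protocol: TCP
      port: 80
      name: web
    - protocol: TCP
      port: 8080
      nodePort: 9999
      name: admin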

[root@k8s-master traefik]# kubectl apply -f traefik-service.yaml 
#service/traefik-ingress-service configured

http://10.0.0.20:9999/

Deploy the Dashboard component:

Download the dashboard image and push it to the Harbor registry: (k8s-manager node)
[root@k8s-manager ~]# docker pull registry.cn-hangzhou.aliyuncs.com/dnsjia/k8s:kubernetes-dashboard-amd64_v1.10.0
[root@k8s-manager ~]# docker tag registry.cn-hangzhou.aliyuncs.com/dnsjia/k8s:kubernetes-dashboard-amd64_v1.10.0 10.0.0.40/kubernetes/kubernetes-dashboard:v1.10.0
[root@k8s-manager ~]# docker push 10.0.0.40/kubernetes/kubernetes-dashboard:v1.10.0
#The push refers to a repository [10.0.0.40/kubernetes/kubernetes-dashboard]
#23ddb8cbb75a: Pushed 
#v1.10.0: digest: sha256:e76c5fe6886c99873898e4c8c0945261709024c4bea773fc477629455631e472 size: 529
Install the nfs-utils package: (k8s-manager node)
[root@k8s-manager ~]# yum -y install nfs-utils
Configure the NFS directory: (k8s-manager node)
[root@k8s-manager ~]# mkdir -p  /application/nfs_store/dashboard_certs
[root@k8s-manager ~]# echo '/application/nfs_store/dashboard_certs    10.0.0.0/24(rw,async,no_root_squash,no_all_squash)' > /etc/exports
Start the NFS server: (k8s-manager node)
[root@k8s-manager ~]# systemctl start rpcbind
[root@k8s-manager ~]# systemctl enable rpcbind
[root@k8s-manager ~]# systemctl enable nfs-server.service
[root@k8s-manager ~]# systemctl start nfs-server.service
Check the shared directory status: (k8s-manager node)
[root@k8s-manager ~]# showmount -e 10.0.0.40
Export list for 10.0.0.40:
/application/nfs_store/dashboard_certs 10.0.0.0/24
Generate the dashboard SSL certificate: (k8s-manager node)
[root@k8s-manager ~]# cd /application/nfs_store/dashboard_certs/
[root@k8s-manager dashboard_certs]# openssl genrsa -des3 -passout pass:x -out dashboard.pass.key 2048
#Generating RSA private key, 2048 bit long modulus
#..................................................................+++
#..........................+++
#e is 65537 (0x10001)

[root@k8s-manager dashboard_certs]# openssl rsa -passin pass:x -in dashboard.pass.key -out dashboard.key
#writing RSA key

[root@k8s-manager dashboard_certs]# rm -f dashboard.pass.key
# Press Enter through all 9 prompts to generate a certificate with default values
[root@k8s-manager dashboard_certs]# openssl req -new -key dashboard.key -out dashboard.csr
#You are about to be asked to enter information that will be incorporated
#into your certificate request.
#What you are about to enter is what is called a Distinguished Name or a DN.
#There are quite a few fields but you can leave some blank
#For some fields there will be a default value,
#If you enter '.', the field will be left blank.
#-----
#Country Name (2 letter code) [XX]:
#State or Province Name (full name) []:
#Locality Name (eg, city) [Default City]:
#Organization Name (eg, company) [Default Company Ltd]:
#Organizational Unit Name (eg, section) []:
#Common Name (eg, your name or your server's hostname) []:
#Email Address []:
#
#Please enter the following 'extra' attributes
#to be sent with your certificate request
#A challenge password []:
#An optional company name []:
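The prompts can also be skipped entirely by passing the subject on the command line (equivalent and non-interactive; the CN value here is arbitrary):

[root@k8s-manager dashboard_certs]# openssl req -new -key dashboard.key -out dashboard.csr -subj '/CN=kubernetes-dashboard'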
# The certificate is valid for 7300 days, i.e. 20 years
[root@k8s-manager dashboard_certs]# openssl x509 -req -sha256 -days 7300 -in dashboard.csr -signkey dashboard.key -out dashboard.crt
#Signature ok
#subject=/C=XX/L=Default City/O=Default Company Ltd
#Getting Private key

[root@k8s-manager dashboard_certs]# ll -sh
total 12K
4.0K -rw-r--r-- 1 root root 1.1K Jun  5 20:01 dashboard.crt
4.0K -rw-r--r-- 1 root root  952 Jun  5 20:00 dashboard.csr
4.0K -rw-r--r-- 1 root root 1.7K Jun  5 19:59 dashboard.key
Install NFS client support for volume mounts: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# yum -y install nfs-utils
Write the dashboard yaml file: (k8s-master node)
# When editing the file, note the SSL certificate mount path and the image path
[root@k8s-master ~]# mkdir -p addons/dashboard && cd addons/dashboard
[root@k8s-master dashboard]# vi kubernetes-dashboard.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
# Create ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

---

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: 10.0.0.40/kubernetes/kubernetes-dashboard:v1.10.0
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          - --token-ttl=5400
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
#        secret:
#          secretName: kubernetes-dashboard-certs
        nfs:
          path: /application/nfs_store/dashboard_certs
          server: 10.0.0.40

      - name: tmp-volume
        emptyDir: {}

      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 8888
  selector:
    k8s-app: kubernetes-dashboard
Apply the dashboard resource file: (k8s-master node)
[root@k8s-master dashboard]# kubectl apply -f kubernetes-dashboard.yaml 
#serviceaccount/admin-user created
#clusterrolebinding.rbac.authorization.k8s.io/admin-user created
#secret/kubernetes-dashboard-certs created
#serviceaccount/kubernetes-dashboard created
#role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
#rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
#deployment.apps/kubernetes-dashboard created
#service/kubernetes-dashboard created
Check the dashboard pod status: (k8s-master node)
[root@k8s-master dashboard]# kubectl get pods -o wide -n kube-system
NAME                                    READY   STATUS    RESTARTS   AGE   IP            NODE        NOMINATED NODE   READINESS GATES
coredns-85c95bb878-xgg2z                1/1     Running   0          39m   172.16.20.2   10.0.0.20   <none>           <none>
kubernetes-dashboard-7fbf6dcf9b-wptln   1/1     Running   0          24s   172.16.30.2   10.0.0.30   <none>           <none>
traefik-ingress-controller-55s5w        1/1     Running   0          6s    172.16.20.6   10.0.0.20   <none>           <none>
traefik-ingress-controller-ptj4h        1/1     Running   0          6s    172.16.30.5   10.0.0.30   <none>           <none>
Browser access: (any browser)

Because the service uses a NodePort, the dashboard UI can be reached on port 8888 of any node.

https://10.0.0.20:8888

# Retrieve the dashboard login token
[root@k8s-master dashboard]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') | grep token

Deploy Deployment and Service functionality:

Do the following only after the K8s cluster is fully deployed; it tests whether the ports of a deployment's pods can be exposed externally.

Write the yaml file: (k8s-master node)
[root@k8s-master ~]# vim nginx-deployment.yaml 
apiVersion: v1
kind: Service
metadata:
  name: nginx-deployment
  labels:
    app: nginx-deployment
spec:
  type: NodePort
  selector:
    app: nginx-deployment
  ports:
  - name: http
    nodePort: 3000
    port: 80
    targetPort: 80

---

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx-deployment
    spec:
      containers:
      - name: nginx-deployment
        image: 10.0.0.40/kubernetes/nginx:1.15.12
        ports:
        - containerPort: 80
Apply the yaml file: (k8s-master node)
[root@k8s-master ~]# kubectl apply -f nginx-deployment.yaml 
#service/nginx-deployment created
#deployment.extensions/nginx-deployment created
Check pod status: (k8s-master node)
[root@k8s-master ~]# kubectl get pods -o wide
NAME                                READY   STATUS    RESTARTS   AGE   IP            NODE        NOMINATED NODE   READINESS GATES
nginx-deployment-7849c455d9-6kf7g   1/1     Running   0          50m   172.16.30.2   10.0.0.30   <none>           <none>
nginx-deployment-7849c455d9-tzzvh   1/1     Running   0          50m   172.16.30.3   10.0.0.30   <none>           <none>
nginx-deployment-7849c455d9-w7glf   1/1     Running   0          50m   172.16.20.2   10.0.0.20   <none>           <none>
Check service status: (k8s-master node)
[root@k8s-master ~]# kubectl describe service nginx-deployment
Name:                     nginx-deployment
Namespace:                default
Labels:                   app=nginx-deployment
Annotations:              kubectl.kubernetes.io/last-applied-configuration:
                            {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"app":"nginx-deployment"},"name":"nginx-deployment","namespace"...
Selector:                 app=nginx-deployment
Type:                     NodePort
IP:                       192.168.115.202
Port:                     http  80/TCP
TargetPort:               80/TCP
NodePort:                 http  3000/TCP
Endpoints:                172.16.20.2:80,172.16.30.2:80,172.16.30.3:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>
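With CoreDNS running, the service name should also resolve to the ClusterIP shown above (from a node with bind-utils installed):

[root@k8s-node-01 ~]# dig -t A nginx-deployment.default.svc.cluster.local. @192.168.0.2 +short
# 192.168.115.202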
Modify the pages of the three containers: (k8s-master node)

If a modified container dies, the replacement container that K8s starts reverts to the default Nginx welcome page.

[root@k8s-master ~]# kubectl exec -it nginx-deployment-7849c455d9-6kf7g /bin/bash
echo 'web-01' > /usr/share/nginx/html/index.html
exit
[root@k8s-master ~]# kubectl exec -it nginx-deployment-7849c455d9-tzzvh /bin/bash
echo 'web-02' > /usr/share/nginx/html/index.html
exit
[root@k8s-master ~]# kubectl exec -it nginx-deployment-7849c455d9-w7glf /bin/bash
echo 'web-03' > /usr/share/nginx/html/index.html
exit
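The same edits can be made without an interactive shell; equivalent one-liners using the pod names above:

[root@k8s-master ~]# kubectl exec nginx-deployment-7849c455d9-6kf7g -- /bin/bash -c "echo 'web-01' > /usr/share/nginx/html/index.html"
[root@k8s-master ~]# kubectl exec nginx-deployment-7849c455d9-tzzvh -- /bin/bash -c "echo 'web-02' > /usr/share/nginx/html/index.html"
[root@k8s-master ~]# kubectl exec nginx-deployment-7849c455d9-w7glf -- /bin/bash -c "echo 'web-03' > /usr/share/nginx/html/index.html"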
Write the reverse proxy configuration: (k8s-manager node)
[root@k8s-manager ~]# vim /etc/nginx/conf.d/nodeport.conf
# Reverse proxy the NodePort on each node (the port specified when the deployment was launched); once the node ports are proxied, the service running inside the containers can be reached from outside
upstream server_pools {
    server 10.0.0.20:3000 weight=1;
    server 10.0.0.30:3000 weight=1;
}

server {
    listen       80;
    server_name  k8s.opstands.com;
    location / {
    proxy_pass http://server_pools;
    proxy_set_header  X-Real-IP  $remote_addr;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $remote_addr;
    }
}
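Validate the configuration before restarting (assumes nginx is already installed on k8s-manager):

[root@k8s-manager ~]# nginx -t
# nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
# nginx: configuration file /etc/nginx/nginx.conf test is successful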
Restart the Nginx service: (k8s-manager node)
[root@k8s-manager ~]# systemctl restart nginx

Locally resolve k8s.opstands.com to the 10.0.0.40 address.

Browser access via the Nginx proxy to test load balancing across the containers: (Windows Chrome)

Deploy PV and PVC functionality:

Do the following only after external access to the K8s cluster has been verified; it tests persistence of the business data running in pods.

Note: as recorded in this document, using PV and PVC produces controller-manager warnings because the NFS plugin is not mounted (alternatively, have the containers mount the NFS share directly; see the later section).

Deploy the NFS server:
Install the nfs-utils package: (k8s-manager node)
[root@k8s-manager ~]# yum -y install nfs-utils
Start rpcbind: (k8s-manager node)
[root@k8s-manager ~]# systemctl start rpcbind
[root@k8s-manager ~]# systemctl enable rpcbind
Configure the NFS directory: (k8s-manager node)
[root@k8s-manager ~]# mkdir /application/container_data
[root@k8s-manager ~]# echo '/application/container_data/    10.0.0.0/24(rw,async,no_root_squash,no_all_squash)' > /etc/exports
Start the NFS server: (k8s-manager node)
[root@k8s-manager ~]# systemctl enable nfs-server.service
[root@k8s-manager ~]# systemctl start nfs-server.service
Check the shared directory status: (k8s-manager node)
[root@k8s-manager ~]# showmount -e 10.0.0.40
Export list for 10.0.0.40:
/application/container_data 10.0.0.0/24
Edit the yaml file that creates the PV: (k8s-master node)
[root@k8s-master ~]# vim nginx-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nginx-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  nfs:
    path: /application/container_data
    server: 10.0.0.40
  persistentVolumeReclaimPolicy: Recycle
[root@k8s-master ~]# kubectl apply -f nginx-pv.yaml
persistentvolume/nginx-pv created

[root@k8s-master ~]# kubectl get pv -o wide
NAME       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
nginx-pv   10Gi       RWX            Recycle          Available                                   11s
Deploy the NFS client:
Install the nfs-utils package: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# yum -y install nfs-utils
Edit the yaml file that creates the PVC: (k8s-master node)
[root@k8s-master ~]# vim nginx-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
[root@k8s-master ~]# kubectl apply -f nginx-pvc.yaml 
#persistentvolumeclaim/nginx-pvc created
Check the PV and PVC binding status: (k8s-master node)
[root@k8s-master ~]# kubectl get pv -o wide
NAME       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   REASON   AGE
nginx-pv   10Gi       RWX            Recycle          Bound    default/nginx-pvc                           9m18s

[root@k8s-master ~]# kubectl get pvc -o wide
NAME        STATUS   VOLUME     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nginx-pvc   Bound    nginx-pv   10Gi       RWX                           40s
Update the deployment configuration yaml file: (k8s-master node)
[root@k8s-master ~]# vim nginx-deployment.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-deployment
  labels:
    app: nginx-deployment
spec:
  type: NodePort
  selector:
    app: nginx-deployment
  ports:
  - name: http
    nodePort: 3000
    port: 80
    targetPort: 80

---

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx-deployment
    spec:
      containers:
      - name: nginx-deployment
        image: 10.0.0.40/kubernetes/nginx:1.15.12
        ports:
        - containerPort: 80
        volumeMounts:
        - name: nginx-index
          mountPath: /usr/share/nginx/html/
      volumes:
      - name: nginx-index
        persistentVolumeClaim:
          claimName: nginx-pvc
Update the deployment container configuration: (k8s-master node)
[root@k8s-master ~]# kubectl apply -f nginx-deployment.yaml 
#service/nginx-deployment unchanged
#deployment.extensions/nginx-deployment configured
Create an index file on the NFS server: (k8s-manager node)
[root@k8s-manager ~]# echo 'test data storage' > /application/container_data/index.html
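A quick curl against any node's NodePort should now return the file served from NFS:

[root@k8s-manager ~]# curl http://10.0.0.20:3000
# test data storage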
Browser access via the Nginx proxy: (Windows Chrome)

Mount NFS storage directly in the container:

Do the following only after external access to the K8s cluster has been verified; it tests persistence of the business data running in pods.

Deploy the NFS server:
Install the nfs-utils package: (k8s-manager node)
[root@k8s-manager ~]# yum -y install nfs-utils
Start rpcbind: (k8s-manager node)
[root@k8s-manager ~]# systemctl start rpcbind
[root@k8s-manager ~]# systemctl enable rpcbind
Configure the NFS directory: (k8s-manager node)
[root@k8s-manager ~]# mkdir /application/container_data
[root@k8s-manager ~]# echo '/application/container_data/    10.0.0.0/24(rw,async,no_root_squash,no_all_squash)' > /etc/exports
Start the NFS server: (k8s-manager node)
[root@k8s-manager ~]# systemctl enable nfs-server.service
[root@k8s-manager ~]# systemctl start nfs-server.service
Create an index file on the NFS server: (k8s-manager node)
[root@k8s-manager ~]# echo 'container mount NFS' > /application/container_data/index.html
Check the shared directory status: (k8s-manager node)
[root@k8s-manager ~]# showmount -e 10.0.0.40
Export list for 10.0.0.40:
/application/container_data 10.0.0.0/24
Deploy the NFS client:
Install the nfs-utils package: (k8s-node-01 and k8s-node-02 nodes)
[root@k8s-node-01 ~]# yum -y install nfs-utils
Update the deployment configuration yaml file: (k8s-master node)
[root@k8s-master ~]# vim nginx-deployment.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-deployment
  labels:
    app: nginx-deployment
spec:
  type: NodePort
  selector:
    app: nginx-deployment
  ports:
  - name: http
    nodePort: 3000
    port: 80
    targetPort: 80

---

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx-deployment
    spec:
      containers:
      - name: nginx-deployment
        image: 10.0.0.40/kubernetes/nginx:1.15.12
        ports:
        - containerPort: 80
        volumeMounts:
        - name: nginx-index
          mountPath: /usr/share/nginx/html/
      volumes:
      - name: nginx-index
        nfs:
          path: /application/container_data
          server: 10.0.0.40
[root@k8s-master ~]# kubectl apply -f nginx-deployment.yaml
#service/nginx-deployment unchanged
#deployment.extensions/nginx-deployment configured
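As before, a curl against the NodePort should return the file from the NFS share:

[root@k8s-master ~]# curl http://10.0.0.20:3000
# container mount NFS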
Browser access via the Nginx proxy: (Windows Chrome)