Kubernetes 1.15.0 High Availability (keepalived + haproxy)

Published: 2020-06-16 | Author: zyy123 | Category: Cloud Computing

1. Master High Availability

A single Master node and a single etcd instance are both single points of failure. To remove them, the control plane is made highly available across three masters, with a stacked etcd member on each so that etcd data stays consistent, fronted by keepalived and haproxy.
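For reference, the hosts and addresses used throughout this walkthrough (collected from the commands below; cluster.kube.com is simply the name chosen for the control-plane endpoint):

172.16.216.228  master01        keepalived MASTER, priority 250
172.16.216.229  master02        keepalived BACKUP, priority 249
172.16.216.230  master03        keepalived BACKUP, priority 248
172.16.216.234  cluster-node1   worker
172.16.216.235  cluster-node2   worker
172.16.216.30   VIP held by keepalived; haproxy listens on it at :16443 and forwards to the kube-apiservers on :6443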

# Set the hostname (run the matching command on each of the three machines)
[root@localhost ~]# hostnamectl set-hostname master01
[root@localhost ~]# hostnamectl set-hostname master02
[root@localhost ~]# hostnamectl set-hostname master03
# Generate an SSH key on master01 and copy it to the other masters
[root@localhost ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:6H0xzKWAv63KofmN8wNlt93tO/Asbl6WDICBCYhvcds root@master01
The key's randomart image is:
+---[RSA 2048]----+
|  . ... o.       |
| . o . +  o      |
|  . o + .. ..    |
|   o . Eo+.o.    |
|  .   .oS.*o o . |
|     ... o.o..+ o|
|      o.o o   +* |
|     +.+.o   oo+.|
|    o.=++.  +o..o|
+----[SHA256]-----+
[root@localhost ~]# ssh-copy-id root@172.16.216.229
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host '172.16.216.229 (172.16.216.229)' can't be established.
ECDSA key fingerprint is SHA256:RSjZGjpxNF+3FfNVScnO7si+ixmb5cvjEQChMZANJl8.
ECDSA key fingerprint is MD5:91:c5:3d:0a:22:4a:51:9b:b6:57:04:c8:f4:10:df:fd.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@172.16.216.229's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@172.16.216.229'"
and check to make sure that only the key(s) you wanted were added.

[root@localhost ~]# ssh-copy-id root@172.16.216.230
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host '172.16.216.230 (172.16.216.230)' can't be established.
ECDSA key fingerprint is SHA256:RSjZGjpxNF+3FfNVScnO7si+ixmb5cvjEQChMZANJl8.
ECDSA key fingerprint is MD5:91:c5:3d:0a:22:4a:51:9b:b6:57:04:c8:f4:10:df:fd.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@172.16.216.230's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@172.16.216.230'"
and check to make sure that only the key(s) you wanted were added.

# Add the host entries (same content in /etc/hosts on all three masters)
[root@localhost ~]# vim /etc/hosts
172.16.216.228 master01 master01.linuxplus.com
172.16.216.229 master02 master02.linuxplus.com
172.16.216.230 master03 master03.linuxplus.com
172.16.216.234 cluster-node1
172.16.216.235 cluster-node2

# Reboot each of the three machines
[root@localhost ~]# reboot

Deploy keepalived

# Enable IP forwarding on each of the three servers
[root@master01 ~]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_forward = 1
> EOF
[root@master01 ~]# sysctl -p
net.ipv4.ip_forward = 1
[root@master02 ~]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_forward = 1
> EOF
[root@master02 ~]# sysctl -p
net.ipv4.ip_forward = 1
[root@master03 ~]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_forward = 1
> EOF
[root@master03 ~]# sysctl -p
net.ipv4.ip_forward = 1
# Install keepalived on each of the three servers
[root@master01 ~]# yum install -y keepalived
[root@master02 ~]# yum install -y keepalived
[root@master03 ~]# yum install -y keepalived
# Configure keepalived. master01 is the VRRP MASTER (priority 250); master02 and master03
# are BACKUPs (priorities 249 and 248). The check_haproxy script lowers a node's priority
# when haproxy stops responding, so the VIP 172.16.216.30 fails over to the next master.
[root@master01 ~]# cd /etc/keepalived/
[root@master01 keepalived]# vim keepalived.conf
! Configuration File for keepalived

global_defs {
        router_id LVS_DEVEL
}

vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
}

vrrp_instance VI_1 {
        state MASTER
        interface ens33
        virtual_router_id 51
        priority 250
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 35f18af7190d51c9f7f78f37300a0cbd
        }
        virtual_ipaddress {
            172.16.216.30/24 dev ens33
        }
        track_script {
           check_haproxy
        }
}
[root@master02 ~]# cd /etc/keepalived/
[root@master02 keepalived]# vim keepalived.conf
! Configuration File for keepalived

global_defs {
        router_id LVS_DEVEL
}

vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
}

vrrp_instance VI_1 {
        state BACKUP
        interface ens33
        virtual_router_id 51
        priority 249
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 35f18af7190d51c9f7f78f37300a0cbd
        }
        virtual_ipaddress {
            172.16.216.30/24 dev ens33
        }
        track_script {
           check_haproxy
        }
}
[root@master03 ~]# cd /etc/keepalived/
[root@master03 keepalived]# vim keepalived.conf
! Configuration File for keepalived

global_defs {
        router_id LVS_DEVEL
}

vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
}

vrrp_instance VI_1 {
        state BACKUP
        interface ens33
        virtual_router_id 51
        priority 248
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 35f18af7190d51c9f7f78f37300a0cbd
        }
        virtual_ipaddress {
            172.16.216.30/24 dev ens33
        }
        track_script {
           check_haproxy
        }
}
# Start the service and check its status
//Master01
[root@master01 keepalived]# systemctl enable keepalived.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@master01 keepalived]# systemctl start keepalived.service 
[root@master01 keepalived]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 21:53:50 CST; 5s ago
  Process: 45326 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 45327 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─45327 /usr/sbin/keepalived -D
           ├─45328 /usr/sbin/keepalived -D
           └─45329 /usr/sbin/keepalived -D

Jun 30 21:53:50 master01 Keepalived_vrrp[45329]: VRRP sockpool: [ifindex(2), proto(112), unicast(0), fd(10,11)]
Jun 30 21:53:51 master01 Keepalived_vrrp[45329]: VRRP_Instance(VI_1) Transition to MASTER STATE
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: VRRP_Instance(VI_1) Entering MASTER STATE
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: VRRP_Instance(VI_1) setting protocol VIPs.
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: Sending gratuitous ARP on ens33 for 172.16.216.30
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: VRRP_Instance(VI_1) Sending/queueing gratuitous ARPs on ens33 for 172.16.216.30
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: Sending gratuitous ARP on ens33 for 172.16.216.30
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: Sending gratuitous ARP on ens33 for 172.16.216.30
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: Sending gratuitous ARP on ens33 for 172.16.216.30
Jun 30 21:53:52 master01 Keepalived_vrrp[45329]: Sending gratuitous ARP on ens33 for 172.16.216.30
[root@master01 keepalived]# ip address show ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:37:f5:ef brd ff:ff:ff:ff:ff:ff
    inet 172.16.216.228/24 brd 172.16.216.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 172.16.216.30/24 scope global secondary ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe37:f5ef/64 scope link 
       valid_lft forever preferred_lft forever
//Master02
[root@master02 keepalived]# systemctl enable keepalived.service
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@master02 keepalived]# systemctl start keepalived.service
[root@master02 keepalived]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 21:54:09 CST; 3s ago
  Process: 45054 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 45055 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─45055 /usr/sbin/keepalived -D
           ├─45056 /usr/sbin/keepalived -D
           └─45057 /usr/sbin/keepalived -D

Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Registering gratuitous ARP shared channel
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Opening file '/etc/keepalived/keepalived.conf'.
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: WARNING - default user 'keepalived_script' for script execution does not exist - please create.
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Truncating auth_pass to 8 characters
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Cannot find script killall in path
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Disabling track script check_haproxy since not found
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: VRRP_Instance(VI_1) removing protocol VIPs.
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: Using LinkWatch kernel netlink reflector...
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: VRRP_Instance(VI_1) Entering BACKUP STATE
Jun 30 21:54:09 master02 Keepalived_vrrp[45057]: VRRP sockpool: [ifindex(2), proto(112), unicast(0), fd(10,11)]
[root@master02 keepalived]# ip address show ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:dd:8b:2b brd ff:ff:ff:ff:ff:ff
    inet 172.16.216.229/24 brd 172.16.216.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fedd:8b2b/64 scope link 
       valid_lft forever preferred_lft forever
//Master03
[root@master03 keepalived]# systemctl enable keepalived.service
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@master03 keepalived]# systemctl start keepalived.service
[root@master03 keepalived]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 21:54:22 CST; 3s ago
  Process: 42102 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 42103 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─42103 /usr/sbin/keepalived -D
           ├─42104 /usr/sbin/keepalived -D
           └─42105 /usr/sbin/keepalived -D

Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: WARNING - default user 'keepalived_script' for script execution does not exist - please create.
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: Truncating auth_pass to 8 characters
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: Cannot find script killall in path
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: Disabling track script check_haproxy since not found
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: VRRP_Instance(VI_1) removing protocol VIPs.
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: Using LinkWatch kernel netlink reflector...
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: VRRP_Instance(VI_1) Entering BACKUP STATE
Jun 30 21:54:22 master03 Keepalived_vrrp[42105]: VRRP sockpool: [ifindex(2), proto(112), unicast(0), fd(10,11)]
Jun 30 21:54:22 master03 Keepalived_healthcheckers[42104]: Initializing ipvs
Jun 30 21:54:22 master03 Keepalived_healthcheckers[42104]: Opening file '/etc/keepalived/keepalived.conf'.
[root@master03 keepalived]# ip address show ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:6d:08:5b brd ff:ff:ff:ff:ff:ff
    inet 172.16.216.230/24 brd 172.16.216.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe6d:85b/64 scope link 
       valid_lft forever preferred_lft forever
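
The status output on master02 and master03 shows two warnings: "Cannot find script killall in path" (which disables the check_haproxy track script) and a missing 'keepalived_script' user. A minimal fix, assuming CentOS 7 package names, is to install psmisc (which provides killall) and create the expected unprivileged user on all three masters:

# Run on all three masters, then restart keepalived
[root@master01 ~]# yum install -y psmisc
[root@master01 ~]# useradd -r -s /sbin/nologin keepalived_script
[root@master01 ~]# systemctl restart keepalived.service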

Install and configure HAProxy

[root@master01 keepalived]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_nonlocal_bind = 1
> EOF
[root@master01 keepalived]# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1
[root@master02 keepalived]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_nonlocal_bind = 1
> EOF
[root@master02 keepalived]# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1
[root@master03 keepalived]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_nonlocal_bind = 1
> EOF
[root@master03 keepalived]# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1
[root@master01 ~]# yum install -y haproxy
[root@master02 ~]# yum install -y haproxy
[root@master03 ~]# yum install -y haproxy
# Edit the configuration file: haproxy listens on *:16443 and load-balances to the three kube-apiservers on port 6443
[root@master01 ~]# cd /etc/haproxy/
[root@master01 haproxy]# vim haproxy.cfg
#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     40000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxys to the backends
# --------------------------------------------------------------------
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:16443
    option               tcplog
    default_backend      kubernetes-apiserver
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server  master01  172.16.216.228:6443 check
    server  master02  172.16.216.229:6443 check
    server  master03  172.16.216.230:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind               *:1080
    stats auth         admin:awesomePassword
    stats refresh      5s
    stats realm        HAProxy\ Statistics
    stats uri          /admin?stats
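Before starting the service, the file can optionally be validated; haproxy's -c flag parses the configuration without starting the proxy:

[root@master01 haproxy]# haproxy -c -f /etc/haproxy/haproxy.cfg
Configuration file is valid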
[root@master01 haproxy]# systemctl enable haproxy.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@master01 haproxy]# systemctl start haproxy.service 
[root@master01 haproxy]# systemctl status haproxy.service 
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 22:27:15 CST; 6s ago
 Main PID: 80058 (haproxy-systemd)
   CGroup: /system.slice/haproxy.service
           ├─80058 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.p...
           ├─80059 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
           └─80060 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

Jun 30 22:27:15 master01 systemd[1]: Started HAProxy Load Balancer.
Jun 30 22:27:15 master01 systemd[1]: Starting HAProxy Load Balancer...
Jun 30 22:27:15 master01 haproxy-systemd-wrapper[80058]: haproxy-systemd-wrapper: executing /usr...Ds
Jun 30 22:27:15 master01 haproxy-systemd-wrapper[80058]: [WARNING] 180/222715 (80059) : config :...e.
Jun 30 22:27:15 master01 haproxy-systemd-wrapper[80058]: [WARNING] 180/222715 (80059) : config :...e.
Hint: Some lines were ellipsized, use -l to show in full.
[root@master01 haproxy]# ss -lnt |grep -E "16443|1080"
LISTEN     0      128          *:16443                    *:*                  
LISTEN     0      128          *:1080                     *:*     
[root@master01 haproxy]# scp haproxy.cfg root@172.16.216.229:/etc/haproxy/
haproxy.cfg                                                        100% 4320     2.2MB/s   00:00    
[root@master01 haproxy]# scp haproxy.cfg root@172.16.216.230:/etc/haproxy/
haproxy.cfg                                                        100% 4320     3.3MB/s   00:00    
[root@master02 keepalived]# systemctl enable haproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@master02 keepalived]# systemctl start haproxy
[root@master02 keepalived]# systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 22:28:39 CST; 9s ago
 Main PID: 80834 (haproxy-systemd)
   CGroup: /system.slice/haproxy.service
           ├─80834 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.p...
           ├─80839 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
           └─80843 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

Jun 30 22:28:39 master02 systemd[1]: Started HAProxy Load Balancer.
Jun 30 22:28:39 master02 systemd[1]: Starting HAProxy Load Balancer...
Jun 30 22:28:39 master02 haproxy-systemd-wrapper[80834]: haproxy-systemd-wrapper: executing /usr...Ds
Jun 30 22:28:39 master02 haproxy-systemd-wrapper[80834]: [WARNING] 180/222839 (80839) : config :...e.
Jun 30 22:28:39 master02 haproxy-systemd-wrapper[80834]: [WARNING] 180/222839 (80839) : config :...e.
Hint: Some lines were ellipsized, use -l to show in full.
[root@master02 keepalived]# ss -lnt |grep -E "16443|1080"
LISTEN     0      128          *:16443                    *:*                  
LISTEN     0      128          *:1080                     *:*  
[root@master03 keepalived]# systemctl enable haproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@master03 keepalived]# systemctl start haproxy
[root@master03 keepalived]# systemctl status haproxy.service 
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-06-30 22:30:08 CST; 16s ago
 Main PID: 82314 (haproxy-systemd)
   CGroup: /system.slice/haproxy.service
           ├─82314 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.p...
           ├─82315 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
           └─82316 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

Jun 30 22:30:08 master03 systemd[1]: Started HAProxy Load Balancer.
Jun 30 22:30:08 master03 systemd[1]: Starting HAProxy Load Balancer...
Jun 30 22:30:08 master03 haproxy-systemd-wrapper[82314]: haproxy-systemd-wrapper: executing /usr...Ds
Jun 30 22:30:08 master03 haproxy-systemd-wrapper[82314]: [WARNING] 180/223008 (82315) : config :...e.
Jun 30 22:30:08 master03 haproxy-systemd-wrapper[82314]: [WARNING] 180/223008 (82315) : config :...e.
Hint: Some lines were ellipsized, use -l to show in full.
[root@master03 keepalived]# ss -lnt |grep -E "16443|1080"
LISTEN     0      128          *:16443                    *:*                  
LISTEN     0      128          *:1080                     *:*
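
With keepalived and haproxy running on all three masters, failover can be exercised before building the cluster. A rough check, assuming the killall/psmisc fix above so the check_haproxy track script actually runs: stop haproxy on master01, and the grep on master02 should show the VIP once master01 has been demoted (with interval 3 and fall 10 this takes roughly 30 seconds); the VIP returns to master01 after haproxy is started again:

[root@master01 ~]# systemctl stop haproxy.service
[root@master02 ~]# ip address show ens33 | grep 172.16.216.30
[root@master01 ~]# systemctl start haproxy.service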

Install and configure Kubernetes

#------------------ System configuration --------------------------------
[root@master01 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master01 ~]# sysctl --system
[root@master02 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master02 ~]# sysctl --system
[root@master03 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master03 ~]# sysctl --system
#--------------- Install Docker -----------------------------------
[root@master01 ~]# yum install -y docker
[root@master01 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@master01 ~]# systemctl start docker
[root@master02 ~]# yum install -y docker
[root@master02 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@master02 ~]# systemctl start docker
[root@master03 ~]# yum install -y docker
[root@master03 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@master03 ~]# systemctl start docker
#------------------------- Configure the Kubernetes yum repository ----------------------------------------
[root@master01 ~]# cat <<EOF >/etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[root@master01 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@master01 ~]# systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

[root@master02 ~]# cat <<EOF >/etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[root@master02 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@master02 ~]# systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

[root@master03 ~]# cat <<EOF >/etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[root@master03 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@master03 ~]# systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
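The three installs above pull whatever kubelet/kubeadm/kubectl versions are newest in the Aliyun mirror. Since the kubeadm configuration in the next step targets v1.15.0, pinning the packages avoids a version mismatch later (illustrative, using yum's version pinning):

[root@master01 ~]# yum install -y kubelet-1.15.0 kubeadm-1.15.0 kubectl-1.15.0 --disableexcludes=kubernetes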
#------------------------- Create the kubeadm configuration file ----------------------------------
[root@master01 ~]# cat > kubeadm-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.15.0
apiServer:
  certSANs:
    - "cluster.kube.com"
controlPlaneEndpoint: "cluster.kube.com:16443"
networking:
   podSubnet: "10.244.0.0/16"
EOF
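controlPlaneEndpoint points at cluster.kube.com:16443, so that name has to resolve to the keepalived VIP on every master and worker. The transcript does not show how this is provided; one simple option, assuming no DNS record exists, is an /etc/hosts entry on each machine:

[root@master01 ~]# echo "172.16.216.30 cluster.kube.com" >> /etc/hosts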
#------------------------------- Disable swap (comment out the swap line in /etc/fstab) -----------------------------------------
[root@master01 ~]# swapoff -a
[root@master01 ~]# vim /etc/fstab
#
# /etc/fstab
# Created by anaconda on Sun Aug  5 13:54:05 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=5c427701-1abf-4d52-821d-b7bdc68ed358 /boot                   xfs     defaults        0 0
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0

[root@master02 ~]# swapoff -a
[root@master02 ~]# vim /etc/fstab
#
# /etc/fstab
# Created by anaconda on Sun Aug  5 13:54:05 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=5c427701-1abf-4d52-821d-b7bdc68ed358 /boot                   xfs     defaults        0 0
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0

[root@master03 ~]# swapoff -a
[root@master03 ~]# vim /etc/fstab
#
# /etc/fstab
# Created by anaconda on Sun Aug  5 13:54:05 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=5c427701-1abf-4d52-821d-b7bdc68ed358 /boot                   xfs     defaults        0 0
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0
#------------------------- Initialize the first control-plane node on master01 ----------------------------------
[root@master01 ~]# kubeadm init --config kubeadm-config.yaml
[init] Using Kubernetes version: v1.15.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master01 localhost] and IPs [172.16.216.228 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master01 localhost] and IPs [172.16.216.228 127.0.0.1 ::1]
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.216.228 172.16.216.30 172.16.216.30]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.503336 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: z37llz.huyi3c5j1l3tt1uz
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities 
and service account keys on each node and then running the following as root:

  kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz \
    --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481 \
    --experimental-control-plane          

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz \
    --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481 
[root@master01 ~]# mkdir -p $HOME/.kube
[root@master01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
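At this point kubectl on master01 talks to the apiserver through the load-balanced endpoint; a quick sanity check (output abridged and illustrative):

[root@master01 ~]# kubectl cluster-info
Kubernetes master is running at https://cluster.kube.com:16443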
#--------------------------- Deploy the pod network (Calico) ---------------------------------------------
[root@master01 ~]# kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
[root@master01 ~]# kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
configmap/calico-config created
service/calico-typha created
deployment.apps/calico-typha created
poddisruptionbudget.policy/calico-typha created
daemonset.extensions/calico-node created
serviceaccount/calico-node created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
[root@master01 ~]# kubectl get pods -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE
calico-node-qblqp                  2/2     Running   2          3m30s
coredns-5c98db65d4-d2lgs           0/1     Running   1          32m
coredns-5c98db65d4-hc22b           1/1     Running   1          32m
etcd-master01                      1/1     Running   1          31m
kube-apiserver-master01            1/1     Running   1          31m
kube-controller-manager-master01   1/1     Running   2          31m
kube-proxy-zvdsk                   1/1     Running   1          32m
kube-scheduler-master01            1/1     Running   2          31m
#---------------------------------- Copy the certificates and keys to the other masters -----------------------------------
[root@master01 ~]# ssh root@master02 mkdir -p /etc/kubernetes/pki/etcd
[root@master01 ~]# scp /etc/kubernetes/admin.conf root@master02:/etc/kubernetes/
admin.conf                                                                          100% 5449     2.5MB/s   00:00 
[root@master01 ~]# scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@master02:/etc/kubernetes/pki/
ca.crt                                                                              100% 1025   197.7KB/s   00:00    
ca.key                                                                              100% 1675   522.0KB/s   00:00    
sa.key                                                                              100% 1679   413.0KB/s   00:00    
sa.pub                                                                              100%  451   187.2KB/s   00:00    
front-proxy-ca.crt                                                                  100% 1038   579.2KB/s   00:00    
front-proxy-ca.key                                                                  100% 1675   872.1KB/s   00:00    
[root@master01 ~]# scp /etc/kubernetes/pki/etcd/ca.* root@master02:/etc/kubernetes/pki/etcd/
ca.crt                                                                              100% 1017   663.4KB/s   00:00    
ca.key                                                                              100% 1679     1.1MB/s   00:00    
[root@master01 ~]# ssh root@master03 mkdir -p /etc/kubernetes/pki/etcd
[root@master01 ~]# scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@master03:/etc/kubernetes/pki/
ca.crt                                                                              100% 1025   582.8KB/s   00:00    
ca.key                                                                              100% 1675   960.1KB/s   00:00    
sa.key                                                                              100% 1679     1.2MB/s   00:00    
sa.pub                                                                              100%  451   327.9KB/s   00:00    
front-proxy-ca.crt                                                                  100% 1038   781.4KB/s   00:00    
front-proxy-ca.key                                                                  100% 1675     1.3MB/s   00:00    
[root@master01 ~]# scp /etc/kubernetes/pki/etcd/ca.* root@master03:/etc/kubernetes/pki/etcd/
ca.crt                                                                              100% 1017   579.2KB/s   00:00    
ca.key                                                                              100% 1679   739.3KB/s   00:00    
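Copying the CA material by hand works, but kubeadm 1.15 can also distribute it itself: initializing with --upload-certs stores the certificates encrypted in a Secret and prints a certificate key, which the other masters pass to kubeadm join together with --control-plane. A sketch of that alternative flow (placeholders for the token, hash and key; not what this transcript used):

[root@master01 ~]# kubeadm init --config kubeadm-config.yaml --upload-certs
[root@master02 ~]# kubeadm join cluster.kube.com:16443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <certificate-key>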
#------------------- Join the remaining master nodes to the cluster ---------------------------------
[root@master02 ~]# kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481 --experimental-control-plane
Flag --experimental-control-plane has been deprecated, use --control-plane instead
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master02 localhost] and IPs [172.16.216.229 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master02 localhost] and IPs [172.16.216.229 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master02 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.216.229 172.16.216.30 172.16.216.30]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[mark-control-plane] Marking the node master02 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master02 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.
[root@master02 ~]# mkdir -p $HOME/.kube
[root@master02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@master03 ~]# kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481 --experimental-control-plane
Flag --experimental-control-plane has been deprecated, use --control-plane instead
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master03 localhost] and IPs [172.16.216.230 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master03 localhost] and IPs [172.16.216.230 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master03 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.216.230 172.16.216.30 172.16.216.30]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[mark-control-plane] Marking the node master03 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master03 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.
[root@master03 ~]# mkdir -p $HOME/.kube
[root@master03 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master03 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@master01 ~]# kubectl get pods -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE
calico-node-7k75b                  2/2     Running   0          2m42s
calico-node-c2mj9                  2/2     Running   0          115s
calico-node-qblqp                  2/2     Running   2          20m
coredns-5c98db65d4-d2lgs           1/1     Running   1          49m
coredns-5c98db65d4-hc22b           1/1     Running   1          49m
etcd-master01                      1/1     Running   1          48m
etcd-master02                      1/1     Running   0          2m40s
etcd-master03                      1/1     Running   0          115s
kube-apiserver-master01            1/1     Running   1          48m
kube-apiserver-master02            1/1     Running   0          102s
kube-apiserver-master03            1/1     Running   1          111s
kube-controller-manager-master01   1/1     Running   3          48m
kube-controller-manager-master02   1/1     Running   0          99s
kube-controller-manager-master03   1/1     Running   0          48s
kube-proxy-k8zxw                   1/1     Running   0          115s
kube-proxy-m68b4                   1/1     Running   0          2m42s
kube-proxy-zvdsk                   1/1     Running   1          49m
kube-scheduler-master01            1/1     Running   3          48m
kube-scheduler-master02            1/1     Running   0          105s
kube-scheduler-master03            1/1     Running   0          49s

[root@master01 ~]# kubectl get nodes
NAME            STATUS   ROLES    AGE    VERSION
cluster-node1   Ready    <none>   2d4h   v1.15.0
cluster-node2   Ready    <none>   2d4h   v1.15.0
master01        Ready    master   2d6h   v1.15.0
master02        Ready    master   2d5h   v1.15.0
master03        Ready    master   2d5h   v1.15.0

2. Node Installation and Configuration

[root@localhost ~]# hostnamectl set-hostname cluster-node1
[root@localhost ~]# logout
[root@localhost ~]# hostnamectl set-hostname cluster-node2
[root@localhost ~]# logout
[root@master01 ~]# scp /etc/hosts root@cluster-node1:/etc/
The authenticity of host 'cluster-node1 (172.16.216.234)' can't be established.
ECDSA key fingerprint is SHA256:RSjZGjpxNF+3FfNVScnO7si+ixmb5cvjEQChMZANJl8.
ECDSA key fingerprint is MD5:91:c5:3d:0a:22:4a:51:9b:b6:57:04:c8:f4:10:df:fd.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'cluster-node1,172.16.216.234' (ECDSA) to the list of known hosts.
root@cluster-node1's password: 
hosts                                                                                 100%  389   140.9KB/s   00:00    
[root@master01 ~]# scp /etc/hosts root@cluster-node2:/etc/
The authenticity of host 'cluster-node2 (172.16.216.235)' can't be established.
ECDSA key fingerprint is SHA256:RSjZGjpxNF+3FfNVScnO7si+ixmb5cvjEQChMZANJl8.
ECDSA key fingerprint is MD5:91:c5:3d:0a:22:4a:51:9b:b6:57:04:c8:f4:10:df:fd.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'cluster-node2,172.16.216.235' (ECDSA) to the list of known hosts.
root@cluster-node2's password: 
hosts                                                                                 100%  389    17.7KB/s   00:00  
[root@cluster-node1 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@cluster-node1 ~]# sysctl --system
[root@cluster-node1 ~]# yum install -y docker
[root@cluster-node1 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@cluster-node1 ~]# systemctl start docker

[root@cluster-node2 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@cluster-node2 ~]# sysctl --system
[root@cluster-node2 ~]# yum install -y docker
[root@cluster-node2 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@cluster-node2 ~]# systemctl start docker
#--------------------------------------------------------------------------
[root@cluster-node1 ~]# cat <<EOF >/etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[root@cluster-node2 ~]# cat <<EOF >/etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[root@cluster-node1 ~]# swapoff -a
[root@cluster-node1 ~]# vim /etc/fstab
#
# /etc/fstab
# Created by anaconda on Sun Aug  5 13:54:05 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=5c427701-1abf-4d52-821d-b7bdc68ed358 /boot                   xfs     defaults        0 0
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0
[root@cluster-node1 ~]# docker pull mirrorgooglecontainers/kube-proxy:v1.15.0
Trying to pull repository docker.io/mirrorgooglecontainers/kube-proxy ... 
v1.15.0: Pulling from docker.io/mirrorgooglecontainers/kube-proxy
6cf6a0b0da0d: Already exists 
8e1ce322a1d9: Pull complete 
b593bfa65f6f: Pull complete 
Digest: sha256:63b8aaf1697550f318e9b46e5a7fc019f1d86912f1f3c9d9070bd00aaa361d0b
[root@cluster-node1 ~]# docker pull mirrorgooglecontainers/pause:3.1
Trying to pull repository docker.io/mirrorgooglecontainers/pause ... 
3.1: Pulling from docker.io/mirrorgooglecontainers/pause
67ddbfb20a22: Pull complete 
Digest: sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
Status: Downloaded newer image for docker.io/mirrorgooglecontainers/pause:3.1
[root@cluster-node1 ~]# docker pull coredns/coredns:1.3.1
Trying to pull repository docker.io/coredns/coredns ... 
1.3.1: Pulling from docker.io/coredns/coredns
Digest: sha256:02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4
Status: Image is up to date for docker.io/coredns/coredns:1.3.1
[root@cluster-node1 ~]# docker tag docker.io/mirrorgooglecontainers/kube-proxy:v1.15.0 k8s.gcr.io/kube-proxy:v1.15.0
[root@cluster-node1 ~]# docker tag docker.io/mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1
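The coredns image pulled above is never retagged in the transcript; if the kubelet on this node later fails to pull k8s.gcr.io/coredns, the same retagging approach applies (only needed while k8s.gcr.io is unreachable):

[root@cluster-node1 ~]# docker tag docker.io/coredns/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1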
[root@cluster-node1 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@cluster-node1 ~]# systemctl enable kubelet.service
[root@cluster-node1 ~]# systemctl enable --now kubelet

[root@cluster-node1 ~]# echo 1 > /proc/sys/net/ipv4/ip_forward
[root@cluster-node1 ~]# kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
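
The ip_forward value above was written straight to /proc and will not survive a reboot; to make it persistent, the same sysctl.conf approach used on the masters can be repeated on both workers:

[root@cluster-node1 ~]# cat >> /etc/sysctl.conf << EOF
> net.ipv4.ip_forward = 1
> EOF
[root@cluster-node1 ~]# sysctl -p
net.ipv4.ip_forward = 1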

[root@cluster-node2 ~]# swapoff -a
[root@cluster-node2 ~]# vim /etc/fstab
#
# /etc/fstab
# Created by anaconda on Sun Aug  5 13:54:05 2018
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=5c427701-1abf-4d52-821d-b7bdc68ed358 /boot                   xfs     defaults        0 0
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0

[root@cluster-node2 ~]# docker pull mirrorgooglecontainers/kube-proxy:v1.15.0
Trying to pull repository docker.io/mirrorgooglecontainers/kube-proxy ... 
v1.15.0: Pulling from docker.io/mirrorgooglecontainers/kube-proxy
6cf6a0b0da0d: Already exists 
8e1ce322a1d9: Pull complete 
b593bfa65f6f: Pull complete 
Digest: sha256:63b8aaf1697550f318e9b46e5a7fc019f1d86912f1f3c9d9070bd00aaa361d0b
Status: Downloaded newer image for docker.io/mirrorgooglecontainers/kube-proxy:v1.15.0
[root@cluster-node2 ~]# docker pull mirrorgooglecontainers/pause:3.1
Trying to pull repository docker.io/mirrorgooglecontainers/pause ... 
3.1: Pulling from docker.io/mirrorgooglecontainers/pause
67ddbfb20a22: Pull complete 
Digest: sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
Status: Downloaded newer image for docker.io/mirrorgooglecontainers/pause:3.1
[root@cluster-node2 ~]# docker tag docker.io/mirrorgooglecontainers/kube-proxy:v1.15.0 k8s.gcr.io/kube-proxy:v1.15.0
[root@cluster-node2 ~]# docker tag docker.io/mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1
[root@cluster-node2 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@cluster-node2 ~]# systemctl enable kubelet.service
[root@cluster-node2 ~]# systemctl enable --now kubelet

[root@cluster-node2 sysctl.d]# echo 1 > /proc/sys/net/ipv4/ip_forward
[root@cluster-node2 sysctl.d]# kubeadm join 172.16.216.30:6443 --token z37llz.huyi3c5j1l3tt1uz --discovery-token-ca-cert-hash sha256:a61ce60107cb929f65416b31b6ae95299c90e482f95aac25cf1d42700ab36481
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
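
As a final check from any master, the two workers should report Ready and the calico-node and kube-proxy pods should be running on them (illustrative; pod names and ages will differ):

[root@master01 ~]# kubectl get nodes
[root@master01 ~]# kubectl get pods -n kube-system -o wide | grep cluster-node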

