[root@master01 k8s]# cd /mnt/ //进入宿主机挂载目录
[root@master01 mnt]# ls
etcd-cert etcd-v3.3.10-linux-amd64.tar.gz k8s-cert.sh master.zip
etcd-cert.sh flannel.sh kubeconfig.sh node.zip
etcd.sh flannel-v0.10.0-linux-amd64.tar.gz kubernetes-server-linux-amd64.tar.gz
[root@master01 mnt]# cp master.zip /root/k8s/ //复制压缩包到k8s工作目录
[root@master01 mnt]# cd /root/k8s/ //进入k8s工作目录
[root@master01 k8s]# ls
cfssl.sh etcd-v3.3.10-linux-amd64 kubernetes-server-linux-amd64.tar.gz
etcd-cert etcd-v3.3.10-linux-amd64.tar.gz master.zip
etcd.sh flannel-v0.10.0-linux-amd64.tar.gz
[root@master01 k8s]# unzip master.zip //解压压缩包
Archive: master.zip
inflating: apiserver.sh
inflating: controller-manager.sh
inflating: scheduler.sh
[root@master01 k8s]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p //在master01中创建工作目录,之前在node节点中同样也创建过工作目录
[root@master01 k8s]# mkdir k8s-cert //创建自签证书目录
[root@master01 k8s]# cp /mnt/k8s-cert.sh /root/k8s/k8s-cert //将挂载的自签证书脚本复制到k8s工作目录中的自签证书目录(cp 为复制,/mnt 中的原文件仍保留)
[root@master01 k8s]# cd k8s-cert //进入目录
[root@master01 k8s-cert]# vim k8s-cert.sh //编辑拷贝过来的脚本文件
...
cat > server-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"192.168.80.12", //更改地址为master01的IP地址
"192.168.80.11", //添加地址为master02的IP地址,为之后我们要做的多节点做准备
"192.168.80.100", //添加vrrp地址,为之后要做的负载均衡做准备
"192.168.80.13", //更改地址为node01节点IP地址
"192.168.80.14", //更改地址为node02节点IP地址
(注意:以上 // 注释仅为本文说明,JSON 本身不支持注释,实际编辑 k8s-cert.sh 时 server-csr.json 中不能保留这些注释,否则 cfssl 解析会失败)
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
...
:wq
[root@master01 k8s-cert]# bash k8s-cert.sh //执行脚本,生成证书
2020/02/10 10:59:17 [INFO] generating a new CA key and certificate from CSR
2020/02/10 10:59:17 [INFO] generate received request
2020/02/10 10:59:17 [INFO] received CSR
2020/02/10 10:59:17 [INFO] generating key: rsa-2048
2020/02/10 10:59:17 [INFO] encoded CSR
2020/02/10 10:59:17 [INFO] signed certificate with serial number 10087572098424151492431444614087300651068639826
2020/02/10 10:59:17 [INFO] generate received request
2020/02/10 10:59:17 [INFO] received CSR
2020/02/10 10:59:17 [INFO] generating key: rsa-2048
2020/02/10 10:59:17 [INFO] encoded CSR
2020/02/10 10:59:17 [INFO] signed certificate with serial number 125779224158375570229792859734449149781670193528
2020/02/10 10:59:17 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2020/02/10 10:59:17 [INFO] generate received request
2020/02/10 10:59:17 [INFO] received CSR
2020/02/10 10:59:17 [INFO] generating key: rsa-2048
2020/02/10 10:59:17 [INFO] encoded CSR
2020/02/10 10:59:17 [INFO] signed certificate with serial number 328087687681727386760831073265687413205940136472
2020/02/10 10:59:17 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2020/02/10 10:59:17 [INFO] generate received request
2020/02/10 10:59:17 [INFO] received CSR
2020/02/10 10:59:17 [INFO] generating key: rsa-2048
2020/02/10 10:59:18 [INFO] encoded CSR
2020/02/10 10:59:18 [INFO] signed certificate with serial number 525069068228188747147886102005817997066385735072
2020/02/10 10:59:18 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@master01 k8s-cert]# ls *pem //查看 会生成8个证书相关文件(4张证书及对应的4个私钥)
admin-key.pem admin.pem ca-key.pem ca.pem kube-proxy-key.pem kube-proxy.pem server-key.pem server.pem
[root@master01 k8s-cert]# cp ca*pem server*pem /opt/kubernetes/ssl/ //将ca和server的证书及私钥复制到k8s工作目录下ssl目录中
配置apiserver
[root@master01 k8s-cert]# cd .. //回到k8s工作目录
[root@master01 k8s]# tar zxvf kubernetes-server-linux-amd64.tar.gz //解压软件包
kubernetes/
kubernetes/server/
kubernetes/server/bin/
...
[root@master01 k8s]# cd kubernetes/server/bin/ //进入解压后软件命令存放目录
[root@master01 bin]# ls
apiextensions-apiserver kube-apiserver.docker_tag kube-proxy
cloud-controller-manager kube-apiserver.tar kube-proxy.docker_tag
cloud-controller-manager.docker_tag kube-controller-manager kube-proxy.tar
cloud-controller-manager.tar kube-controller-manager.docker_tag kube-scheduler
hyperkube kube-controller-manager.tar kube-scheduler.docker_tag
kubeadm kubectl kube-scheduler.tar
kube-apiserver kubelet mounter
[root@master01 bin]# cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/ //复制关键命令文件到k8s工作目录的bin目录中
[root@master01 bin]# cd /root/k8s/
[root@master01 k8s]# head -c 16 /dev/urandom | od -An -t x | tr -d ' ' //生成一个序列号
c37758077defd4033bfe95a071689272
[root@master01 k8s]# vim /opt/kubernetes/cfg/token.csv //创建token.csv文件,可以理解为创建一个管理性的角色
c37758077defd4033bfe95a071689272,kubelet-bootstrap,10001,"system:kubelet-bootstrap" //指定用户角色身份,前面的序列号使用生成的序列号
:wq
[root@master01 k8s]# bash apiserver.sh 192.168.80.12 https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379 //二进制文件,token,证书都准备好,执行apiserver脚本,同时生成配置文件
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
[root@master01 k8s]# ps aux | grep kube //检查进程是否启动成功
root 17088 8.7 16.7 402260 312192 ? Ssl 11:17 0:08 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379 --bind-address=192.168.80.12 --secure-port=6443 --advertise-address=192.168.80.12 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root 17101 0.0 0.0 112676 980 pts/0 S+ 11:19 0:00 grep --color=auto kube
[root@master01 k8s]# cat /opt/kubernetes/cfg/kube-apiserver //查看生成的配置文件
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379 \
--bind-address=192.168.80.12 \
--secure-port=6443 \
--advertise-address=192.168.80.12 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
[root@master01 k8s]# netstat -ntap | grep 6443 //查看监听的端口是否开启
tcp 0 0 192.168.80.12:6443 0.0.0.0:* LISTEN 17088/kube-apiserve
tcp 0 0 192.168.80.12:48320 192.168.80.12:6443 ESTABLISHED 17088/kube-apiserve
tcp 0 0 192.168.80.12:6443 192.168.80.12:48320 ESTABLISHED 17088/kube-apiserve
[root@master01 k8s]# netstat -ntap | grep 8080 //查看监听的端口是否开启
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 17088/kube-apiserve
[root@master01 k8s]# ./scheduler.sh 127.0.0.1 //直接执行脚本,启动服务,并生成配置文件即可(若提示权限不足,需先 chmod +x scheduler.sh 添加执行权限)
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@master01 k8s]# systemctl status kube-scheduler.service //查看服务运行状态
● kube-scheduler.service - Kubernetes Scheduler
Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
Active: active (running) since 一 2020-02-10 11:22:13 CST; 2min 46s ago //成功运行
Docs: https://github.com/kubernetes/kubernetes
...
[root@master01 k8s]# chmod +x controller-manager.sh //添加脚本执行权限
[root@master01 k8s]# ./controller-manager.sh 127.0.0.1 //执行脚本,启动服务,并生成配置文件
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@master01 k8s]# systemctl status kube-controller-manager.service //查看运行状态
● kube-controller-manager.service - Kubernetes Controller Manager
Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
Active: active (running) since 一 2020-02-10 11:28:21 CST; 7min ago //成功运行
...
[root@master01 k8s]# /opt/kubernetes/bin/kubectl get cs //查看节点运行状态
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-2 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
免责声明:本站发布的内容(图片、视频和文字)以原创、转载和分享为主,文章观点不代表本网站立场,如果涉及侵权请联系站长邮箱:is@yisu.com进行举报,并提供相关证据,一经查实,将立刻删除涉嫌侵权内容。