
Kubernetes Cluster Setup: A First Attempt


2019/7/12 kubernetes docker
  • This article documents my first installation attempt and is not necessarily a reliable reference.

# Operating System Requirements

[root@localhost ~]# uname -a
Linux localhost.localdomain 3.10.0-514.6.1.el7.x86_64 #1 SMP Wed Jan 18 13:06:36 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux
[root@localhost ~]# cat /etc/redhat-release 
CentOS Linux release 7.3.1611 (Core)

# Node Allocation

| Roles | Hostname | IP |
| --- | --- | --- |
| kube-apiserver, kube-controller-manager, kube-scheduler, docker, etcd, registry, flannel | k8s-master | 10.1.1.100 |
| kubelet, kube-proxy, docker, flannel | k8s-node-1 | 10.1.1.101 |
| kubelet, kube-proxy, docker, flannel | k8s-node-2 | 10.1.1.102 |

# Basic Configuration

hostnamectl --static set-hostname k8s-master    # on 10.1.1.100
hostnamectl --static set-hostname k8s-node-1    # on 10.1.1.101
hostnamectl --static set-hostname k8s-node-2    # on 10.1.1.102
cat /etc/hosts
10.1.1.100    k8s-master
10.1.1.100    etcd
10.1.1.100    registry
10.1.1.101    k8s-node-1
10.1.1.102    k8s-node-2
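
The same entries must be present in /etc/hosts on the master and on both nodes. One way to append them in a single step (a sketch; adjust if some entries already exist):

cat >> /etc/hosts <<'EOF'
10.1.1.100    k8s-master
10.1.1.100    etcd
10.1.1.100    registry
10.1.1.101    k8s-node-1
10.1.1.102    k8s-node-2
EOF
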
systemctl disable firewalld.service
systemctl stop firewalld.service

# All Machines: Install Docker and Kubernetes

yum update -y
yum remove -y docker
yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum -y install docker kubernetes
cat /etc/docker/daemon.json
{ "insecure-registries":["registry:5000"] }
systemctl start docker
systemctl enable docker
docker version
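
The /etc/docker/daemon.json shown above must exist on every machine so that Docker accepts the plain-HTTP private registry. A minimal way to write it (same content as above; restart Docker afterwards if it is already running):

mkdir -p /etc/docker
echo '{ "insecure-registries":["registry:5000"] }' > /etc/docker/daemon.json
systemctl restart docker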

# Master: Start the Private Registry

docker run -d -p 5000:5000 --name=registry --restart=always --privileged=true  --log-driver=none -v /home/data/registrydata:/tmp/registry registry
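
To confirm the registry is reachable (from any machine with the hosts entry above), the Docker Registry v2 catalog API can be queried; a fresh registry returns an empty repository list:

curl http://registry:5000/v2/_catalog
{"repositories":[]}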

# Overseas Machine: Pull the Blocked Images, Save Them, and Transfer Them Back

docker pull gcr.io/google_containers/kubernetes-dashboard-amd64:v1.7.1
docker pull registry.access.redhat.com/rhel7/pod-infrastructure:latest
docker pull gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.4
docker pull myhub.fdccloud.com/library/kubedns-amd64:1.9
docker pull myhub.fdccloud.com/library/kube-dnsmasq-amd64:1.4
docker pull myhub.fdccloud.com/library/dnsmasq-metrics-amd64:1.0
docker pull myhub.fdccloud.com/library/exechealthz-amd64:1.2

docker save gcr.io/google_containers/kubernetes-dashboard-amd64:v1.7.1 > kubernetes-dashboard-amd64.tar
docker save registry.access.redhat.com/rhel7/pod-infrastructure:latest > pod-infrastructure.tar
docker save gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.4 > k8s-dns-kube-dns-amd64.tar
docker save myhub.fdccloud.com/library/kubedns-amd64:1.9  > kubedns-amd64.tar
docker save myhub.fdccloud.com/library/kube-dnsmasq-amd64:1.4  > kube-dnsmasq-amd64.tar
docker save myhub.fdccloud.com/library/dnsmasq-metrics-amd64:1.0  > dnsmasq-metrics-amd64.tar
docker save myhub.fdccloud.com/library/exechealthz-amd64:1.2  > exechealthz-amd64.tar
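
The saved .tar files then need to be copied to node-1. Assuming the node is reachable from wherever the tarballs ended up, scp is the simplest option (the target path here is just an example):

scp ./*.tar root@10.1.1.101:/root/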

# node-1: Import the Images

docker load < kubernetes-dashboard-amd64.tar
docker load < pod-infrastructure.tar
docker load < k8s-dns-kube-dns-amd64.tar
docker load < kubedns-amd64.tar
docker load < kube-dnsmasq-amd64.tar
docker load < dnsmasq-metrics-amd64.tar
docker load < exechealthz-amd64.tar
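
A quick check that all seven images were imported:

docker images | egrep 'dashboard|pod-infrastructure|dns|exechealthz'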

# node-1: Tag the Images and Push Them to the Private Registry

docker tag registry.access.redhat.com/rhel7/pod-infrastructure:latest registry:5000/pod-infrastructure:latest
docker push registry:5000/pod-infrastructure:latest

docker tag gcr.io/google_containers/kubernetes-dashboard-amd64:v1.7.1 registry:5000/kubernetes-dashboard-amd64:v1.7.1
docker push registry:5000/kubernetes-dashboard-amd64:v1.7.1

docker tag gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.4 registry:5000/k8s-dns-kube-dns-amd64:1.14.4
docker push registry:5000/k8s-dns-kube-dns-amd64:1.14.4

docker tag myhub.fdccloud.com/library/kubedns-amd64:1.9 registry:5000/kubedns-amd64:1.9
docker push registry:5000/kubedns-amd64:1.9

docker tag myhub.fdccloud.com/library/kube-dnsmasq-amd64:1.4 registry:5000/kube-dnsmasq-amd64:1.4
docker push registry:5000/kube-dnsmasq-amd64:1.4

docker tag myhub.fdccloud.com/library/dnsmasq-metrics-amd64:1.0 registry:5000/dnsmasq-metrics-amd64:1.0
docker push registry:5000/dnsmasq-metrics-amd64:1.0

docker tag myhub.fdccloud.com/library/exechealthz-amd64:1.2 registry:5000/exechealthz-amd64:1.2
docker push registry:5000/exechealthz-amd64:1.2
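
After the pushes, the registry should list all seven repositories, and the tags of any one of them can be inspected the same way (example shown for pod-infrastructure):

curl http://registry:5000/v2/_catalog
curl http://registry:5000/v2/pod-infrastructure/tags/list
{"name":"pod-infrastructure","tags":["latest"]}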

# node-2: Verify Pulling Images from the Private Registry

docker pull registry:5000/pod-infrastructure:latest
docker pull registry:5000/kubernetes-dashboard-amd64:v1.7.1
docker pull registry:5000/k8s-dns-kube-dns-amd64:1.14.4
docker pull registry:5000/kubedns-amd64:1.9
docker pull registry:5000/kube-dnsmasq-amd64:1.4
docker pull registry:5000/dnsmasq-metrics-amd64:1.0
docker pull registry:5000/exechealthz-amd64:1.2

# Master: Deploy, Configure, Start, and Check etcd

yum install etcd -y
[root@iZuf63961c1lfkvsm4shneZ ~]# egrep -v '^#' /etc/etcd/etcd.conf | grep -v '^$'
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
ETCD_NAME="default"
ETCD_ADVERTISE_CLIENT_URLS="http://etcd:2379,http://etcd:4001"
[root@iZuf63961c1lfkvsm4shneZ ~]# 
[root@iZuf63961c1lfkvsm4shneZ ~]# systemctl start etcd
[root@iZuf63961c1lfkvsm4shneZ ~]# etcdctl set testdir/testkey0 0
0
[root@iZuf63961c1lfkvsm4shneZ ~]# etcdctl get testdir/testkey0
0
[root@iZuf63961c1lfkvsm4shneZ ~]# etcdctl -C http://etcd:4001 cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://etcd:2379
cluster is healthy
[root@iZuf63961c1lfkvsm4shneZ ~]# etcdctl -C http://etcd:2379 cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://etcd:2379
cluster is healthy
[root@iZuf63961c1lfkvsm4shneZ ~]# 

# Master: Configure and Start Kubernetes

[root@iZuf63961c1lfkvsm4shneZ ~]# egrep -v '^#' /etc/kubernetes/apiserver | grep -v '^$'
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBE_ETCD_SERVERS="--etcd-servers=http://etcd:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=""
[root@iZuf63961c1lfkvsm4shneZ ~]# egrep -v '^#' /etc/kubernetes/config | grep -v '^$'
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://k8s-master:8080"
[root@iZuf63961c1lfkvsm4shneZ ~]# 
systemctl enable kube-apiserver.service
systemctl start kube-apiserver.service
systemctl enable kube-controller-manager.service
systemctl start kube-controller-manager.service
systemctl enable kube-scheduler.service
systemctl start kube-scheduler.service
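
With the three services running, the control plane can be sanity-checked from the master; in this Kubernetes release the componentstatuses API is available and every entry should report Healthy (output will look roughly like the following):

kubectl -s http://k8s-master:8080 get componentstatuses
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health": "true"}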

# node-1 / node-2: Configure and Start Kubernetes

The configuration below is taken from node-1; on node-2, set --hostname-override=k8s-node-2 instead.

[root@iZuf6apa66r2qwfw57sx8dZ ~]# egrep -v '^#' /etc/kubernetes/config | grep -v '^$'
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://k8s-master:8080"
[root@iZuf6apa66r2qwfw57sx8dZ ~]# egrep -v '^#' /etc/kubernetes/kubelet | grep -v '^$'
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=k8s-node-1"
KUBELET_API_SERVER="--api-servers=http://k8s-master:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS="--cluster_dns=10.254.0.100 --cluster_domain=cluster.local"
[root@iZuf6apa66r2qwfw57sx8dZ ~]# 
systemctl enable kubelet.service
systemctl start kubelet.service
systemctl enable kube-proxy.service
systemctl start kube-proxy.service

# On the Master: View Cluster Nodes and Their Status

[root@k8s-master ~]#  kubectl -s http://k8s-master:8080 get node
NAME         STATUS    AGE
k8s-node-1   Ready     3m
k8s-node-2   Ready     16s
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS    AGE
k8s-node-1   Ready     3m
k8s-node-2   Ready     43s

# Master and Nodes: Install Certificates

Pulling the pod-infrastructure image from registry.access.redhat.com requires /etc/rhsm/ca/redhat-uep.pem, which the python-rhsm-certificates package provides:

yum install -y *rhsm*
wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -iv --to-stdout ./etc/rhsm/ca/redhat-uep.pem | tee /etc/rhsm/ca/redhat-uep.pem

# Master and Nodes: Install Flannel

Create the overlay network (Flannel):

yum install flannel -y

Edit /etc/sysconfig/flanneld on both the master and the nodes:

[root@k8s-master ~]# egrep -v '^#' /etc/sysconfig/flanneld | grep -v '^$'
FLANNEL_ETCD_ENDPOINTS="http://etcd:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
[root@k8s-master ~]# 

On the master, create the flannel network key in etcd:

[root@k8s-master ~]# etcdctl mk /atomic.io/network/config '{ "Network": "10.0.0.0/16" }'
{ "Network": "10.0.0.0/16" }

After starting Flannel, docker and the Kubernetes services need to be restarted in turn.

On the master, run:

systemctl enable flanneld.service 
systemctl start flanneld.service 
service docker restart
systemctl restart kube-apiserver.service
systemctl restart kube-controller-manager.service
systemctl restart kube-scheduler.service
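
Once flanneld is running it writes the subnet it leased for the host to /run/flannel/subnet.env, which docker picks up when it is restarted. The values below are only an example and differ per host:

cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.0.0.0/16
FLANNEL_SUBNET=10.0.66.1/24
FLANNEL_MTU=1472
FLANNEL_IPMASQ=false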

On the nodes, run:

systemctl enable flanneld.service 
systemctl start flanneld.service 
service docker restart
systemctl restart kubelet.service
systemctl restart kube-proxy.service

# Install kube-dns

Install it on the master:

kubectl create -f https://raw.githubusercontent.com/koy1619/k8s_yaml/master/kubedns-rc.yaml
kubectl create -f https://raw.githubusercontent.com/koy1619/k8s_yaml/master/kubedns-svc.yaml

Modify the /etc/kubernetes/kubelet configuration file on each node and add the following line:

KUBELET_ARGS="--cluster_dns=10.254.0.100 --cluster_domain=cluster.local"

Then restart the kubelet on each node:

systemctl restart kubelet
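
With kube-dns running and the kubelets restarted, service-name resolution can be verified from a throwaway pod. This is only a sketch: dns-test is a placeholder name, the busybox image must be pullable on the node, and the pod should be Running before the exec:

kubectl run dns-test --image=busybox --restart=Never -- sleep 3600
kubectl exec dns-test -- nslookup kubernetes.default
kubectl delete pod dns-test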

# Deploy the Dashboard Service

kubectl create -f https://raw.githubusercontent.com/koy1619/k8s_yaml/master/dashboard-controller.yaml
kubectl create -f https://raw.githubusercontent.com/koy1619/k8s_yaml/master/dashboard-service.yaml

# On the Master: View the Pods

kubectl get pod  -o wide  --all-namespaces

# Access the Dashboard

http://10.1.1.100:8080/ui

Some exceptions encountered ...

 [root@k8s-master ~]# kubectl describe pods kubernetes-dashboard-1607234690-3bnk2 --namespace=kube-system

  1s    1s    1 {kubelet k8s-node2}           Warning   FailedSync  Error syncing pod, skipping: failed to "StartContainer" for "kubernetes-dashboard" with CrashLoopBackOff: "Back-off 10s restarting failed container=kubernetes-dashboard pod=kubernetes-dashboard-980055440-l9zl5_kube-system(870d02e1-7810-11e7-9517-000c296a54e8)"
  

[Solution]
[root@k8s-master ~]# vim kubernetes-dashboard.yaml
Change the --apiserver-host argument to the master's IP address; do not use a hostname.
Accessing the Kubernetes dashboard UI reports an error:
Error: 'dial tcp 10.0.66.2:9090: getsockopt: connection timed out'
Trying to reach: 'http://10.0.66.2:9090/'

The cause is broken flannel network communication between the master and the nodes.
Fixes:
1. Check the flannel configuration, then restart flanneld on the master and each node in turn:
systemctl restart flanneld
Then recreate the dashboard:
kubectl create -f dashboard-controller.yaml
kubectl create -f dashboard-service.yaml

2. Run iptables -L -n and check the FORWARD chain on each node; if its policy is DROP, allow forwarding:
iptables -P FORWARD ACCEPT
After that, http://10.0.66.2:9090 can be reached with curl from the master.

References:

https://www.cnblogs.com/zhenyuyaodidiao/p/6500897.html
https://blog.csdn.net/bbwangj/article/details/81701300
https://andrewpqc.github.io/2018/04/23/setup-kubernetes-cluster-with-kubeadm/
https://andrewpqc.github.io/2018/04/24/setup-k8s-dashboard-on-cluster/
https://www.cnblogs.com/djhull/archive/2016/12/02/6125130.html
https://blog.csdn.net/xts_huangxin/article/details/51130223
https://blog.csdn.net/weixin_34054866/article/details/87525597

https://blog.csdn.net/wt334502157/article/details/83992120
https://www.cnblogs.com/allcloud/p/7614123.html
https://hub.docker.com
https://my.oschina.net/u/2539854/blog/3023384
https://www.cnblogs.com/lion.net/p/10408510.html

Memo of commonly used commands

kubectl create -f kubedns-rc.yaml
kubectl create -f kubedns-svc.yaml
kubectl create -f dashboard-controller.yaml
kubectl create -f dashboard-service.yaml

kubectl delete -f dashboard-controller.yaml
kubectl delete -f dashboard-service.yaml
kubectl delete -f kubedns-rc.yaml
kubectl delete -f kubedns-svc.yaml
kubectl -s http://k8s-master:8080 get node
kubectl get pods --namespace kube-system
kubectl get pod  -o wide  --all-namespaces
kubectl describe pod kube-dns-598252076-hks4w --namespace=kube-system
kubectl logs -f kubernetes-dashboard-2106745705-zz1qp -n kube-system
kubectl logs -f kube-dns-2842662617-65n8h -n kube-system -c kubedns
kubectl logs -f kube-dns-2842662617-65n8h -n kube-system -c dnsmasq-metrics
# Delete a node from the master
kubectl delete node node2
# View logs
kubectl logs -f kubernetes-dashboard-2891849301-9g3hb -n kube-system
kubectl logs -f kube-dns-3298054597-lk7gf -n kube-system -c kubedns
kubectl logs -f kube-dns-2842662617-w7t5q -n kube-system -c dnsmasq-metrics
# Exec into a running container
kubectl get pods
kubectl exec -it nginx-3873632884-zhfzt -- /bin/bash
Last Updated: 10/26/2019, 12:10:49 AM