# Download the latest stable kubectl release binary for linux/amd64.
# Resolve the version first and quote the substitution so an unexpected
# (empty / whitespace-containing) response cannot mangle the URL.
KUBECTL_VERSION="$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)"
curl -LO "https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
chmod +x ./kubectl
mv ./kubectl /usr/local/bin/kubectl
# Enable bash shell autocompletion for kubectl.
# Guard the append so re-running this snippet does not duplicate the line.
grep -qxF 'source <(kubectl completion bash)' ~/.bashrc || echo "source <(kubectl completion bash)" >> ~/.bashrc
[root@k8s ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"7", GitVersion:"v1.7.2", GitCommit:"922a86cfcd65915a9b2f69f3f193b8907d741d9c", GitTreeState:"clean", BuildDate:"2017-07-21T08:23:22Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}
The connection to the server localhost:8080 was refused - did you specify the right host or port?
# 配置代理,kubeadm有部分请求应该也是需要走代理的(前面用脚本安装过multinode on docker的经历猜测的)
# Proxy configuration: kubeadm makes outbound requests (image pulls etc.)
# that must go through the proxy, while cluster-internal traffic must NOT.
# Export BOTH upper- and lower-case variants — tools disagree on which
# case they honor (curl reads lowercase; many Go programs read either).
export NO_PROXY="localhost,127.0.0.1,10.0.0.0/8,192.168.191.138"
export no_proxy="$NO_PROXY"
export https_proxy=http://192.168.191.138:8118/
export http_proxy=http://192.168.191.138:8118/
export HTTPS_PROXY="$https_proxy"
export HTTP_PROXY="$http_proxy"
# 使用reset重置,网络代理的配置修改了多次(kubeadm初始化过程失败过),还有前几次的初始化没有配置pod地址段
[root@k8s ~]# kubeadm reset
[preflight] Running pre-flight checks
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Removing kubernetes-managed containers
[reset] Deleting contents of stateful directories: [/var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/lib/etcd]
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
# 使用flannel需要指定pod的网卡地址段(文档要整体看一遍才能少踩坑,囧)
[root@k8s ~]# kubeadm init --skip-preflight-checks --pod-network-cidr=10.244.0.0/16
[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.
[init] Using Kubernetes version: v1.7.2
[init] Using Authorization modes: [Node RBAC]
[preflight] Skipping pre-flight checks
[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --token-ttl 0)
[certificates] Generated CA certificate and key.
[certificates] Generated API server certificate and key.
[certificates] API Server serving cert is signed for DNS names [k8s kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.191.138]
[certificates] Generated API server kubelet client certificate and key.
[certificates] Generated service account token signing key and public key.
[certificates] Generated front-proxy CA certificate and key.
[certificates] Generated front-proxy client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[apiclient] Created API client, waiting for the control plane to become ready
<-> 这里会停得比较久,要去下载镜像,然后还得启动容器
[apiclient] All control plane components are healthy after 293.004469 seconds
[token] Using token: 2af779.b803df0b1effb3d9
[apiconfig] Created RBAC rules
[addons] Applied essential addon: kube-proxy
[addons] Applied essential addon: kube-dns
Your Kubernetes master has initialized successfully!
To start using your cluster, you need to run (as a regular user):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
http://kubernetes.io/docs/admin/addons/
You can now join any number of machines by running the following on each node
as root:
kubeadm join --token 2af779.b803df0b1effb3d9 192.168.191.138:6443
[root@k8s ~]#
# master机器已安装httpd服务
[root@k8s html]# ln -s /var/cache/yum/x86_64/7/kubernetes/packages/ k8s
[root@k8s k8s]# createrepo .
# 把镜像全部拷到worker节点
[root@k8s ~]# docker save $( echo $( docker images | grep -v REPOSITORY | awk '{print $1}' ) ) | ssh worker1 docker load
# 配置私有仓库源
[root@worker1 yum.repos.d]# vi k8s.repo
[k8s]
name=Kubernetes
baseurl=http://master/k8s
enabled=1
gpgcheck=0
[root@worker1 yum.repos.d]# yum list | grep k8s
kubeadm.x86_64 1.7.2-0 k8s
kubectl.x86_64 1.7.2-0 k8s
kubelet.x86_64 1.7.2-0 k8s
kubernetes-cni.x86_64 0.5.1-0 k8s
[root@worker1 yum.repos.d]# yum install -y kubelet kubeadm
# 修改cgroup-driver
[root@worker1 yum.repos.d]# vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
[root@worker1 yum.repos.d]#
[root@worker1 yum.repos.d]# service docker restart
Redirecting to /bin/systemctl restart docker.service
[root@worker1 yum.repos.d]# systemctl daemon-reload
[root@worker1 yum.repos.d]# systemctl enable kubelet.service
[root@worker1 yum.repos.d]# service kubelet restart
Redirecting to /bin/systemctl restart kubelet.service
# worker节点加入集群(初始化)
[root@worker1 yum.repos.d]# kubeadm join --token 2af779.b803df0b1effb3d9 192.168.191.138:6443 --skip-preflight-checks
[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.
[preflight] Skipping pre-flight checks
[discovery] Trying to connect to API Server "192.168.191.138:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://192.168.191.138:6443"
[discovery] Cluster info signature and contents are valid, will use API Server "https://192.168.191.138:6443"
[discovery] Successfully established connection with API Server "192.168.191.138:6443"
[bootstrap] Detected server version: v1.7.2
[bootstrap] The server supports the Certificates API (certificates.k8s.io/v1beta1)
[csr] Created API client to obtain unique certificate for this node, generating keys and certificate signing request
[csr] Received signed certificate from the API server, generating KubeConfig...
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
Node join complete:
* Certificate signing request sent to master and response
received.
* Kubelet informed of new secure connection details.
Run 'kubectl get nodes' on the master to see this machine join.
[root@k8s ~]# kubectl get nodes
NAME STATUS AGE VERSION
k8s Ready 10h v1.7.2
worker1 Ready 57s v1.7.2
# Flush the old NAT rules and remove the default docker0 bridge so that
# Docker can be re-pointed at the flannel/CNI-managed cni0 bridge below.
iptables -t nat -F
# A link must be brought down before it can be deleted.
ip link set docker0 down
ip link delete docker0
[root@worker1 ~]# cat /usr/lib/systemd/system/docker.service | grep dockerd
ExecStart=/usr/bin/dockerd --bridge=cni0 --ip-masq=false
> Aug 01 08:36:10 k8s dockerd[943]: time="2017-08-01T08:36:10.017266292+08:00" level=fatal msg="Error starting daemon: Error initializing network controller: Error creating default \"bridge\" network: bridge device with non default name cni0 must be created manually"
# Create the cni0 bridge by hand: dockerd refuses to auto-create a bridge
# with a non-default name (see the fatal error above).
ip link add name cni0 type bridge
ip link set dev cni0 mtu 1460
# Let flannel assign the bridge IP address, so the manual assignment stays
# commented out. NOTE(review): 1460 MTU presumably accounts for overlay
# encapsulation overhead — confirm against the flannel backend in use.
# ip addr add $NODE_X_BRIDGE_ADDR dev cni0
ip link set dev cni0 up
# Restart Docker (now using --bridge=cni0) and the kubelet to pick it up.
systemctl restart docker kubelet
https://kubernetes.io/docs/admin/kubeadm/
kubeadm installs add-on components via the API server. Right now this is the internal DNS server and the kube-proxy DaemonSet.
https://github.com/kubernetes/kubernetes/issues/34101
Ok, so it turns out that this flag is not enough, we still have an issue reaching kubernetes service IP. The simplest solution to this is to run kube-proxy with --proxy-mode=userspace. To enable this, you can use kubectl -n kube-system edit ds kube-proxy-amd64 && kubectl -n kube-system delete pods -l name=kube-proxy-amd64.