| role   | ip             |
| ------ | -------------- |
| master | 10.125.254.104 |
| worker | 10.125.254.252 |
| worker | 10.125.254.120 |
wget https://github.com/kubernetes/kubernetes/releases/download/v1.10.4/kubernetes.tar.gz
Dependency packages for every node:
yum install -y conntrack ipvsadm ipset jq iptables curl sysstat
tar xf kubernetes.tar.gz -C /data
cd /data/kubernetes/cluster/
./get-kube-binaries.sh
cd /data/kubernetes/server
tar xf kubernetes-server-linux-amd64.tar.gz
vi /etc/profile.d/k8s.sh
export PATH=$PATH:/data/kubernetes/bin/
yum install golang docker -y
Disable the swap partition on all machines:
swapon -s   # show current swap usage
swapoff -a  # disable swap
swapon -a   # re-enable swap
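swapoff -a only lasts until the next reboot. To keep swap disabled persistently, you can also comment out the swap entry in /etc/fstab, e.g. with a one-liner (a minimal sketch; check the result before rebooting):
sed -i '/ swap / s/^/#/' /etc/fstab   # comment out every fstab line that mounts swap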
cat /usr/lib/systemd/system/docker.service
[Service]
Type=notify
NotifyAccess=all
EnvironmentFile=-/run/containers/registries.conf
EnvironmentFile=-/etc/sysconfig/docker
EnvironmentFile=-/etc/sysconfig/docker-storage
EnvironmentFile=-/etc/sysconfig/docker-network
# The next two EnvironmentFile lines are added for flannel (systemd does not allow trailing comments):
EnvironmentFile=-/run/flannel/docker
EnvironmentFile=-/run/flannel/subnet.env
Environment=GOTRACEBACK=crash
Environment=http_proxy=http://proxy.xxxx
Environment=https_proxy=http://proxy.xxxxx
Environment=DOCKER_HTTP_HOST_COMPAT=1
Environment=PATH=/usr/libexec/docker:/usr/bin:/usr/sbin
ExecStart=/usr/bin/dockerd-current \
  --graph=/data/docker \
  --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current \
  --default-runtime=docker-runc \
  --exec-opt native.cgroupdriver=systemd \
  --userland-proxy-path=/usr/libexec/docker/docker-proxy-current \
  --init-path=/usr/libexec/docker/docker-init-current \
  --seccomp-profile=/etc/docker/seccomp.json \
  $OPTIONS \
  $DOCKER_STORAGE_OPTIONS \
  $DOCKER_NETWORK_OPTIONS \
  $ADD_REGISTRY \
  $BLOCK_REGISTRY \
  $INSECURE_REGISTRY \
  $REGISTRIES
ExecReload=/bin/kill -s HUP $MAINPID
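After changing the unit file, reload systemd and restart docker so the new options take effect, and confirm the cgroup driver matches what kubelet will use later:
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
docker info | grep -i cgroup   # should report: Cgroup Driver: systemd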
- Concepts
The certificates and private keys to be generated:
ca-key.pem
ca.pem
kubernetes-key.pem
kubernetes.pem
kube-proxy.pem
kube-proxy-key.pem
admin.pem
admin-key.pem
The components use the certificates as follows:
etcd: uses ca.pem, kubernetes.pem, kubernetes-key.pem
kube-apiserver: uses ca.pem, kubernetes.pem, kubernetes-key.pem
kubelet: uses ca.pem
kube-proxy: uses ca.pem, kube-proxy.pem, kube-proxy-key.pem
kubectl: uses ca.pem, admin.pem, admin-key.pem
All certificate generation is done on the master node.
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
mv cfssl_linux-amd64 /data/kubernetes/bin/cfssl
mv cfssljson_linux-amd64 /data/kubernetes/bin/cfssljson
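A quick sanity check that the binaries are installed and on PATH:
cfssl version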
- Create the CA certificate
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
Field notes: ca-config.json can define multiple profiles, each with its own expiry and usage scenarios.
Parameter meanings:
1. signing: this certificate can be used to sign other certificates
2. server auth: a client can use this CA to verify certificates presented by servers
3. client auth: a server can use this CA to verify certificates presented by clients
cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
Field notes:
1. "CN": Common Name; kube-apiserver extracts this field from the certificate and uses it as the requesting User Name
2. "O": Organization; kube-apiserver extracts this field and uses it as the Group the requesting user belongs to
Generate the CA certificate and private key:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls
ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem
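Optionally inspect the freshly generated CA certificate, e.g. to confirm the 10-year (87600h) validity:
openssl x509 -noout -text -in ca.pem | grep -A 2 Validity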
- Create the kubernetes certificate
# The IP addresses of all three etcd machines must be listed in "hosts"
cat > kubernetes-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "10.125.254.104",
    "10.125.254.252",
    "10.125.254.120",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
- Create the admin certificate and private key
cat > admin-csr.json << EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
Field notes: CN is the user name, O is the group name.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
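To confirm that CN and O were embedded as intended (admin in the system:masters group), inspect the subject:
openssl x509 -noout -subject -in admin.pem   # expect O=system:masters and CN=admin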
- Create the kube-proxy certificate
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
All certificates have now been generated.
We can verify the kubernetes certificate:
openssl x509 -noout -text -in kubernetes.pem
Confirm that DNS:kubernetes, DNS:kubernetes.default, and DNS:kubernetes.default.svc appear in the output; only then is the certificate correct.
Copy the certificates to the target directory:
mkdir -p /etc/kubernetes/ssl/
cp *.pem /etc/kubernetes/ssl/
cd /etc/kubernetes/ssl/
scp * <node-ip>:/etc/kubernetes/ssl/   # repeat for every node server
Update the system certificate store:
yum install ca-certificates -y
update-ca-trust
- Download the installation package
wget https://github.com/coreos/etcd/releases/download/v3.3.7/etcd-v3.3.7-linux-amd64.tar.gz
tar xf etcd-v3.3.7-linux-amd64.tar.gz
mv etcd-v3.3.7-linux-amd64/etcd* /usr/bin/
mkdir /var/lib/etcd/
- Add the systemd unit file (on the other two machines, use etcd02/etcd03 as the name and each machine's own IPs)
vi /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name=etcd01 \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://10.125.254.104:2380 \
  --listen-peer-urls=https://10.125.254.104:2380 \
  --listen-client-urls=https://10.125.254.104:2379,https://127.0.0.1:2379 \
  --advertise-client-urls=https://10.125.254.104:2379 \
  --initial-cluster-token=etcd-cluster-0 \
  --initial-cluster=etcd01=https://10.125.254.104:2380,etcd02=https://10.125.254.252:2380,etcd03=https://10.125.254.120:2380 \
  --initial-cluster-state=new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
- Start and verify
systemctl daemon-reload
systemctl restart etcd
etcdctl --endpoints=https://127.0.0.1:2379 --ca-file /etc/kubernetes/ssl/ca.pem --cert-file /etc/kubernetes/ssl/kubernetes.pem --key-file /etc/kubernetes/ssl/kubernetes-key.pem member list
curl --cacert /etc/kubernetes/ssl/ca.pem --cert /etc/kubernetes/ssl/kubernetes.pem --key /etc/kubernetes/ssl/kubernetes-key.pem https://127.0.0.1:2379/health
- Backup
# ETCDCTL_API=3 must be set on the same command line (or exported); otherwise etcdctl keeps using the v2 API
ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert="/etc/kubernetes/ssl/ca.pem" --cert="/etc/kubernetes/ssl/kubernetes.pem" --key="/etc/kubernetes/ssl/kubernetes-key.pem" snapshot save snapshot.db
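For completeness, the matching restore command is sketched below; the target data directory is an example, point etcd's --data-dir at it afterwards:
ETCDCTL_API=3 etcdctl snapshot restore snapshot.db --data-dir=/var/lib/etcd-restore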
- Create the kubectl config
export KUBE_APISERVER="https://10.125.254.104:6443"
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=${KUBE_APISERVER}
kubectl config set-credentials admin --client-certificate=/etc/kubernetes/ssl/admin.pem --embed-certs=true --client-key=/etc/kubernetes/ssl/admin-key.pem
kubectl config set-context kubernetes --cluster=kubernetes --user=admin
kubectl config use-context kubernetes
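A quick check that the config was written as expected (the apiserver itself is set up later):
kubectl config current-context   # should print: kubernetes
kubectl config view              # embedded certificate data is masked in the output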
- Create the kubelet bootstrap config
export KUBE_APISERVER="https://10.125.254.104:6443"
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
- Create the kube-proxy config
export KUBE_APISERVER="https://10.125.254.104:6443"
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem --embed-certs=true --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
scp *.kubeconfig ip1:/etc/kubernetes/ssl/   # ip1/ip2 are the two node IPs; the kubelet unit below expects the files here
scp *.kubeconfig ip2:/etc/kubernetes/ssl/
## flannel (only the node machines need it)
- Download flanneld
wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
mkdir -p /data/k8s/bin
tar xf flannel-v0.10.0-linux-amd64.tar.gz -C /data/k8s/bin flanneld mk-docker-opts.sh   # the unit file below expects both here
mkdir /run/flannel
- Create the network key in etcd
etcdctl --endpoints=https://10.125.254.104:2379,https://10.125.254.252:2379,https://10.125.254.120:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem mkdir /kubernetes/network
flannel supports several backend modes (host-gw, vxlan, aws-vpc, etc.); here we use vxlan:
etcdctl --endpoints=https://10.125.254.104:2379,https://10.125.254.252:2379,https://10.125.254.120:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem mk /kubernetes/network/config '{"Network":"192.168.0.0/16", "Backend": { "Type": "vxlan"}}'
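Read the key back to confirm it was written (same endpoints and TLS flags as above):
etcdctl --endpoints=https://10.125.254.104:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem get /kubernetes/network/config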
- Create the systemd unit file
cat /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
ExecStart=/data/k8s/bin/flanneld \
  -etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  -etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  -etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
  -etcd-endpoints=https://10.125.254.104:2379,https://10.125.254.252:2379,https://10.125.254.120:2379 \
  -etcd-prefix=/kubernetes/network \
  -iface=eth0
ExecStartPost=/data/k8s/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
- Install kubernetes-cni
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
mkdir -p /etc/cni/net.d
mkdir -p /opt/cni/bin
tar xfz cni-plugins-amd64-v0.7.1.tgz -C /opt/cni/bin
cat > /etc/cni/net.d/10-flannel.conf << EOF
{
  "name": "cbr0",
  "type": "flannel",
  "delegate": {
    "isDefaultGateway": true,
    "forceAddress": true,
    "bridge": "docker0",
    "mtu": 1500
  }
}
EOF
cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
sysctl -p /etc/sysctl.d/k8s.conf
- Start
systemctl daemon-reload
systemctl start flanneld
etcdctl ls /kubernetes/network/subnets
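Each node should now hold a subnet lease. flanneld also writes the environment file consumed by the docker unit; example contents (the lease differs per node, and vxlan overhead lowers the MTU):
cat /run/flannel/subnet.env
# FLANNEL_NETWORK=192.168.0.0/16
# FLANNEL_SUBNET=192.168.34.1/24
# FLANNEL_MTU=1450
# FLANNEL_IPMASQ=false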
- Create the basic auth file for the apiserver
cat > /etc/kubernetes/basic_auth_file << EOF
admin,admin,1002
EOF
cat /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
# Note: --admission-control is deprecated in favor of --enable-admission-plugins,
# and --insecure-bind-address will be deprecated in the next version.
ExecStart=/data/k8s/bin/kube-apiserver \
  --enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --advertise-address=10.125.254.104 \
  --bind-address=0.0.0.0 \
  --insecure-bind-address=0.0.0.0 \
  --kubelet-https=true \
  --runtime-config=rbac.authorization.k8s.io/v1beta1 \
  --authorization-mode=RBAC,Node \
  --enable-bootstrap-token-auth \
  --token-auth-file=/etc/kubernetes/ssl/token.csv \
  --service-cluster-ip-range=10.254.0.0/16 \
  --service-node-port-range=30000-32767 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
  --etcd-servers=https://10.125.254.104:2379,https://10.125.254.252:2379,https://10.125.254.120:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --anonymous-auth=false \
  --basic-auth-file=/etc/kubernetes/basic_auth_file \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/lib/audit.log \
  --event-ttl=1h \
  --v=2 \
  --logtostderr=false
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
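Reload systemd and start the apiserver, then confirm the secure port is listening:
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
ss -tlnp | grep 6443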
cat /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/data/k8s/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.254.0.0/16 \
  --cluster-cidr=192.168.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
cat /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/data/k8s/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
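Start both components the same way:
systemctl daemon-reload
systemctl enable kube-controller-manager kube-scheduler
systemctl start kube-controller-manager kube-scheduler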
Bind the kubelet-bootstrap user from token.csv to the system:node-bootstrapper role so nodes can request certificates:
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
kubectl get clusterrolebinding
mkdir /var/lib/kubelet
cat /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
# --address/--hostname-override must be each node's own IP.
# --max-pods caps how many pods this kubelet will run (replace xxx with a real number).
ExecStart=/data/k8s/bin/kubelet \
  --address=10.125.254.252 \
  --hostname-override=10.125.254.252 \
  --pod-infra-container-image=gcr.io/google_containers/pause:latest \
  --bootstrap-kubeconfig=/etc/kubernetes/ssl/bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/ssl/kubelet.kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --hairpin-mode promiscuous-bridge \
  --allow-privileged=true \
  --serialize-image-pulls=false \
  --cgroup-driver=systemd \
  --cluster-dns=10.254.0.2 \
  --cluster-domain=cluster.local \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d/ \
  --cni-bin-dir=/opt/cni/bin \
  --network-plugin-mtu=1500 \
  --logtostderr=false \
  --fail-swap-on=false \
  --v=2 \
  --max-pods=xxx \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
- Start
Run on each node: systemctl start kubelet
kubelet.kubeconfig does not exist before the first start; it is generated automatically once the CSR has been approved. If a node already has a ~/.kube/config file, you can copy it to the target path and rename it kubelet.kubeconfig; all node machines share one kubelet.kubeconfig file.
Run on the master:
kubectl get csr
kubectl certificate approve node-csr-FpuhKQtu4VQKETRgkZTGn96Kkub7SD5XzCUgrLeHWsI
kubectl get node
NAME STATUS ROLES AGE VERSION
10.125.254.120 Ready <none> 14s v1.10.4
10.125.254.252 Ready <none> 10m v1.10.4
kubectl get componentstatuses
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-1 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
mkdir /var/lib/kube-proxy   # the WorkingDirectory below must exist
cat /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/data/k8s/bin/kube-proxy \
  --bind-address=10.125.254.252 \
  --hostname-override=10.125.254.252 \
  --cluster-cidr=192.168.0.0/16 \
  --kubeconfig=/etc/kubernetes/ssl/kube-proxy.kubeconfig \
  --logtostderr=false \
  --log-dir=/var/log/ \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
clusterCIDR must match the --cluster-cidr option of kube-controller-manager (hence 192.168.0.0/16 above, the pod network, not the service range). kube-proxy uses --cluster-cidr to distinguish traffic inside the cluster from traffic outside it; only when --cluster-cidr or --masquerade-all is specified will kube-proxy SNAT requests that access Service IPs.
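Then start kube-proxy on each node:
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy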
cd /data/install/kubernetes/server/kubernetes/gci-trusty/dns   # this ships with the k8s release tarball
vi coredns.yaml.in
Modify the following lines (the numbers are line numbers inside coredns.yaml.in):
61 kubernetes cluster.local in-addr.arpa ip6.arpa {
153 clusterIP: 10.254.0.2
kubectl create -f coredns.yaml.in
Test:
cat > my-nginx.yaml <<EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: my-nginx
spec:
  replicas: 2
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
EOF
kubectl create -f my-nginx.yaml
kubectl expose deploy my-nginx
kubectl get services --all-namespaces |grep my-nginx
cat > pod-nginx.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx:1.7.9
    ports:
    - containerPort: 80
EOF
kubectl create -f pod-nginx.yaml
kubectl exec nginx -i -t -- /bin/bash
cat /etc/resolv.conf
ping my-nginx    # what matters is that the name resolves to the service ClusterIP; the IP itself may not answer ICMP
ping kubernetes
cat kubernetes-dashboard.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          - --apiserver-host=http://10.125.254.104:8080
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - nodePort: 30030
    port: 443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
kubectl create -f kubernetes-dashboard.yaml
As noted above, the Dashboard supports only token authentication by default, so a KubeConfig file used for login must have a token embedded in it; client-certificate authentication is not supported.
KUBE_APISERVER=https://10.125.254.104:6443
kubectl create sa dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
echo ${DASHBOARD_LOGIN_TOKEN}
Create a KubeConfig file that uses the token
# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=dashboard.kubeconfig
# Set client credentials, using the token created above
kubectl config set-credentials dashboard_user \
--token=${DASHBOARD_LOGIN_TOKEN} \
--kubeconfig=dashboard.kubeconfig
# Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=dashboard_user \
--kubeconfig=dashboard.kubeconfig
# Switch to the default context
kubectl config use-context default --kubeconfig=dashboard.kubeconfig
You can then log in with dashboard.kubeconfig.
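Because the Service above is exposed as a NodePort (30030), the login page is reachable on any node, for example https://10.125.254.252:30030/ (accept the self-signed certificate generated by --auto-generate-certificates).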
## Troubleshooting
- Watch subnets: failed to retrieve subnet leases: context canceled
vi /usr/lib/systemd/system/docker.service
EnvironmentFile=/run/flannel/docker
ExecStart = ... \
  $REGISTRIES \
  $DOCKER_NETWORK_OPTIONS
After adding these lines, docker starts.
Sync flanneld, mk-docker-opts.sh, and the unit files to the other nodes.
- Failed at step CHDIR spawning /data/k8s/bin/kubelet: No such file or directory
The directory /var/lib/kubelet referenced by WorkingDirectory=/var/lib/kubelet does not exist. Fix:
mkdir /var/lib/kubelet
systemctl start kubelet
- Kubelet: failed to create kubelet: misconfiguration: kubelet cgroup driver: "systemd" is different
The cause: docker is using the cgroupfs driver while kubelet is configured for systemd, so kubelet fails to start. docker info shows docker's current cgroup driver. Remove and reinstall docker:
yum remove docker -y
yum install docker docker-devel -y