# Distribute the commands to all nodes to create the directory layout
ansible all -m command -a "mkdir -p /export/servers"
ansible all -m command -a "mkdir -p /export/logs"
ansible all -m command -a "mkdir -p /export/data"
ansible all -m command -a "mkdir -p /export/upload"
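The same layout can also be created idempotently with Ansible's file module instead of shelling out to mkdir; an equivalent sketch:

# Idempotent alternative: the file module creates each directory only if missing
ansible all -m file -a "path=/export/servers state=directory"
ansible all -m file -a "path=/export/logs state=directory"
ansible all -m file -a "path=/export/data state=directory"
ansible all -m file -a "path=/export/upload state=directory"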
Disable SELinux, turn off the firewall, and disable swap
Run on the deploy node:
# Disable SELinux
ansible k8s -m command -a "setenforce 0"
ansible k8s -m command -a "sed --follow-symlinks -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config"
# Turn off the firewall
ansible k8s -m command -a "firewall-cmd --set-default-zone=trusted"
ansible k8s -m command -a "firewall-cmd --complete-reload"
# Turn off swap for the running system
ansible k8s -m command -a "swapoff -a"
# Keep swap disabled across reboots
ansible k8s -m shell -a "yes | cp /etc/fstab /etc/fstab_bak"
ansible k8s -m shell -a "cat /etc/fstab_bak | grep -v swap > /etc/fstab"
ansible k8s -m shell -a "echo vm.swappiness = 0 >> /etc/sysctl.d/k8s.conf"
ansible k8s -m shell -a "sysctl -p /etc/sysctl.d/k8s.conf"
If you are curious about the order in which sysctl files are loaded, run sysctl --system to see it.
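For the persistent swappiness setting, Ansible's sysctl module (namespaced ansible.posix.sysctl on newer Ansible releases) is an idempotent alternative to the two shell steps above; a sketch:

# Persist vm.swappiness=0 in /etc/sysctl.d/k8s.conf and reload it in one step
ansible k8s -m sysctl -a "name=vm.swappiness value=0 sysctl_file=/etc/sysctl.d/k8s.conf state=present reload=yes"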
ulimit tuning
ulimit limits the resources available to each user, such as CPU time, memory, and file handles.
Add the following to /etc/security/limits.conf:
* soft memlock unlimited
* hard memlock unlimited
* soft nproc 102400
* hard nproc 102400
* soft nofile 1048576
* hard nofile 1048576
The corresponding ansible command:
ansible all -m shell -a "cat >> /etc/security/limits.conf << EOF
* soft memlock unlimited
* hard memlock unlimited
* soft nproc 102400
* hard nproc 102400
* soft nofile 1048576
* hard nofile 1048576
EOF"
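To spot-check that the limits are in place (limits.conf only affects fresh logins, and whether an SSH session picks them up depends on PAM configuration), something like:

# Check the open-files soft limit on every node
ansible all -m shell -a "ulimit -n"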
# 2. Comment out the section below
# https related config
#https:
  # https port for harbor, default is 443
  #port: 443
  # The path of cert and key files for nginx
  #certificate: /your/certificate/path
  #private_key: /your/private/key/path
# 3. Set the admin password
harbor_admin_password: harbor950101

database:
  # The password for the root user of Harbor DB. Change this before any production use.
  password: admin
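After editing, it helps to review only the active (non-comment) lines to confirm the https block really is disabled and the passwords are set; for example:

# Show effective harbor.yml settings, skipping comments and blank lines
grep -Ev '^\s*#|^\s*$' /export/servers/harbor/harbor.yml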
cd /export/servers/harbor
# Unpacking harbor-offline-installer-v2.4.3.tgz yields harbor.v2.4.3.tar.gz; load it into docker
docker load -i harbor.v2.4.3.tar.gz

# List the loaded images
docker images
REPOSITORY                      TAG      IMAGE ID       CREATED       SIZE
goharbor/harbor-exporter        v2.4.3   776ac6ee91f4   4 weeks ago   81.5MB
goharbor/chartmuseum-photon     v2.4.3   f39a9694988d   4 weeks ago   172MB
goharbor/redis-photon           v2.4.3   b168e9750dc8   4 weeks ago   154MB
goharbor/trivy-adapter-photon   v2.4.3   a406a715461c   4 weeks ago   251MB
goharbor/notary-server-photon   v2.4.3   da89404c7cf9   4 weeks ago   109MB
goharbor/notary-signer-photon   v2.4.3   38468ac13836   4 weeks ago   107MB
goharbor/harbor-registryctl     v2.4.3   61243a84642b   4 weeks ago   135MB
goharbor/registry-photon        v2.4.3   9855479dd6fa   4 weeks ago   77.9MB
goharbor/nginx-photon           v2.4.3   0165c71ef734   4 weeks ago   44.4MB
goharbor/harbor-log             v2.4.3   57ceb170dac4   4 weeks ago   161MB
goharbor/harbor-jobservice      v2.4.3   7fea87c4b884   4 weeks ago   219MB
goharbor/harbor-core            v2.4.3   d864774a3b8f   4 weeks ago   197MB
goharbor/harbor-portal          v2.4.3   85f00db66862   4 weeks ago   53.4MB
goharbor/harbor-db              v2.4.3   7693d44a2ad6   4 weeks ago   225MB
goharbor/prepare                v2.4.3   c882d74725ee   4 weeks ago   268MB
If you did not comment out the https section as described in the harbor.yml editing step, you will hit the following error.
prepare base dir is set to /export/servers/harbor
Error happened in config validation...
ERROR:root:Error: The protocol is https but attribute ssl_cert is not set
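If you hit this, comment out the https block as shown in the harbor.yml step, then re-run the scripts shipped in the offline bundle so the runtime config is regenerated:

cd /export/servers/harbor
# Regenerate the runtime config from harbor.yml, then (re)install and start Harbor
./prepare
./install.sh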
# Tag the images for the Harbor registry
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.4 192.168.2.99:8090/community/coredns:v1.8.4
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0 192.168.2.99:8090/community/etcd:3.5.0-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.22.4 192.168.2.99:8090/community/kube-apiserver:v1.22.4
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.22.4 192.168.2.99:8090/community/kube-controller-manager:v1.22.4
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.22.4 192.168.2.99:8090/community/kube-proxy:v1.22.4
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.22.4 192.168.2.99:8090/community/kube-scheduler:v1.22.4
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5 192.168.2.99:8090/community/pause:3.5
docker tag rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0 192.168.2.99:8090/community/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
docker tag rancher/mirrored-flannelcni-flannel:v0.19.1 192.168.2.99:8090/community/mirrored-flannelcni-flannel:v0.19.1
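The repeated tag commands (plus the pushes that follow) can be condensed into a loop; a minimal sketch, with the image list abbreviated to three entries here:

# Retag and push every source image into the community project
REGISTRY=192.168.2.99:8090/community
for img in \
  registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.4 \
  registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0 \
  rancher/mirrored-flannelcni-flannel:v0.19.1; do   # ...extend with the remaining images
  name=${img##*/}                # keep only name:tag, dropping the source registry/namespace
  docker tag "$img" "$REGISTRY/$name"
  docker push "$REGISTRY/$name"
done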
1. docker push fails with http: server gave HTTP response to HTTPS client

[root@tsukasa docker-images]# docker push 192.168.2.99:8090/community/mirrored-flannelcni-flannel:v0.19.1
The push refers to repository [192.168.2.99:8090/community/mirrored-flannelcni-flannel]
Get "https://192.168.2.99:8090/v2/": http: server gave HTTP response to HTTPS client
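This error means the Docker client insists on HTTPS while Harbor is serving plain HTTP on port 8090. The usual fix is to whitelist the registry as insecure on every Docker host that pushes or pulls from it; a sketch, assuming /etc/docker/daemon.json does not already exist (merge by hand if it does):

# Allow plain-HTTP access to the Harbor registry, then restart Docker
cat > /etc/docker/daemon.json << 'EOF'
{
  "insecure-registries": ["192.168.2.99:8090"]
}
EOF
systemctl restart docker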
2. docker push fails with project community not found: project community not found
[root@tsukasa harbor]# docker push 192.168.2.99:8090/community/coredns:v1.8.4
The push refers to repository [192.168.2.99:8090/community/coredns]
f72781b18181: Preparing
225df95e717c: Preparing
unauthorized: project community not found: project community not found
The fix is to create the project in the Harbor admin console.
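If you prefer the CLI, the project can also be created through Harbor's v2 REST API; a hedged sketch against the Harbor 2.x API (credentials and the public flag taken from the configuration above):

# Create a public "community" project via the Harbor API
curl -u admin:harbor950101 \
  -H "Content-Type: application/json" \
  -X POST "http://192.168.2.99:8090/api/v2.0/projects" \
  -d '{"project_name": "community", "public": true}'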
3. docker push fails with unauthorized: unauthorized to access repository
[root@tsukasa harbor]# docker push 192.168.2.99:8090/community/coredns:v1.8.4
The push refers to repository [192.168.2.99:8090/community/coredns]
f72781b18181: Preparing
225df95e717c: Preparing
unauthorized: unauthorized to access repository: community/coredns, action: push: unauthorized to access repository: community/coredns, action: push
The fix is to log in to the registry first:

[root@tsukasa harbor]# docker login 192.168.2.99:8090
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning.
See https://docs.docker.com/engine/reference/commandline/login/#credentials-store
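For scripted logins, --password-stdin keeps the password off the command line and out of the process list (the plain-text storage warning itself only goes away with a credential helper):

# Non-interactive login; the password arrives on stdin rather than as an argument
echo 'harbor950101' | docker login 192.168.2.99:8090 --username admin --password-stdin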
[root@k8s-master ~]# kubeadm init \
> --control-plane-endpoint "192.168.2.100:6443" \
> --image-repository 192.168.2.99:8090/community \
> --kubernetes-version v1.22.4 \
> --service-cidr=172.16.0.0/16 \
> --pod-network-cidr=10.244.0.0/16 \
> --token "abcdef.0123456789abcdef" \
> --token-ttl "0" \
> --upload-certs
[init] Using Kubernetes version: v1.22.4
[preflight] Running pre-flight checks
    [WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
    [WARNING Hostname]: hostname "k8s-master" could not be reached
    [WARNING Hostname]: hostname "k8s-master": lookup k8s-master on 223.5.5.5:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [172.16.0.1 192.168.2.100]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.2.100 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.2.100 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 7.005575 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.22" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
7ed3ef5d6b04a668e7bed67e419fd9f69ce482047350e5b391357005064db994
[mark-control-plane] Marking the node k8s-master as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/
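Since the cluster was initialized with --pod-network-cidr=10.244.0.0/16 and the flannel images were already pushed to Harbor, flannel is the natural fit here. A hedged sketch; the manifest URL for v0.19.1 is an assumption, and the exact image prefix inside the manifest should be verified before running the sed:

# Fetch the flannel manifest, repoint its images at the local Harbor, then apply
curl -LO https://raw.githubusercontent.com/flannel-io/flannel/v0.19.1/Documentation/kube-flannel.yml
sed -i 's#docker.io/rancher/#192.168.2.99:8090/community/#g' kube-flannel.yml
kubectl apply -f kube-flannel.yml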
You can now join any number of the control-plane node running the following command on each as root:
Please note that the certificate-key gives access to cluster sensitive data, keep it secret! As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use "kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
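The join commands themselves were truncated in the capture above. If they are no longer at hand, the master can print a fresh worker join command (this also creates a new token and computes the CA cert hash):

# Run on the control-plane node; outputs a complete 'kubeadm join ...' line for workers
kubeadm token create --print-join-command

For an additional control-plane node, append --control-plane and a --certificate-key obtained from kubeadm init phase upload-certs --upload-certs, as noted in the output above.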
kubectl get all                      # Show everything running: containers, services, monitors, etc.
                                     # Add --all-namespaces to cover all namespaces; -o wide shows more detail, e.g. which machine a container runs on
kubectl get pods -o wide             # List pods
kubectl get pod name --output json   # Print pod info as JSON
kubectl get pod name --output yaml   # Print pod info as YAML
kubectl get svc                      # List the externally exposed ports of all instances
kubectl describe pod xxx-mariadb-0   # Show details of a pod
kubectl get nodes                    # List nodes
kubectl get rc,namespace             # List replication controllers and namespaces
kubectl exec -ti xxx-jw64d sh        # Open a shell in an instance to inspect logs or config
kubectl cluster-info                 # Show cluster status
kubectl get deployments             # Show current Deployment status
NAME               DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   3         3         3            3           2h
# DESIRED is the desired pod count, CURRENT the current count, UP-TO-DATE the number updated, AVAILABLE the number running
kubectl get rs                       # ReplicaSet (RS) is the next-generation pod controller in k8s
kubectl get cs                       # Check component status (CS)
kubectl get pods --show-labels
kubectl describe deployments
kubectl get pvc                      # Get a StatefulSet's PersistentVolumeClaims
kubectl exec nginx -- printenv | grep SERVICE            # Check environment variables of a running Nginx pod
kubectl get services kube-dns --namespace=kube-system    # Check the DNS add-on Service
kubectl get pods -o yaml | grep -i podip
kubectl exec xxx-2xd57 date          # Run a command in a pod's container
kubectl delete pod name              # Delete a pod
kubectl delete deployment kubernetes-dashboard --namespace=kubernetes-dashboard   # Delete a deployment
kubectl delete secrets xxx-secrets   # Or delete it in the web UI