1. System overview
Virtualization: ESXi 6.7
OS: CentOS 7.9 (2009), x86_64
Specs: 4 cores / 8 GB RAM (the official minimum is 2 cores / 2 GB)
192.168.0.137  master node
192.168.0.139  node2
192.168.0.138  node1 (used for the node scale-out exercise)
2. Environment configuration
2.1. Firewall (all nodes). This is a lab environment, so for convenience the firewall is simply turned off. In production, unless the machines are isolated from the public network, leave the firewall on and open only the ports you need.
systemctl stop firewalld      # stop the firewall
systemctl disable firewalld   # do not start it at boot
2.2. Disable SELinux on all nodes
# Set SELINUX=permissive in /etc/selinux/config
vi /etc/selinux/config
# Or:
# Set SELinux to permissive mode (effectively disabling it)
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
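To confirm the change took effect:
getenforce   # should now report Permissive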
2.3. Disable swap on all nodes
# Permanently disable swap by deleting or commenting out the swap mount line in /etc/fstab
nano /etc/fstab
#/dev/mapper/centos-swap swap swap defaults 0 0
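The fstab edit only takes effect after a reboot; to turn swap off immediately as well:
swapoff -a   # disable all swap devices for the current boot
free -h      # the Swap line should now show 0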
2.4. Set the time zone
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
date
2.5. Configure /etc/hosts on all nodes
192.168.0.137 master
192.168.0.139 node2
192.168.0.138 node1
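One way to append all three entries on each node in a single step (a small convenience sketch):
cat <<EOF >> /etc/hosts
192.168.0.137 master
192.168.0.139 node2
192.168.0.138 node1
EOF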
2.6. Enable bridge-nf-call-iptables
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Set the required sysctl parameters; these persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

# Apply the sysctl parameters without rebooting
sudo sysctl --system

# Confirm that the br_netfilter and overlay modules are loaded
lsmod | grep br_netfilter
lsmod | grep overlay
Confirm that the net.bridge.bridge-nf-call-iptables, net.bridge.bridge-nf-call-ip6tables, and net.ipv4.ip_forward system variables are set to 1 in your sysctl configuration:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
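If everything is in place, each of the three variables is reported as 1:
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1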
3. Install containerd on all nodes
3.1. Install containerd
yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum -y install containerd.io
3.2. Generate the default config.toml
containerd config default > /etc/containerd/config.toml
3.3. Configure the systemd cgroup driver in /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml

The relevant section should end up looking like this:

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  ...
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true
Point the sandbox_image download address at the Aliyun mirror:
[plugins."io.containerd.grpc.v1.cri"]
  ...
  sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
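The same edit can be scripted; a minimal sketch that pattern-matches the existing value, so it works regardless of which default your containerd version generated:
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml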
3.4. Start containerd and enable it at boot
systemctl restart containerd
systemctl enable containerd
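Optionally, verify that the daemon is healthy before moving on:
systemctl is-active containerd   # should print: active
ctr version                      # should print both client and server versions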
4. Configure the Aliyun yum repository for Kubernetes
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
5. Install kubeadm, kubelet, and kubectl via yum
5.1. Install kubeadm, kubelet, and kubectl
These instructions target Kubernetes 1.28. The Aliyun yum mirror only ships kubelet up to version 1.28.0, so the command below must pin the version explicitly:
yum install -y kubelet-1.28.0 kubeadm-1.28.0 kubectl-1.28.0 --disableexcludes=kubernetes
systemctl enable kubelet
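A quick sanity check that the pinned versions actually landed:
kubeadm version -o short   # expect v1.28.0
kubelet --version          # expect Kubernetes v1.28.0
kubectl version --client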
6. Initialize the master node
kubeadm init \
  --apiserver-advertise-address=192.168.0.137 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.28.0 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16
Output like the following indicates a successful installation:
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.137:6443 --token 2piab7.b39dqm9kpadxynkm \
	--discovery-token-ca-cert-hash sha256:c0bc36fedc05d4613ad03c1d6b8639dedb3fd3136d6a6be400e179410e0a0bff
Then follow the printed instructions step by step:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
The master node is now visible:
kubectl get node
7. Join worker nodes to the master
kubeadm join 192.168.0.137:6443 --token 2piab7.b39dqm9kpadxynkm \
	--discovery-token-ca-cert-hash sha256:c0bc36fedc05d4613ad03c1d6b8639dedb3fd3136d6a6be400e179410e0a0bff
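Tip: rather than copying the token and hash by hand, you can have kubeadm print a complete, ready-to-run join command on the master:
kubeadm token create --print-join-command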
If the command hangs with no output, the token has most likely expired. Go back to the master node and run:
kubeadm token create
to create a new token, substitute it into the join command, and run the join again. Both the master node and the worker nodes should now be visible:
kubectl get node
8. Deploy the CNI network plugin
8.1. Download the CNI plugins
wget https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz
mkdir -pv /opt/cni/bin
tar zxvf cni-plugins-linux-amd64-v1.3.0.tgz -C /opt/cni/bin/
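After extraction, /opt/cni/bin should contain the standard plugin binaries (bridge, host-local, portmap, and so on):
ls /opt/cni/bin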
8.2. Install flannel from the master
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Some networks cannot reach this URL; in that case, save the manifest below and apply it instead
---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    k8s-app: flannel
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
    k8s-app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: docker.io/flannel/flannel-cni-plugin:v1.2.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: docker.io/flannel/flannel:v0.24.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: docker.io/flannel/flannel:v0.24.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: 100m
            memory: 50Mi
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
8.3. Check the nodes
kubectl get node
[root@master containerd]# kubectl get node
NAME      STATUS   ROLES           AGE    VERSION
master    Ready    control-plane   115m   v1.28.0
worker2   Ready    <none>          112m   v1.28.0
Both nodes are now Ready. On the master, check the status of all pods:
kubectl get pods -A
[root@master containerd]# kubectl get pods -A
NAMESPACE      NAME                                READY   STATUS    RESTARTS   AGE
kube-flannel   kube-flannel-ds-knclw               1/1     Running   0          99m
kube-flannel   kube-flannel-ds-psnhd               1/1     Running   0          99m
kube-system    coredns-66f779496c-65t9r            1/1     Running   0          116m
kube-system    coredns-66f779496c-sfzz6            1/1     Running   0          116m
kube-system    etcd-master                         1/1     Running   1          116m
kube-system    kube-apiserver-master               1/1     Running   1          117m
kube-system    kube-controller-manager-master      1/1     Running   1          117m
kube-system    kube-proxy-sfrr8                    1/1     Running   0          113m
kube-system    kube-proxy-vwn6z                    1/1     Running   0          116m
kube-system    kube-scheduler-master               1/1     Running   1          116m
testing-sc     server-dashboard-7cfc5c6cb6-jrs9d   1/1     Running   0          25m
[root@master containerd]#
9. Dashboard
Personally, I still recommend Kuboard (https://kuboard.cn/).
10. Troubleshooting
crictl ps reports an error:
[root@worker2 containerd]# crictl ps
WARN[0000] runtime connect using default endpoints: [unix:///var/run/dockershim.sock unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead.
WARN[0000] image connect using default endpoints: [unix:///var/run/dockershim.sock unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead.
E0105 11:02:34.298539   32345 remote_runtime.go:390] "ListContainers with filter from runtime service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory\"" filter="&ContainerFilter{Id:,State:&ContainerStateValue{State:CONTAINER_RUNNING,},PodSandboxId:,LabelSelector:map[string]string{},}"
FATA[0000] listing containers: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory"
Cause:
crictl tries each default runtime endpoint in turn; the first, unix:///var/run/dockershim.sock, no longer exists, hence the error. You need to tell crictl which container runtime your Kubernetes actually uses. Since Kubernetes 1.24, dockershim has been replaced by cri-dockerd, so for Docker you would run: crictl config runtime-endpoint unix:///var/run/cri-dockerd.sock
If your container runtime is containerd instead, point it there: crictl config runtime-endpoint unix:///var/run/containerd/containerd.sock
After that, crictl works normally. The generated configuration is stored in /etc/crictl.yaml and can be edited at any time.
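For reference, after the containerd variant of the command, /etc/crictl.yaml might look something like this (the image-endpoint, timeout, and debug values here are illustrative defaults; only runtime-endpoint is set by the command above):
runtime-endpoint: "unix:///var/run/containerd/containerd.sock"
image-endpoint: ""
timeout: 0
debug: false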
Configuring a private image registry
In /etc/containerd/config.toml, find the [plugins."io.containerd.grpc.v1.cri".registry] section:
[plugins."io.containerd.grpc.v1.cri".registry]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."your.harbor.registry"]
      endpoint = ["https://your.harbor.registry"]  # use https here if the registry serves https, http if it serves http; I am not sure whether TLS verification also needs to be switched off
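If the registry does use a self-signed certificate, containerd's CRI plugin can be told to skip TLS verification for that registry; a sketch (the host name is the same placeholder as above):
[plugins."io.containerd.grpc.v1.cri".registry.configs]
  [plugins."io.containerd.grpc.v1.cri".registry.configs."your.harbor.registry".tls]
    insecure_skip_verify = true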
11. Operational differences between containerd and Docker
| Operation | Docker | Containerd (ctr) | Crictl (K8s) |
| --- | --- | --- | --- |
| List running containers | docker ps | ctr task ls | crictl ps |
| List images | docker images | ctr image ls | crictl images |
| View container logs | docker logs | n/a | crictl logs |
| Inspect a container | docker inspect | ctr container info | crictl inspect |
| Container resource usage | docker stats | n/a | crictl stats |
| Start/stop an existing container | docker start/stop | ctr task start/kill | crictl start/stop |
| Run a new container | docker run | ctr run | n/a |
| Tag an image | docker tag | ctr image tag | n/a |
| Create a new container | docker create | ctr container create | crictl create |
| Import an image | docker load | ctr image import | n/a |
| Export an image | docker save | ctr image export | n/a |
| Remove a container | docker rm | ctr container rm | crictl rm |
| Remove an image | docker rmi | ctr image rm | crictl rmi |
| Pull an image | docker pull | ctr image pull | crictl pull |
| Push an image | docker push | ctr image push | n/a |
| Exec into a container | docker exec | n/a | crictl exec |
12. Deploy the ingress-nginx controller
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - secrets
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resourceNames:
  - ingress-nginx-leader
  resources:
  - leases
  verbs:
  - get
  - update
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - pods
  - secrets
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-admission
rules:
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - validatingwebhookconfigurations
  verbs:
  - get
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-admission
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: v1
data:
  allow-snippet-annotations: "false"
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - appProtocol: http
    name: http
    port: 80
    protocol: TCP
    targetPort: http
  - appProtocol: https
    name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: NodePort
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  ports:
  - appProtocol: https
    name: https-webhook
    port: 443
    targetPort: webhook
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  minReadySeconds: 0
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/component: controller
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/name: ingress-nginx
  strategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/component: controller
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.9.0
    spec:
      hostNetwork: true
      containers:
      - args:
        - /nginx-ingress-controller
        - --election-id=ingress-nginx-leader
        - --controller-class=k8s.io/ingress-nginx
        - --ingress-class=nginx
        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
        - --validating-webhook=:8443
        - --validating-webhook-certificate=/usr/local/certificates/cert
        - --validating-webhook-key=/usr/local/certificates/key
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LD_PRELOAD
          value: /usr/local/lib/libmimalloc.so
        image: giantswarm/ingress-nginx-controller:v1.9.0
        imagePullPolicy: IfNotPresent
        lifecycle:
          preStop:
            exec:
              command:
              - /wait-shutdown
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: controller
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
          protocol: TCP
        - containerPort: 8443
          name: webhook
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 100m
            memory: 90Mi
        securityContext:
          allowPrivilegeEscalation: true
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          runAsUser: 101
        volumeMounts:
        - mountPath: /usr/local/certificates/
          name: webhook-cert
          readOnly: true
      dnsPolicy: ClusterFirst
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
      - name: webhook-cert
        secret:
          secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.9.0
      name: ingress-nginx-admission-create
    spec:
      containers:
      - args:
        - create
        - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
        - --namespace=$(POD_NAMESPACE)
        - --secret-name=ingress-nginx-admission
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: dyrnq/kube-webhook-certgen:v20230407
        imagePullPolicy: IfNotPresent
        name: create
        securityContext:
          allowPrivilegeEscalation: false
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
      serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.9.0
      name: ingress-nginx-admission-patch
    spec:
      containers:
      - args:
        - patch
        - --webhook-name=ingress-nginx-admission
        - --namespace=$(POD_NAMESPACE)
        - --patch-mutating=false
        - --secret-name=ingress-nginx-admission
        - --patch-failure-policy=Fail
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: dyrnq/kube-webhook-certgen:v20230407
        imagePullPolicy: IfNotPresent
        name: patch
        securityContext:
          allowPrivilegeEscalation: false
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
      serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.0
  name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
  - v1
  clientConfig:
    service:
      name: ingress-nginx-controller-admission
      namespace: ingress-nginx
      path: /networking/v1/ingresses
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: validate.nginx.ingress.kubernetes.io
  rules:
  - apiGroups:
    - networking.k8s.io
    apiVersions:
    - v1
    operations:
    - CREATE
    - UPDATE
    resources:
    - ingresses
  sideEffects: None
Apply the manifest:
kubectl apply -f ingress-nginx.yaml
# Check whether ingress-nginx deployed successfully
[root@master containerd]# kubectl get all -n ingress-nginx
NAME                                           READY   STATUS      RESTARTS   AGE
pod/ingress-nginx-admission-create-mr7t8       0/1     Completed   0          70m
pod/ingress-nginx-admission-patch-hnv5n        0/1     Completed   0          70m
pod/ingress-nginx-controller-8dbf764f7-dzwtl   1/1     Running     0          3m14s

NAME                                         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
service/ingress-nginx-controller             NodePort    10.97.182.16     <none>        80:32542/TCP,443:31704/TCP   70m
service/ingress-nginx-controller-admission   ClusterIP   10.102.179.254   <none>        443/TCP                      70m

NAME                                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/ingress-nginx-controller   1/1     1            1           70m

NAME                                                  DESIRED   CURRENT   READY   AGE
replicaset.apps/ingress-nginx-controller-544b486766   0         0         0       70m
replicaset.apps/ingress-nginx-controller-8dbf764f7    1         1         1       3m14s

NAME                                       COMPLETIONS   DURATION   AGE
job.batch/ingress-nginx-admission-create   1/1           15s        70m
job.batch/ingress-nginx-admission-patch    1/1           18s        70m
Testing ingress-nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: testing-sc
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pod
  template:
    metadata:
      labels:
        app: nginx-pod
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat-deployment
  namespace: testing-sc
spec:
  replicas: 2
  selector:
    matchLabels:
      app: tomcat-pod
  template:
    metadata:
      labels:
        app: tomcat-pod
    spec:
      containers:
      - name: tomcat
        image: tomcat:8.0-alpine
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  namespace: testing-sc
spec:
  selector:
    app: nginx-pod
  type: ClusterIP
  ports:
  - port: 80
    name: http
    protocol: TCP
    targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: tomcat-service
  namespace: testing-sc
spec:
  selector:
    app: tomcat-pod
  type: ClusterIP
  ports:
  - port: 8080
    name: http
    protocol: TCP
    targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: test-ingress
  namespace: testing-sc
spec:
  ingressClassName: nginx
  rules:
  - host: tomcat.demo.com
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: tomcat-service
            port:
              number: 8080
  - host: nginx.demo.com
    http:
      paths:
      - pathType: Prefix
        path: /
        backend:
          service:
            name: nginx-service
            port:
              number: 80

# Note: I skipped the namespace manifest here because the testing-sc namespace already existed in my cluster. If yours does not, create it with:
kubectl create ns testing-sc
# Or with the following YAML:
apiVersion: v1
kind: Namespace
metadata:
  name: testing-sc
Once everything is ready, apply the manifest to the cluster:
kubectl apply -f nginx-tomcat-test.yaml
# Check the deployment
kubectl get all -n testing-sc
Add DNS entries to the hosts file:
192.168.0.139 nginx.demo.com tomcat.demo.com
# Use the IP of whichever node the ingress-nginx controller pod was scheduled on (you can also pin the controller to a specific node)
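With the hosts entries in place, a quick curl verifies end-to-end routing (this assumes the controller pod landed on 192.168.0.139; since the Deployment above uses hostNetwork: true it listens on port 80 of that node directly, and the NodePorts 32542/31704 shown earlier work as well):
curl http://nginx.demo.com/    # should return the nginx welcome page
curl http://tomcat.demo.com/   # should return the Tomcat landing page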