hongming 可以了,感谢。那个写法不可的原因后来找到了:`hosts{` 少了一个空格,CoreDNS 解析 Corefile 时报 `Unknown directive 'hosts{'`(见下方日志),改成 `hosts {`(hosts 与 { 之间加空格)即可。换成实际值后验证通过,完整操作记录如下:
[root@master1 ~]# cat /etc/add_hosts
10.0.1.223 master1.cluster.local master1
10.0.1.224 master2.cluster.local master2
10.0.1.225 master3.cluster.local master3
10.0.1.209 node1.cluster.local node1
10.0.1.211 node2.cluster.local node2
10.0.1.212 node3.cluster.local node3
10.0.1.99 lb.kubesphere.local
10.0.1.185 harbor.dockerregistry.com
[root@master1 ~]# kubectl -n kube-system edit cm coredns
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
hosts{
10.0.1.223 master1.cluster.local master1
10.0.1.224 master2.cluster.local master2
10.0.1.225 master3.cluster.local master3
10.0.1.209 node1.cluster.local node1
10.0.1.211 node2.cluster.local node2
10.0.1.212 node3.cluster.local node3
10.0.1.99 lb.kubesphere.local
10.0.1.185 harbor.dockerregistry.com
fallthrough
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf
#forward . 10.233.0.3 {
# force_tcp
#}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: "2020-12-19T10:04:53Z"
name: coredns
namespace: kube-system
resourceVersion: "42499"
"/tmp/kubectl-edit-2jjw7.yaml" 47L, 1374C written
configmap/coredns edited
[root@master1 ~]#
[root@master1 ~]#
[root@master1 ~]#
[root@master1 ~]# kubectl get pod -A|grep dns
kube-system coredns-564cdd5d55-7mbxn 1/1 Running 0 91m
kube-system coredns-564cdd5d55-lfctb 1/1 Running 0 91m
kube-system nodelocaldns-grvrk 1/1 Running 0 91m
kube-system nodelocaldns-k4w6g 1/1 Running 0 91m
kube-system nodelocaldns-kvb54 1/1 Running 0 91m
kube-system nodelocaldns-mnn2f 1/1 Running 0 91m
kube-system nodelocaldns-sd8fz 1/1 Running 0 91m
kube-system nodelocaldns-shjst 1/1 Running 0 91m
[root@master1 ~]# kubectl delete -n kube-system pod `kubectl get pod -A|grep dns|awk '{print $2}'`
pod "coredns-564cdd5d55-7mbxn" deleted
pod "coredns-564cdd5d55-lfctb" deleted
pod "nodelocaldns-grvrk" deleted
pod "nodelocaldns-k4w6g" deleted
pod "nodelocaldns-kvb54" deleted
pod "nodelocaldns-mnn2f" deleted
pod "nodelocaldns-sd8fz" deleted
pod "nodelocaldns-shjst" deleted
[root@master1 ~]# kubectl get pod -A|grep dns
kube-system coredns-564cdd5d55-b57mw 0/1 CrashLoopBackOff 2 28s
kube-system coredns-564cdd5d55-tww96 0/1 CrashLoopBackOff 2 28s
kube-system nodelocaldns-82w8t 1/1 Running 0 28s
kube-system nodelocaldns-bx8z6 1/1 Running 0 28s
kube-system nodelocaldns-cznmc 1/1 Running 0 28s
kube-system nodelocaldns-h4s7v 1/1 Running 0 28s
kube-system nodelocaldns-rk6rm 1/1 Running 0 28s
kube-system nodelocaldns-st5fk 1/1 Running 0 28s
[root@master1 ~]# kubectl logs -f -n kube-system coredns-564cdd5d55-b57mw
/etc/coredns/Corefile:6 - Error during parsing: Unknown directive 'hosts{'
[root@master1 ~]# kubectl -n kube-system edit cm coredns
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
hosts {
10.0.1.223 master1.cluster.local master1
10.0.1.224 master2.cluster.local master2
10.0.1.225 master3.cluster.local master3
10.0.1.209 node1.cluster.local node1
10.0.1.211 node2.cluster.local node2
10.0.1.212 node3.cluster.local node3
10.0.1.99 lb.kubesphere.local
10.0.1.185 harbor.dockerregistry.com
fallthrough
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf
#forward . 10.233.0.3 {
# force_tcp
#}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: "2020-12-19T10:04:53Z"
name: coredns
namespace: kube-system
resourceVersion: "76280"
"/tmp/kubectl-edit-4czc0.yaml" 47L, 1375C written
configmap/coredns edited
[root@master1 ~]# kubectl delete -n kube-system pod `kubectl get pod -A|grep dns|awk '{print $2}'`
pod "coredns-564cdd5d55-b57mw" deleted
pod "coredns-564cdd5d55-tww96" deleted
pod "nodelocaldns-82w8t" deleted
pod "nodelocaldns-bx8z6" deleted
pod "nodelocaldns-cznmc" deleted
pod "nodelocaldns-h4s7v" deleted
pod "nodelocaldns-rk6rm" deleted
pod "nodelocaldns-st5fk" deleted
[root@master1 ~]# kubectl get pod -A|grep dns
kube-system coredns-564cdd5d55-ffcfv 1/1 Running 0 18s
kube-system coredns-564cdd5d55-wxrjc 1/1 Running 0 18s
kube-system nodelocaldns-22ccz 1/1 Running 0 18s
kube-system nodelocaldns-4ghrt 1/1 Running 0 18s
kube-system nodelocaldns-5xdgx 1/1 Running 0 18s
kube-system nodelocaldns-6k6fc 1/1 Running 0 18s
kube-system nodelocaldns-bf455 1/1 Running 0 18s
kube-system nodelocaldns-cjp4k 1/1 Running 0 18s
[root@master1 ~]# nslookup node1 -n 10.233.0.3
*** Invalid option: n
Server: 10.233.0.3
Address: 10.233.0.3#53
Name: node1
Address: 10.0.1.209