freemankevin (edited):
File does not exist: /etc/add_hosts
That is probably the problem.

hongming:
Works now, thanks. For some reason that original syntax didn't work; switching to the literal values fixed it:
[root@master1 ~]# cat /etc/add_hosts
10.0.1.223 master1.cluster.local master1
10.0.1.224 master2.cluster.local master2
10.0.1.225 master3.cluster.local master3
10.0.1.209 node1.cluster.local node1
10.0.1.211 node2.cluster.local node2
10.0.1.212 node3.cluster.local node3
10.0.1.99 lb.kubesphere.local
10.0.1.185 harbor.dockerregistry.com
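For anyone wondering about the failed first attempt: the CoreDNS hosts plugin also accepts a file path instead of inline entries, something like

hosts /etc/add_hosts {
    fallthrough
}

but that path is resolved inside the coredns container, not on the node, so unless /etc/add_hosts is mounted into the pod, CoreDNS exits with the "File does not exist" error quoted above. Inlining the entries, as done below, sidesteps the mount entirely.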
[root@master1 ~]# kubectl -n kube-system edit cm coredns
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        hosts{
            10.0.1.223 master1.cluster.local master1
            10.0.1.224 master2.cluster.local master2
            10.0.1.225 master3.cluster.local master3
            10.0.1.209 node1.cluster.local node1
            10.0.1.211 node2.cluster.local node2
            10.0.1.212 node3.cluster.local node3
            10.0.1.99 lb.kubesphere.local
            10.0.1.185 harbor.dockerregistry.com
            fallthrough
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf
        #forward . 10.233.0.3 {
        #    force_tcp
        #}
        cache 30
        loop
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2020-12-19T10:04:53Z"
  name: coredns
  namespace: kube-system
  resourceVersion: "42499"
"/tmp/kubectl-edit-2jjw7.yaml" 47L, 1374C written
configmap/coredns edited
[root@master1 ~]#
[root@master1 ~]# kubectl get pod -A|grep dns
kube-system coredns-564cdd5d55-7mbxn 1/1 Running 0 91m
kube-system coredns-564cdd5d55-lfctb 1/1 Running 0 91m
kube-system nodelocaldns-grvrk 1/1 Running 0 91m
kube-system nodelocaldns-k4w6g 1/1 Running 0 91m
kube-system nodelocaldns-kvb54 1/1 Running 0 91m
kube-system nodelocaldns-mnn2f 1/1 Running 0 91m
kube-system nodelocaldns-sd8fz 1/1 Running 0 91m
kube-system nodelocaldns-shjst 1/1 Running 0 91m
[root@master1 ~]# kubectl delete -n kube-system pod `kubectl get pod -A|grep dns|awk '{print $2}'`
pod "coredns-564cdd5d55-7mbxn" deleted
pod "coredns-564cdd5d55-lfctb" deleted
pod "nodelocaldns-grvrk" deleted
pod "nodelocaldns-k4w6g" deleted
pod "nodelocaldns-kvb54" deleted
pod "nodelocaldns-mnn2f" deleted
pod "nodelocaldns-sd8fz" deleted
pod "nodelocaldns-shjst" deleted
[root@master1 ~]# kubectl get pod -A|grep dns
kube-system coredns-564cdd5d55-b57mw 0/1 CrashLoopBackOff 2 28s
kube-system coredns-564cdd5d55-tww96 0/1 CrashLoopBackOff 2 28s
kube-system nodelocaldns-82w8t 1/1 Running 0 28s
kube-system nodelocaldns-bx8z6 1/1 Running 0 28s
kube-system nodelocaldns-cznmc 1/1 Running 0 28s
kube-system nodelocaldns-h4s7v 1/1 Running 0 28s
kube-system nodelocaldns-rk6rm 1/1 Running 0 28s
kube-system nodelocaldns-st5fk 1/1 Running 0 28s
[root@master1 ~]# kubectl logs -f -n kube-system coredns-564cdd5d55-b57mw
/etc/coredns/Corefile:6 - Error during parsing: Unknown directive 'hosts{'
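The Corefile tokenizer needs whitespace between a plugin name and its opening brace, so hosts{ is read as a single unknown directive. The fix is literally one space:

-        hosts{
+        hosts {

(A running pod with the reload plugin would have kept serving its old config after a failed parse; these replicas crash-loop because a fresh pod has to parse the broken Corefile at startup.)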
[root@master1 ~]# kubectl -n kube-system edit cm coredns
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        hosts {
            10.0.1.223 master1.cluster.local master1
            10.0.1.224 master2.cluster.local master2
            10.0.1.225 master3.cluster.local master3
            10.0.1.209 node1.cluster.local node1
            10.0.1.211 node2.cluster.local node2
            10.0.1.212 node3.cluster.local node3
            10.0.1.99 lb.kubesphere.local
            10.0.1.185 harbor.dockerregistry.com
            fallthrough
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf
        #forward . 10.233.0.3 {
        #    force_tcp
        #}
        cache 30
        loop
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2020-12-19T10:04:53Z"
  name: coredns
  namespace: kube-system
  resourceVersion: "76280"
"/tmp/kubectl-edit-4czc0.yaml" 47L, 1375C written
configmap/coredns edited
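Incidentally, since this Corefile enables the reload plugin, CoreDNS would also pick up the corrected ConfigMap on its own once the kubelet refreshes the mounted volume (usually within a minute or two); deleting the pods below just forces it immediately. One way to watch for the automatic reload, assuming the standard coredns Deployment name:

kubectl -n kube-system logs deploy/coredns -f | grep -i reload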
[root@master1 ~]# kubectl delete -n kube-system pod `kubectl get pod -A|grep dns|awk '{print $2}'`
pod "coredns-564cdd5d55-b57mw" deleted
pod "coredns-564cdd5d55-tww96" deleted
pod "nodelocaldns-82w8t" deleted
pod "nodelocaldns-bx8z6" deleted
pod "nodelocaldns-cznmc" deleted
pod "nodelocaldns-h4s7v" deleted
pod "nodelocaldns-rk6rm" deleted
pod "nodelocaldns-st5fk" deleted
[root@master1 ~]# kubectl get pod -A|grep dns
kube-system coredns-564cdd5d55-ffcfv 1/1 Running 0 18s
kube-system coredns-564cdd5d55-wxrjc 1/1 Running 0 18s
kube-system nodelocaldns-22ccz 1/1 Running 0 18s
kube-system nodelocaldns-4ghrt 1/1 Running 0 18s
kube-system nodelocaldns-5xdgx 1/1 Running 0 18s
kube-system nodelocaldns-6k6fc 1/1 Running 0 18s
kube-system nodelocaldns-bf455 1/1 Running 0 18s
kube-system nodelocaldns-cjp4k 1/1 Running 0 18s
[root@master1 ~]# nslookup node1 -n 10.233.0.3
*** Invalid option: n
Server: 10.233.0.3
Address: 10.233.0.3#53
Name: node1
Address: 10.0.1.209
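A side note on the command: nslookup has no -n option (hence the "Invalid option" warning); the DNS server is simply the second positional argument. Warning-free equivalents:

nslookup node1 10.233.0.3
dig @10.233.0.3 node1.cluster.local +short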
For reference, here is the final working config for anyone troubleshooting the same issue.

CoreDNS:
[root@master1 ~]# kubectl -n kube-system get cm coredns -oyaml
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        hosts {
            10.0.1.223 master1.cluster.local master1
            10.0.1.224 master2.cluster.local master2
            10.0.1.225 master3.cluster.local master3
            10.0.1.209 node1.cluster.local node1
            10.0.1.211 node2.cluster.local node2
            10.0.1.212 node3.cluster.local node3
            10.0.1.99 lb.kubesphere.local
            10.0.1.185 harbor.dockerregistry.com
            fallthrough
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf
        #forward . 10.233.0.3 {
        #    force_tcp
        #}
        cache 30
        loop
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2020-12-19T10:04:53Z"
  name: coredns
  namespace: kube-system
  resourceVersion: "77112"
  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
  uid: f53c6d73-842d-4c38-a0b0-9df3b5a74cc4
nodelocaldns:
[root@master1 ~]# kubectl -n kube-system get cm nodelocaldns -oyaml
apiVersion: v1
data:
  Corefile: |
    cluster.local:53 {
        errors
        cache {
            success 9984 30
            denial 9984 5
        }
        reload
        loop
        bind 169.254.25.10
        forward . 10.233.0.3 {
            force_tcp
        }
        prometheus :9253
        health 169.254.25.10:9254
    }
    in-addr.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . 10.233.0.3 {
            force_tcp
        }
        prometheus :9253
    }
    ip6.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . 10.233.0.3 {
            force_tcp
        }
        prometheus :9253
    }
    .:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        #forward . /etc/resolv.conf
        forward . 10.233.0.3 {
            force_tcp
        }
        prometheus :9253
    }
kind: ConfigMap
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","data":{"Corefile":"cluster.local:53 {\n errors\n cache {\n success 9984 30\n denial 9984 5\n }\n reload\n loop\n bind 169.254.25.10\n forward . 10.233.0.3 {\n force_tcp\n }\n prometheus :9253\n health 169.254.25.10:9254\n}\nin-addr.arpa:53 {\n errors\n cache 30\n reload\n loop\n bind 169.254.25.10\n forward . 10.233.0.3 {\n force_tcp\n }\n prometheus :9253\n}\nip6.arpa:53 {\n errors\n cache 30\n reload\n loop\n bind 169.254.25.10\n forward . 10.233.0.3 {\n force_tcp\n }\n prometheus :9253\n}\n.:53 {\n errors\n cache 30\n reload\n loop\n bind 169.254.25.10\n forward . /etc/resolv.conf\n prometheus :9253\n}\n"},"kind":"ConfigMap","metadata":{"annotations":{},"labels":{"addonmanager.kubernetes.io/mode":"EnsureExists"},"name":"nodelocaldns","namespace":"kube-system"}}
  creationTimestamp: "2020-12-19T10:05:15Z"
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
  name: nodelocaldns
  namespace: kube-system
  resourceVersion: "34304"
  selfLink: /api/v1/namespaces/kube-system/configmaps/nodelocaldns
  uid: df5d3c4f-bd1d-46ff-9891-eec753bec78a
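Note that every zone in nodelocaldns forwards to 10.233.0.3 (the coredns Service IP) from the link-local bind address 169.254.25.10, so the hosts entries added to coredns are also visible to pods that resolve through the node-local cache. A quick check from any node, assuming bind-utils is installed:

dig @169.254.25.10 node1.cluster.local +short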
After fixing the config, delete the original pods to restart DNS:
[root@master1 ~]# kubectl delete -n kube-system pod `kubectl get pod -A|grep dns|awk '{print $2}'`
If nothing is wrong, the pods will be Running again quickly:
[root@master1 ~]# kubectl get pod -A|grep dns
kube-system   coredns-564cdd5d55-ffcfv   1/1   Running   0   10m
kube-system   coredns-564cdd5d55-wxrjc   1/1   Running   0   10m
kube-system   nodelocaldns-22ccz         1/1   Running   0   10m
kube-system   nodelocaldns-4ghrt         1/1   Running   0   10m
kube-system   nodelocaldns-5xdgx         1/1   Running   0   10m
kube-system   nodelocaldns-6k6fc         1/1   Running   0   10m
kube-system   nodelocaldns-bf455         1/1   Running   0   10m
kube-system   nodelocaldns-cjp4k         1/1   Running   0   10m
Finally, pick any of the hostnames and test resolution (installing bind-utils first if nslookup is missing):
[root@master1 ~]# yum provides nslookup
[root@master1 ~]# yum -y install bind-utils
[root@master1 ~]# nslookup node1 -n ` kubectl get svc -A|grep dns|awk '{print $4}'`
*** Invalid option: n
Server: 10.233.0.3
Address: 10.233.0.3#53
Name: node1
Address: 10.0.1.209
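To confirm the same from inside the cluster, a throwaway pod works; busybox:1.28 is a common choice here because its nslookup behaves predictably:

kubectl run dnstest --rm -it --image=busybox:1.28 --restart=Never -- nslookup node1.cluster.local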