Jeff

[root@master100 ~]# kubectl -n test-namespace get deployment productpage-v1 -o yaml  
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    creator: admin
    deployment.kubernetes.io/revision: "1"
    kubesphere.io/isElasticReplicas: "false"
    servicemesh.kubesphere.io/enabled: "true"
  creationTimestamp: "2019-10-17T03:29:35Z"
  generation: 1
  labels:
    app: productpage
    app.kubernetes.io/name: bookinfo
    app.kubernetes.io/version: v1
    version: v1
  name: productpage-v1
  namespace: test-namespace
  ownerReferences:
  - apiVersion: app.k8s.io/v1beta1
    blockOwnerDeletion: true
    controller: false
    kind: Application
    name: bookinfo
    uid: 57b01a89-f08e-11e9-93ab-52560ade2364
  resourceVersion: "9687755"
  selfLink: /apis/extensions/v1beta1/namespaces/test-namespace/deployments/productpage-v1
  uid: 57b037eb-f08e-11e9-93ab-52560ade2364
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: productpage
      app.kubernetes.io/name: bookinfo
      app.kubernetes.io/version: v1
      version: v1
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        sidecar.istio.io/inject: "true"
      creationTimestamp: null
      labels:
        app: productpage
        app.kubernetes.io/name: bookinfo
        app.kubernetes.io/version: v1
        version: v1
    spec:
      containers:
      - image: kubesphere/examples-bookinfo-productpage-v1:1.13.0
        imagePullPolicy: IfNotPresent
        name: productpage
        ports:
        - containerPort: 9080
          name: http-web
          protocol: TCP
        resources:
          limits:
            cpu: "1"
            memory: 1000Mi
          requests:
            cpu: 10m
            memory: 10Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: default
      serviceAccountName: default
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: "2019-10-17T03:29:56Z"
    lastUpdateTime: "2019-10-17T03:29:56Z"
    message: Deployment has minimum availability.
    reason: MinimumReplicasAvailable
    status: "True"
    type: Available
  - lastTransitionTime: "2019-10-17T03:29:35Z"
    lastUpdateTime: "2019-10-17T03:29:56Z"
    message: ReplicaSet "productpage-v1-579dfbcddd" has successfully progressed.
    reason: NewReplicaSetAvailable
    status: "True"
    type: Progressing
  observedGeneration: 1
  readyReplicas: 1
  replicas: 1
  updatedReplicas: 1

这个配置看着也是对的,你的电脑能够远程么,我看下,可以把登录方式发到 kubesphere@yunify.com

没法远程。。

我到这一步,都是正常的,这里也显示了istio-proxy,创建后是就绪的,但是查看pod状态,就没有这个istio-proxy了

不行我再手动部署istio官方的实例试试

  • Jeff 回复了此帖

    for-mat 执行下这个命令看下,ns是否打上label了

    kubectl get ns test-namespace -o yaml

      Jeff label是我看istio文档后手动打的

      [root@master100 ~]# kubectl get ns test-namespace -o yaml
      apiVersion: v1
      kind: Namespace
      metadata:
        annotations:
          creator: admin
          openpitrix_runtime: runtime-BVzjOO3LRJQA
        creationTimestamp: "2019-10-16T11:22:14Z"
        finalizers:
        - finalizers.kubesphere.io/namespaces
        labels:
          istio-injection: enabled
          kubesphere.io/workspace: test-workspace
        name: test-namespace
        ownerReferences:
        - apiVersion: tenant.kubesphere.io/v1alpha1
          blockOwnerDeletion: true
          controller: true
          kind: Workspace
          name: test-workspace
          uid: 233958f4-f007-11e9-93ab-52560ade2364
        resourceVersion: "9674211"
        selfLink: /api/v1/namespaces/test-namespace
        uid: 3486cf19-f007-11e9-9044-52560ade2365
      spec:
        finalizers:
        - kubernetes
      status:
        phase: Active
      • Jeff 回复了此帖

        for-mat 把这个label去掉 istio-injection: enabled,重新部署应用试下

          Jeff 去掉试了还是不行,另外我照着istio官方文档操作了下
          也没有注入sidecar,我检查了apiserver启动项
          --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
          也开启了自动sidecar注入,但就是不生效。
          我的k8s是1.13.10
          奇了怪了,我再手动试试。。。

          Jeff 大佬,istio-sidecar-injector的configmap没有values
          [root@master100 bin]# ./istioctl kube-inject -f ../samples/sleep/sleep.yaml | kubectl apply -f -
          Error: missing configuration map key "values" in "istio-sidecar-injector"
          error: no objects passed to apply

          • Jeff 回复了此帖

            for-mat 你的环境是默认安装的么,还是在已有的k8s上安装的?最好能有个环境我看下

            我滴妈呀,终于解决了
            1、我是已有k8s集群上搭的kubesphere
            kube-apiserver的启动参数中,要有--admission-control=MutatingAdmissionWebhook,开启自动注入
            2、MutatingWebhookConfiguration配置有问题
            kubectl -n istio-system edit MutatingWebhookConfiguration istio-sidecar-injector
            下面两个参数改成In和enabled
            operator: In
            values:
            - enabled
            3、给用到的namespace打标签
            kubectl label namespace test-namespace istio-injection=enabled
            4、感谢大佬帮忙

            • Jeff 回复了此帖

              不知道是不是默认都会这样,我装了两次是都不行。也可能是因为我自己搭的集群