Commit 5cf57655 by loganhz Committed by Alena Prokharchyk

Upgrade to Istio v1.3.0

parent 235dfbdd
apiVersion: v1
name: rancher-istio
version: 0.0.2
appVersion: 1.2.5
appVersion: 1.3.0
tillerVersion: ">=2.7.2-0"
description: Helm chart for all istio components
home: https://istio.io/
......
......@@ -8,7 +8,7 @@ The documentation here is for developers only, please follow the installation in
## Introduction
This chart bootstraps all Istio [components](https://istio.io/docs/concepts/what-is-istio/overview.html) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
This chart bootstraps all Istio [components](https://istio.io/docs/concepts/what-is-istio/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Chart Details
......@@ -136,5 +136,5 @@ To uninstall/delete the `istio` release but continue to track the release:
To uninstall/delete the `istio` release completely and make its name free for later use:
```
$ helm delete istio --purge
$ helm delete --purge istio
```
......@@ -18,7 +18,7 @@ tolerations: []
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -29,6 +29,6 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......@@ -28,7 +28,7 @@ rules:
resourceNames: ["istio-galley"]
verbs: ["get"]
- apiGroups: [""]
resources: ["pods", "nodes", "services", "endpoints"]
resources: ["pods", "nodes", "services", "endpoints", "namespaces"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions", "networking.k8s.io"]
resources: ["ingresses"]
......@@ -37,3 +37,6 @@ rules:
resources: ["deployments/finalizers"]
resourceNames: ["istio-galley"]
verbs: ["update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "watch"]
......@@ -10,5 +10,7 @@ metadata:
release: {{ .Release.Name }}
istio: galley
data:
{{- if .Values.global.configValidation }}
validatingwebhookconfiguration.yaml: |-
{{- include "validatingwebhookconfiguration.yaml.tpl" . | indent 4}}
\ No newline at end of file
{{- include "validatingwebhookconfiguration.yaml.tpl" . | indent 4}}
{{- end}}
......@@ -16,8 +16,8 @@ spec:
istio: galley
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
maxSurge: {{ .Values.rollingMaxSurge }}
maxUnavailable: {{ .Values.rollingMaxUnavailable }}
template:
metadata:
labels:
......@@ -58,6 +58,9 @@ spec:
{{- if not $.Values.global.useMCP }}
- --enable-server=false
{{- end }}
{{- if not $.Values.global.configValidation }}
- --enable-validation=false
{{- end }}
- --validation-webhook-config-file
- /etc/config/validatingwebhookconfiguration.yaml
- --monitoringPort={{ .Values.global.monitoringPort }}
......
......@@ -10,7 +10,6 @@ metadata:
release: {{ .Release.Name }}
istio: galley
webhooks:
{{- if .Values.global.configValidation }}
- name: pilot.validation.istio.io
clientConfig:
service:
......@@ -117,4 +116,3 @@ webhooks:
failurePolicy: Fail
sideEffects: None
{{- end }}
{{- end }}
......@@ -3,6 +3,8 @@
#
enabled: true
replicaCount: 1
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
nodeSelector: {}
tolerations: []
......@@ -12,7 +14,7 @@ tolerations: []
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -23,6 +25,6 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......@@ -16,7 +16,7 @@
values:
{{- range $key, $val := .root.Values.global.arch }}
{{- if gt ($val | int) 0 }}
- {{ $key }}
- {{ $key | quote }}
{{- end }}
{{- end }}
{{- $nodeSelector := default .root.Values.global.defaultNodeSelector .nodeSelector -}}
......@@ -24,7 +24,7 @@
- key: {{ $key }}
operator: In
values:
- {{ $val }}
- {{ $val | quote }}
{{- end }}
{{- end }}
......@@ -37,7 +37,7 @@
- key: beta.kubernetes.io/arch
operator: In
values:
- {{ $key }}
- {{ $key | quote }}
{{- end }}
{{- end }}
{{- end }}
......@@ -66,7 +66,7 @@
values:
{{- $vals := split "," $item.values }}
{{- range $i, $v := $vals }}
- {{ $v }}
- {{ $v | quote }}
{{- end }}
{{- end }}
topologyKey: {{ $item.topologyKey }}
......@@ -84,7 +84,7 @@
values:
{{- $vals := split "," $item.values }}
{{- range $i, $v := $vals }}
- {{ $v }}
- {{ $v | quote }}
{{- end }}
{{- end }}
topologyKey: {{ $item.topologyKey }}
......
......@@ -27,6 +27,10 @@ spec:
{{- range $key, $val := $spec.labels }}
{{ $key }}: {{ $val }}
{{- end }}
strategy:
rollingUpdate:
maxSurge: {{ $spec.rollingMaxSurge }}
maxUnavailable: {{ $spec.rollingMaxUnavailable }}
template:
metadata:
labels:
......@@ -134,6 +138,12 @@ spec:
- --envoyMetricsServiceAddress
- {{ $.Values.global.proxy.envoyMetricsService.host }}:{{ $.Values.global.proxy.envoyMetricsService.port }}
{{- end }}
{{- if $.Values.global.proxy.envoyAccessLogService.enabled }}
- --envoyAccessLogService
{{- with $.Values.global.proxy.envoyAccessLogService }}
- '{"address":"{{ .host }}:{{.port }}"{{ if .tlsSettings }},"tlsSettings":{{ .tlsSettings | toJson }}{{- end }}{{ if .tcpKeepalive }},"tcpKeepalive":{{ .tcpKeepalive | toJson }}{{- end }}}'
{{- end }}
{{- end }}
- --proxyAdminPort
- "15000"
- --statusPort
......@@ -206,6 +216,10 @@ spec:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: ISTIO_META_POD_NAME
valueFrom:
fieldRef:
......@@ -215,6 +229,12 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SDS_ENABLED
value: "{{ $.Values.global.sds.enabled }}"
- name: ISTIO_META_WORKLOAD_NAME
value: {{ $key }}
- name: ISTIO_META_OWNER
value: kubernetes://api/apps/v1/namespaces/{{ $spec.namespace | default $.Release.Namespace }}/deployments/{{ $key }}
{{- if $spec.sds }}
{{- if $spec.sds.enabled }}
- name: ISTIO_META_USER_SDS
......@@ -232,11 +252,9 @@ spec:
- name: sdsudspath
mountPath: /var/run/sds
readOnly: true
{{- if $.Values.global.sds.useTrustworthyJwt }}
- name: istio-token
mountPath: /var/run/secrets/tokens
{{- end }}
{{- end }}
{{- if $spec.sds }}
{{- if $spec.sds.enabled }}
- name: ingressgatewaysdsudspath
......@@ -265,15 +283,13 @@ spec:
- name: sdsudspath
hostPath:
path: /var/run/sds
{{- if $.Values.global.sds.useTrustworthyJwt }}
- name: istio-token
projected:
sources:
- serviceAccountToken:
path: istio-token
expirationSeconds: 43200
audience: {{ $.Values.global.trustDomain }}
{{- end }}
audience: {{ $.Values.global.sds.token.aud }}
{{- end }}
- name: istio-certs
secret:
......
......@@ -33,6 +33,8 @@ istio-ingressgateway:
autoscaleMax: 5
# specify replicaCount when autoscaleEnabled: false
# replicaCount: 1
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
resources:
requests:
cpu: 100m
......@@ -135,7 +137,7 @@ istio-ingressgateway:
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -146,7 +148,7 @@ istio-ingressgateway:
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......@@ -160,13 +162,15 @@ istio-egressgateway:
autoscaleMax: 5
# specify replicaCount when autoscaleEnabled: false
# replicaCount: 1
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 256Mi
memory: 1024Mi
cpu:
targetAverageUtilization: 80
serviceAnnotations: {}
......@@ -211,7 +215,7 @@ istio-egressgateway:
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -222,7 +226,7 @@ istio-egressgateway:
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......@@ -239,6 +243,8 @@ istio-ilbgateway:
autoscaleMax: 5
# specify replicaCount when autoscaleEnabled: false
# replicaCount: 1
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
cpu:
targetAverageUtilization: 80
resources:
......
......@@ -211,7 +211,7 @@
"refId": "H"
},
{
"expr": "sum(container_memory_usage_bytes{container_name=~\"galley\", pod_name=~\"istio-galley-.*\"})",
"expr": "sum(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",container_name=~\"galley\", pod_name=~\"istio-galley-.*\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Total (kis)",
......@@ -402,7 +402,7 @@
"refId": "A"
},
{
"expr": "container_fs_usage_bytes{container_name=~\"galley\", pod_name=~\"istio-galley-.*\"}",
"expr": "container_fs_usage_bytes{job=\"kubernetes-cadvisor\",container_name=~\"galley\", pod_name=~\"istio-galley-.*\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ container_name }} ",
......@@ -494,14 +494,14 @@
"refId": "A"
},
{
"expr": "galley_mcp_source_clients_total",
"expr": "istio_mcp_clients_total{component=\"galley\"}",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "clients_total",
"refId": "B"
},
{
"expr": "go_goroutines{job=\"galley\"}/galley_mcp_source_clients_total",
"expr": "go_goroutines{job=\"galley\"}/sum(istio_mcp_clients_total{component=\"galley\"}) without (component)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "avg_goroutines_per_client",
......@@ -1558,7 +1558,7 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(galley_mcp_source_clients_total)",
"expr": "sum(istio_mcp_clients_total{component=\"galley\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Clients",
......@@ -1643,7 +1643,7 @@
"steppedLine": false,
"targets": [
{
"expr": "sum by(collection)(irate(galley_mcp_source_request_acks_total[1m]) * 60)",
"expr": "sum by(collection)(irate(istio_mcp_request_acks_total{component=\"galley\"}[1m]) * 60)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "",
......@@ -1728,7 +1728,7 @@
"steppedLine": false,
"targets": [
{
"expr": "rate(galley_mcp_source_request_nacks_total[1m]) * 60",
"expr": "rate(istio_mcp_request_nacks_total{component=\"galley\"}[1m]) * 60",
"format": "time_series",
"intervalFactor": 1,
"refId": "A"
......
......@@ -323,28 +323,28 @@
"steppedLine": false,
"targets": [
{
"expr": "(sum(container_memory_usage_bytes{pod_name=~\"istio-telemetry-.*\"}) / (sum(irate(istio_requests_total[1m])) / 1000)) / (sum(irate(istio_requests_total{source_workload=\"istio-ingressgateway\"}[1m])) >bool 10)",
"expr": "(sum(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",pod_name=~\"istio-telemetry-.*\"}) / (sum(irate(istio_requests_total[1m])) / 1000)) / (sum(irate(istio_requests_total{source_workload=\"istio-ingressgateway\"}[1m])) >bool 10)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "istio-telemetry / 1k rps",
"refId": "A"
},
{
"expr": "sum(container_memory_usage_bytes{pod_name=~\"istio-ingressgateway-.*\"}) / count(container_memory_usage_bytes{pod_name=~\"istio-ingressgateway-.*\",container_name!=\"POD\"})",
"expr": "sum(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",pod_name=~\"istio-ingressgateway-.*\"}) / count(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",pod_name=~\"istio-ingressgateway-.*\",container_name!=\"POD\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "per istio-ingressgateway",
"refId": "B"
},
{
"expr": "sum(container_memory_usage_bytes{namespace!=\"istio-system\",container_name=\"istio-proxy\"}) / count(container_memory_usage_bytes{namespace!=\"istio-system\",container_name=\"istio-proxy\"})",
"expr": "sum(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",namespace!=\"istio-system\",container_name=\"istio-proxy\"}) / count(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",namespace!=\"istio-system\",container_name=\"istio-proxy\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "per istio proxy",
"refId": "C"
},
{
"expr": "(sum(container_memory_usage_bytes{pod_name=~\"istio-policy-.*\"}) / (sum(irate(istio_requests_total[1m])) / 1000))/ (sum(irate(istio_requests_total{source_workload=\"istio-ingressgateway\"}[1m])) >bool 10)",
"expr": "(sum(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",pod_name=~\"istio-policy-.*\"}) / (sum(irate(istio_requests_total[1m])) / 1000))/ (sum(irate(istio_requests_total{source_workload=\"istio-ingressgateway\"}[1m])) >bool 10)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "istio-policy / 1k rps",
......@@ -644,7 +644,7 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(container_memory_usage_bytes{container_name=\"istio-proxy\"})",
"expr": "sum(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",container_name=\"istio-proxy\"})",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -818,7 +818,7 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(container_fs_usage_bytes{container_name=\"istio-proxy\"})",
"expr": "sum(container_fs_usage_bytes{job=\"kubernetes-cadvisor\", container_name=\"istio-proxy\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ container_name }}",
......@@ -976,7 +976,7 @@
"step": 2
},
{
"expr": "sum(container_memory_usage_bytes{container_name=~\"discovery|istio-proxy\", pod_name=~\"istio-pilot-.*\"})",
"expr": "sum(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",container_name=~\"discovery|istio-proxy\", pod_name=~\"istio-pilot-.*\"})",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -985,7 +985,7 @@
"step": 2
},
{
"expr": "container_memory_usage_bytes{container_name=~\"discovery|istio-proxy\", pod_name=~\"istio-pilot-.*\"}",
"expr": "container_memory_usage_bytes{job=\"kubernetes-cadvisor\",container_name=~\"discovery|istio-proxy\", pod_name=~\"istio-pilot-.*\"}",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -1187,7 +1187,7 @@
"refId": "A"
},
{
"expr": "container_fs_usage_bytes{container_name=~\"discovery|istio-proxy\", pod_name=~\"istio-pilot-.*\"}",
"expr": "container_fs_usage_bytes{job=\"kubernetes-cadvisor\", container_name=~\"discovery|istio-proxy\", pod_name=~\"istio-pilot-.*\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ container_name }}",
......@@ -1431,7 +1431,7 @@
"step": 2
},
{
"expr": "sum(container_memory_usage_bytes{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*\"})",
"expr": "sum(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*\"})",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -1440,7 +1440,7 @@
"step": 2
},
{
"expr": "container_memory_usage_bytes{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*\"}",
"expr": "container_memory_usage_bytes{job=\"kubernetes-cadvisor\",container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*\"}",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -1642,7 +1642,7 @@
"refId": "A"
},
{
"expr": "container_fs_usage_bytes{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*\"}",
"expr": "container_fs_usage_bytes{job=\"kubernetes-cadvisor\", container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ container_name }}",
......
......@@ -263,7 +263,7 @@
"refId": "G"
},
{
"expr": "sum(label_replace(container_memory_usage_bytes{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (service)",
"expr": "sum(label_replace(container_memory_usage_bytes{job=\"kubernetes-cadvisor\", container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (service)",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -271,7 +271,7 @@
"refId": "C"
},
{
"expr": "sum(label_replace(container_memory_usage_bytes{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (container_name, service)",
"expr": "sum(label_replace(container_memory_usage_bytes{job=\"kubernetes-cadvisor\", container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (container_name, service)",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -467,7 +467,7 @@
"refId": "A"
},
{
"expr": "sum(label_replace(container_fs_usage_bytes{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (container_name, service)",
"expr": "sum(label_replace(container_fs_usage_bytes{job=\"kubernetes-cadvisor\", container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (container_name, service)",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ service }} - {{ container_name }}",
......
......@@ -44,7 +44,7 @@ spec:
- containerPort: 3000
readinessProbe:
httpGet:
path: /login
path: /api/health
port: 3000
env:
- name: GRAFANA_PORT
......@@ -76,6 +76,17 @@ spec:
{{- end }}
- name: GF_PATHS_DATA
value: /data/grafana
{{- range $key, $value := $.Values.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- range $key, $secret := $.Values.envSecrets }}
- name: {{ $key }}
valueFrom:
secretKeyRef:
name: {{ $secret }}
key: {{ $key | quote }}
{{- end }}
resources:
{{- if .Values.resources }}
{{ toYaml .Values.resources | indent 12 }}
......@@ -98,7 +109,7 @@ spec:
- name: config
mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml"
subPath: dashboardproviders.yaml
- name: grafana-proxy
- name: grafana-proxy
image: "{{ template "system_default_registry" . }}{{ .Values.global.nginxProxy.repository }}:{{ .Values.global.nginxProxy.tag }}"
args:
- nginx
......@@ -130,7 +141,7 @@ spec:
volumes:
- name: config
configMap:
name: istio-grafana
name: istio-grafana
- name: grafana-nginx
configMap:
name: grafana-nginx
......
......@@ -19,7 +19,7 @@ spec:
{{- end }}
containers:
- name: "{{ template "grafana.fullname" . }}-test"
image: "{{ template "system_default_registry" . }}{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }}"
image: "{{ template "system_default_registry" . }}{{ .Values.global.curl.repository }}:{{ .Values.global.curl.tag }}"
imagePullPolicy: "{{ .Values.global.imagePullPolicy }}"
command: ['curl']
args: ['http://grafana:{{ .Values.grafana.service.externalPort }}']
......
......@@ -30,13 +30,43 @@ security:
nodeSelector: {}
tolerations: []
env: {}
# Define additional environment variables for configuring grafana.
# @see https://grafana.com/docs/installation/configuration/#using-environment-variables
# Format: env_variable_name: value
# For example:
# GF_SMTP_ENABLED: true
# GF_SMTP_HOST: email-smtp.eu-west-1.amazonaws.com:2587
# GF_SMTP_FROM_ADDRESS: alerts@mydomain.com
# GF_SMTP_FROM_NAME: Grafana
envSecrets: {}
# The key name and ENV name must match in the secrets file.
# @see https://grafana.com/docs/installation/configuration/#using-environment-variables
# For example:
# ---
# apiVersion: v1
# kind: Secret
# metadata:
# name: grafana-secrets
# namespace: istio-system
# data:
# GF_SMTP_USER: bXl1c2Vy
# GF_SMTP_PASSWORD: bXlwYXNzd29yZA==
# type: Opaque
# ---
# env_variable_key_name: secretsName
# ---
# GF_SMTP_USER: grafana-secrets
# GF_SMTP_PASSWORD: grafana-secrets
# Specify the pod anti-affinity that allows you to constrain which nodes
# your pod is eligible to be scheduled based on labels on pods that are
# already running on the node rather than based on labels on nodes.
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -47,7 +77,7 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......@@ -60,26 +90,26 @@ datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
orgId: 1
url: http://prometheus:9090
access: proxy
isDefault: true
jsonData:
timeInterval: 5s
editable: true
- name: Prometheus
type: prometheus
orgId: 1
url: http://prometheus:9090
access: proxy
isDefault: true
jsonData:
timeInterval: 5s
editable: true
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'istio'
orgId: 1
folder: 'istio'
type: file
disableDeletion: false
options:
path: /var/lib/grafana/dashboards/istio
- name: 'istio'
orgId: 1
folder: 'istio'
type: file
disableDeletion: false
options:
path: /var/lib/grafana/dashboards/istio
resources: {}
\ No newline at end of file
......@@ -66,7 +66,7 @@ To uninstall/delete the `istio-init` release but continue to track the release:
To uninstall/delete the `istio-init` release completely and make its name free for later use:
```
$ helm delete istio-init --purge
$ helm delete --purge istio-init
```
> Warning: Deleting CRDs will delete any configuration that you have made to Istio.
......
......@@ -22,7 +22,10 @@ spec:
- istio-io
- networking-istio-io
scope: Namespaced
version: v1alpha3
versions:
- name: v1alpha3
served: true
storage: true
additionalPrinterColumns:
- JSONPath: .spec.gateways
description: The names of gateways and sidecars that should apply these routes
......@@ -64,7 +67,10 @@ spec:
- istio-io
- networking-istio-io
scope: Namespaced
version: v1alpha3
versions:
- name: v1alpha3
served: true
storage: true
additionalPrinterColumns:
- JSONPath: .spec.host
description: The name of a service from the service registry
......@@ -102,7 +108,10 @@ spec:
- istio-io
- networking-istio-io
scope: Namespaced
version: v1alpha3
versions:
- name: v1alpha3
served: true
storage: true
additionalPrinterColumns:
- JSONPath: .spec.hosts
description: The hosts associated with the ServiceEntry
......@@ -147,7 +156,10 @@ spec:
- istio-io
- networking-istio-io
scope: Namespaced
version: v1alpha3
versions:
- name: v1alpha3
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
......@@ -170,7 +182,10 @@ spec:
- istio-io
- networking-istio-io
scope: Namespaced
version: v1alpha3
versions:
- name: v1alpha3
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -193,7 +208,10 @@ spec:
- istio-io
- rbac-istio-io
scope: Cluster
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -216,7 +234,10 @@ spec:
- istio-io
- authentication-istio-io
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -240,7 +261,10 @@ spec:
- istio-io
- authentication-istio-io
scope: Cluster
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -263,7 +287,10 @@ spec:
- istio-io
- apim-istio-io
scope: Namespaced
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -286,7 +313,10 @@ spec:
- istio-io
- apim-istio-io
scope: Namespaced
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -309,7 +339,10 @@ spec:
- istio-io
- apim-istio-io
scope: Namespaced
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -332,7 +365,10 @@ spec:
- istio-io
- apim-istio-io
scope: Namespaced
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -357,7 +393,10 @@ spec:
- istio-io
- policy-istio-io
scope: Namespaced
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -382,7 +421,10 @@ spec:
- istio-io
- policy-istio-io
scope: Namespaced
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -407,7 +449,10 @@ spec:
- istio-io
- rbac-istio-io
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -432,7 +477,10 @@ spec:
- istio-io
- rbac-istio-io
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -457,7 +505,10 @@ spec:
- istio-io
- rbac-istio-io
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
additionalPrinterColumns:
- JSONPath: .spec.roleRef.name
description: The name of the ServiceRole object being referenced
......@@ -494,7 +545,10 @@ spec:
- istio-io
- policy-istio-io
scope: Namespaced
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -519,7 +573,10 @@ spec:
- istio-io
- policy-istio-io
scope: Namespaced
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -544,7 +601,10 @@ spec:
- istio-io
- policy-istio-io
scope: Namespaced
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1beta1
......@@ -569,5 +629,8 @@ spec:
- istio-io
- policy-istio-io
scope: Namespaced
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
---
......@@ -19,5 +19,8 @@ spec:
- istio-io
- networking-istio-io
scope: Namespaced
version: v1alpha3
versions:
- name: v1alpha3
served: true
storage: true
---
......@@ -17,5 +17,8 @@ spec:
- istio-io
- rbac-istio-io
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
......@@ -11,7 +11,10 @@ metadata:
"helm.sh/resource-policy": keep
spec:
group: certmanager.k8s.io
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
names:
kind: ClusterIssuer
plural: clusterissuers
......@@ -30,7 +33,10 @@ metadata:
"helm.sh/resource-policy": keep
spec:
group: certmanager.k8s.io
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
names:
kind: Issuer
plural: issuers
......@@ -71,7 +77,10 @@ spec:
name: Age
type: date
group: certmanager.k8s.io
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
scope: Namespaced
names:
kind: Certificate
......
......@@ -30,7 +30,10 @@ spec:
name: Age
type: date
group: certmanager.k8s.io
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
names:
kind: Order
plural: orders
......@@ -66,7 +69,10 @@ spec:
name: Age
type: date
group: certmanager.k8s.io
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
names:
kind: Challenge
plural: challenges
......
apiVersion: v1
kind: ServiceAccount
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.global.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
metadata:
name: istio-init-service-account
namespace: {{ .Release.Namespace }}
......
......@@ -13,6 +13,10 @@ spec:
selector:
matchLabels:
app: istiocoredns
strategy:
rollingUpdate:
maxSurge: {{ .Values.rollingMaxSurge }}
maxUnavailable: {{ .Values.rollingMaxUnavailable }}
template:
metadata:
name: istiocoredns
......@@ -65,7 +69,7 @@ spec:
command:
- /usr/local/bin/plugin
image: "{{ template "system_default_registry" . }}{{ .Values.pluginImage.repository }}:{{ .Values.pluginImage.tag }}"
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
ports:
- containerPort: 8053
name: dns-grpc
......
......@@ -3,6 +3,8 @@
#
enabled: false
replicaCount: 1
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
# Source code for the plugin can be found at
# https://github.com/istio-ecosystem/istio-coredns-plugin
# The plugin listens for DNS requests from coredns server at 127.0.0.1:8053
......@@ -15,7 +17,7 @@ tolerations: []
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -26,6 +28,6 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......@@ -2,5 +2,5 @@ apiVersion: v1
description: Kiali is an open source project for service mesh observability, refer to https://www.kiali.io for details.
name: kiali
version: 1.1.0
appVersion: 0.20
appVersion: 1.1.0
tillerVersion: ">=2.7.2"
......@@ -98,6 +98,7 @@ rules:
- destinationrules
- gateways
- serviceentries
- sidecars
- virtualservices
verbs:
- create
......@@ -234,6 +235,7 @@ rules:
- destinationrules
- gateways
- serviceentries
- sidecars
- virtualservices
verbs:
- get
......
{{- if not .Values.dashboard.viewOnlyMode }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
......@@ -10,8 +11,27 @@ metadata:
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kiali{{- if .Values.dashboard.viewOnlyMode }}-viewer{{- end }}
name: kiali
subjects:
- kind: ServiceAccount
name: kiali-service-account
namespace: {{ .Release.Namespace }}
{{- else }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: istio-kiali-viewer-role-binding-{{ .Release.Namespace }}
labels:
app: {{ template "kiali.name" . }}
chart: {{ template "kiali.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kiali-viewer
subjects:
- kind: ServiceAccount
name: kiali-service-account
namespace: {{ .Release.Namespace }}
{{- end }}
......@@ -15,23 +15,21 @@ data:
port: 20001
external_services:
tracing:
service: "tracing/jaeger"
{{- if and .Values.global.rancher (and .Values.global.rancher.domain .Values.global.rancher.clusterId) }}
{{- if not .Values.dashboard.jaegerURL }}
url: 'https://{{ .Values.global.rancher.domain }}/k8s/clusters/{{ .Values.global.rancher.clusterId }}/api/v1/namespaces/{{ .Release.Namespace }}/services/http:tracing:80/proxy/jaeger'
{{- end }}
{{- if .Values.dashboard.jaegerURL }}
url: {{ .Values.dashboard.jaegerURL }}
{{- else }}
url: http://tracing.istio-system:80
{{- end }}
grafana:
{{- if eq .Values.global.monitoring.type "cluster-monitoring" }}
url: https://{{ .Values.global.rancher.domain }}/k8s/clusters/{{ .Values.global.rancher.clusterId }}/api/v1/namespaces/cattle-prometheus/services/http:access-grafana:80/proxy/
service_namespace: cattle-prometheus
service: access-grafana
url: https://{{ .Values.global.rancher.domain }}/k8s/clusters/{{ .Values.global.rancher.clusterId }}/api/v1/namespaces/cattle-prometheus/services/http:access-grafana:80/proxy//
in_cluster_url: http://access-grafana.cattle-prometheus:80
{{- else if eq .Values.global.monitoring.type "built-in" }}
{{- if and .Values.global.rancher (and .Values.global.rancher.domain .Values.global.rancher.clusterId) }}
url: https://{{ .Values.global.rancher.domain }}/k8s/clusters/{{ .Values.global.rancher.clusterId }}/api/v1/namespaces/{{ .Release.Namespace }}/services/http:grafana:80/proxy/
url: https://{{ .Values.global.rancher.domain }}/k8s/clusters/{{ .Values.global.rancher.clusterId }}/api/v1/namespaces/{{ .Release.Namespace }}/services/http:grafana:80/proxy//
in_cluster_url: http://grafana.{{ .Release.Namespace }}:80
{{- end }}
{{- else }}
custom_metrics_url: "http://prometheus.{{ .Release.Namespace }}:9090"
{{- if .Values.dashboard.grafanaURL }}
url: {{ .Values.dashboard.grafanaURL }}
{{- end }}
......@@ -42,4 +40,8 @@ data:
{{- else }}
url: {{ .Values.prometheusAddr }}
{{- end }}
{{- if .Values.security.enabled }}
identity:
cert_file: {{ .Values.security.cert_file }}
private_key_file: {{ .Values.security.private_key_file }}
{{- end}}
......@@ -26,6 +26,7 @@ spec:
scheduler.alpha.kubernetes.io/critical-pod: ""
prometheus.io/scrape: "true"
prometheus.io/port: "9090"
kiali.io/runtimes: go,kiali
spec:
serviceAccountName: kiali-service-account
{{- if .Values.global.priorityClassName }}
......@@ -39,8 +40,28 @@ spec:
- "-config"
- "/kiali-configuration/config.yaml"
- "-v"
- "4"
- "3"
readinessProbe:
httpGet:
path: {{ .Values.contextPath }}/healthz
port: 20001
scheme: {{ if .Values.security.enabled }} 'HTTPS' {{ else }} 'HTTP' {{ end }}
initialDelaySeconds: 5
periodSeconds: 30
livenessProbe:
httpGet:
path: {{ .Values.contextPath }}/healthz
port: 20001
scheme: {{ if .Values.security.enabled }} 'HTTPS' {{ else }} 'HTTP' {{ end }}
initialDelaySeconds: 5
periodSeconds: 30
env:
- name: TRACING_INSECURE_SKIP_VERIFY
value: "true"
- name: GRAFANA_INSECURE_SKIP_VERIFY
value: "true"
- name: TRACING_ENABLED
value: "false"
- name: ACTIVE_NAMESPACE
valueFrom:
fieldRef:
......@@ -66,6 +87,8 @@ spec:
volumeMounts:
- name: kiali-configuration
mountPath: "/kiali-configuration"
- name: kiali-cert
mountPath: "/kiali-cert"
- name: kiali-secret
mountPath: "/kiali-secret"
resources:
......@@ -97,6 +120,12 @@ spec:
- name: kiali-configuration
configMap:
name: kiali
- name: kiali-cert
secret:
secretName: istio.kiali-service-account
{{- if not .Values.security.enabled }}
optional: true
{{- end }}
- name: kiali-nginx
configMap:
name: kiali-nginx
......
......@@ -52,9 +52,9 @@ data:
add_header Cache-Control "public";
proxy_pass http://localhost:20001/;
sub_filter_types text/html;
sub_filter_types application/javascript;
sub_filter_once on;
sub_filter </head> '<script>var path = window.location.pathname; var pathName = path.substring(0, path.lastIndexOf("/proxy") + 6); window.WEB_ROOT = window.WEB_ROOT ? pathName + window.WEB_ROOT:pathName</script></head>';
sub_filter "// This file is intentionally left bank." "window.WEB_ROOT='/k8s/clusters/{{ .Values.global.rancher.clusterId }}/api/v1/namespaces/istio-system/services/http:kiali-http:80/proxy';";
if ($request_filename ~ .*\.(?:js|css|jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm)$) {
expires 90d;
}
......
......@@ -19,7 +19,7 @@ spec:
{{- end }}
containers:
- name: "{{ template "kiali.fullname" . }}-test"
image: "{{ template "system_default_registry" . }}{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }}"
image: "{{ template "system_default_registry" . }}{{ .Values.global.curl.repository }}:{{ .Values.global.curl.tag }}"
imagePullPolicy: "{{ .Values.global.imagePullPolicy }}"
command: ['curl']
args: ['http://kiali:20001']
......
......@@ -13,7 +13,7 @@ tolerations: []
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard” vs. "soft” requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -24,7 +24,7 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# "security" and value "S1".
# "security” and value "S1”.
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......@@ -59,4 +59,9 @@ prometheusAddr: http://prometheus:9090
service:
type: ClusterIP
resources: {}
\ No newline at end of file
resources: {}
security:
enabled: false
cert_file: /kiali-cert/cert-chain.pem
private_key_file: /kiali-cert/key.pem
\ No newline at end of file
......@@ -138,6 +138,8 @@ spec:
valueType: BOOL
quota.cache_hit:
valueType: BOOL
context.proxy_version:
valueType: STRING
---
apiVersion: "config.istio.io/v1alpha2"
......
......@@ -13,16 +13,14 @@
- hostPath:
path: /var/run/sds
name: sds-uds-path
{{- if $.Values.global.sds.useTrustworthyJwt }}
- name: istio-token
projected:
sources:
- serviceAccountToken:
audience: {{ $.Values.global.trustDomain }}
audience: {{ $.Values.global.sds.token.aud }}
expirationSeconds: 43200
path: istio-token
{{- end }}
{{- end }}
- name: uds-socket
emptyDir: {}
- name: policy-adapter-secret
......@@ -68,11 +66,7 @@
{{- else }}
- --useAdapterCRDs=false
{{- end }}
{{- if $.Values.templates.useTemplateCRDs }}
- --useTemplateCRDs=true
{{- else }}
- --useTemplateCRDs=false
{{- end }}
{{- if $.Values.global.tracer.zipkin.address }}
- --trace_zipkin_url=http://{{- $.Values.global.tracer.zipkin.address }}/api/v1/spans
{{- else }}
......@@ -150,6 +144,8 @@
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: SDS_ENABLED
value: "{{ $.Values.global.sds.enabled }}"
resources:
{{- if $.Values.global.proxy.resources }}
{{ toYaml $.Values.global.proxy.resources | indent 10 }}
......@@ -164,11 +160,9 @@
- name: sds-uds-path
mountPath: /var/run/sds
readOnly: true
{{- if $.Values.global.sds.useTrustworthyJwt }}
- name: istio-token
mountPath: /var/run/secrets/tokens
{{- end }}
{{- end }}
- name: uds-socket
mountPath: /sock
- name: policy-adapter-secret
......@@ -188,16 +182,14 @@
- hostPath:
path: /var/run/sds
name: sds-uds-path
{{- if $.Values.global.sds.useTrustworthyJwt }}
- name: istio-token
projected:
sources:
- serviceAccountToken:
audience: {{ $.Values.global.trustDomain }}
audience: {{ $.Values.global.sds.token.aud }}
expirationSeconds: 43200
path: istio-token
{{- end }}
{{- end }}
- name: uds-socket
emptyDir: {}
- name: telemetry-adapter-secret
......@@ -246,11 +238,6 @@
{{- else }}
- --useAdapterCRDs=false
{{- end }}
{{- if $.Values.templates.useTemplateCRDs }}
- --useTemplateCRDs=true
{{- else }}
- --useTemplateCRDs=false
{{- end }}
{{- if $.Values.global.tracer.zipkin.address }}
- --trace_zipkin_url=http://{{- $.Values.global.tracer.zipkin.address }}/api/v1/spans
{{- else }}
......@@ -332,6 +319,8 @@
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: SDS_ENABLED
value: "{{ $.Values.global.sds.enabled }}"
resources:
{{- if $.Values.global.proxy.resources }}
{{ toYaml $.Values.global.proxy.resources | indent 10 }}
......@@ -346,11 +335,9 @@
- name: sds-uds-path
mountPath: /var/run/sds
readOnly: true
{{- if $.Values.global.sds.useTrustworthyJwt }}
- name: istio-token
mountPath: /var/run/secrets/tokens
{{- end }}
{{- end }}
- name: uds-socket
mountPath: /sock
{{- end }}
......@@ -380,8 +367,8 @@ spec:
{{- end }}
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
maxSurge: {{ $spec.rollingMaxSurge }}
maxUnavailable: {{ $spec.rollingMaxUnavailable }}
selector:
matchLabels:
istio: mixer
......
......@@ -15,6 +15,8 @@ policy:
autoscaleMax: 5
cpu:
targetAverageUtilization: 80
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
telemetry:
enabled: true
......@@ -24,6 +26,8 @@ telemetry:
autoscaleMax: 5
cpu:
targetAverageUtilization: 80
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
sessionAffinityEnabled: false
# mixer load shedding configuration.
......@@ -43,6 +47,16 @@ telemetry:
cpu: 4800m
memory: 4G
# Set reportBatchMaxEntries to 0 to use the default batching behavior (i.e., every 100 requests).
# A positive value indicates the number of requests that are batched before telemetry data
# is sent to the mixer server
reportBatchMaxEntries: 100
# Set reportBatchMaxTime to 0 to use the default batching behavior (i.e., every 1 second).
# A positive time value indicates the maximum wait time since the last request will telemetry data
# be batched before being sent to the mixer server
reportBatchMaxTime: 1s
podAnnotations: {}
nodeSelector: {}
tolerations: []
......@@ -53,7 +67,7 @@ tolerations: []
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -64,13 +78,10 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
templates:
useTemplateCRDs: false
adapters:
kubernetesenv:
enabled: true
......
......@@ -44,6 +44,10 @@ spec:
{{- end }}
- name: "Trust_Domain"
value: "{{ .Values.global.trustDomain }}"
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: sdsudspath
hostPath:
......
......@@ -18,7 +18,7 @@ tolerations: []
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard” vs. "soft” requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -29,6 +29,6 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# "security" and value "S1".
# "security” and value "S1”.
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......@@ -22,8 +22,8 @@ spec:
{{- end }}
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
maxSurge: {{ .Values.rollingMaxSurge }}
maxUnavailable: {{ .Values.rollingMaxUnavailable }}
selector:
matchLabels:
istio: pilot
......@@ -58,11 +58,9 @@ spec:
- "-a"
- {{ .Release.Namespace }}
{{- end }}
{{- if $.Values.global.controlPlaneSecurityEnabled}}
{{- if not .Values.sidecar }}
{{- if and $.Values.global.controlPlaneSecurityEnabled (not .Values.sidecar)}}
- --secureGrpcAddr
- ":15011"
{{- end }}
{{- else }}
- --secureGrpcAddr
- ""
......@@ -106,8 +104,10 @@ spec:
- name: PILOT_TRACE_SAMPLING
value: "{{ .Values.traceSampling }}"
{{- end }}
- name: PILOT_DISABLE_XDS_MARSHALING_TO_ANY
value: "1"
- name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_OUTBOUND
value: "{{ .Values.enableProtocolSniffingForOutbound }}"
- name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_INBOUND
value: "{{ .Values.enableProtocolSniffingForInbound }}"
resources:
{{- if .Values.resources }}
{{ toYaml .Values.resources | indent 12 }}
......@@ -163,6 +163,8 @@ spec:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: SDS_ENABLED
value: "{{ $.Values.global.sds.enabled }}"
resources:
{{- if .Values.global.proxy.resources }}
{{ toYaml .Values.global.proxy.resources | indent 12 }}
......@@ -177,27 +179,23 @@ spec:
- name: sds-uds-path
mountPath: /var/run/sds
readOnly: true
{{- if $.Values.global.sds.useTrustworthyJwt }}
- name: istio-token
mountPath: /var/run/secrets/tokens
{{- end }}
{{- end }}
{{- end }}
volumes:
{{- if $.Values.global.sds.enabled }}
- hostPath:
path: /var/run/sds
name: sds-uds-path
{{- if $.Values.global.sds.useTrustworthyJwt }}
- name: istio-token
projected:
sources:
- serviceAccountToken:
audience: {{ $.Values.global.trustDomain }}
audience: {{ $.Values.global.sds.token.aud }}
expirationSeconds: 43200
path: istio-token
{{- end }}
{{- end }}
- name: config-volume
configMap:
name: istio
......
......@@ -7,8 +7,14 @@ autoscaleMin: 1
autoscaleMax: 5
# specify replicaCount when autoscaleEnabled: false
# replicaCount: 1
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
sidecar: true
traceSampling: 1.0
# if protocol sniffing is enabled for outbound
enableProtocolSniffingForOutbound: true
# if protocol sniffing is enabled for inbound
enableProtocolSniffingForInbound: false
# Resources for a small pilot install
resources:
requests:
......@@ -28,7 +34,7 @@ tolerations: []
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -39,7 +45,7 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......
......@@ -19,7 +19,7 @@ spec:
{{- end }}
containers:
- name: "{{ template "prometheus.fullname" . }}-test"
image: {{ template "system_default_registry" . }}{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }}
image: {{ template "system_default_registry" . }}{{ .Values.global.curl.repository }}:{{ .Values.global.curl.tag }}
imagePullPolicy: "{{ .Values.global.imagePullPolicy }}"
command: ['sh', '-c', 'for i in 1 2 3; do curl http://prometheus:9090/-/ready && exit 0 || sleep 15; done; exit 1']
restartPolicy: Never
......
......@@ -14,7 +14,7 @@ tolerations: []
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard” vs. "soft” requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -25,7 +25,7 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# "security" and value "S1".
# "security” and value "S1”.
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......
......@@ -15,7 +15,7 @@ rules:
resources: ["secrets"]
verbs: ["create", "get", "watch", "list", "update", "delete"]
- apiGroups: [""]
resources: ["serviceaccounts", "services"]
resources: ["serviceaccounts", "services", "namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: ["authentication.k8s.io"]
resources: ["tokenreviews"]
......
{{- if .Values.createMeshPolicy }}
apiVersion: v1
kind: ServiceAccount
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.global.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
metadata:
name: istio-security-post-install-account
namespace: {{ .Release.Namespace }}
......
......@@ -17,8 +17,8 @@ spec:
istio: citadel
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
maxSurge: {{ .Values.rollingMaxSurge }}
maxUnavailable: {{ .Values.rollingMaxUnavailable }}
template:
metadata:
labels:
......@@ -67,6 +67,9 @@ spec:
- --liveness-probe-interval=60s # interval for health check file update
- --probe-check-interval=15s # interval for health status check
{{- end }}
env:
- name: CITADEL_ENABLE_NAMESPACES_BY_DEFAULT
value: "{{ .Values.enableNamespacesByDefault }}"
{{- if .Values.citadelHealthCheck }}
livenessProbe:
exec:
......
......@@ -19,7 +19,7 @@ spec:
{{- end }}
containers:
- name: "{{ template "security.fullname" . }}-test"
image: "{{ template "system_default_registry" . }}{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }}"
image: "{{ template "system_default_registry" . }}{{ .Values.global.curl.repository }}:{{ .Values.global.curl.tag }}"
imagePullPolicy: "{{ .Values.global.imagePullPolicy }}"
command: ['sh', '-c', 'for i in 1 2 3; do curl http://istio-citadel:{{ .Values.global.monitoringPort }}/version && exit 0 || sleep 15; done; exit 1']
restartPolicy: Never
......
......@@ -3,6 +3,8 @@
#
enabled: true
replicaCount: 1
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
selfSigned: true # indicate if self-signed CA is used.
createMeshPolicy: true
nodeSelector: {}
......@@ -13,13 +15,23 @@ citadelHealthCheck: false
# 90*24hour = 2160h
workloadCertTtl: 2160h
# Determines Citadel default behavior if the ca.istio.io/env or ca.istio.io/override
# labels are not found on a given namespace.
#
# For example: consider a namespace called "target", which has neither the "ca.istio.io/env"
# nor the "ca.istio.io/override" namespace labels. To decide whether or not to generate secrets
# for service accounts created in this "target" namespace, Citadel will defer to this option. If the value
# of this option is "true" in this case, secrets will be generated for the "target" namespace.
# If the value of this option is "false" Citadel will not generate secrets upon service account creation.
enableNamespacesByDefault: true
# Specify the pod anti-affinity that allows you to constrain which nodes
# your pod is eligible to be scheduled based on labels on pods that are
# already running on the node rather than based on labels on nodes.
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -30,6 +42,6 @@ workloadCertTtl: 2160h
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......@@ -16,8 +16,8 @@ spec:
istio: sidecar-injector
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
maxSurge: {{ .Values.rollingMaxSurge }}
maxUnavailable: {{ .Values.rollingMaxUnavailable }}
template:
metadata:
labels:
......
......@@ -12,5 +12,8 @@ metadata:
spec:
ports:
- port: 443
name: https-inject
- port: {{ .Values.global.monitoringPort }}
name: http-monitoring
selector:
istio: sidecar-injector
......@@ -3,6 +3,8 @@
#
enabled: true
replicaCount: 1
rollingMaxSurge: 100%
rollingMaxUnavailable: 25%
enableNamespacesByDefault: false
nodeSelector: {}
tolerations: []
......@@ -13,7 +15,7 @@ tolerations: []
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -24,7 +26,7 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......
......@@ -59,6 +59,16 @@ spec:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
{{- if eq .Values.jaeger.spanStorageType "badger" }}
- name: BADGER_EPHEMERAL
value: "false"
- name: SPAN_STORAGE_TYPE
value: "badger"
- name: BADGER_DIRECTORY_VALUE
value: "/badger/data"
- name: BADGER_DIRECTORY_KEY
value: "/badger/key"
{{- end }}
- name: COLLECTOR_ZIPKIN_HTTP_PORT
value: "9411"
- name: MEMORY_MAX_TRACES
......@@ -73,6 +83,11 @@ spec:
httpGet:
path: /
port: 16686
{{- if eq .Values.jaeger.spanStorageType "badger" }}
volumeMounts:
- name: data
mountPath: /badger
{{- end }}
resources:
{{- if .Values.jaeger.resources }}
{{ toYaml .Values.jaeger.resources | indent 12 }}
......@@ -98,14 +113,6 @@ spec:
resources:
{{ toYaml .Values.jaeger.proxy.resources | indent 12 }}
{{- end }}
volumes:
- name: tracing-nginx
configMap:
name: tracing-nginx
items:
- key: nginx.conf
mode: 438
path: nginx.conf
affinity:
{{- include "nodeaffinity" . | indent 6 }}
{{- include "podAntiAffinity" . | indent 6 }}
......@@ -116,4 +123,21 @@ spec:
tolerations:
{{ toYaml .Values.global.defaultTolerations | indent 6 }}
{{- end }}
volumes:
- name: tracing-nginx
configMap:
name: tracing-nginx
items:
- key: nginx.conf
mode: 438
path: nginx.conf
{{- if eq .Values.jaeger.spanStorageType "badger" }}
- name: data
{{- if .Values.jaeger.persist }}
persistentVolumeClaim:
claimName: istio-jaeger-pvc
{{- else }}
emptyDir: {}
{{- end }}
{{- end }}
{{ end }}
{{- if eq .Values.provider "jaeger" }}
{{- if .Values.jaeger.persist }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: istio-jaeger-pvc
namespace: {{ .Release.Namespace }}
labels:
app: jaeger
chart: {{ template "tracing.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
storageClassName: {{ .Values.jaeger.storageClassName }}
accessModes:
- {{ .Values.jaeger.accessMode }}
resources:
requests:
storage: 5Gi
{{- end }}
{{- end }}
......@@ -18,7 +18,7 @@ spec:
{{- end }}
containers:
- name: "{{ .Values.provider }}-test"
image: "{{ template "system_default_registry" . }}{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }}"
image: "{{ template "system_default_registry" . }}{{ .Values.global.curl.repository }}:{{ .Values.global.curl.tag }}"
imagePullPolicy: "{{ .Values.global.imagePullPolicy }}"
command: ['curl']
{{- if eq .Values.provider "jaeger" }}
......
......@@ -13,7 +13,7 @@ tolerations: []
# There are currently two types of anti-affinity:
# "requiredDuringSchedulingIgnoredDuringExecution"
# "preferredDuringSchedulingIgnoredDuringExecution"
# which denote “hard” vs. “soft” requirements, you can define your values
# which denote "hard" vs. "soft" requirements, you can define your values
# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
# correspondingly.
# For example:
......@@ -24,7 +24,7 @@ tolerations: []
# topologyKey: "kubernetes.io/hostname"
# This pod anti-affinity rule says that the pod requires not to be scheduled
# onto a node if that node is already running a pod with label having key
# “security” and value “S1”.
# "security" and value "S1".
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
......@@ -33,6 +33,11 @@ jaeger:
max_traces: 50000
proxy:
resources: {}
# spanStorageType value can be "memory" and "badger" for all-in-one image
spanStorageType: badger
persist: false
storageClassName: ""
accessMode: ReadWriteMany
zipkin:
probeStartupDelay: 200
......@@ -52,7 +57,7 @@ zipkin:
maxSpans: 500000
node:
cpus: 2
proxy:
proxy:
resources: {}
service:
......
......@@ -8,10 +8,12 @@ initContainers:
image: "{{ .Values.global.systemDefaultRegistry }}/{{ .Values.global.proxy_init.repository }}:{{ .Values.global.proxy_init.tag }}"
{{- else }}
image: "{{ .Values.global.proxy_init.repository }}:{{ .Values.global.proxy_init.tag }}"
{{- end }}
{{- end }}
args:
- "-p"
- "15001"
- "-z"
- "15006"
- "-u"
- "1337"
- "-m"
......@@ -21,7 +23,7 @@ initContainers:
- "-x"
- "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundIPRanges` .Values.global.proxy.excludeIPRanges }}"
- "-b"
- "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeInboundPorts` (includeInboundPorts .Spec.Containers) }}"
- "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeInboundPorts` `*` }}"
- "-d"
- "{{ excludeInboundPort (annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort) (annotation .ObjectMeta `traffic.sidecar.istio.io/excludeInboundPorts` .Values.global.proxy.excludeInboundPorts) }}"
{{ if or (isset .ObjectMeta.Annotations `traffic.sidecar.istio.io/excludeOutboundPorts`) (ne .Values.global.proxy.excludeOutboundPorts "") -}}
......@@ -33,13 +35,12 @@ initContainers:
- "{{ index .ObjectMeta.Annotations `traffic.sidecar.istio.io/kubevirtInterfaces` }}"
{{ end -}}
imagePullPolicy: "{{ .Values.global.imagePullPolicy }}"
{{- if .Values.global.proxy.init.resources }}
resources:
requests:
cpu: 10m
memory: 10Mi
limits:
cpu: 100m
memory: 50Mi
{{ toYaml .Values.global.proxy.init.resources | indent 4 }}
{{- else }}
resources: {}
{{- end }}
securityContext:
runAsUser: 0
runAsNonRoot: false
......@@ -50,11 +51,6 @@ initContainers:
privileged: true
{{- end }}
restartPolicy: Always
env:
{{- if contains "*" (annotation .ObjectMeta `traffic.sidecar.istio.io/includeInboundPorts` "") }}
- name: INBOUND_CAPTURE_PORT
value: 15006
{{- end }}
{{- end }}
{{ end -}}
{{- if eq .Values.global.proxy.enableCoreDump true }}
......@@ -65,10 +61,10 @@ initContainers:
command:
- /bin/sh
{{- if .Values.global.systemDefaultRegistry }}
image: "{{ .Values.global.systemDefaultRegistry }}/{{ .Values.global.proxy_init.repository }}:{{ .Values.global.proxy_init.tag }}"
image: "{{ .Values.global.systemDefaultRegistry }}/{{ .Values.global.proxy.enableCoreDumpImage }}"
{{- else }}
image: "{{ .Values.global.proxy_init.repository }}:{{ .Values.global.proxy_init.tag }}"
{{- end }}
image: "{{ .Values.global.proxy.enableCoreDumpImage }}"
{{- end }}
imagePullPolicy: IfNotPresent
resources: {}
securityContext:
......@@ -83,7 +79,7 @@ containers:
image: "{{ .Values.global.systemDefaultRegistry }}/{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }}"
{{- else }}
image: "{{ annotation .ObjectMeta `sidecar.istio.io/proxyImage` .Values.global.proxy.repository}}:{{ .Values.global.proxy.tag }}"
{{- end }}
{{- end }}
ports:
- containerPort: 15090
protocol: TCP
......@@ -140,7 +136,11 @@ containers:
{{- end }}
{{- if .Values.global.proxy.envoyMetricsService.enabled }}
- --envoyMetricsServiceAddress
- "{{ .ProxyConfig.EnvoyMetricsServiceAddress }}"
- "{{ .ProxyConfig.GetEnvoyMetricsService.GetAddress }}"
{{- end }}
{{- if .Values.global.proxy.envoyAccessLogService.enabled }}
- --envoyAccessLogService
- '{{ structToJSON .ProxyConfig.EnvoyAccessLogService }}'
{{- end }}
- --proxyAdminPort
- "{{ .ProxyConfig.ProxyAdminPort }}"
......@@ -164,6 +164,17 @@ containers:
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: ISTIO_META_POD_PORTS
value: |-
[
{{- range $index1, $c := .Spec.Containers }}
{{- range $index2, $p := $c.Ports }}
{{if or (ne $index1 0) (ne $index2 0)}},{{end}}{{ structToJSON $p }}
{{- end}}
{{- end}}
]
- name: ISTIO_META_CLUSTER_ID
value: "{{ valueOrDefault .Values.global.multicluster.clusterName `Kubernetes` }}"
- name: POD_NAMESPACE
valueFrom:
fieldRef:
......@@ -172,6 +183,10 @@ containers:
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
{{ if eq .Values.global.proxy.tracer "datadog" }}
- name: HOST_IP
valueFrom:
......@@ -186,6 +201,8 @@ containers:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SDS_ENABLED
value: "{{ $.Values.global.sds.enabled }}"
- name: ISTIO_META_INTERCEPTION_MODE
value: "{{ or (index .ObjectMeta.Annotations `sidecar.istio.io/interceptionMode`) .ProxyConfig.InterceptionMode.String }}"
- name: ISTIO_META_INCLUDE_INBOUND_PORTS
......@@ -204,6 +221,14 @@ containers:
value: |
{{ toJSON .ObjectMeta.Labels }}
{{ end }}
{{- if .DeploymentMeta.Name }}
- name: ISTIO_META_WORKLOAD_NAME
value: {{ .DeploymentMeta.Name }}
{{ end }}
{{- if and .TypeMeta.APIVersion .DeploymentMeta.Name }}
- name: ISTIO_META_OWNER
value: kubernetes://api/{{ .TypeMeta.APIVersion }}/namespaces/{{ valueOrDefault .DeploymentMeta.Namespace `default` }}/{{ toLower .TypeMeta.Kind}}s/{{ .DeploymentMeta.Name }}
{{- end}}
{{- if (isset .ObjectMeta.Annotations `sidecar.istio.io/bootstrapOverride`) }}
- name: ISTIO_BOOTSTRAP_OVERRIDE
value: "/etc/istio/custom-bootstrap/custom_bootstrap.json"
......@@ -212,6 +237,13 @@ containers:
- name: ISTIO_META_SDS_TOKEN_PATH
value: "{{ .Values.global.sds.customTokenDirectory -}}/sdstoken"
{{- end }}
{{- if .Values.global.meshID }}
- name: ISTIO_META_MESH_ID
value: "{{ .Values.global.meshID }}"
{{- else if .Values.global.trustDomain }}
- name: ISTIO_META_MESH_ID
value: "{{ .Values.global.trustDomain }}"
{{- end }}
imagePullPolicy: {{ .Values.global.imagePullPolicy }}
{{ if ne (annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort) `0` }}
readinessProbe:
......@@ -235,7 +267,7 @@ containers:
- NET_ADMIN
runAsGroup: 1337
{{ else -}}
{{ if and .Values.global.sds.enabled .Values.global.sds.useTrustworthyJwt }}
{{ if .Values.global.sds.enabled }}
runAsGroup: 1337
{{- end }}
runAsUser: 1337
......@@ -265,10 +297,8 @@ containers:
- mountPath: /var/run/sds
name: sds-uds-path
readOnly: true
{{- if .Values.global.sds.useTrustworthyJwt }}
- mountPath: /var/run/secrets/tokens
name: istio-token
{{- end }}
{{- if .Values.global.sds.customTokenDirectory }}
- mountPath: "{{ .Values.global.sds.customTokenDirectory -}}"
name: custom-sds-token
......@@ -303,20 +333,18 @@ volumes:
- name: sds-uds-path
hostPath:
path: /var/run/sds
- name: istio-token
projected:
sources:
- serviceAccountToken:
path: istio-token
expirationSeconds: 43200
audience: {{ .Values.global.sds.token.aud }}
{{- if .Values.global.sds.customTokenDirectory }}
- name: custom-sds-token
secret:
secretName: sdstokensecret
{{- end }}
{{- if .Values.global.sds.useTrustworthyJwt }}
- name: istio-token
projected:
sources:
- serviceAccountToken:
path: istio-token
expirationSeconds: 43200
audience: {{ .Values.global.trustDomain }}
{{- end }}
{{- else }}
- name: istio-certs
secret:
......@@ -345,4 +373,14 @@ dnsConfig:
{{- range .Values.global.podDNSSearchNamespaces }}
- {{ render . }}
{{- end }}
{{- end }}
\ No newline at end of file
{{- end }}
podRedirectAnnot:
sidecar.istio.io/interceptionMode: "{{ annotation .ObjectMeta `sidecar.istio.io/interceptionMode` .ProxyConfig.InterceptionMode }}"
traffic.sidecar.istio.io/includeOutboundIPRanges: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeOutboundIPRanges` .Values.global.proxy.includeIPRanges }}"
traffic.sidecar.istio.io/excludeOutboundIPRanges: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundIPRanges` .Values.global.proxy.excludeIPRanges }}"
traffic.sidecar.istio.io/includeInboundPorts: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeInboundPorts` (includeInboundPorts .Spec.Containers) }}"
traffic.sidecar.istio.io/excludeInboundPorts: "{{ excludeInboundPort (annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort) (annotation .ObjectMeta `traffic.sidecar.istio.io/excludeInboundPorts` .Values.global.proxy.excludeInboundPorts) }}"
{{ if or (isset .ObjectMeta.Annotations `traffic.sidecar.istio.io/excludeOutboundPorts`) (ne .Values.global.proxy.excludeOutboundPorts "") }}
traffic.sidecar.istio.io/excludeOutboundPorts: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundPorts` .Values.global.proxy.excludeOutboundPorts }}"
{{- end }}
traffic.sidecar.istio.io/kubevirtInterfaces: "{{ index .ObjectMeta.Annotations `traffic.sidecar.istio.io/kubevirtInterfaces` }}"
labels:
rancher.istio.v0.0.2: 1.2.5
rancher.istio.v0.0.2: 1.3.0
rancher_min_version: 2.3.0-rc1
Thank you for installing {{ .Chart.Name }}.
Thank you for installing {{ .Chart.Name | title }}.
Your release is named {{ .Release.Name }}.
Your release is named {{ .Release.Name | title }}.
To get started running application with Istio, execute the following steps:
......@@ -26,4 +26,4 @@ $ kubectl apply -f <(istioctl kube-inject -f <your-application>.yaml)
{{- end }}
For more information on running Istio, visit:
https://istio.io/
\ No newline at end of file
https://istio.io/
......@@ -20,7 +20,7 @@
values:
{{- range $key, $val := .Values.global.arch }}
{{- if gt ($val | int) 0 }}
- {{ $key }}
- {{ $key | quote }}
{{- end }}
{{- end }}
{{- $nodeSelector := default .Values.global.defaultNodeSelector .Values.nodeSelector -}}
......@@ -28,7 +28,7 @@
- key: {{ $key }}
operator: In
values:
- {{ $val }}
- {{ $val | quote }}
{{- end }}
{{- end }}
......@@ -41,7 +41,7 @@
- key: beta.kubernetes.io/arch
operator: In
values:
- {{ $key }}
- {{ $key | quote }}
{{- end }}
{{- end }}
{{- end }}
......@@ -70,7 +70,7 @@
values:
{{- $vals := split "," $item.values }}
{{- range $i, $v := $vals }}
- {{ $v }}
- {{ $v | quote }}
{{- end }}
{{- end }}
topologyKey: {{ $item.topologyKey }}
......@@ -88,7 +88,7 @@
values:
{{- $vals := split "," $item.values }}
{{- range $i, $v := $vals }}
- {{ $v }}
- {{ $v | quote }}
{{- end }}
{{- end }}
topologyKey: {{ $item.topologyKey }}
......
......@@ -19,6 +19,21 @@ data:
disablePolicyChecks: true
{{- end }}
{{- if .Values.mixer.telemetry.reportBatchMaxEntries }}
# reportBatchMaxEntries is the number of requests that are batched before telemetry data is sent to the mixer server
reportBatchMaxEntries: {{ .Values.mixer.telemetry.reportBatchMaxEntries }}
{{- end }}
{{- if .Values.mixer.telemetry.reportBatchMaxTime }}
# reportBatchMaxTime is the max waiting time before the telemetry data of a request is sent to the mixer server
reportBatchMaxTime: {{ .Values.mixer.telemetry.reportBatchMaxTime }}
{{- end }}
{{- if .Values.mixer.telemetry.sessionAffinityEnabled }}
# sidecarToTelemetrySessionAffinity will create a STRICT_DNS type cluster for istio-telemetry.
sidecarToTelemetrySessionAffinity: {{ .Values.mixer.telemetry.sessionAffinityEnabled }}
{{- end }}
# Set enableTracing to false to disable request tracing.
enableTracing: {{ .Values.global.enableTracing }}
......@@ -35,6 +50,8 @@ data:
# Set accessLogEncoding to JSON or TEXT to configure sidecar access log
accessLogEncoding: '{{ .Values.global.proxy.accessLogEncoding }}'
enableEnvoyAccessLogService: {{ .Values.global.proxy.envoyAccessLogService.enabled }}
{{- if .Values.global.istioRemote }}
{{- if .Values.global.remotePolicyAddress }}
......@@ -84,50 +101,48 @@ data:
# Default connect timeout for dynamic clusters generated by Pilot and returned via XDS
connectTimeout: 10s
# Automatic protocol detection uses a set of heuristics to
# determine whether the connection is using TLS or not (on the
# server side), as well as the application protocol being used
# (e.g., http vs tcp). These heuristics rely on the client sending
# the first bits of data. For server first protocols like MySQL,
# MongoDB, etc., Envoy will timeout on the protocol detection after
# the specified period, defaulting to non mTLS plain TCP
# traffic. Set this field to tweak the period that Envoy will wait
# for the client to send the first bits of data. (MUST BE >=1ms)
protocolDetectionTimeout: {{ .Values.global.proxy.protocolDetectionTimeout }}
# DNS refresh rate for Envoy clusters of type STRICT_DNS
dnsRefreshRate: {{ .Values.global.proxy.dnsRefreshRate }}
# Unix Domain Socket through which envoy communicates with NodeAgent SDS to get
# key/cert for mTLS. Use secret-mount files instead of SDS if set to empty.
sdsUdsPath: {{ .Values.global.sds.udsPath }}
# This flag is used by secret discovery service(SDS).
# If set to true(prerequisite: https://kubernetes.io/docs/concepts/storage/volumes/#projected), Istio will inject volumes mount
# for k8s service account JWT, so that K8s API server mounts k8s service account JWT to envoy container, which
# will be used to generate key/cert eventually. This isn't supported for non-k8s case.
enableSdsTokenMount: {{ .Values.global.sds.useTrustworthyJwt }}
# This flag is used by secret discovery service(SDS).
# If set to true, envoy will fetch normal k8s service account JWT from '/var/run/secrets/kubernetes.io/serviceaccount/token'
# (https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod)
# and pass to sds server, which will be used to request key/cert eventually.
# this flag is ignored if enableSdsTokenMount is set.
# This isn't supported for non-k8s case.
sdsUseK8sSaJwt: {{ .Values.global.sds.useNormalJwt }}
# key/cert for mTLS. Use secret-mount files instead of SDS if set to empty.
sdsUdsPath: {{ .Values.global.sds.udsPath | quote }}
# The trust domain corresponds to the trust root of a system.
# Refer to https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain
trustDomain: {{ .Values.global.trustDomain }}
trustDomain: {{ .Values.global.trustDomain | quote }}
# Set the default behavior of the sidecar for handling outbound traffic from the application:
# ALLOW_ANY - outbound traffic to unknown destinations will be allowed, in case there are no
# services or ServiceEntries for the destination port
# REGISTRY_ONLY - restrict outbound traffic to services defined in the service registry as well
# as those defined through ServiceEntries
# as those defined through ServiceEntries
outboundTrafficPolicy:
mode: {{ .Values.global.outboundTrafficPolicy.mode }}
{{- if .Values.global.localityLbSetting.enabled }}
localityLbSetting:
{{ toYaml .Values.global.localityLbSetting | indent 6 }}
{{ toYaml .Values.global.localityLbSetting | trim | indent 6 }}
{{- end }}
# The namespace to treat as the administrative root namespace for istio
# configuration.
{{- if .Values.global.configRootNamespace }}
{{- if .Values.global.configRootNamespace }}
rootNamespace: {{ .Values.global.configRootNamespace }}
{{- else }}
{{- else }}
rootNamespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}
{{- if .Values.global.defaultConfigVisibilitySettings }}
defaultServiceExportTo:
......@@ -222,6 +237,9 @@ data:
datadog:
# Address of the Datadog Agent
address: {{ .Values.global.tracer.datadog.address }}
{{- else if eq .Values.global.proxy.tracer "stackdriver" }}
tracing:
stackdriver: {}
{{- end }}
{{- if .Values.global.proxy.envoyStatsd.enabled }}
......@@ -233,7 +251,23 @@ data:
{{- if .Values.global.proxy.envoyMetricsService.enabled }}
#
# Envoy's Metrics Service stats sink pushes Envoy metrics to a remote collector via the Metrics Service gRPC API.
envoyMetricsServiceAddress: {{ .Values.global.proxy.envoyMetricsService.host }}:{{ .Values.global.proxy.envoyMetricsService.port }}
envoyMetricsService:
address: {{ .Values.global.proxy.envoyMetricsService.host }}:{{ .Values.global.proxy.envoyMetricsService.port }}
{{- end}}
{{- if .Values.global.proxy.envoyAccessLogService.enabled }}
#
# Envoy's AccessLog Service pushes access logs to a remote collector via the Access Log Service gRPC API.
envoyAccessLogService:
address: {{ .Values.global.proxy.envoyAccessLogService.host }}:{{ .Values.global.proxy.envoyAccessLogService.port }}
{{- if .Values.global.proxy.envoyAccessLogService.tlsSettings }}
tlsSettings:
{{ toYaml .Values.global.proxy.envoyAccessLogService.tlsSettings | indent 10 }}
{{- end}}
{{- if .Values.global.proxy.envoyAccessLogService.tcpKeepalive }}
tcpKeepalive:
{{ toYaml .Values.global.proxy.envoyAccessLogService.tcpKeepalive | indent 10 }}
{{- end}}
{{- end}}
{{- $defPilotHostname := printf "istio-pilot.%s" .Release.Namespace }}
......@@ -261,7 +295,7 @@ data:
discoveryAddress: {{ $pilotAddress }}:15010
{{- end }}
{{- end }}
# Configuration file for the mesh networks to be used by the Split Horizon EDS.
meshNetworks: |-
{{- if .Values.global.meshNetworks }}
......
......@@ -17,9 +17,9 @@ data:
config: |-
policy: {{ .Values.global.proxy.autoInject }}
alwaysInjectSelector:
{{ toYaml .Values.sidecarInjectorWebhook.alwaysInjectSelector | indent 6 }}
{{ toYaml .Values.sidecarInjectorWebhook.alwaysInjectSelector | trim | indent 6 }}
neverInjectSelector:
{{ toYaml .Values.sidecarInjectorWebhook.neverInjectSelector | indent 6 }}
{{ toYaml .Values.sidecarInjectorWebhook.neverInjectSelector | trim | indent 6 }}
template: |-
{{ .Files.Get "files/injection-template.yaml" | indent 6 }}
{{ .Files.Get "files/injection-template.yaml" | trim | indent 6 }}
{{- end }}
......@@ -12,7 +12,7 @@
}
]
},
"editable": false,
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"links": [],
......@@ -201,7 +201,7 @@
"refId": "H"
},
{
"expr": "sum(container_memory_usage_bytes{container_name=~\"galley\", pod_name=~\"istio-galley-.*\"})",
"expr": "sum(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",container_name=~\"galley\", pod_name=~\"istio-galley-.*\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Total (kis)",
......@@ -286,14 +286,14 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{container_name=~\"galley\", pod_name=~\"istio-galley-.*\"}[4m]))",
"expr": "sum(rate(container_cpu_usage_seconds_total{job=\"kubernetes-cadvisor\",container_name=~\"galley\", pod_name=~\"istio-galley-.*\"}[4m]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "Total (k8s)",
"refId": "A"
},
{
"expr": "sum(rate(container_cpu_usage_seconds_total{container_name=~\"galley\", pod_name=~\"istio-galley-.*\"}[4m])) by (container_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{job=\"kubernetes-cadvisor\",container_name=~\"galley\", pod_name=~\"istio-galley-.*\"}[4m])) by (container_name)",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ container_name }} (k8s)",
......@@ -392,7 +392,7 @@
"refId": "A"
},
{
"expr": "container_fs_usage_bytes{container_name=~\"galley\", pod_name=~\"istio-galley-.*\"}",
"expr": "container_fs_usage_bytes{job=\"kubernetes-cadvisor\",container_name=~\"galley\", pod_name=~\"istio-galley-.*\"}",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ container_name }} ",
......@@ -484,14 +484,14 @@
"refId": "A"
},
{
"expr": "galley_mcp_source_clients_total",
"expr": "istio_mcp_clients_total{component=\"galley\"}",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "clients_total",
"refId": "B"
},
{
"expr": "go_goroutines{job=\"istio/galley\"}/galley_mcp_source_clients_total",
"expr": "go_goroutines{job=\"istio/galley\"}/sum(istio_mcp_clients_total{component=\"galley\"}) without (component)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "avg_goroutines_per_client",
......@@ -1548,7 +1548,7 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(galley_mcp_source_clients_total)",
"expr": "sum(istio_mcp_clients_total{component=\"galley\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Clients",
......@@ -1633,7 +1633,7 @@
"steppedLine": false,
"targets": [
{
"expr": "sum by(collection)(irate(galley_mcp_source_request_acks_total[4m]) * 60)",
"expr": "sum by(collection)(irate(istio_mcp_request_acks_total{component=\"galley\"}[4m]) * 60)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "",
......@@ -1718,7 +1718,7 @@
"steppedLine": false,
"targets": [
{
"expr": "rate(galley_mcp_source_request_nacks_total[4m]) * 60",
"expr": "rate(istio_mcp_request_nacks_total{component=\"galley\"}[4m]) * 60",
"format": "time_series",
"intervalFactor": 1,
"refId": "A"
......
......@@ -50,7 +50,7 @@
}
]
},
"editable": false,
"editable": true,
"gnetId": null,
"graphTooltip": 1,
"id": null,
......@@ -263,7 +263,7 @@
"refId": "G"
},
{
"expr": "sum(label_replace(container_memory_usage_bytes{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (service)",
"expr": "sum(label_replace(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (service)",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -271,7 +271,7 @@
"refId": "C"
},
{
"expr": "sum(label_replace(container_memory_usage_bytes{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (container_name, service)",
"expr": "sum(label_replace(container_memory_usage_bytes{job=\"kubernetes-cadvisor\",container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (container_name, service)",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -356,7 +356,7 @@
"steppedLine": false,
"targets": [
{
"expr": "label_replace(sum(rate(container_cpu_usage_seconds_total{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}[4m])) by (pod_name), \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")",
"expr": "label_replace(sum(rate(container_cpu_usage_seconds_total{job=\"kubernetes-cadvisor\",container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}[4m])) by (pod_name), \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -364,7 +364,7 @@
"refId": "A"
},
{
"expr": "label_replace(sum(rate(container_cpu_usage_seconds_total{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}[4m])) by (container_name, pod_name), \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")",
"expr": "label_replace(sum(rate(container_cpu_usage_seconds_total{job=\"kubernetes-cadvisor\",container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}[4m])) by (container_name, pod_name), \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
......@@ -467,7 +467,7 @@
"refId": "A"
},
{
"expr": "sum(label_replace(container_fs_usage_bytes{container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (container_name, service)",
"expr": "sum(label_replace(container_fs_usage_bytes{job=\"kubernetes-cadvisor\", container_name=~\"mixer|istio-proxy\", pod_name=~\"istio-telemetry-.*|istio-policy-.*\"}, \"service\", \"$1\" , \"pod_name\", \"(istio-telemetry|istio-policy)-.*\")) by (container_name, service)",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{ service }} - {{ container_name }}",
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment