Commit b7fd40d0 by loganhz, committed by Craig Jellick

Bump Istio to 1.3.1

parent 5cf57655
apiVersion: v1
name: rancher-istio
version: 0.0.2
-appVersion: 1.3.0
+appVersion: 1.3.1
tillerVersion: ">=2.7.2-0"
description: Helm chart for all istio components
home: https://istio.io/
......
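A note on the two version fields above: in Helm, "version" is the chart's own packaging version, while "appVersion" records the version of the application the chart deploys, which is why the Istio bump shows up in appVersion:

version: 0.0.2      # the chart's own packaging version
appVersion: 1.3.1   # the Istio release this chart installs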
@@ -248,7 +248,7 @@ spec:
sourceOwner: source.owner | ""
destinationApp: destination.labels["app"] | ""
destinationIp: destination.ip | ip("0.0.0.0")
-destinationServiceHost: destination.service.host | ""
+destinationServiceHost: destination.service.host | request.host | ""
destinationWorkload: destination.workload.name | ""
destinationName: destination.name | ""
destinationNamespace: destination.namespace | ""
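For context: in Mixer's expression language, "|" is a fallback operator that evaluates left to right and yields the first operand whose attribute is present, so request.host now backfills destinationServiceHost whenever destination.service.host is missing. A sketch with hypothetical attribute values:

# destination.service.host -> absent (e.g. a request that matched no known service)
# request.host             -> "reviews.example.com" (from the HTTP Host header)
# ""                       -> used only if both attributes are absent
# result: destinationServiceHost = "reviews.example.com"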
@@ -388,7 +388,7 @@ spec:
destination_principal: destination.principal | "unknown"
destination_app: destination.labels["app"] | "unknown"
destination_version: destination.labels["version"] | "unknown"
-destination_service: destination.service.host | "unknown"
+destination_service: destination.service.host | request.host | "unknown"
destination_service_name: destination.service.name | "unknown"
destination_service_namespace: destination.service.namespace | "unknown"
request_protocol: api.protocol | context.protocol | "unknown"
@@ -425,7 +425,7 @@ spec:
destination_principal: destination.principal | "unknown"
destination_app: destination.labels["app"] | "unknown"
destination_version: destination.labels["version"] | "unknown"
-destination_service: destination.service.host | "unknown"
+destination_service: destination.service.host | request.host | "unknown"
destination_service_name: destination.service.name | "unknown"
destination_service_namespace: destination.service.namespace | "unknown"
request_protocol: api.protocol | context.protocol | "unknown"
@@ -462,7 +462,7 @@ spec:
destination_principal: destination.principal | "unknown"
destination_app: destination.labels["app"] | "unknown"
destination_version: destination.labels["version"] | "unknown"
-destination_service: destination.service.host | "unknown"
+destination_service: destination.service.host | request.host | "unknown"
destination_service_name: destination.service.name | "unknown"
destination_service_namespace: destination.service.namespace | "unknown"
request_protocol: api.protocol | context.protocol | "unknown"
@@ -499,7 +499,7 @@ spec:
destination_principal: destination.principal | "unknown"
destination_app: destination.labels["app"] | "unknown"
destination_version: destination.labels["version"] | "unknown"
-destination_service: destination.service.host | "unknown"
+destination_service: destination.service.host | request.host | "unknown"
destination_service_name: destination.service.name | "unknown"
destination_service_namespace: destination.service.namespace | "unknown"
request_protocol: api.protocol | context.protocol | "unknown"
@@ -602,7 +602,7 @@ spec:
destination_principal: destination.principal | "unknown"
destination_app: destination.labels["app"] | "unknown"
destination_version: destination.labels["version"] | "unknown"
-destination_service: destination.service.name | "unknown"
+destination_service: destination.service.host | "unknown"
destination_service_name: destination.service.name | "unknown"
destination_service_namespace: destination.service.namespace | "unknown"
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
@@ -635,7 +635,7 @@ spec:
destination_principal: destination.principal | "unknown"
destination_app: destination.labels["app"] | "unknown"
destination_version: destination.labels["version"] | "unknown"
-destination_service: destination.service.name | "unknown"
+destination_service: destination.service.host | "unknown"
destination_service_name: destination.service.name | "unknown"
destination_service_namespace: destination.service.namespace | "unknown"
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
......
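The last two hunks switch the TCP metrics' destination_service from destination.service.name to destination.service.host, aligning them with the HTTP metrics above. The difference, for a hypothetical service "reviews" in namespace "default":

# destination.service.name -> "reviews"
# destination.service.host -> "reviews.default.svc.cluster.local"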
@@ -173,6 +173,9 @@
{{- define "telemetry_container" }}
spec:
serviceAccountName: istio-mixer-service-account
+{{- if $.Values.global.priorityClassName }}
+priorityClassName: "{{ $.Values.global.priorityClassName }}"
+{{- end }}
volumes:
- name: istio-certs
secret:
......
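The new priorityClassName block lets the telemetry pod opt into a scheduling priority class. A minimal sketch of how it would be set and rendered (the class name is illustrative; any PriorityClass that exists in the cluster works):

# values.yaml
global:
  priorityClassName: "istio-high-priority"

# rendered pod spec (excerpt)
spec:
  serviceAccountName: istio-mixer-service-account
  priorityClassName: "istio-high-priority"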
@@ -42,7 +42,7 @@ spec:
value: "{{ $val }}"
{{- end }}
{{- end }}
- name: "Trust_Domain"
- name: "TRUST_DOMAIN"
value: "{{ .Values.global.trustDomain }}"
- name: NAMESPACE
valueFrom:
......
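Only the variable's name changes here, presumably to the upper-case form the 1.3.1 images expect; the value still comes from global.trustDomain. With trustDomain: cluster.local in values.yaml, for example, the rendered container env would be:

- name: "TRUST_DOMAIN"
  value: "cluster.local"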
@@ -8,7 +8,7 @@ env:
# CA endpoint.
CA_ADDR: ""
# Names of the authentication provider's plugins.
Plugins: ""
PLUGINS: ""
nodeSelector: {}
tolerations: []
......
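Like the TRUST_DOMAIN rename, this changes only a key under the chart's env: map. Judging from the range loop visible in the previous hunk, keys under env: are rendered verbatim as container environment variables, so the consuming container now sees PLUGINS instead of Plugins (an inference from that loop; the consuming component is not shown in this diff):

# values.yaml
env:
  CA_ADDR: ""
  PLUGINS: ""
# rendered by the template's range loop as, e.g.:
#   - name: PLUGINS
#     value: ""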
# The reason for creating a ServiceAccount and ClusterRole specifically for this
# post-delete hooked job is that the citadel ServiceAccount is deleted before
# this hook is launched. On the other hand, running this hook before citadel is
# deleted (e.g. as a pre-delete hook) would not remove the secrets, because they
# would be re-created immediately by the to-be-deleted citadel.
#
# It is also important that the ServiceAccount, ClusterRole, and ClusterRoleBinding
# are ready before the hooked Job runs; hence the hook weights below.
apiVersion: v1
kind: ServiceAccount
+{{- if .Values.global.imagePullSecrets }}
+imagePullSecrets:
+{{- range .Values.global.imagePullSecrets }}
+- name: {{ . }}
+{{- end }}
+{{- end }}
metadata:
name: istio-cleanup-secrets-service-account
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": post-delete
"helm.sh/hook-delete-policy": hook-succeeded
"helm.sh/hook-weight": "1"
labels:
app: {{ template "security.name" . }}
chart: {{ template "security.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
-{{- if .Values.global.imagePullSecrets }}
-imagePullSecrets:
-{{- range .Values.global.imagePullSecrets }}
-- name: {{ . }}
-{{- end }}
-{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: istio-cleanup-secrets-{{ .Release.Namespace }}
annotations:
"helm.sh/hook": post-delete
"helm.sh/hook-delete-policy": hook-succeeded
"helm.sh/hook-weight": "1"
labels:
app: {{ template "security.name" . }}
chart: {{ template "security.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["list", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: istio-cleanup-secrets-{{ .Release.Namespace }}
annotations:
"helm.sh/hook": post-delete
"helm.sh/hook-delete-policy": hook-succeeded
"helm.sh/hook-weight": "2"
labels:
app: {{ template "security.name" . }}
chart: {{ template "security.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: istio-cleanup-secrets-{{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: istio-cleanup-secrets-service-account
namespace: {{ .Release.Namespace }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: istio-cleanup-secrets-{{ .Values.global.tag | printf "%v" | trunc 32 }}
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": post-delete
"helm.sh/hook-delete-policy": hook-succeeded
"helm.sh/hook-weight": "3"
labels:
app: {{ template "security.name" . }}
chart: {{ template "security.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
template:
metadata:
name: istio-cleanup-secrets
labels:
app: {{ template "security.name" . }}
chart: {{ template "security.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
serviceAccountName: istio-cleanup-secrets-service-account
containers:
- name: kubectl
image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}"
imagePullPolicy: IfNotPresent
command:
- /bin/bash
- -c
- >
kubectl get secret --all-namespaces | grep "istio.io/key-and-cert" | while read -r entry; do
ns=$(echo $entry | awk '{print $1}');
name=$(echo $entry | awk '{print $2}');
kubectl delete secret $name -n $ns;
done
restartPolicy: OnFailure
affinity:
{{- include "nodeaffinity" . | indent 6 }}
{{- include "podAntiAffinity" . | indent 6 }}
{{- if .Values.tolerations }}
tolerations:
{{ toYaml .Values.tolerations | indent 6 }}
{{- else if .Values.global.defaultTolerations }}
tolerations:
{{ toYaml .Values.global.defaultTolerations | indent 6 }}
{{- end }}
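One subtlety in the Job above: the script lives in a folded block scalar ("- >"), so YAML joins its lines with spaces and the shell receives one long command line, which is why each statement is terminated with a semicolon. Folded out, the command is equivalent to:

# kubectl get secret --all-namespaces | grep "istio.io/key-and-cert" | while read -r entry; do ns=$(echo $entry | awk '{print $1}'); name=$(echo $entry | awk '{print $2}'); kubectl delete secret $name -n $ns; done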
@@ -187,12 +187,18 @@ containers:
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
-{{ if eq .Values.global.proxy.tracer "datadog" }}
+{{- if eq .Values.global.proxy.tracer "datadog" }}
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
-{{ end }}
+{{- if isset .ObjectMeta.Annotations `apm.datadoghq.com/env` }}
+{{- range $key, $value := fromJSON (index .ObjectMeta.Annotations `apm.datadoghq.com/env`) }}
+- name: {{ $key }}
+  value: "{{ $value }}"
+{{- end }}
+{{- end }}
+{{- end }}
- name: ISTIO_META_POD_NAME
valueFrom:
fieldRef:
......
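The added apm.datadoghq.com/env branch parses a JSON object out of a pod annotation and injects each key/value pair as an environment variable on the sidecar. A hypothetical workload using it (the annotation contents are illustrative only):

apiVersion: v1
kind: Pod
metadata:
  name: reviews
  annotations:
    apm.datadoghq.com/env: '{"DD_ENV": "prod", "DD_TRACE_ANALYTICS_ENABLED": "true"}'

# the injected sidecar would then carry:
#   - name: DD_ENV
#     value: "prod"
#   - name: DD_TRACE_ANALYTICS_ENABLED
#     value: "true"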
labels:
-rancher.istio.v0.0.2: 1.3.0
+rancher.istio.v0.0.2: 1.3.1
rancher_min_version: 2.3.0-rc1
@@ -26,7 +26,7 @@ gateways:
#
sidecarInjectorWebhook:
repository: rancher/istio-sidecar_injector
tag: "1.3.0"
tag: "1.3.1"
enabled: true
#
@@ -35,7 +35,7 @@ sidecarInjectorWebhook:
#
galley:
repository: rancher/istio-galley
-tag: 1.3.0
+tag: 1.3.1
enabled: true
#
@@ -44,7 +44,7 @@ galley:
# @see charts/mixer/values.yaml, it takes precedence
mixer:
repository: rancher/istio-mixer
tag: "1.3.0"
tag: "1.3.1"
enabled: true
policy:
# If policy is enabled, global.disablePolicyChecks takes effect.
@@ -58,7 +58,7 @@ mixer:
# @see charts/pilot/values.yaml
pilot:
repository: rancher/istio-pilot
tag: "1.3.0"
tag: "1.3.1"
enabled: true
#
@@ -66,7 +66,7 @@ pilot:
#
security:
repository: rancher/istio-citadel
tag: "1.3.0"
tag: "1.3.1"
enabled: true
#
@@ -160,7 +160,7 @@ global:
# Default tag for Istio images.
# tag: release-1.1-latest-daily
-tag: 1.3.0
+tag: 1.3.1
# Comma-separated minimum per-scope logging level of messages to output, in the form of <scope>:<level>,<scope>:<level>
# The control plane has different scopes depending on the component, but a default log level can be configured across all components
@@ -170,7 +170,7 @@ global:
kubectl:
repository: rancher/istio-kubectl
-tag: 1.3.0
+tag: 1.3.1
# monitoring port used by mixer, pilot, galley
monitoringPort: 15014
@@ -207,7 +207,7 @@ global:
cpu: 10m
memory: 10Mi
repository: rancher/istio-proxyv2
-tag: 1.3.0
+tag: 1.3.1
# cluster domain. Default value is "cluster.local".
clusterDomain: "cluster.local"
@@ -354,7 +354,7 @@ global:
proxy_init:
# Base name for the proxy_init container, used to configure iptables.
repository: rancher/istio-proxy_init
tag: "1.3.0"
tag: "1.3.1"
# imagePullPolicy is applied to istio control plane components.
# Local tests require IfNotPresent, to avoid uploading to Docker Hub.
@@ -560,7 +560,7 @@ global:
nodeAgent:
repository: rancher/istio-node-agent-k8s
tag: "1.3.0"
tag: "1.3.1"
sds:
# SDS enabled. If set to true, mTLS certificates for the sidecars will be
# distributed through the SecretDiscoveryService instead of using K8S secrets to mount the certificates.
......
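All of the component tags above, together with global.tag, global.kubectl.tag, and the proxy images, move in lockstep; a stale 1.3.0 anywhere would pull a mismatched image. A quick way to double-check after editing, assuming Helm 2 (which this chart's tillerVersion constraint targets):

# from the chart root:
#   helm template . | grep "image:" | sort -u
# every rendered image should carry the 1.3.1 tag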