Unverified commit ccc19d8d by Guangbo, committed by GitHub

Merge pull request #79 from guangbochen/master

bump cert-manager, chartmuseum, nfs-server, k8s-dashboard and kubeflow charts
parents 597c17ea 2824b80d
 name: cert-manager
-version: v0.4.1
-appVersion: v0.4.1
+version: v0.5.2
+appVersion: v0.5.2
 description: A Helm chart for cert-manager
 home: https://github.com/jetstack/cert-manager
 keywords:
@@ -10,7 +10,7 @@ keywords:
 - tls
 sources:
 - https://github.com/jetstack/cert-manager
-icon: https://letsencrypt.org/images/letsencrypt-logo-horizontal.svg
+icon: file://../letsencrypt-logo-horizontal.svg
 maintainers:
 - name: munnerz
   email: james@jetstack.io
@@ -42,7 +42,7 @@ The following table lists the configurable parameters of the cert-manager chart
 | Parameter | Description | Default |
 | --------- | ----------- | ------- |
 | `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` |
-| `image.tag` | Image tag | `v0.4.1` |
+| `image.tag` | Image tag | `v0.5.2` |
 | `image.pullPolicy` | Image pull policy | `IfNotPresent` |
 | `replicaCount` | Number of cert-manager replicas | `1` |
 | `createCustomResource` | Create CRD/TPR with this release | `true` |
@@ -54,7 +54,7 @@ The following table lists the configurable parameters of the cert-manager chart
 | `rbac.create` | If `true`, create and use RBAC resources | `true` |
 | `serviceAccount.create` | If `true`, create a new service account | `true` |
 | `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | |
-| `resources` | CPU/memory resource requests/limits | `requests: {cpu: 10m, memory: 32Mi}` |
+| `resources` | CPU/memory resource requests/limits | |
 | `nodeSelector` | Node labels for pod assignment | `{}` |
 | `affinity` | Node affinity for pod assignment | `{}` |
 | `tolerations` | Node tolerations for pod assignment | `[]` |
@@ -69,6 +69,14 @@ The following table lists the configurable parameters of the cert-manager chart
 | `http_proxy` | Value of the `HTTP_PROXY` environment variable in the cert-manager pod | |
 | `https_proxy` | Value of the `HTTPS_PROXY` environment variable in the cert-manager pod | |
 | `no_proxy` | Value of the `NO_PROXY` environment variable in the cert-manager pod | |
+| `webhook.enabled` | Toggles whether the validating webhook component should be installed | `false` |
+| `webhook.replicaCount` | Number of cert-manager webhook replicas | `1` |
+| `webhook.podAnnotations` | Annotations to add to the webhook pods | `{}` |
+| `webhook.extraArgs` | Optional flags for cert-manager webhook component | `[]` |
+| `webhook.resources` | CPU/memory resource requests/limits for the webhook pods | |
+| `webhook.image.repository` | Webhook image repository | `quay.io/jetstack/cert-manager-webhook` |
+| `webhook.image.tag` | Webhook image tag | `v0.5.2` |
+| `webhook.image.pullPolicy` | Webhook image pull policy | `IfNotPresent` |
 Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
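For example, to install the chart with the new webhook component enabled (a sketch; the release name and chart reference are illustrative):

```console
$ helm install --name my-release --set webhook.enabled=true,webhook.replicaCount=2 .
```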
@@ -14,17 +14,39 @@ questions:
     type: string
     label: Cert-Manager Docker Image Name
   - variable: image.tag
-    default: "v0.4.1"
+    default: "v0.5.2"
     description: "Cert-Manager Docker image tag"
     type: string
     label: Cert-Manager Image Tag
+  - variable: webhook.image.repository
+    default: "quay.io/jetstack/cert-manager-webhook"
+    description: "cert-manager webhook image name"
+    type: string
+    label: Cert-Manager Webhook Image Name
+    show_if: webhook.enabled=true&&defaultImage=false
+  - variable: webhook.image.tag
+    default: "v0.5.2"
+    description: "cert-manager webhook image tag"
+    type: string
+    label: Cert-Manager Webhook Image Tag
+    show_if: webhook.enabled=true&&defaultImage=false
+- variable: replicaCount
+  default: 1
+  description: "Replica count of cert-manager"
+  min: 1
+  max: 5
+  label: Replica count of cert-manager
+  type: int
+  group: "Settings"
-- variable: createCustomResource
+- variable: clusterissuerEnabled
   default: true
-  description: "Create Custom Resource Definition(CRD) of certmanager"
-  label: Create Custom Resource Definition(CRD)
+  description: "Create Default Cluster Issuer"
+  label: Create Default Cluster Issuer
   type: boolean
   group: "Settings"
-- variable: ingressShim.defaultIssuerName
-  default: "letsencrypt-staging"
-  required: true
-  type: enum
+  show_subquestion_if: true
+  subquestions:
+  - variable: ingressShim.defaultIssuerName
+    default: "letsencrypt-staging"
+    required: true
+    type: enum
@@ -34,10 +56,36 @@ questions:
   options:
   - "letsencrypt-staging"
   - "letsencrypt-prod"
 - variable: letsencrypt.email
   default: ""
   required: true
   type: string
   description: "Let's Encrypt register email, for https://letsencrypt.org/docs/staging-environment"
   label: Let's Encrypt Client Register Email
   group: "Settings"
+- variable: createCustomResource
+  default: true
+  description: "Create Custom Resource Definition(CRD) of certmanager"
+  label: Create Custom Resource Definition(CRD)
+  type: boolean
+  group: "CRD Settings"
+- variable: webhook.enabled
+  default: false
+  description: "Enable the validating webhook"
+  label: Enable Webhook
+  type: boolean
+  show_subquestion_if: true
+  group: "Webhook Settings"
+  subquestions:
+  - variable: webhook.replicaCount
+    default: 1
+    description: "Replica count of webhook pod"
+    min: 1
+    max: 5
+    label: Webhook Replica count
+    type: int
+  - variable: webhook.extraArgs
+    default: "[]"
+    description: "Optional additional arguments for webhook"
+    label: Optional Additional Arguments for Webhook
+    type: string
# requirements.lock
dependencies:
- name: webhook
  repository: file://./webhook
  version: v0.5.2
digest: sha256:efe1f1adfd027b361934a3d745760b10d947a050048e5046b257e79cd5c40ea2
generated: 2018-11-23T14:56:41.555379+08:00
# requirements.yaml
dependencies:
- name: webhook
version: "v0.5.2"
repository: "file://./webhook"
condition: webhook.enabled
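The `condition: webhook.enabled` field ties the `webhook` subchart to the parent chart's values: Helm renders the dependency only when that value is true. A minimal override exercising it might look like this (a sketch; the file name is hypothetical):

```yaml
# values-webhook.yaml (hypothetical override file)
webhook:
  enabled: true   # enables rendering of the file://./webhook subchart
```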
@@ -3,4 +3,7 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: {{ .Release.Namespace | quote }}
+  labels:
+    name: {{ .Release.Namespace | quote }}
+    certmanager.k8s.io/disable-validation: "true"
 {{- end }}
@@ -3,14 +3,14 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: certificates.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": "crd-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
   labels:
     app: {{ template "cert-manager.name" . }}
     chart: {{ template "cert-manager.chart" . }}
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
-  annotations:
-    "helm.sh/hook": "crd-install"
-    "helm.sh/hook-delete-policy": "before-hook-creation"
 spec:
   group: certmanager.k8s.io
   version: v1alpha1
@@ -3,14 +3,14 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: clusterissuers.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": "crd-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
   labels:
     app: {{ template "cert-manager.name" . }}
     chart: {{ template "cert-manager.chart" . }}
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
-  annotations:
-    "helm.sh/hook": "crd-install"
-    "helm.sh/hook-delete-policy": "before-hook-creation"
 spec:
   group: certmanager.k8s.io
   version: v1alpha1
@@ -60,6 +60,14 @@ spec:
         - --default-acme-issuer-dns01-provider-name={{ .defaultACMEDNS01ChallengeProvider }}
         {{- end }}
         {{- end }}
+        {{- if .Values.affinity }}
+        affinity:
+{{ toYaml .Values.affinity | indent 10 }}
+        {{- end }}
+        {{- if .Values.tolerations }}
+        tolerations:
+{{ toYaml .Values.tolerations | indent 10 }}
+        {{- end }}
         env:
         - name: POD_NAMESPACE
           valueFrom:
@@ -3,13 +3,14 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: issuers.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": "crd-install"
+    "helm.sh/hook-delete-policy": "before-hook-creation"
   labels:
     app: {{ template "cert-manager.name" . }}
     chart: {{ template "cert-manager.chart" . }}
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
-  annotations:
-    "helm.sh/hook": crd-install
 spec:
   group: certmanager.k8s.io
   version: v1alpha1
@@ -13,12 +13,7 @@ rules:
   resources: ["certificates", "issuers", "clusterissuers"]
   verbs: ["*"]
 - apiGroups: [""]
-  # TODO: remove endpoints once 0.4 is released. We include it here in case
-  # users use the 'master' version of the Helm chart with a 0.2.x release of
-  # cert-manager that still performs leader election with Endpoint resources.
-  # We advise users don't do this, but some will anyway and this will reduce
-  # friction.
-  resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"]
+  resources: ["configmaps", "secrets", "events", "services", "pods"]
   verbs: ["*"]
 - apiGroups: ["extensions"]
   resources: ["ingresses"]
@@ -5,7 +5,7 @@ replicaCount: 1
 image:
   repository: quay.io/jetstack/cert-manager-controller
-  tag: v0.4.1
+  tag: v0.5.2
   pullPolicy: IfNotPresent
 createCustomResource: true
@@ -69,11 +69,32 @@ ingressShim:
   defaultACMEChallengeType: "http01"
   # defaultACMEDNS01ChallengeProvider: ""
+webhook:
+  enabled: false
+  replicaCount: 1
+  podAnnotations: {}
+  # Optional additional arguments for webhook
+  extraArgs: []
+  resources: {}
+  # requests:
+  #   cpu: 10m
+  #   memory: 32Mi
+  image:
+    repository: quay.io/jetstack/cert-manager-webhook
+    tag: v0.5.2
+    pullPolicy: IfNotPresent
 letsencrypt:
+  enabled: true
   email: user@example.com
   # specifies a custom Let's Encrypt server; both the staging and prod issuers have default values in clusterissuer.yaml
   server: "https://acme-staging-v02.api.letsencrypt.org/directory"
+clusterissuerEnabled: true
@@ -84,4 +105,24 @@ createNamespaceResource: false
 # http_proxy: "http://proxy:8080"
 # no_proxy: 127.0.0.1,localhost
-clusterissuerEnabled: true
+# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
+# for example:
+#   affinity:
+#     nodeAffinity:
+#       requiredDuringSchedulingIgnoredDuringExecution:
+#         nodeSelectorTerms:
+#         - matchExpressions:
+#           - key: foo.bar.com/role
+#             operator: In
+#             values:
+#             - master
+affinity: {}
+# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
+# for example:
+#   tolerations:
+#   - key: foo.bar.com/role
+#     operator: Equal
+#     value: master
+#     effect: NoSchedule
+tolerations: []
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
apiVersion: v1
appVersion: "v0.5.2"
description: A Helm chart for deploying the cert-manager webhook component
name: webhook
version: "v0.5.2"
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "webhook.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "webhook.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "webhook.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "webhook.selfSignedIssuer" -}}
{{ printf "%s-selfsign" (include "webhook.fullname" .) }}
{{- end -}}
{{- define "webhook.rootCAIssuer" -}}
{{ printf "%s-ca" (include "webhook.fullname" .) }}
{{- end -}}
{{- define "webhook.rootCACertificate" -}}
{{ printf "%s-ca" (include "webhook.fullname" .) }}
{{- end -}}
{{- define "webhook.servingCertificate" -}}
{{ printf "%s-webhook-tls" (include "webhook.fullname" .) }}
{{- end -}}
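With a hypothetical release named `my-release`, these helpers render as follows (the release name does not contain the chart name `webhook`, so `fullname` concatenates the two; note the doubled "webhook" in the serving certificate, since the chart is itself named webhook):

```yaml
# illustrative rendered names
# webhook.fullname           -> my-release-webhook
# webhook.selfSignedIssuer   -> my-release-webhook-selfsign
# webhook.rootCAIssuer       -> my-release-webhook-ca
# webhook.rootCACertificate  -> my-release-webhook-ca
# webhook.servingCertificate -> my-release-webhook-webhook-tls
```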
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1beta1.admission.certmanager.k8s.io
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
group: admission.certmanager.k8s.io
groupPriorityMinimum: 1000
versionPriority: 15
service:
name: {{ include "webhook.fullname" . }}
namespace: "{{ .Release.Namespace }}"
version: v1beta1
## This file contains a CronJob that runs every 24h to automatically update the
## caBundle set on the APIService and ValidatingWebhookConfiguration resource.
## This allows us to store the CA bundle in a Secret resource which is
## generated by cert-manager's 'selfsigned' Issuer.
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: {{ include "webhook.fullname" . }}-ca-sync
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
schedule: "0 0 * * *" # once a day, matching the header comment
jobTemplate:
spec:
template:
metadata:
labels:
app: ca-helper
spec:
serviceAccountName: {{ include "webhook.fullname" . }}-ca-sync
restartPolicy: OnFailure
containers:
- name: ca-helper
image: quay.io/munnerz/apiextensions-ca-helper:v0.1.0
imagePullPolicy: IfNotPresent
args:
- -config=/config/config
volumeMounts:
- name: config
mountPath: /config
resources:
requests:
cpu: 10m
memory: 32Mi
limits:
cpu: 100m
memory: 128Mi
volumes:
- name: config
configMap:
name: {{ include "webhook.fullname" . }}-ca-sync
---
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "webhook.fullname" . }}-ca-sync
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
template:
metadata:
labels:
app: ca-helper
spec:
serviceAccountName: {{ include "webhook.fullname" . }}-ca-sync
restartPolicy: OnFailure
containers:
- name: ca-helper
image: quay.io/munnerz/apiextensions-ca-helper:v0.1.0
imagePullPolicy: IfNotPresent
args:
- -config=/config/config
volumeMounts:
- name: config
mountPath: /config
resources:
requests:
cpu: 10m
memory: 32Mi
limits:
cpu: 100m
memory: 128Mi
volumes:
- name: config
configMap:
name: {{ include "webhook.fullname" . }}-ca-sync
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "webhook.fullname" . }}-ca-sync
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
config: |-
{
"apiServices": [
{
"name": "v1beta1.admission.certmanager.k8s.io",
"secret": {
"name": "{{ include "webhook.rootCACertificate" . }}",
"namespace": "{{ .Release.Namespace }}",
"key": "tls.crt"
}
}
],
"validatingWebhookConfigurations": [
{
"name": "{{ include "webhook.fullname" . }}",
"file": {
"path": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
}
}
]
}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "webhook.fullname" . }}-ca-sync
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: {{ include "webhook.fullname" . }}-ca-sync
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
resourceNames:
- {{ include "webhook.rootCACertificate" . }}
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
verbs: ["get", "update"]
resourceNames:
- {{ include "webhook.fullname" . }}
- apiGroups: ["apiregistration.k8s.io"]
resources: ["apiservices"]
verbs: ["get", "update"]
resourceNames:
- v1beta1.admission.certmanager.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: {{ include "webhook.fullname" . }}-ca-sync
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "webhook.fullname" . }}-ca-sync
subjects:
- name: {{ include "webhook.fullname" . }}-ca-sync
namespace: {{ .Release.Namespace }}
kind: ServiceAccount
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: {{ include "webhook.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ include "webhook.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "webhook.name" . }}
release: {{ .Release.Name }}
annotations:
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ include "webhook.fullname" . }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- --v=12
- --tls-cert-file=/certs/tls.crt
- --tls-private-key-file=/certs/tls.key
- --disable-admission-plugins=NamespaceLifecycle,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Initializers
{{- if .Values.extraArgs }}
{{ toYaml .Values.extraArgs | indent 10 }}
{{- end }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
{{ toYaml .Values.resources | indent 12 }}
volumeMounts:
- name: certs
mountPath: /certs
volumes:
- name: certs
secret:
secretName: {{ include "webhook.servingCertificate" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
---
# Create a selfsigned Issuer, in order to create a root CA certificate for
# signing webhook serving certificates
apiVersion: certmanager.k8s.io/v1alpha1
kind: Issuer
metadata:
name: {{ include "webhook.selfSignedIssuer" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selfsigned: {}
---
# Generate a CA Certificate used to sign certificates for the webhook
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ include "webhook.rootCACertificate" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
secretName: {{ include "webhook.rootCACertificate" . }}
issuerRef:
name: {{ include "webhook.selfSignedIssuer" . }}
commonName: "ca.webhook.cert-manager"
isCA: true
---
# Create an Issuer that uses the above generated CA certificate to issue certs
apiVersion: certmanager.k8s.io/v1alpha1
kind: Issuer
metadata:
name: {{ include "webhook.rootCAIssuer" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
ca:
secretName: {{ include "webhook.rootCACertificate" . }}
---
# Finally, generate a serving certificate for the webhook to use
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: {{ include "webhook.servingCertificate" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
secretName: {{ include "webhook.servingCertificate" . }}
issuerRef:
name: {{ include "webhook.rootCAIssuer" . }}
dnsNames:
- {{ include "webhook.fullname" . }}
- {{ include "webhook.fullname" . }}.{{ .Release.Namespace }}
- {{ include "webhook.fullname" . }}.{{ .Release.Namespace }}.svc
### Webhook ###
---
# apiserver gets the auth-delegator role to delegate auth decisions to
# the core apiserver
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: {{ include "webhook.fullname" . }}:auth-delegator
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ include "webhook.fullname" . }}
namespace: {{ .Release.Namespace }}
---
# apiserver gets the ability to read authentication. This allows it to
# read the specific configmap that has the requestheader-* entries to
# api agg
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: {{ include "webhook.fullname" . }}:webhook-authentication-reader
namespace: kube-system
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ include "webhook.fullname" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "webhook.fullname" . }}:webhook-requester
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- admission.certmanager.k8s.io
resources:
- certificates
- issuers
- clusterissuers
verbs:
- create
apiVersion: v1
kind: Service
metadata:
name: {{ include "webhook.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: ClusterIP
ports:
- name: https
port: 443
targetPort: 443
selector:
app: {{ include "webhook.name" . }}
release: {{ .Release.Name }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "webhook.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: {{ include "webhook.fullname" . }}
labels:
app: {{ include "webhook.name" . }}
chart: {{ include "webhook.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
webhooks:
- name: certificates.admission.certmanager.k8s.io
namespaceSelector:
matchExpressions:
- key: "certmanager.k8s.io/disable-validation"
operator: "NotIn"
values:
- "true"
- key: "name"
operator: "NotIn"
values:
- {{ .Release.Namespace }}
rules:
- apiGroups:
- "certmanager.k8s.io"
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- certificates
failurePolicy: Fail
clientConfig:
service:
name: kubernetes
namespace: default
path: /apis/admission.certmanager.k8s.io/v1beta1/certificates
- name: issuers.admission.certmanager.k8s.io
namespaceSelector:
matchExpressions:
- key: "certmanager.k8s.io/disable-validation"
operator: "NotIn"
values:
- "true"
- key: "name"
operator: "NotIn"
values:
- {{ .Release.Namespace }}
rules:
- apiGroups:
- "certmanager.k8s.io"
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- issuers
failurePolicy: Fail
clientConfig:
service:
name: kubernetes
namespace: default
path: /apis/admission.certmanager.k8s.io/v1beta1/issuers
- name: clusterissuers.admission.certmanager.k8s.io
namespaceSelector:
matchExpressions:
- key: "certmanager.k8s.io/disable-validation"
operator: "NotIn"
values:
- "true"
- key: "name"
operator: "NotIn"
values:
- {{ .Release.Namespace }}
rules:
- apiGroups:
- "certmanager.k8s.io"
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- clusterissuers
failurePolicy: Fail
clientConfig:
service:
name: kubernetes
namespace: default
path: /apis/admission.certmanager.k8s.io/v1beta1/clusterissuers
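The `certmanager.k8s.io/disable-validation` label that each `namespaceSelector` above checks is the same label the chart now adds to its own namespace (see the Namespace template earlier in this diff), so the webhook never blocks cert-manager's own resources. To exempt another namespace by hand (a sketch; `kube-system` is illustrative):

```console
$ kubectl label namespace kube-system certmanager.k8s.io/disable-validation=true
```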
replicaCount: 1
podAnnotations: {}
# Optional additional arguments for webhook
extraArgs: []
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
image:
repository: quay.io/jetstack/cert-manager-webhook
tag: v0.5.2
pullPolicy: IfNotPresent
[letsencrypt-logo-horizontal.svg: the Let's Encrypt logo, a 339×81 SVG vector graphic added alongside Chart.yaml so the chart icon can be loaded locally via `icon: file://../letsencrypt-logo-horizontal.svg`]
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
name: cert-manager
version: v0.4.1
appVersion: v0.4.1
description: A Helm chart for cert-manager
home: https://github.com/jetstack/cert-manager
keywords:
- cert-manager
- kube-lego
- letsencrypt
- tls
sources:
- https://github.com/jetstack/cert-manager
icon: https://letsencrypt.org/images/letsencrypt-logo-horizontal.svg
maintainers:
- name: munnerz
email: james@jetstack.io
# OWNERS
approvers:
- munnerz
- simonswine
- kragniz
reviewers:
- munnerz
- unguiculus
- simonswine
- kragniz
## Installing the Chart
Full installation instructions, including details on how to configure extra
functionality in cert-manager, can be found in the [getting started docs](https://cert-manager.readthedocs.io/en/latest/getting-started/).
To install the chart with the release name `my-release`:
```console
$ helm install --name my-release stable/cert-manager
```
In order to begin issuing certificates, you will need to set up a ClusterIssuer
or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer).
More information on the different types of issuers and how to configure them
can be found in our documentation:
https://cert-manager.readthedocs.io/en/latest/reference/issuers.html
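For example, a minimal ACME ClusterIssuer against the Let's Encrypt staging endpoint might look like the following (a sketch; the name, email, and secret name are illustrative):

```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    email: user@example.com                   # replace with a reachable contact address
    privateKeySecretRef:
      name: letsencrypt-staging-account-key   # Secret that will hold the ACME account key
    http01: {}
```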
For information on how to configure cert-manager to automatically provision
Certificates for Ingress resources, take a look at the `ingress-shim`
documentation:
https://cert-manager.readthedocs.io/en/latest/reference/ingress-shim.html
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the cert-manager chart and their default values.
| Parameter | Description | Default |
| --------- | ----------- | ------- |
| `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` |
| `image.tag` | Image tag | `v0.4.1` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `replicaCount` | Number of cert-manager replicas | `1` |
| `createCustomResource` | Create CRD/TPR with this release | `true` |
| `clusterResourceNamespace` | Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources | Same namespace as cert-manager pod
| `leaderElection.Namespace` | Override the namespace used to store the ConfigMap for leader election | Same namespace as cert-manager pod
| `certificateResourceShortNames` | Custom aliases for Certificate CRD | `["cert", "certs"]` |
| `extraArgs` | Optional flags for cert-manager | `[]` |
| `extraEnv` | Optional environment variables for cert-manager | `[]` |
| `rbac.create` | If `true`, create and use RBAC resources | `true` |
| `serviceAccount.create` | If `true`, create a new service account | `true` |
| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | |
| `resources` | CPU/memory resource requests/limits | `requests: {cpu: 10m, memory: 32Mi}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `affinity` | Node affinity for pod assignment | `{}` |
| `tolerations` | Node tolerations for pod assignment | `[]` |
| `ingressShim.defaultIssuerName` | Optional default issuer to use for ingress resources | |
| `ingressShim.defaultIssuerKind` | Optional default issuer kind to use for ingress resources | |
| `ingressShim.defaultACMEChallengeType` | Optional default challenge type to use for ingresses using ACME issuers | |
| `ingressShim.defaultACMEDNS01ChallengeProvider` | Optional default DNS01 challenge provider to use for ingresses using ACME issuers with DNS01 | |
| `podAnnotations` | Annotations to add to the cert-manager pod | `{}` |
| `podDnsPolicy` | Optional cert-manager pod [DNS policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-policy) | |
| `podDnsConfig` | Optional cert-manager pod [DNS configurations](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-config) | |
| `podLabels` | Labels to add to the cert-manager pod | `{}` |
| `http_proxy` | Value of the `HTTP_PROXY` environment variable in the cert-manager pod | |
| `https_proxy` | Value of the `HTTPS_PROXY` environment variable in the cert-manager pod | |
| `no_proxy` | Value of the `NO_PROXY` environment variable in the cert-manager pod | |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
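For example (a sketch; the release name and chosen overrides are illustrative):

```console
$ helm install --name my-release \
    --set replicaCount=2,ingressShim.defaultIssuerName=letsencrypt-prod stable/cert-manager
```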
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install --name my-release -f values.yaml .
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Contributing
This chart is maintained at [github.com/jetstack/cert-manager](https://github.com/jetstack/cert-manager/tree/master/contrib/charts/cert-manager).
# cert-manager
cert-manager is a Kubernetes addon to automate the management and issuance of
TLS certificates from various issuing sources.
It periodically ensures certificates are valid and up to date, and attempts
to renew certificates at an appropriate time before expiry.
## How to Use It
### [Ingress-shim](https://cert-manager.readthedocs.io/en/latest/reference/ingress-shim.html#ingress-shim)
Cert-manager will create Certificate resources that reference the `ClusterIssuer` for all Ingresses that have the following annotations.
```
kubernetes.io/tls-acme: "true"
certmanager.k8s.io/cluster-issuer: letsencrypt-staging # your clusterissuer name
nginx.ingress.kubernetes.io/secure-backends: "true" # optional
```
For cert-manager to work properly, the following information has to be added to your Ingress definition.
```
spec:
tls:
- hosts:
- host.example.com
secretName: host-example-crt
```
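Putting the two snippets together, a complete Ingress that ingress-shim will pick up might look like this (a sketch; the hostname, backend service, and secret name are illustrative):

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example
  annotations:
    kubernetes.io/tls-acme: "true"
    certmanager.k8s.io/cluster-issuer: letsencrypt-staging
spec:
  rules:
  - host: host.example.com
    http:
      paths:
      - path: /
        backend:
          serviceName: example-svc   # hypothetical backend Service
          servicePort: 80
  tls:
  - hosts:
    - host.example.com
    secretName: host-example-crt     # cert-manager stores the issued certificate here
```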
minimum_rancher_version: v2.1.0
questions:
- variable: defaultImage
default: true
description: "Use default Docker image"
label: Use Default Image
type: boolean
show_subquestion_if: false
group: "Container Images"
subquestions:
- variable: image.repository
default: "quay.io/jetstack/cert-manager-controller"
description: "Cert-Manager Docker image name"
type: string
label: Cert-Manager Docker Image Name
- variable: image.tag
default: "v0.4.1"
description: "Cert-Manager Docker image tag"
type: string
label: Cert-Manager Image Tag
- variable: createCustomResource
default: true
description: "Create Custom Resource Definition(CRD) of certmanager"
label: Create Custom Resource Definition(CRD)
type: boolean
group: "Settings"
- variable: ingressShim.defaultIssuerName
default: "letsencrypt-staging"
required: true
type: enum
description: "Let's Encrypt ACME clients, use staging environment to avoid hitting rate limits"
label: Let's Encrypt Cluster Issuer clients
group: "Settings"
options:
- "letsencrypt-staging"
- "letsencrypt-prod"
- variable: letsencrypt.email
default: ""
required: true
type: string
description: "Let's Encrypt register email, for https://letsencrypt.org/docs/staging-environment"
label: Let's Encrypt Client Register Email
group: "Settings"
{{ if .Values.createNamespaceResource }}
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Release.Namespace | quote }}
{{- end }}
cert-manager has been deployed successfully!
1. In order to begin issuing certificates, you will need to set up a ClusterIssuer
or Issuer resource, and we have set up a default ClusterIssuer called `{{ .Values.letsencrypt.name }}` with your registered email <{{ .Values.letsencrypt.email }}>.
More information on the different types of issuers and how to configure them
can be found in our documentation:
https://cert-manager.readthedocs.io/en/latest/reference/issuers.html
2. Cert-manager will create Certificate resources that reference the ClusterIssuer `{{ .Values.letsencrypt.name }}` for all Ingresses that have a `kubernetes.io/tls-acme: "true"` annotation.
For information on how to configure cert-manager to automatically provision
Certificates for Ingress resources, take a look at the `ingress-shim`
documentation:
https://cert-manager.readthedocs.io/en/latest/reference/ingress-shim.html
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "cert-manager.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "cert-manager.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "cert-manager.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "cert-manager.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "cert-manager.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- if .Values.createCustomResource -}}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: certificates.certmanager.k8s.io
labels:
app: {{ template "cert-manager.name" . }}
chart: {{ template "cert-manager.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
annotations:
"helm.sh/hook": "crd-install"
"helm.sh/hook-delete-policy": "before-hook-creation"
spec:
group: certmanager.k8s.io
version: v1alpha1
scope: Namespaced
names:
kind: Certificate
plural: certificates
{{- if .Values.certificateResourceShortNames }}
shortNames:
{{ toYaml .Values.certificateResourceShortNames | indent 6 }}
{{- end -}}
{{- end -}}
{{- if .Values.createCustomResource -}}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterissuers.certmanager.k8s.io
labels:
app: {{ template "cert-manager.name" . }}
chart: {{ template "cert-manager.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
annotations:
"helm.sh/hook": "crd-install"
"helm.sh/hook-delete-policy": "before-hook-creation"
spec:
group: certmanager.k8s.io
version: v1alpha1
names:
kind: ClusterIssuer
plural: clusterissuers
scope: Cluster
{{- end -}}
{{- if .Values.clusterissuerEnabled -}}
apiVersion: certmanager.k8s.io/v1alpha1
kind: ClusterIssuer
metadata:
name: {{ .Values.ingressShim.defaultIssuerName }}
labels:
app: {{ template "cert-manager.name" . }}
chart: {{ template "cert-manager.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
acme:
{{- if eq .Values.ingressShim.defaultIssuerName "letsencrypt-prod" }}
server: "https://acme-v02.api.letsencrypt.org/directory"
{{- else if eq .Values.ingressShim.defaultIssuerName "letsencrypt-staging" }}
server: "https://acme-staging-v02.api.letsencrypt.org/directory"
{{- else }}
server: {{ .Values.letsencrypt.server }}
{{- end }}
email: {{ .Values.letsencrypt.email }}
privateKeySecretRef:
name: {{ .Values.ingressShim.defaultIssuerName }}-account-key
http01: {}
{{- end -}}
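The `eq` branches above hard-code the two Let's Encrypt directory URLs, and any other issuer name falls through to `.Values.letsencrypt.server`. A values override exercising that fallback might look like this (a sketch; the issuer name and server URL are hypothetical):

```yaml
ingressShim:
  defaultIssuerName: my-private-acme   # neither letsencrypt-staging nor letsencrypt-prod
letsencrypt:
  email: user@example.com
  server: "https://acme.example.internal/directory"   # custom ACME directory endpoint
```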
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: {{ template "cert-manager.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ template "cert-manager.name" . }}
chart: {{ template "cert-manager.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ template "cert-manager.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "cert-manager.name" . }}
release: {{ .Release.Name }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | indent 8 }}
{{- end }}
annotations:
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "cert-manager.serviceAccountName" . }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
{{- if .Values.clusterResourceNamespace }}
- --cluster-resource-namespace={{ .Values.clusterResourceNamespace }}
{{- else }}
- --cluster-resource-namespace=$(POD_NAMESPACE)
{{- end }}
{{- if .Values.leaderElection.namespace }}
- --leader-election-namespace={{ .Values.leaderElection.namespace }}
{{- else }}
- --leader-election-namespace=$(POD_NAMESPACE)
{{- end }}
{{- if .Values.extraArgs }}
{{ toYaml .Values.extraArgs | indent 10 }}
{{- end }}
{{- with .Values.ingressShim }}
{{- if .defaultIssuerName }}
- --default-issuer-name={{ .defaultIssuerName }}
{{- end }}
{{- if .defaultIssuerKind }}
- --default-issuer-kind={{ .defaultIssuerKind }}
{{- end }}
{{- if .defaultACMEChallengeType }}
- --default-acme-issuer-challenge-type={{ .defaultACMEChallengeType }}
{{- end }}
{{- if .defaultACMEDNS01ChallengeProvider }}
- --default-acme-issuer-dns01-provider-name={{ .defaultACMEDNS01ChallengeProvider }}
{{- end }}
{{- end }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.extraEnv }}
{{ toYaml .Values.extraEnv | indent 10 }}
{{- end }}
{{- if .Values.http_proxy }}
- name: HTTP_PROXY
value: {{ .Values.http_proxy }}
{{- end }}
{{- if .Values.https_proxy }}
- name: HTTPS_PROXY
value: {{ .Values.https_proxy }}
{{- end }}
{{- if .Values.no_proxy }}
- name: NO_PROXY
value: {{ .Values.no_proxy }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- if .Values.podDnsPolicy }}
dnsPolicy: {{ .Values.podDnsPolicy }}
{{- end }}
{{- if .Values.podDnsConfig }}
dnsConfig:
{{ toYaml .Values.podDnsConfig | indent 8 }}
{{- end }}
{{- if .Values.createCustomResource -}}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: issuers.certmanager.k8s.io
labels:
app: {{ template "cert-manager.name" . }}
chart: {{ template "cert-manager.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
annotations:
"helm.sh/hook": crd-install
spec:
group: certmanager.k8s.io
version: v1alpha1
names:
kind: Issuer
plural: issuers
scope: Namespaced
{{- end -}}
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}
labels:
app: {{ template "cert-manager.name" . }}
chart: {{ template "cert-manager.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: ["certmanager.k8s.io"]
resources: ["certificates", "issuers", "clusterissuers"]
verbs: ["*"]
- apiGroups: [""]
# TODO: remove endpoints once 0.4 is released. We include it here in case
# users use the 'master' version of the Helm chart with a 0.2.x release of
# cert-manager that still performs leader election with Endpoint resources.
# We advise users don't do this, but some will anyway and this will reduce
# friction.
resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"]
verbs: ["*"]
- apiGroups: ["extensions"]
resources: ["ingresses"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}
labels:
app: {{ template "cert-manager.name" . }}
chart: {{ template "cert-manager.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
kind: ServiceAccount
{{- end -}}
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ template "cert-manager.name" . }}
chart: {{ template "cert-manager.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end -}}
# Default values for cert-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: quay.io/jetstack/cert-manager-controller
tag: v0.5.2
pullPolicy: IfNotPresent
createCustomResource: true
# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
# resources. By default, the same namespace as cert-manager is deployed within is
# used. This namespace will not be automatically created by the Helm chart.
clusterResourceNamespace: ""
leaderElection:
# Override the namespace used to store the ConfigMap for leader election
namespace: ""
certificateResourceShortNames: ["cert", "certs"]
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Optional additional arguments
extraArgs: []
# Use this flag to set a namespace that cert-manager will use to store
# supporting resources required for each ClusterIssuer (default is kube-system)
# - --cluster-resource-namespace=kube-system
extraEnv: []
# - name: SOME_VAR
# value: 'some value'
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
podAnnotations: {}
podLabels: {}
# Optional DNS settings, useful if you have a public and private DNS zone for
# the same domain on Route 53. What follows is an example of ensuring
# cert-manager can access an ingress or DNS TXT records at all times.
# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for
# the cluster to work.
# podDnsPolicy: "None"
# podDnsConfig:
# nameservers:
# - "1.1.1.1"
# - "8.8.8.8"
nodeSelector: {}
ingressShim:
defaultIssuerName: "letsencrypt-staging"
defaultIssuerKind: "ClusterIssuer"
defaultACMEChallengeType: "http01"
# defaultACMEDNS01ChallengeProvider: ""
letsencrypt:
email: user@example.com
# specifies a custom Let's Encrypt server; both the staging and prod Let's Encrypt servers have default values in clusterissuer.yaml
server: "https://acme-staging-v02.api.letsencrypt.org/directory"
# This is used by the static manifest generator in order to create a static
# namespace manifest for the namespace that cert-manager is being installed
# within. It should **not** be used if you are using Helm for deployment.
createNamespaceResource: false
# Use these variables to configure the HTTP_PROXY environment variables
# http_proxy: "http://proxy:8080"
# https_proxy: "http://proxy:8080"
# no_proxy: 127.0.0.1,localhost
clusterissuerEnabled: true
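For illustration only (the email below is a placeholder, not part of the chart), a minimal override file that switches the bundled ClusterIssuer to the production Let's Encrypt endpoint only needs to change the issuer name, since the ClusterIssuer template above maps `letsencrypt-prod` and `letsencrypt-staging` to their ACME URLs automatically:

```yaml
# custom.yaml -- illustrative overrides; replace the email with a real ACME registration address
ingressShim:
  defaultIssuerName: "letsencrypt-prod"   # mapped by the template to https://acme-v02.api.letsencrypt.org/directory
letsencrypt:
  email: admin@example.com
```

Pass it at install time with `helm install -f custom.yaml <chart>`.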
apiVersion: v1 apiVersion: v1
description: Helm Chart Repository with support for Amazon S3 and Google Cloud Storage description: Helm Chart Repository with support for Amazon S3 and Google Cloud Storage
name: chartmuseum name: chartmuseum
version: 1.6.0 version: 1.6.2
appVersion: 0.7.1 appVersion: 0.7.1
home: https://github.com/chartmuseum/chartmuseum home: https://github.com/chartmuseum/chartmuseum
icon: https://raw.githubusercontent.com/chartmuseum/chartmuseum/master/logo.png icon: file://../logo.png
keywords: keywords:
- chartmuseum - chartmuseum
- helm - helm
......
...@@ -108,6 +108,8 @@ their default values. See values.yaml for all available options. ...@@ -108,6 +108,8 @@ their default values. See values.yaml for all available options.
| `gcp.secret.enabled` | Flag for the GCP service account | `false` | | `gcp.secret.enabled` | Flag for the GCP service account | `false` |
| `gcp.secret.name` | Secret name for the GCP json file | `` | | `gcp.secret.name` | Secret name for the GCP json file | `` |
| `gcp.secret.key` | Secret key for the GCP json file | `credentials.json` | | `gcp.secret.key` | Secret key for the GCP json file | `credentials.json` |
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.clusterIP` | Static clusterIP or None for headless services| `nil` |
Specify each parameter using the `--set key=value[,key=value]` argument to Specify each parameter using the `--set key=value[,key=value]` argument to
`helm install`. `helm install`.
......
...@@ -245,6 +245,11 @@ questions: ...@@ -245,6 +245,11 @@ questions:
group: "Storage Options" group: "Storage Options"
label: Default StorageClass for Local Storage label: Default StorageClass for Local Storage
show_if: "env.open.STORAGE=local&&persistence.enabled=true" show_if: "env.open.STORAGE=local&&persistence.enabled=true"
- variable: persistence.existingClaim
default: ""
description: "If not empty, uses the specified existing PVC instead of creating new one"
type: pvc
label: Uses Existing Persistent Volume Cliam for LocalStorage
# Service and L7 LoadBalancer # Service and L7 LoadBalancer
- variable: ingress.enabled - variable: ingress.enabled
default: true default: true
......
...@@ -25,6 +25,6 @@ OR ...@@ -25,6 +25,6 @@ OR
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "chartmuseum.name" . }}" -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "chartmuseum.name" . }}" -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo http://127.0.0.1:8080{{ .Values.env.open.CONTEXT_PATH }}/ echo http://127.0.0.1:8080{{ .Values.env.open.CONTEXT_PATH }}/
kubectl port-forward $POD_NAME 8080:8080 kubectl port-forward $POD_NAME 8080:8080 --namespace {{ .Release.Namespace }}
{{- end }} {{- end }}
...@@ -100,7 +100,7 @@ spec: ...@@ -100,7 +100,7 @@ spec:
{{- end }} {{- end }}
volumes: volumes:
- name: storage-volume - name: storage-volume
{{- if and .Values.persistence.enabled (eq .Values.env.open.STORAGE "local") }} {{- if .Values.persistence.enabled }}
persistentVolumeClaim: persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "chartmuseum.fullname" .) }} claimName: {{ .Values.persistence.existingClaim | default (include "chartmuseum.fullname" .) }}
{{- else }} {{- else }}
......
{{- if .Values.ingress.enabled -}}
{{- $servicePort := .Values.service.externalPort -}} {{- $servicePort := .Values.service.externalPort -}}
{{- $serviceName := include "chartmuseum.fullname" . -}} {{- $serviceName := include "chartmuseum.fullname" . -}}
{{- if .Values.ingress.enabled }}
---
apiVersion: extensions/v1beta1 apiVersion: extensions/v1beta1
kind: Ingress kind: Ingress
metadata: metadata:
name: {{ template "chartmuseum.fullname" . }} name: {{ include "chartmuseum.fullname" . }}
labels:
app: {{ template "chartmuseum.name" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
annotations: annotations:
{{ toYaml .Values.ingress.annotations | indent 4 }} {{ toYaml .Values.ingress.annotations | indent 4 }}
labels: labels:
......
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}} {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
{{- if eq .Values.env.open.STORAGE "local" }}
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
apiVersion: v1 apiVersion: v1
metadata: metadata:
...@@ -21,4 +20,3 @@ spec: ...@@ -21,4 +20,3 @@ spec:
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }}
...@@ -12,6 +12,11 @@ metadata: ...@@ -12,6 +12,11 @@ metadata:
{{- end }} {{- end }}
spec: spec:
type: {{ .Values.service.type }} type: {{ .Values.service.type }}
{{- if eq .Values.service.type "ClusterIP" }}
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- end }}
ports: ports:
- port: {{ .Values.service.externalPort }} - port: {{ .Values.service.externalPort }}
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
......
...@@ -98,18 +98,18 @@ replica: ...@@ -98,18 +98,18 @@ replica:
# iam.amazonaws.com/role: role-arn # iam.amazonaws.com/role: role-arn
service: service:
type: ClusterIP type: ClusterIP
# clusterIP: None
externalPort: 8080 externalPort: 8080
nodePort: nodePort:
annotations: {} annotations: {}
# resources: {} resources: {}
resources: # limits:
limits: # cpu: 100m
cpu: 100m # memory: 128Mi
memory: 128Mi # requests:
requests: # cpu: 80m
cpu: 80m # memory: 64Mi
memory: 64Mi
probes: probes:
liveness: liveness:
......
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
apiVersion: v1
description: Helm Chart Repository with support for Amazon S3 and Google Cloud Storage
name: chartmuseum
version: 1.6.0
appVersion: 0.7.1
home: https://github.com/chartmuseum/chartmuseum
icon: https://raw.githubusercontent.com/chartmuseum/chartmuseum/master/logo.png
keywords:
- chartmuseum
- helm
- charts repo
maintainers:
- name: codefresh-io
email: opensource@codefresh.io
- name: cloudposse
email: hello@cloudposse.com
- name: chartmuseum
email: chartmuseum@gmail.com
approvers:
- jdolitsky
- goruha
reviewers:
- jdolitsky
- goruha
# ChartMuseum Helm Chart
Deploy your own private ChartMuseum.
Please also see https://github.com/kubernetes-helm/chartmuseum
## Table of Contents
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Prerequisites](#prerequisites)
- [Configuration](#configuration)
- [Installation](#installation)
- [Using with Amazon S3](#using-with-amazon-s3)
- [permissions grant with access keys](#permissions-grant-with-access-keys)
- [permissions grant with IAM instance profile](#permissions-grant-with-iam-instance-profile)
- [permissions grant with IAM assumed role](#permissions-grant-with-iam-assumed-role)
- [Using with Google Cloud Storage](#using-with-google-cloud-storage)
- [Using with Microsoft Azure Blob Storage](#using-with-microsoft-azure-blob-storage)
- [Using with Alibaba Cloud OSS Storage](#using-with-alibaba-cloud-oss-storage)
- [Using with local filesystem storage](#using-with-local-filesystem-storage)
- [Example storage class](#example-storage-class)
- [Uninstall](#uninstall)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Prerequisites
* Kubernetes with extensions/v1beta1 available
* [If enabled] A persistent storage resource and RW access to it
* [If enabled] Kubernetes StorageClass for dynamic provisioning
## Configuration
By default this chart will not have persistent storage, and the API service
will be *DISABLED*. This protects against unauthorized access to the API
with default configuration values.
For a more robust solution, supply `helm install` with a custom values.yaml.
You are also required to create the StorageClass resource ahead of time:
```shell
kubectl create -f /path/to/storage_class.yaml
```
The following table lists common configurable parameters of the chart and
their default values. See values.yaml for all available options.
| Parameter | Description | Default |
|----------------------------------------|---------------------------------------------|-----------------------------------------------------|
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `image.repository` | Container image to use | `chartmuseum/chartmuseum` |
| `image.tag` | Container image tag to deploy | `v0.7.1` |
| `persistence.accessMode` | Access mode to use for PVC | `ReadWriteOnce` |
| `persistence.enabled` | Whether to use a PVC for persistent storage | `false` |
| `persistence.size` | Amount of space to claim for PVC | `8Gi` |
| `persistence.storageClass` | Storage Class to use for PVC | `-` |
| `replicaCount` | k8s replicas | `1` |
| `resources.limits.cpu` | Container maximum CPU | `100m` |
| `resources.limits.memory` | Container maximum memory | `128Mi` |
| `resources.requests.cpu` | Container requested CPU | `80m` |
| `resources.requests.memory` | Container requested memory | `64Mi` |
| `serviceAccount.create` | If true, create the service account | `false` |
| `serviceAccount.name` | Name of the serviceAccount to create or use | `{{ chartmuseum.fullname }}` |
| `securityContext` | Map of securityContext for the pod | `{}` |
| `nodeSelector` | Map of node labels for pod assignment | `{}` |
| `tolerations` | List of node taints to tolerate | `[]` |
| `affinity` | Map of node/pod affinities | `{}` |
| `env.open.STORAGE` | Storage Backend to use | `local` |
| `env.open.STORAGE_ALIBABA_BUCKET` | Bucket to store charts in for Alibaba | `` |
| `env.open.STORAGE_ALIBABA_PREFIX` | Prefix to store charts under for Alibaba | `` |
| `env.open.STORAGE_ALIBABA_ENDPOINT` | Alternative Alibaba endpoint | `` |
| `env.open.STORAGE_ALIBABA_SSE` | Server side encryption algorithm to use | `` |
| `env.open.STORAGE_AMAZON_BUCKET` | Bucket to store charts in for AWS | `` |
| `env.open.STORAGE_AMAZON_ENDPOINT` | Alternative AWS endpoint | `` |
| `env.open.STORAGE_AMAZON_PREFIX` | Prefix to store charts under for AWS | `` |
| `env.open.STORAGE_AMAZON_REGION` | Region to use for bucket access for AWS | `` |
| `env.open.STORAGE_AMAZON_SSE` | Server side encryption algorithm to use | `` |
| `env.open.STORAGE_GOOGLE_BUCKET` | Bucket to store charts in for GCP | `` |
| `env.open.STORAGE_GOOGLE_PREFIX` | Prefix to store charts under for GCP | `` |
| `env.open.STORAGE_MICROSOFT_CONTAINER` | Container to store charts under for MS | `` |
| `env.open.STORAGE_MICROSOFT_PREFIX` | Prefix to store charts under for MS | `` |
| `env.open.STORAGE_OPENSTACK_CONTAINER` | Container to store charts for openstack | `` |
| `env.open.STORAGE_OPENSTACK_PREFIX` | Prefix to store charts for openstack | `` |
| `env.open.STORAGE_OPENSTACK_REGION` | Region of openstack container | `` |
| `env.open.STORAGE_OPENSTACK_CACERT` | Path to a CA cert bundle for openstack | `` |
| `env.open.CHART_POST_FORM_FIELD_NAME` | Form field to query for chart file content | `` |
| `env.open.PROV_POST_FORM_FIELD_NAME` | Form field to query for chart provenance | `` |
| `env.open.DEPTH` | Levels of nested repos for multitenancy | `0` |
| `env.open.DEBUG` | Show debug messages | `false` |
| `env.open.LOG_JSON` | Output structured logs in JSON | `true` |
| `env.open.DISABLE_STATEFILES` | Disable use of index-cache.yaml | `false` |
| `env.open.DISABLE_METRICS` | Disable Prometheus metrics | `true` |
| `env.open.DISABLE_API` | Disable all routes prefixed with /api | `true` |
| `env.open.ALLOW_OVERWRITE` | Allow chart versions to be re-uploaded | `false` |
| `env.open.CHART_URL` | Absolute url for .tgzs in index.yaml | `` |
| `env.open.AUTH_ANONYMOUS_GET` | Allow anon GET operations when auth is used | `false` |
| `env.open.CONTEXT_PATH` | Set the base context path | `` |
| `env.open.INDEX_LIMIT` | Parallel scan limit for the repo indexer | `` |
| `env.open.CACHE` | Cache store, can be one of: redis | `` |
| `env.open.CACHE_REDIS_ADDR` | Address of Redis service (host:port) | `` |
| `env.open.CACHE_REDIS_DB` | Redis database to be selected after connect | `0` |
| `env.secret.BASIC_AUTH_USER` | Username for basic HTTP authentication | `` |
| `env.secret.BASIC_AUTH_PASS` | Password for basic HTTP authentication | `` |
| `env.secret.CACHE_REDIS_PASSWORD` | Redis requirepass server configuration | `` |
| `gcp.secret.enabled` | Flag for the GCP service account | `false` |
| `gcp.secret.name` | Secret name for the GCP json file | `` |
| `gcp.secret.key` | Secret key for the GCP json file | `credentials.json` |
Specify each parameter using the `--set key=value[,key=value]` argument to
`helm install`.
## Installation
```shell
helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
```
### Using with Amazon S3
Make sure your environment is properly set up to access `my-s3-bucket`.
You need at least the following permissions inside your IAM policy:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowListObjects",
"Effect": "Allow",
"Action": [
"s3:ListBucket"
],
"Resource": "arn:aws:s3:::my-s3-bucket"
},
{
"Sid": "AllowObjectsCRUD",
"Effect": "Allow",
"Action": [
"s3:DeleteObject",
"s3:GetObject",
"s3:PutObject"
],
"Resource": "arn:aws:s3:::my-s3-bucket/*"
}
]
}
```
You can grant it to `chartmuseum` in several ways:
#### permissions grant with access keys
Grant permissions to a dedicated user and use its access keys for authentication on AWS.
Specify `custom.yaml` with values such as:
```yaml
env:
open:
STORAGE: amazon
STORAGE_AMAZON_BUCKET: my-s3-bucket
STORAGE_AMAZON_PREFIX:
STORAGE_AMAZON_REGION: us-east-1
secret:
AWS_ACCESS_KEY_ID: "********" ## aws access key id value
AWS_SECRET_ACCESS_KEY: "********" ## aws access key secret value
```
Run the install command:
```shell
helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
```
#### permissions grant with IAM instance profile
You can grant permissions to the k8s node IAM instance profile.
For more information, read this [article](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html).
Specify `custom.yaml` with values such as:
```yaml
env:
open:
STORAGE: amazon
STORAGE_AMAZON_BUCKET: my-s3-bucket
STORAGE_AMAZON_PREFIX:
STORAGE_AMAZON_REGION: us-east-1
```
Run the install command:
```shell
helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
```
#### permissions grant with IAM assumed role
To provide access with an assumed role, you need to install [kube2iam](https://github.com/kubernetes/charts/tree/master/stable/kube2iam)
and create a role with the required permissions granted.
Specify `custom.yaml` with values such as:
```yaml
env:
open:
STORAGE: amazon
STORAGE_AMAZON_BUCKET: my-s3-bucket
STORAGE_AMAZON_PREFIX:
STORAGE_AMAZON_REGION: us-east-1
replica:
annotations:
iam.amazonaws.com/role: "{assumed role name}"
```
Run the install command:
```shell
helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
```
### Using with Google Cloud Storage
Make sure your environment is properly set up to access `my-gcs-bucket`.
Specify `custom.yaml` with values such as:
```yaml
env:
open:
STORAGE: google
STORAGE_GOOGLE_BUCKET: my-gcs-bucket
STORAGE_GOOGLE_PREFIX:
```
### Using with Google Cloud Storage and a Google Service Account
Google service account credentials are stored in a JSON file. There are two approaches here. Ideally, you don't want to send your secrets to Tiller; in that case, before installing this chart, you should create a secret with those credentials:
```shell
kubectl create secret generic chartmuseum-secret --from-file=credentials.json="my-project-45e35d85a593.json"
```
Then you can either use a values YAML file or set those values on the command line:
```shell
helm install stable/chartmuseum --debug --set gcp.secret.enabled=true,env.open.STORAGE=google,env.open.DISABLE_API=false,env.open.STORAGE_GOOGLE_BUCKET=my-gcp-chartmuseum,gcp.secret.name=chartmuseum-secret
```
If you prefer to use a YAML file:
```yaml
env:
open:
STORAGE: google
STORAGE_GOOGLE_BUCKET: my-gcs-bucket
STORAGE_GOOGLE_PREFIX:
gcp:
secret:
enabled: true
name: chartmuseum-secret
key: credentials.json
```
Run the install command:
```shell
helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
```
If you don't mind adding your secret to Tiller (you shouldn't do it), these are the values:
```yaml
env:
open:
STORAGE: google
STORAGE_GOOGLE_BUCKET: my-gcs-bucket
STORAGE_GOOGLE_PREFIX:
secret:
GOOGLE_CREDENTIALS_JSON: my-json-file-base64-encoded
gcp:
secret:
enabled: true
```
Run the install command:
```shell
helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
```
To set the values directly on the command line, use the following commands. Note that we have to base64-encode the JSON file because we cannot pass multi-line text as a value.
```shell
export JSONKEY=$(cat my-project-77e35d85a593.json | base64)
helm install stable/chartmuseum --debug --set gcp.secret.enabled=true,env.secret.GOOGLE_CREDENTIALS_JSON=${JSONKEY},env.open.STORAGE=google,env.open.DISABLE_API=false,env.open.STORAGE_GOOGLE_BUCKET=my-gcp-chartmuseum
```
### Using with Microsoft Azure Blob Storage
Make sure your environment is properly set up to access `mycontainer`.
To do so, you must set the following env vars:
- `AZURE_STORAGE_ACCOUNT`
- `AZURE_STORAGE_ACCESS_KEY`
Specify `custom.yaml` with values such as:
```yaml
env:
open:
STORAGE: microsoft
STORAGE_MICROSOFT_CONTAINER: mycontainer
# prefix to store charts for microsoft storage backend
STORAGE_MICROSOFT_PREFIX:
secret:
AZURE_STORAGE_ACCOUNT: "********" ## azure storage account
AZURE_STORAGE_ACCESS_KEY: "********" ## azure storage account access key
```
Run the install command:
```shell
helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
```
### Using with Alibaba Cloud OSS Storage
Make sure your environment is properly set up to access `my-oss-bucket`.
To do so, you must set the following env vars:
- `ALIBABA_CLOUD_ACCESS_KEY_ID`
- `ALIBABA_CLOUD_ACCESS_KEY_SECRET`
Specify `custom.yaml` with values such as:
```yaml
env:
open:
STORAGE: alibaba
STORAGE_ALIBABA_BUCKET: my-oss-bucket
STORAGE_ALIBABA_PREFIX:
STORAGE_ALIBABA_ENDPOINT: oss-cn-beijing.aliyuncs.com
secret:
ALIBABA_CLOUD_ACCESS_KEY_ID: "********" ## alibaba OSS access key id
ALIBABA_CLOUD_ACCESS_KEY_SECRET: "********" ## alibaba OSS access key secret
```
Run the install command:
```shell
helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
```
### Using with Openstack Object Storage
Make sure your environment is properly set up to access `mycontainer`.
To do so, you must set the following env vars (depending on your openstack version):
- `OS_AUTH_URL`
- either `OS_PROJECT_NAME` or `OS_TENANT_NAME` or `OS_PROJECT_ID` or `OS_TENANT_ID`
- either `OS_DOMAIN_NAME` or `OS_DOMAIN_ID`
- either `OS_USERNAME` or `OS_USERID`
- `OS_PASSWORD`
Specify `custom.yaml` with values such as:
```yaml
env:
open:
STORAGE: openstack
STORAGE_OPENSTACK_CONTAINER: mycontainer
STORAGE_OPENSTACK_PREFIX:
STORAGE_OPENSTACK_REGION: YOURREGION
secret:
OS_AUTH_URL: https://myauth.url.com/v2.0/
OS_TENANT_ID: yourtenantid
OS_USERNAME: yourusername
OS_PASSWORD: yourpassword
```
Run the install command:
```shell
helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
```
### Using with local filesystem storage
By default, ChartMuseum uses local filesystem storage.
However, on pod recreation it will lose all charts; to prevent that, enable persistent storage.
```yaml
env:
open:
STORAGE: local
persistence:
enabled: true
accessMode: ReadWriteOnce
size: 8Gi
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
## Chartmuseum data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
```
Run the install command:
```shell
helm install --name my-chartmuseum -f custom.yaml stable/chartmuseum
```
#### Example storage class
An example storage-class.yaml is provided here for use with a Ceph cluster.
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: storage-volume
provisioner: kubernetes.io/rbd
parameters:
monitors: "10.11.12.13:4567,10.11.12.14:4567"
adminId: admin
adminSecretName: thesecret
adminSecretNamespace: default
pool: chartstore
userId: user
userSecretName: thesecret
```
## Uninstall
By default, a deliberate uninstall will result in the persistent volume
claim being deleted.
```shell
helm delete my-chartmuseum
```
To delete the deployment and its history:
```shell
helm delete --purge my-chartmuseum
```
questions:
- variable: defaultImage
default: true
description: "Use default Docker image"
label: Use Default Image
type: boolean
show_subquestion_if: false
group: "Container Images"
subquestions:
- variable: image.repository
default: "chartmuseum/chartmuseum"
description: "Docker image repository"
type: string
label: Image Repository
- variable: image.tag
default: "v0.7.1"
description: "Docker image tag"
type: string
label: Image Tag
# storage settings
- variable: env.open.STORAGE
required: true
default: "local"
description: "Storage Backend to use"
type: enum
label: Storage Backend To Use
group: "Storage Options"
options:
- "local"
- "amazon"
- "alibaba"
- "google"
- "microsoft"
- "openstack"
- variable: env.open.STORAGE_AMAZON_BUCKET
default: ""
type: string
description: "S3 bucket to store charts for amazon storage backend"
label: S3 Bucket Name
group: "Storage Options"
required: true
show_if: "env.open.STORAGE=amazon"
- variable: env.open.STORAGE_AMAZON_PREFIX
default: ""
type: string
description: "prefix to store charts for amazon storage backend"
label: Prefix To Store Charts Under for AWS S3 (Optional)
group: "Storage Options"
show_if: "env.open.STORAGE=amazon"
- variable: env.open.STORAGE_AMAZON_REGION
default: ""
required: true
type: string
description: "Region to use for bucket access for AWS "
label: Region for S3 Bucket Storage
group: "Storage Options"
show_if: "env.open.STORAGE=amazon"
# Alibaba Cloud Storage Options
- variable: env.open.STORAGE_ALIBABA_BUCKET
default: ""
type: string
description: "OSS bucket to store charts for alibaba storage backend"
label: OSS Bucket Name
group: "Storage Options"
show_if: "env.open.STORAGE=alibaba"
- variable: env.open.STORAGE_ALIBABA_PREFIX
default: ""
type: string
description: "Prefix to store charts for alibaba storage backend"
label: OSS Bucket Prefix
group: "Storage Options"
show_if: "env.open.STORAGE=alibaba"
- variable: env.open.STORAGE_ALIBABA_ENDPOINT
default: ""
type: string
description: "OSS endpoint to store charts for alibaba storage backend"
label: Alternative OSS Storage Endpoint
group: "Storage Options"
show_if: "env.open.STORAGE=alibaba"
# Google Storage Options
- variable: env.open.STORAGE_GOOGLE_BUCKET
default: ""
type: string
description: "GCS bucket to store charts for google storage backend"
label: GCS Bucket Name
group: "Storage Options"
show_if: "env.open.STORAGE=google"
- variable: env.open.STORAGE_GOOGLE_PREFIX
default: ""
type: string
description: "Prefix to store charts for google storage backend"
label: GCS Bucket Prefix
group: "Storage Options"
show_if: "env.open.STORAGE=google"
# Microsoft Azure Storage Options
- variable: env.open.STORAGE_MICROSOFT_CONTAINER
default: ""
type: string
description: "Container to store charts for microsoft storage backend"
label: Microsoft Azure Blob Storage Name
group: "Storage Options"
show_if: "env.open.STORAGE=microsoft"
- variable: env.open.STORAGE_MICROSOFT_PREFIX
default: ""
type: string
description: "Prefix to store charts for microsoft storage backend"
label: Microsoft Azure Blob Storage Prefix
group: "Storage Options"
show_if: "env.open.STORAGE=microsoft"
# OpenStack Storage Options
- variable: env.open.STORAGE_OPENSTACK_CONTAINER
default: ""
type: string
description: "Prefix to store charts for openstack storage backend"
label: Openstack Object Storage Container Name
group: "Storage Options"
show_if: "env.open.STORAGE=openstack"
- variable: env.open.STORAGE_OPENSTACK_PREFIX
default: ""
type: string
description: "Prefix to store charts for openstack storage backend"
label: Prefix To Openstack Object Storage Container
group: "Storage Options"
show_if: "env.open.STORAGE=openstack"
- variable: env.open.STORAGE_OPENSTACK_REGION
default: ""
type: string
description: "Region of openstack container"
label: Region Of Openstack Object Storage Container
group: "Storage Options"
show_if: "env.open.STORAGE=openstack"
# Storage Secret
- variable: env.secret.AWS_ACCESS_KEY_ID
default: ""
type: string
description: "AWS access key id value"
label: AWS Access Key ID Value
group: "Storage Secret"
show_if: "env.open.STORAGE=amazon"
- variable: env.secret.AWS_SECRET_ACCESS_KEY
default: ""
type: string
description: "aws access key secret value "
label: AWS Access Key Secret Value
group: "Storage Secret"
show_if: "env.open.STORAGE=amazon"
- variable: env.secret.ALIBABA_CLOUD_ACCESS_KEY_ID
default: ""
type: string
description: "alibaba OSS access key id"
label: OSS Access Key ID
group: "Storage Secret"
show_if: "env.open.STORAGE=alibaba"
- variable: env.secret.ALIBABA_CLOUD_ACCESS_KEY_SECRET
default: ""
type: string
description: "alibaba OSS access key secret "
label: OSS Access Key Secret
group: "Storage Secret"
show_if: "env.open.STORAGE=alibaba"
- variable: gcp.secret.enabled
default: false
type: boolean
description: "Flag for the GCP service account"
label: Enable GCP Service Account
group: "Storage Secret"
show_if: "env.open.STORAGE=google"
show_subquestion_if: true
subquestions:
- variable: gcp.secret.name
default: ""
type: string
description: "secret name for the gcp json file"
label: Secret Name For The GCP Json File
- variable: gcp.secret.key
default: "credentials.json"
type: string
description: "Secret key for the GCP json file"
label: Secret Key For The GCP Json File
# Openstack Object Storage secret
- variable: env.secret.OS_AUTH_URL
default: ""
type: string
description: "Openstack object storage auth url"
label: Auth URL Of Openstack Object Storage
group: "Storage Secret"
show_if: "env.open.STORAGE=openstack"
- variable: env.secret.OS_TENANT_ID
default: ""
type: string
description: "Openstack object storage tenant id"
label: Tenant ID Of Openstack Object Storage
group: "Storage Secret"
show_if: "env.open.STORAGE=openstack"
- variable: env.secret.OS_USERNAME
default: ""
type: string
description: "Openstack object storage username"
label: Username Of Openstack Object Storage
group: "Storage Secret"
show_if: "env.open.STORAGE=openstack"
- variable: env.secret.OS_PASSWORD
default: ""
type: string
description: "Openstack object storage password"
label: Password Of Openstack Object Storage
group: "Storage Secret"
show_if: "env.open.STORAGE=openstack"
# Microsoft azure secret
- variable: env.secret.AZURE_STORAGE_ACCOUNT
default: ""
type: string
description: "azure storage account"
label: Azure Storage Account
group: "Storage Secret"
show_if: "env.open.STORAGE=microsoft"
- variable: env.secret.AZURE_STORAGE_ACCESS_KEY
default: ""
type: string
description: "azure storage account access key "
label: Azure Storage Account Access Key
group: "Storage Secret"
show_if: "env.open.STORAGE=microsoft"
# Local Storage Settings
- variable: persistence.enabled
default: true
type: boolean
description: "use a PVC for persistent storage for local storage"
label: Enable Persistent Storage For Local Storage
group: "Storage Options"
show_if: "env.open.STORAGE=local"
show_subquestion_if: true
subquestions:
- variable: persistence.size
default: "10Gi"
type: string
description: "Local Storage Persistent Volume Size"
label: Local Storage Volume Size
group: "Storage Options"
show_if: "env.open.STORAGE=local&&persistence.enabled=true"
- variable: persistence.storageClass
default: ""
description: "If undefined or null, uses the default StorageClass. Default to null"
type: storageclass
group: "Storage Options"
label: Default StorageClass for Local Storage
show_if: "env.open.STORAGE=local&&persistence.enabled=true"
- variable: persistence.existingClaim
default: ""
description: "If not empty, uses the specified existing PVC instead of creating new one"
type: pvc
label: Existing Persistent Volume Claim for Local Storage
# Service and L7 LoadBalancer
- variable: ingress.enabled
default: true
description: "Expose app using Layer 7 Load Balancer - ingress"
type: boolean
label: Expose app using Layer 7 Load Balancer
show_subquestion_if: true
group: "Services and Load Balancing"
subquestions:
- variable: ingress.hosts[0]
default: "xip.io"
description: "Hostname to your app installation"
type: hostname
required: true
label: Hostname
- variable: service.type
required: true
default: "NodePort"
description: "ChartMuseum Service Type"
type: enum
label: Service Type For ChartMuseum
group: "Services and Load Balancing"
show_if: "ingress.enabled=false"
options:
- "ClusterIP"
- "NodePort"
- "LoadBalancer"
# chartmuseum options
- variable: env.secret.BASIC_AUTH_USER
default: ""
description: "Username for basic http authentication"
type: string
label: Username For Basic Http Authentication (Optional)
group: "ChartMuseum Settings"
- variable: env.secret.BASIC_AUTH_PASS
default: ""
description: "Password for basic http authentication"
type: string
label: Password For Basic Http Authentication (Optional)
group: "ChartMuseum Settings"
- variable: env.open.CHART_URL
default: ""
description: "Absolute url for .tgzs in index.yaml"
type: string
label: Absolute URL For .tgzs In index.yaml
group: "ChartMuseum Settings"
- variable: env.open.SHOW_ADVANCED
default: false
description: "Show advanced ChartMuseum settings"
type: boolean
label: Show Advanced ChartMuseum Settings
group: "ChartMuseum Settings"
show_subquestion_if: true
subquestions:
- variable: env.open.DEPTH
default: "0"
description: "Levels of nested repos for multitenancy."
type: string
label: Levels Of Nested Repos For Multitenancy
- variable: env.open.ALLOW_OVERWRITE
default: false
description: "Allow chart versions to be re-uploaded"
type: boolean
label: Allow Chart Versions To Be Re-uploaded
- variable: env.open.AUTH_ANONYMOUS_GET
default: false
description: "Allow anonymous GET operations when auth is used"
type: boolean
label: Allow Anonymous GET Operations When Auth Is Used
- variable: env.open.DISABLE_METRICS
default: true
description: "Disable Prometheus metrics of Chartmuseum"
type: boolean
label: Disable Prometheus Metrics
** Please be patient while the chart is being deployed **
Get the ChartMuseum URL by running:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "chartmuseum.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT{{ .Values.env.open.CONTEXT_PATH }}/
{{- else if contains "LoadBalancer" .Values.service.type }}
** Please ensure an external IP is associated to the {{ template "chartmuseum.fullname" . }} service before proceeding **
** Watch the status using: kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "chartmuseum.fullname" . }} **
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "chartmuseum.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.externalPort }}{{ .Values.env.open.CONTEXT_PATH }}/
OR
export SERVICE_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "chartmuseum.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
echo http://$SERVICE_HOST:{{ .Values.service.externalPort }}{{ .Values.env.open.CONTEXT_PATH }}/
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "chartmuseum.name" . }}" -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo http://127.0.0.1:8080{{ .Values.env.open.CONTEXT_PATH }}/
kubectl port-forward $POD_NAME 8080:8080
{{- end }}
{{- /*
name defines a template for the name of the chartmuseum chart.
The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should
not exceed 63 characters.
Parameters:
- .Values.nameOverride: Replaces the computed name with this given name
- .Values.namePrefix: Prefix
- .Values.global.namePrefix: Global prefix
- .Values.nameSuffix: Suffix
- .Values.global.nameSuffix: Global suffix
The applied order is: "global prefix + prefix + name + suffix + global suffix"
Usage: 'name: "{{- template "chartmuseum.name" . -}}"'
*/ -}}
{{- define "chartmuseum.name"}}
{{- $global := default (dict) .Values.global -}}
{{- $base := default .Chart.Name .Values.nameOverride -}}
{{- $gpre := default "" $global.namePrefix -}}
{{- $pre := default "" .Values.namePrefix -}}
{{- $suf := default "" .Values.nameSuffix -}}
{{- $gsuf := default "" $global.nameSuffix -}}
{{- $name := print $gpre $pre $base $suf $gsuf -}}
{{- $name | lower | trunc 54 | trimSuffix "-" -}}
{{- end -}}
{{- /*
fullname defines a suitably unique name for a resource by combining
the release name and the chartmuseum chart name.
The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should
not exceed 63 characters.
Parameters:
- .Values.fullnameOverride: Replaces the computed name with this given name
- .Values.fullnamePrefix: Prefix
- .Values.global.fullnamePrefix: Global prefix
- .Values.fullnameSuffix: Suffix
- .Values.global.fullnameSuffix: Global suffix
The applied order is: "global prefix + prefix + name + suffix + global suffix"
Usage: 'name: "{{- template "chartmuseum.fullname" . -}}"'
*/ -}}
{{- define "chartmuseum.fullname"}}
{{- $global := default (dict) .Values.global -}}
{{- $base := default (printf "%s-%s" .Release.Name .Chart.Name) .Values.fullnameOverride -}}
{{- $gpre := default "" $global.fullnamePrefix -}}
{{- $pre := default "" .Values.fullnamePrefix -}}
{{- $suf := default "" .Values.fullnameSuffix -}}
{{- $gsuf := default "" $global.fullnameSuffix -}}
{{- $name := print $gpre $pre $base $suf $gsuf -}}
{{- $name | lower | trunc 54 | trimSuffix "-" -}}
{{- end -}}
{{- /*
chartmuseum.labels.standard prints the standard chartmuseum Helm labels.
The standard labels are frequently used in metadata.
*/ -}}
{{- define "chartmuseum.labels.standard" -}}
app: {{ template "chartmuseum.name" . }}
chart: {{ template "chartmuseum.chartref" . }}
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
{{- end -}}
{{- /*
chartmuseum.chartref prints a chart name and version.
It does minimal escaping for use in Kubernetes labels.
Example output:
chartmuseum-0.4.5
*/ -}}
{{- define "chartmuseum.chartref" -}}
{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}}
{{- end -}}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: {{ include "chartmuseum.fullname" . }}
annotations:
{{ toYaml .Values.deployment.annotations | indent 4 }}
labels:
{{ include "chartmuseum.labels.standard" . | indent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
strategy:
{{ toYaml .Values.strategy | indent 4 }}
revisionHistoryLimit: 10
template:
metadata:
name: {{ include "chartmuseum.fullname" . }}
annotations:
{{ toYaml .Values.replica.annotations | indent 8 }}
labels:
app: {{ template "chartmuseum.name" . }}
release: {{ .Release.Name | quote }}
spec:
containers:
- name: {{ .Chart.Name }}
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
{{- range $name, $value := .Values.env.open }}
{{- if not (empty $value) }}
- name: {{ $name | quote }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.gcp.secret.enabled }}
- name: GOOGLE_APPLICATION_CREDENTIALS
value: "/etc/secrets/google/credentials.json"
{{- end }}
{{- $secret_name := include "chartmuseum.fullname" . }}
{{- range $name, $value := .Values.env.secret }}
{{- if not ( empty $value) }}
- name: {{ $name | quote }}
valueFrom:
secretKeyRef:
name: {{ $secret_name }}
key: {{ $name | quote }}
{{- end }}
{{- end }}
args:
- --port=8080
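{{- /* when STORAGE is "local", chartmuseum reads and writes charts under /storage, which is backed by the storage-volume mount defined below */}}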
{{- if eq .Values.env.open.STORAGE "local" }}
- --storage-local-rootdir=/storage
{{- end }}
ports:
- name: http
containerPort: 8080
livenessProbe:
httpGet:
path: {{ .Values.env.open.CONTEXT_PATH }}/health
port: http
{{ toYaml .Values.probes.liveness | indent 10 }}
readinessProbe:
httpGet:
path: {{ .Values.env.open.CONTEXT_PATH }}/health
port: http
{{ toYaml .Values.probes.readiness | indent 10 }}
{{- /* emit the volumeMounts key once, so that enabling both local storage and the GCP secret does not produce a duplicate key */}}
{{- if or (eq .Values.env.open.STORAGE "local") .Values.gcp.secret.enabled }}
volumeMounts:
{{- end }}
{{- if eq .Values.env.open.STORAGE "local" }}
- mountPath: /storage
name: storage-volume
{{- end }}
{{- if .Values.gcp.secret.enabled }}
- mountPath: /etc/secrets/google
name: {{ include "chartmuseum.fullname" . }}-gcp
{{- end }}
{{- with .Values.resources }}
resources:
{{ toYaml . | indent 10 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- if .Values.serviceAccount.create }}
serviceAccountName: {{ include "chartmuseum.fullname" . }}
{{- else if .Values.serviceAccount.name }}
serviceAccountName: {{ .Values.serviceAccount.name }}
{{- end }}
{{- with .Values.securityContext }}
securityContext:
{{ toYaml . | indent 8 }}
{{- end }}
volumes:
- name: storage-volume
{{- if and .Values.persistence.enabled (eq .Values.env.open.STORAGE "local") }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "chartmuseum.fullname" .) }}
{{- else }}
emptyDir: {}
{{- end -}}
{{ if .Values.gcp.secret.enabled }}
- name: {{ include "chartmuseum.fullname" . }}-gcp
secret:
{{ if .Values.env.secret.GOOGLE_CREDENTIALS_JSON }}
secretName: {{ include "chartmuseum.fullname" . }}
items:
- key: GOOGLE_CREDENTIALS_JSON
path: credentials.json
{{ else }}
secretName: {{ .Values.gcp.secret.name }}
items:
- key: {{ .Values.gcp.secret.key }}
path: credentials.json
{{ end }}
{{ end }}
{{- if .Values.ingress.enabled -}}
{{- $servicePort := .Values.service.externalPort -}}
{{- $serviceName := include "chartmuseum.fullname" . -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ template "chartmuseum.fullname" . }}
labels:
app: {{ template "chartmuseum.name" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
annotations:
{{ toYaml .Values.ingress.annotations | indent 4 }}
labels:
{{- if .Values.ingress.labels }}
{{ toYaml .Values.ingress.labels | indent 4 }}
{{- end }}
{{ include "chartmuseum.labels.standard" . | indent 4 }}
spec:
rules:
{{- range $host := .Values.ingress.hosts }}
- host: {{ $host }}
http:
paths:
- path:
backend:
serviceName: {{ $serviceName }}
servicePort: {{ $servicePort }}
{{- end -}}
{{- if .Values.ingress.tls }}
tls:
{{ toYaml .Values.ingress.tls | indent 4 }}
{{- end -}}
{{- end -}}
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
{{- if eq .Values.env.open.STORAGE "local" }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ include "chartmuseum.fullname" . }}
labels:
app: {{ include "chartmuseum.fullname" . }}
release: {{ .Release.Name | quote }}
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
{{- end }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "chartmuseum.fullname" . }}
labels:
{{ include "chartmuseum.labels.standard" . | indent 4 }}
type: Opaque
data:
{{- range $name, $value := .Values.env.secret }}
{{- if not (empty $value) }}
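{{- /* GOOGLE_CREDENTIALS_JSON is expected to be supplied already base64-encoded (see the GCS section of the README), so it is written as-is rather than through b64enc */}}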
{{- if eq $name "GOOGLE_CREDENTIALS_JSON" }}
{{ $name }}: {{ $value }}
{{- else }}
{{ $name }}: {{ $value | b64enc }}
{{- end }}
{{- end }}
{{- end }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "chartmuseum.fullname" . }}
labels:
{{ include "chartmuseum.labels.standard" . | indent 4 }}
{{- if .Values.service.annotations }}
annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.externalPort }}
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{.Values.service.nodePort}}
{{- else }}
targetPort: http
{{- end }}
protocol: TCP
name: http
selector:
app: {{ template "chartmuseum.name" . }}
release: {{ .Release.Name | quote }}
{{- if .Values.serviceAccount.create -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "chartmuseum.fullname" . }}
labels:
{{ include "chartmuseum.labels.standard" . | indent 4 }}
{{- end -}}
replicaCount: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
image:
repository: chartmuseum/chartmuseum
tag: v0.7.1
pullPolicy: IfNotPresent
env:
open:
# storage backend, can be one of: local, alibaba, amazon, google, microsoft
STORAGE: local
# oss bucket to store charts for alibaba storage backend
STORAGE_ALIBABA_BUCKET:
# prefix to store charts for alibaba storage backend
STORAGE_ALIBABA_PREFIX:
# oss endpoint to store charts for alibaba storage backend
STORAGE_ALIBABA_ENDPOINT:
# server side encryption algorithm for alibaba storage backend, can be one
# of: AES256 or KMS
STORAGE_ALIBABA_SSE:
# s3 bucket to store charts for amazon storage backend
STORAGE_AMAZON_BUCKET:
# prefix to store charts for amazon storage backend
STORAGE_AMAZON_PREFIX:
# region of s3 bucket to store charts
STORAGE_AMAZON_REGION:
# alternative s3 endpoint
STORAGE_AMAZON_ENDPOINT:
# server side encryption algorithm
STORAGE_AMAZON_SSE:
# gcs bucket to store charts for google storage backend
STORAGE_GOOGLE_BUCKET:
# prefix to store charts for google storage backend
STORAGE_GOOGLE_PREFIX:
# container to store charts for microsoft storage backend
STORAGE_MICROSOFT_CONTAINER:
# prefix to store charts for microsoft storage backend
STORAGE_MICROSOFT_PREFIX:
# container to store charts for openstack storage backend
STORAGE_OPENSTACK_CONTAINER:
# prefix to store charts for openstack storage backend
STORAGE_OPENSTACK_PREFIX:
# region of openstack container
STORAGE_OPENSTACK_REGION:
# path to a CA cert bundle for your openstack endpoint
STORAGE_OPENSTACK_CACERT:
# form field which will be queried for the chart file content
CHART_POST_FORM_FIELD_NAME: chart
# form field which will be queried for the provenance file content
PROV_POST_FORM_FIELD_NAME: prov
# levels of nested repos for multitenancy. The default depth is 0 (singletenant server)
DEPTH: 0
# show debug messages
DEBUG: false
# output structured logs as json
LOG_JSON: true
# disable use of index-cache.yaml
DISABLE_STATEFILES: false
# disable Prometheus metrics
DISABLE_METRICS: true
# disable all routes prefixed with /api
DISABLE_API: false
# allow chart versions to be re-uploaded
ALLOW_OVERWRITE: false
# absolute url for .tgzs in index.yaml
CHART_URL:
# allow anonymous GET operations when auth is used
AUTH_ANONYMOUS_GET: false
# sets the base context path
CONTEXT_PATH:
# parallel scan limit for the repo indexer
INDEX_LIMIT: 0
# cache store, can be one of: redis (leave blank for inmemory cache)
CACHE:
# address of Redis service (host:port)
CACHE_REDIS_ADDR:
# Redis database to be selected after connect
CACHE_REDIS_DB: 0
secret:
# username for basic http authentication
BASIC_AUTH_USER:
# password for basic http authentication
BASIC_AUTH_PASS:
# GCP service account json file
GOOGLE_CREDENTIALS_JSON:
# Redis requirepass server configuration
CACHE_REDIS_PASSWORD:
deployment:
## Chartmuseum Deployment annotations
annotations: {}
# name: value
replica:
## Chartmuseum Replicas annotations
annotations: {}
## Read more about kube2iam to provide access to s3 https://github.com/jtblin/kube2iam
# iam.amazonaws.com/role: role-arn
service:
type: ClusterIP
externalPort: 8080
nodePort:
annotations: {}
# resources: {}
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 80m
memory: 64Mi
probes:
liveness:
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readiness:
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
serviceAccount:
create: false
# name:
securityContext: {}
nodeSelector: {}
tolerations: []
affinity: {}
persistence:
enabled: false
accessMode: ReadWriteOnce
size: 8Gi
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
## Chartmuseum data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## Ingress for load balancer
ingress:
enabled: false
## Chartmuseum Ingress labels
##
# labels:
# dns: "route53"
## Chartmuseum Ingress annotations
##
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
## Chartmuseum Ingress hostnames
## Must be provided if Ingress is enabled
##
# hosts:
# chartmuseum.domain.com:
# - /charts
# - /index.yaml
## Chartmuseum Ingress TLS configuration
## Secrets must be manually created in the namespace
##
# tls:
# - secretName: chartmuseum-server-tls
# hosts:
# - chartmuseum.domain.com
# Adding secrets to tiller is not a great option, so If you want to use an existing
# secret that contains the json file, you can use the following entries
gcp:
secret:
enabled: false
# Name of the secret that contains the encoded json
name:
# Secret key that holds the json value.
key: credentials.json
name: kubernetes-dashboard name: kubernetes-dashboard
version: 0.6.8 version: 0.8.0
appVersion: 1.8.3 appVersion: 1.10.0
description: General-purpose web UI for Kubernetes clusters description: General-purpose web UI for Kubernetes clusters
keywords: keywords:
- kubernetes - kubernetes
......
# kubernetes-dashboard
[Kubernetes Dashboard](https://github.com/kubernetes/dashboard) is a general purpose, web-based UI for Kubernetes clusters. It allows users to manage applications running in the cluster and troubleshoot them, as well as manage the cluster itself.
## Access control
It is critical for the Kubernetes cluster to correctly set up access control of Kubernetes Dashboard. See this [guide](https://github.com/kubernetes/dashboard/wiki/Access-control) for best practices.
It is highly recommended to use RBAC with minimal privileges needed for Dashboard to run.
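For example, once RBAC is set up, a login token for the Dashboard's ServiceAccount can be read back with kubectl. This is a sketch that assumes the ServiceAccount is named `kubernetes-dashboard`, lives in `kube-system`, and has an auto-created token secret:

```console
$ kubectl -n kube-system get secret \
    $(kubectl -n kube-system get sa kubernetes-dashboard -o jsonpath='{.secrets[0].name}') \
    -o jsonpath='{.data.token}' | base64 --decode
```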
## Configuration ## Configuration
The following table lists the configurable parameters of the kubernetes-dashboard chart and their default values. The following table lists the configurable parameters of the kubernetes-dashboard chart and their default values.
| Parameter | Description | Default | | Parameter | Description | Default |
|---------------------------|-----------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------| |-------------------------------------|-----------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
| `image.repository` | Repository for container image | `k8s.gcr.io/kubernetes-dashboard-amd64` | | `image.repository` | Repository for container image | `k8s.gcr.io/kubernetes-dashboard-amd64` |
| `image.tag` | Image tag | `v1.8.3` | | `image.tag` | Image tag | `v1.10.0` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `replicaCount` | Number of replicas | `1` |
| `extraArgs` | Additional container arguments | `[]` | | `extraArgs` | Additional container arguments | `[]` |
| `nodeSelector` | node labels for pod assignment | `{}` | | `nodeSelector` | node labels for pod assignment | `{}` |
| `tolerations` | List of node taints to tolerate (requires Kubernetes >= 1.6) | `[]` | | `tolerations` | List of node taints to tolerate (requires Kubernetes >= 1.6) | `[]` |
| `affinity` | Affinity for pod assignment | `[]` |
| `service.externalPort` | Dashboard external port | 443 | | `service.externalPort` | Dashboard external port | 443 |
| `service.internalPort` | Dashboard internal port | 443 | | `service.internalPort` | Dashboard internal port | 443 |
| `ingress.annotations` | Specify ingress class | `kubernetes.io/ingress.class: nginx` | | `ingress.annotations` | Specify ingress class | `kubernetes.io/ingress.class: nginx` |
...@@ -17,8 +28,32 @@ The following table lists the configurable parameters of the kubernetes-dashboar ...@@ -17,8 +28,32 @@ The following table lists the configurable parameters of the kubernetes-dashboar
| `ingress.path` | Path to match against incoming requests. Must begin with a '/' | `/` | | `ingress.path` | Path to match against incoming requests. Must begin with a '/' | `/` |
| `ingress.hosts` | Dashboard Hostnames | `nil` | | `ingress.hosts` | Dashboard Hostnames | `nil` |
| `ingress.tls` | Ingress TLS configuration | `[]` | | `ingress.tls` | Ingress TLS configuration | `[]` |
| `resources` | Pod resource requests & limits | `limits: {cpu: 100m, memory: 50Mi}, requests: {cpu: 100m, memory: 50Mi}` | | `resources` | Pod resource requests & limits | `limits: {cpu: 100m, memory: 100Mi}, requests: {cpu: 100m, memory: 100Mi}` |
| `rbac.create` | Create & use RBAC resources | `true` | | `rbac.create` | Create & use RBAC resources | `true` |
| `rbac.clusterAdminRole` | "cluster-admin" ClusterRole will be used for dashboard ServiceAccount ([NOT RECOMMENDED](#access-control)) | `false` | | `rbac.clusterAdminRole` | "cluster-admin" ClusterRole will be used for dashboard ServiceAccount ([NOT RECOMMENDED](#access-control)) | `false` |
| `serviceAccount.create` | Whether a new service account name that the agent will use should be created. | `true` | | `serviceAccount.create` | Whether a new service account name that the agent will use should be created. | `true` |
| `serviceAccount.name` | Service account to be used. If not set and serviceAccount.create is `true` a name is generated using the fullname template. | | | `serviceAccount.name` | Service account to be used. If not set and serviceAccount.create is `true` a name is generated using the fullname template. | |
| `livenessProbe.initialDelaySeconds` | Number of seconds to wait before sending first probe | 30 |
| `livenessProbe.timeoutSeconds` | Number of seconds to wait for probe response | 30 |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install stable/kubernetes-dashboard --name my-release \
--set=service.externalPort=8080,resources.limits.cpu=200m
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install stable/kubernetes-dashboard --name my-release -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml) as a starting point.
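For illustration, a minimal custom values file covering a few of the parameters above might look like the following sketch (the file name and all values are examples, not recommendations); pass it with `-f` as shown above:
```yaml
# example-values.yaml -- hypothetical overrides for parameters from the table above
replicaCount: 2
service:
  externalPort: 8080
resources:
  limits:
    cpu: 200m
    memory: 100Mi
```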
## Using the dashboard with `kubectl proxy`
When running `kubectl proxy`, the address `localhost:8001/ui` automatically expands to `http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/`. For this URL to reach the dashboard, the service must be named `kubernetes-dashboard`, not any other value set by Helm. You can specify the name manually with the `fullnameOverride` value:
```
fullnameOverride: 'kubernetes-dashboard'
```
namespace: kube-system
rancher_version: v2.0.7 rancher_version: v2.0.7
categories: categories:
- dashboard - dashboard
...@@ -17,7 +16,7 @@ questions: ...@@ -17,7 +16,7 @@ questions:
type: string type: string
label: Image Repository label: Image Repository
- variable: image.tag - variable: image.tag
default: "v1.8.3" default: "v1.10.0"
description: "Docker image tag" description: "Docker image tag"
type: string type: string
label: Image Tag label: Image Tag
......
...@@ -26,5 +26,7 @@ Get the Kubernetes Dashboard URL by running: ...@@ -26,5 +26,7 @@ Get the Kubernetes Dashboard URL by running:
{{- else if contains "ClusterIP" .Values.service.type }} {{- else if contains "ClusterIP" .Values.service.type }}
Get the Kubernetes Dashboard URL by running: Get the Kubernetes Dashboard URL by running:
kubectl cluster-info | grep dashboard export POD_NAME=$(kubectl get pods -n {{ .Release.Namespace }} -l "app={{ template "kubernetes-dashboard.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo https://127.0.0.1:8443/
kubectl -n {{ .Release.Namespace }} port-forward $POD_NAME 8443:8443
{{- end }} {{- end }}
...@@ -12,7 +12,7 @@ metadata: ...@@ -12,7 +12,7 @@ metadata:
{{ toYaml .Values.labels | indent 4 }} {{ toYaml .Values.labels | indent 4 }}
{{- end }} {{- end }}
spec: spec:
replicas: 1 replicas: {{ .Values.replicaCount }}
strategy: strategy:
rollingUpdate: rollingUpdate:
maxSurge: 0 maxSurge: 0
...@@ -50,8 +50,8 @@ spec: ...@@ -50,8 +50,8 @@ spec:
scheme: HTTPS scheme: HTTPS
path: / path: /
port: 8443 port: 8443
initialDelaySeconds: 30 initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
timeoutSeconds: 30 timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
resources: resources:
{{ toYaml .Values.resources | indent 10 }} {{ toYaml .Values.resources | indent 10 }}
{{- if .Values.nodeSelector }} {{- if .Values.nodeSelector }}
...@@ -68,3 +68,7 @@ spec: ...@@ -68,3 +68,7 @@ spec:
tolerations: tolerations:
{{ toYaml .Values.tolerations | indent 8 }} {{ toYaml .Values.tolerations | indent 8 }}
{{- end }} {{- end }}
{{- if .Values.affinity }}
affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- end }}
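For reference, a minimal sketch of an `affinity` value that this new template block would render (the label key and values are hypothetical, not chart defaults):
```yaml
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/os   # hypothetical node label
              operator: In
              values:
                - linux
```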
...@@ -13,7 +13,6 @@ metadata: ...@@ -13,7 +13,6 @@ metadata:
heritage: {{ .Release.Service }} heritage: {{ .Release.Service }}
{{- if .Values.ingress.annotations }} {{- if .Values.ingress.annotations }}
annotations: annotations:
nginx.org/redirect-to-https: true
{{ toYaml .Values.ingress.annotations | indent 4 }} {{ toYaml .Values.ingress.annotations | indent 4 }}
{{- end }} {{- end }}
spec: spec:
......
...@@ -8,7 +8,7 @@ metadata: ...@@ -8,7 +8,7 @@ metadata:
heritage: {{ .Release.Service }} heritage: {{ .Release.Service }}
release: {{ .Release.Name }} release: {{ .Release.Name }}
name: {{ template "kubernetes-dashboard.fullname" . }} name: {{ template "kubernetes-dashboard.fullname" . }}
namespace: kube-system namespace: {{ .Release.Namespace }}
rules: rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret. # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: - apiGroups:
......
{{- if .Values.rbac.create }} {{- if .Values.rbac.create }}
{{- if .Values.rbac.clusterAdminRole }} {{- if .Values.rbac.clusterAdminRole }}
# Cluster role binding for clusterAdminRole == true # Cluster role binding for clusterAdminRole == true
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1beta1
...@@ -17,7 +18,7 @@ roleRef: ...@@ -17,7 +18,7 @@ roleRef:
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: {{ template "kubernetes-dashboard.serviceAccountName" . }} name: {{ template "kubernetes-dashboard.serviceAccountName" . }}
namespace: kube-system namespace: {{ .Release.Namespace }}
{{- else -}} {{- else -}}
# Role binding for clusterAdminRole == false # Role binding for clusterAdminRole == false
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1beta1
...@@ -29,7 +30,7 @@ metadata: ...@@ -29,7 +30,7 @@ metadata:
heritage: {{ .Release.Service }} heritage: {{ .Release.Service }}
release: {{ .Release.Name }} release: {{ .Release.Name }}
name: {{ template "kubernetes-dashboard.fullname" . }} name: {{ template "kubernetes-dashboard.fullname" . }}
namespace: kube-system namespace: {{ .Release.Namespace }}
roleRef: roleRef:
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io
kind: Role kind: Role
...@@ -37,6 +38,6 @@ roleRef: ...@@ -37,6 +38,6 @@ roleRef:
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: {{ template "kubernetes-dashboard.serviceAccountName" . }} name: {{ template "kubernetes-dashboard.serviceAccountName" . }}
namespace: kube-system namespace: {{ .Release.Namespace }}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
...@@ -7,5 +7,5 @@ metadata: ...@@ -7,5 +7,5 @@ metadata:
heritage: {{ .Release.Service }} heritage: {{ .Release.Service }}
release: {{ .Release.Name }} release: {{ .Release.Name }}
name: {{ template "kubernetes-dashboard.fullname" . }} name: {{ template "kubernetes-dashboard.fullname" . }}
namespace: kube-system namespace: {{ .Release.Namespace }}
type: Opaque type: Opaque
...@@ -8,5 +8,5 @@ metadata: ...@@ -8,5 +8,5 @@ metadata:
heritage: {{ .Release.Service }} heritage: {{ .Release.Service }}
release: {{ .Release.Name }} release: {{ .Release.Name }}
name: {{ template "kubernetes-dashboard.serviceAccountName" . }} name: {{ template "kubernetes-dashboard.serviceAccountName" . }}
namespace: kube-system namespace: {{ .Release.Namespace }}
{{- end -}} {{- end -}}
...@@ -18,8 +18,7 @@ metadata: ...@@ -18,8 +18,7 @@ metadata:
spec: spec:
type: {{ .Values.service.type }} type: {{ .Values.service.type }}
ports: ports:
- name: https - port: {{ .Values.service.externalPort }}
port: {{ .Values.service.externalPort }}
targetPort: https targetPort: https
{{- if .Values.service.nodePort }} {{- if .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }} nodePort: {{ .Values.service.nodePort }}
......
...@@ -5,10 +5,12 @@ ...@@ -5,10 +5,12 @@
image: image:
repository: k8s.gcr.io/kubernetes-dashboard-amd64 repository: k8s.gcr.io/kubernetes-dashboard-amd64
tag: v1.8.3 tag: v1.10.0
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
## Here labels can be added to the kubernets dashboard deployment replicaCount: 1
## Here labels can be added to the kubernetes dashboard deployment
## ##
labels: {} labels: {}
# kubernetes.io/cluster-service: "true" # kubernetes.io/cluster-service: "true"
...@@ -32,6 +34,11 @@ tolerations: [] ...@@ -32,6 +34,11 @@ tolerations: []
# value: "value" # value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute" # effect: "NoSchedule|PreferNoSchedule|NoExecute"
## Affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
service: service:
type: ClusterIP type: ClusterIP
externalPort: 443 externalPort: 443
...@@ -54,10 +61,10 @@ service: ...@@ -54,10 +61,10 @@ service:
resources: resources:
limits: limits:
cpu: 100m cpu: 100m
memory: 50Mi memory: 100Mi
requests: requests:
cpu: 100m cpu: 100m
memory: 50Mi memory: 100Mi
ingress: ingress:
## If true, Kubernetes Dashboard Ingress will be created. ## If true, Kubernetes Dashboard Ingress will be created.
...@@ -73,7 +80,7 @@ ingress: ...@@ -73,7 +80,7 @@ ingress:
## Kubernetes Dashboard Ingress path ## Kubernetes Dashboard Ingress path
## ##
path: "" path: /
## Kubernetes Dashboard Ingress hostnames ## Kubernetes Dashboard Ingress hostnames
## Must be provided if Ingress is enabled ## Must be provided if Ingress is enabled
...@@ -103,3 +110,9 @@ serviceAccount: ...@@ -103,3 +110,9 @@ serviceAccount:
# The name of the service account to use. # The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template # If not set and create is true, a name is generated using the fullname template
name: name:
livenessProbe:
# Number of seconds to wait before sending first probe
initialDelaySeconds: 30
# Number of seconds to wait for probe response
timeoutSeconds: 30
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
name: kubernetes-dashboard
version: 0.6.8
appVersion: 1.8.3
description: General-purpose web UI for Kubernetes clusters
keywords:
- kubernetes
- dashboard
home: https://github.com/kubernetes/dashboard
sources:
- https://github.com/kubernetes/dashboard
maintainers:
- name: kfox1111
email: Kevin.Fox@pnnl.gov
icon: https://raw.githubusercontent.com/kubernetes/kubernetes/master/logo/logo.svg
## Configuration
The following table lists the configurable parameters of the kubernetes-dashboard chart and their default values.
| Parameter | Description | Default |
|---------------------------|-----------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------|
| `image.repository` | Repository for container image | `k8s.gcr.io/kubernetes-dashboard-amd64` |
| `image.tag` | Image tag | `v1.8.3` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `extraArgs` | Additional container arguments | `[]` |
| `nodeSelector` | node labels for pod assignment | `{}` |
| `tolerations` | List of node taints to tolerate (requires Kubernetes >= 1.6) | `[]` |
| `service.externalPort` | Dashboard external port | 443 |
| `service.internalPort` | Dashboard internal port | 443 |
| `ingress.annotations` | Specify ingress class | `kubernetes.io/ingress.class: nginx` |
| `ingress.enabled` | Enable ingress controller resource | `false` |
| `ingress.path` | Path to match against incoming requests. Must begin with a '/' | `/` |
| `ingress.hosts` | Dashboard Hostnames | `nil` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `resources` | Pod resource requests & limits | `limits: {cpu: 100m, memory: 50Mi}, requests: {cpu: 100m, memory: 50Mi}` |
| `rbac.create` | Create & use RBAC resources | `true` |
| `rbac.clusterAdminRole` | "cluster-admin" ClusterRole will be used for dashboard ServiceAccount ([NOT RECOMMENDED](#access-control)) | `false` |
| `serviceAccount.create`   | Whether a new service account should be created for the dashboard to use.                                                    | `true`                                                                    |
| `serviceAccount.name` | Service account to be used. If not set and serviceAccount.create is `true` a name is generated using the fullname template. | |
# kubernetes-dashboard
[Kubernetes Dashboard](https://github.com/kubernetes/dashboard) is a general purpose, web-based UI for Kubernetes clusters. It allows users to manage applications running in the cluster and troubleshoot them, as well as manage the cluster itself.
## Access control
IMPORTANT:
You must be a cluster admin to be able to deploy Kubernetes Dashboard.
WARNING:
Once the Dashboard is deployed with the cluster-admin role, anyone with access to this project can access the Dashboard and therefore gain access to the entire Kubernetes cluster!
It is critical to correctly set up access control for Kubernetes Dashboard in your cluster. See this [guide](https://github.com/kubernetes/dashboard/wiki/Access-control) for best practices.
It is highly recommended to use RBAC with minimal privileges needed for Dashboard to run.
`Note: the Dashboard is required to be installed in the System Project`
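As a minimal sketch, the RBAC posture recommended above corresponds to the chart's default values (shown in full in the values.yaml later in this diff):
```yaml
rbac:
  # Create a Role/RoleBinding scoped to the dashboard's own secrets and config maps
  create: true
  # Keep false unless you accept the cluster-admin risk described above
  clusterAdminRole: false
```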
namespace: kube-system
rancher_version: v2.0.7
categories:
- dashboard
questions:
- variable: defaultImage
default: true
description: "Use default Docker image"
label: Use Default Image
type: boolean
show_subquestion_if: false
group: "Container Images"
subquestions:
- variable: image.repository
default: "k8s.gcr.io/kubernetes-dashboard-amd64"
description: "Docker image repository"
type: string
label: Image Repository
- variable: image.tag
default: "v1.8.3"
description: "Docker image tag"
type: string
label: Image Tag
- variable: rbac.clusterAdminRole
required: true
default: false
description: "IMPORTANT: Granting admin privileges to Dashboard's Service Account might be a security risk, makeing sure that you know what you are doing before proceeding."
type: boolean
label: "IMPORTANT: Enable Dashboard Cluster Admin Role"
*********************************************************************************
*** PLEASE BE PATIENT: kubernetes-dashboard may take a few minutes to install ***
*********************************************************************************
{{- if .Values.ingress.enabled }}
From outside the cluster, the server URL(s) are:
{{- range .Values.ingress.hosts }}
https://{{ . }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
Get the Kubernetes Dashboard URL by running:
export NODE_PORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "kubernetes-dashboard.fullname" . }})
export NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}")
echo https://$NODE_IP:$NODE_PORT/
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc -w {{ template "kubernetes-dashboard.fullname" . }}'
Get the Kubernetes Dashboard URL by running:
export SERVICE_IP=$(kubectl get svc {{ template "kubernetes-dashboard.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo https://$SERVICE_IP/
{{- else if contains "ClusterIP" .Values.service.type }}
Get the Kubernetes Dashboard URL by running:
kubectl cluster-info | grep dashboard
{{- end }}
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kubernetes-dashboard.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubernetes-dashboard.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kubernetes-dashboard.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "kubernetes-dashboard.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "kubernetes-dashboard.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
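To illustrate the helpers above: for a hypothetical release named `my-release` with the default chart name, `kubernetes-dashboard.fullname` renders `my-release-kubernetes-dashboard`; `fullnameOverride` short-circuits that logic, which is why the README pins the name for `kubectl proxy`:
```yaml
# Hypothetical resolutions of the fullname helper for release "my-release":
#   (no overrides)                          -> my-release-kubernetes-dashboard
#   nameOverride: dash                      -> my-release-dash
#   fullnameOverride: kubernetes-dashboard  -> kubernetes-dashboard
fullnameOverride: kubernetes-dashboard
```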
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: {{ template "kubernetes-dashboard.fullname" . }}
labels:
app: {{ template "kubernetes-dashboard.name" . }}
chart: {{ template "kubernetes-dashboard.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
{{- if .Values.labels }}
{{ toYaml .Values.labels | indent 4 }}
{{- end }}
spec:
replicas: 1
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app: {{ template "kubernetes-dashboard.name" . }}
release: {{ .Release.Name }}
kubernetes.io/cluster-service: "true"
spec:
serviceAccountName: {{ template "kubernetes-dashboard.serviceAccountName" . }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- --auto-generate-certificates
{{- if .Values.extraArgs }}
{{ toYaml .Values.extraArgs | indent 10 }}
{{- end }}
ports:
- name: https
containerPort: 8443
protocol: TCP
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
resources:
{{ toYaml .Values.resources | indent 10 }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: {{ template "kubernetes-dashboard.fullname" . }}
- name: tmp-volume
emptyDir: {}
{{- if .Values.tolerations }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.ingress.enabled -}}
{{- $serviceName := include "kubernetes-dashboard.fullname" . -}}
{{- $servicePort := .Values.service.externalPort -}}
{{- $path := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ template "kubernetes-dashboard.fullname" . }}
labels:
app: {{ template "kubernetes-dashboard.name" . }}
chart: {{ template "kubernetes-dashboard.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.ingress.annotations }}
annotations:
nginx.org/redirect-to-https: true
{{ toYaml .Values.ingress.annotations | indent 4 }}
{{- end }}
spec:
rules:
{{- if .Values.ingress.hosts }}
{{- range $host := .Values.ingress.hosts }}
- host: {{ $host }}
http:
paths:
- path: {{ $path }}
backend:
serviceName: {{ $serviceName }}
servicePort: {{ $servicePort }}
{{- end -}}
{{- else }}
- http:
paths:
- path: {{ $path }}
backend:
serviceName: {{ $serviceName }}
servicePort: {{ $servicePort }}
{{- end -}}
{{- if .Values.ingress.tls }}
tls:
{{ toYaml .Values.ingress.tls | indent 4 }}
{{- end -}}
{{- end -}}
{{- if and .Values.rbac.create (not .Values.rbac.clusterAdminRole) }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
labels:
app: {{ template "kubernetes-dashboard.name" . }}
chart: {{ template "kubernetes-dashboard.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "kubernetes-dashboard.fullname" . }}
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups:
- ""
resources:
- secrets
resourceNames:
- kubernetes-dashboard-key-holder
- {{ template "kubernetes-dashboard.fullname" . }}
verbs:
- get
- update
- delete
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- kubernetes-dashboard-settings
verbs:
- get
- update
# Allow Dashboard to get metrics from heapster.
- apiGroups:
- ""
resources:
- services
resourceNames:
- heapster
verbs:
- proxy
- apiGroups:
- ""
resources:
- services/proxy
resourceNames:
- heapster
- "http:heapster:"
- "https:heapster:"
verbs:
- get
{{- end -}}
{{- if .Values.rbac.create }}
{{- if .Values.rbac.clusterAdminRole }}
# Cluster role binding for clusterAdminRole == true
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: {{ template "kubernetes-dashboard.name" . }}
chart: {{ template "kubernetes-dashboard.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "kubernetes-dashboard.fullname" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: {{ template "kubernetes-dashboard.serviceAccountName" . }}
namespace: kube-system
{{- else -}}
# Role binding for clusterAdminRole == false
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
labels:
app: {{ template "kubernetes-dashboard.name" . }}
chart: {{ template "kubernetes-dashboard.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "kubernetes-dashboard.fullname" . }}
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "kubernetes-dashboard.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "kubernetes-dashboard.serviceAccountName" . }}
namespace: kube-system
{{- end -}}
{{- end -}}
apiVersion: v1
kind: Secret
metadata:
labels:
app: {{ template "kubernetes-dashboard.name" . }}
chart: {{ template "kubernetes-dashboard.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "kubernetes-dashboard.fullname" . }}
namespace: kube-system
type: Opaque
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "kubernetes-dashboard.name" . }}
chart: {{ template "kubernetes-dashboard.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "kubernetes-dashboard.serviceAccountName" . }}
namespace: kube-system
{{- end -}}
apiVersion: v1
kind: Service
metadata:
name: {{ template "kubernetes-dashboard.fullname" . }}
labels:
app: {{ template "kubernetes-dashboard.name" . }}
chart: {{ template "kubernetes-dashboard.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
kubernetes.io/cluster-service: "true"
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- if .Values.service.annotations }}
annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- name: https
port: {{ .Values.service.externalPort }}
targetPort: https
{{- if .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
app: {{ template "kubernetes-dashboard.name" . }}
release: {{ .Release.Name }}
# Default values for kubernetes-dashboard
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
image:
repository: k8s.gcr.io/kubernetes-dashboard-amd64
tag: v1.8.3
pullPolicy: IfNotPresent
## Here labels can be added to the kubernets dashboard deployment
##
labels: {}
# kubernetes.io/cluster-service: "true"
# kubernetes.io/name: "Kubernetes Dashboard"
## Additional container arguments
##
# extraArgs:
# - --enable-insecure-login
# - --system-banner="Welcome to Kubernetes"
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute"
service:
type: ClusterIP
externalPort: 443
## This allows an override of the heapster service name
## Default: {{ .Chart.Name }}
##
# nameOverride:
## Kubernetes Dashboard Service annotations
##
annotations: {}
# foo.io/bar: "true"
## Here labels can be added to the Kubernetes Dashboard service
##
labels: {}
# kubernetes.io/name: "Kubernetes Dashboard"
resources:
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 100m
memory: 50Mi
ingress:
## If true, Kubernetes Dashboard Ingress will be created.
##
enabled: false
## Kubernetes Dashboard Ingress annotations
##
# annotations:
# kubernetes.io/ingress.class: nginx
# nginx.ingress.kubernetes.io/secure-backends: "true"
# kubernetes.io/tls-acme: 'true'
## Kubernetes Dashboard Ingress path
##
path: ""
## Kubernetes Dashboard Ingress hostnames
## Must be provided if Ingress is enabled
##
# hosts:
# - kubernetes-dashboard.domain.com
## Kubernetes Dashboard Ingress TLS configuration
## Secrets must be manually created in the namespace
##
# tls:
# - secretName: kubernetes-dashboard-tls
# hosts:
# - kubernetes-dashboard.domain.com
rbac:
# Specifies whether RBAC resources should be created
create: true
# Specifies whether cluster-admin ClusterRole will be used for dashboard
# ServiceAccount (NOT RECOMMENDED).
clusterAdminRole: false
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
apiVersion: v1
appVersion: "1.0"
description: Machine Learning Toolkit for Kubernetes
name: kubeflow
version: 0.1.0
icon: file://../icon.jpg
maintainers:
- name: guangbochen
email: support@rancher.com
home: https://www.kubeflow.org/docs/about/kubeflow/
# Kubeflow
The Kubeflow project is dedicated to making deployments of machine learning (ML) workflows on Kubernetes simple, portable and scalable. Our goal is not to recreate other services, but to provide a straightforward way to deploy best-of-breed open-source systems for ML to diverse infrastructures. Anywhere you are running Kubernetes, you should be able to run Kubeflow.
## Who should consider using Kubeflow?
Based on the current functionality you should consider using Kubeflow if:
- You want to train/serve TensorFlow models in different environments (e.g. local, on prem, and cloud)
- You want to use Jupyter notebooks to manage TensorFlow training jobs
- You want to launch training jobs that use resources – such as additional CPUs or GPUs – that aren’t available on your personal computer
- You want to combine TensorFlow with other processes
> For example, you may want to use [tensorflow/agents](https://github.com/google-research/batch-ppo) to run simulations to generate data for training reinforcement learning models.
This list is based ONLY on current capabilities. We are investing significant resources to expand the functionality and are actively soliciting help from companies and individuals interested in contributing (see [Contributing](https://www.kubeflow.org/docs/about/contributing/)).
## How it works
For more details of how Kubeflow works please reference the [Kubeflow Doc](https://www.kubeflow.org/docs/about/kubeflow/).
# Kubeflow
The Kubeflow project is dedicated to making deployments of machine learning (ML) workflows on Kubernetes simple, portable and scalable. Our goal is not to recreate other services, but to provide a straightforward way to deploy best-of-breed open-source systems for ML to diverse infrastructures. Anywhere you are running Kubernetes, you should be able to run Kubeflow.
## Who should consider using Kubeflow?
Based on the current functionality you should consider using Kubeflow if:
- You want to train/serve TensorFlow models in different environments (e.g. local, on prem, and cloud)
- You want to use Jupyter notebooks to manage TensorFlow training jobs
- You want to launch training jobs that use resources – such as additional CPUs or GPUs – that aren’t available on your personal computer
- You want to combine TensorFlow with other processes
> For example, you may want to use [tensorflow/agents](https://github.com/google-research/batch-ppo) to run simulations to generate data for training reinforcement learning models.
categories:
- machine learning
labels:
io.rancher.certified: experimental
namespace: kubeflow
questions:
- variable: ambassador.service.type
default: "NodePort"
description: "Define Kubeflow Ambassador UI service type"
type: enum
required: true
options:
- "ClusterIP"
- "NodePort"
- "Rancher-Proxy"
label: Kubeflow Ambassador UI Service
group: "Kubeflow Ambassador Settings"
show_subquestion_if: "NodePort"
subquestions:
- variable: ambassador.service.nodePort
default: ""
description: "NodePort port number(to set explicitly, choose port between 30000-32767)"
type: int
min: 30000
max: 32767
label: Ambassador UI Service NodePort number
- variable: katib.vizier.service.type
default: "ClusterIP"
description: "Define kubeflow katib vizier service type"
type: enum
required: true
options:
- "ClusterIP"
- "NodePort"
label: Kubeflow Katib Vizier Service
group: "Kubeflow Katib Settings"
show_subquestion_if: "NodePort"
subquestions:
- variable: katib.vizier.service.nodePort
default: ""
description: "NodePort port number(to set explicitly, choose port between 30000-32767)"
type: int
min: 30000
max: 32767
label: Katib Vizier Service NodePort number
- variable: katib.vizierdb.persistence.enabled
default: false
description: "Enable persistent volume for Katib Vizier"
type: boolean
required: true
label: Katib Vizier Persistent Volume Enabled
show_subquestion_if: true
group: "Kubeflow Katib Settings"
subquestions:
- variable: katib.vizierdb.persistence.size
default: "10Gi"
description: "Katib Vizier Persistent Volume Size"
type: string
label: Katib Vizier Volume Size
- variable: katib.vizierdb.persistence.storageClass
default: ""
description: "If undefined or null, uses the default StorageClass. Default to null"
type: storageclass
label: Default StorageClass for Katib Vizier
- variable: katib.vizierdb.persistence.existingClaim
default: ""
description: "If not empty, uses the specified existing PVC instead of creating new one"
type: string
label: Existing Persistent Volume Claim for Katib Vizier
- variable: jupyterhub.enabled
default: true
description: "Enable jupyterhub of single-user Jupyter notebook server"
type: boolean
required: true
label: Enable JupyterHub
group: "JupyterHub Settings"
show_subquestion_if: true
subquestions:
- variable: jupyterhub.image.repository
default: "gcr.io/kubeflow/jupyterhub-k8s"
description: "Docker image of the JupyterHub"
type: string
label: JupyterHub Image Repository
- variable: jupyterhub.image.tag
default: "v20180531-3bb991b1"
description: "The image tag of JupyterHub"
type: string
label: JupyterHub Image Tag
- variable: tfJobOperator.enabled
default: true
description: "Enable tensorflow job operator"
type: boolean
required: true
label: Enable TensorFlow Job Operator
group: "TensorFlow Operator Settings"
show_subquestion_if: true
subquestions:
- variable: tfJobOperator.image.repository
default: "gcr.io/kubeflow-images-public/tf_operator"
description: "Docker image of the TensorFlow Job Operator"
type: string
label: TensorFlow Job Operator Image Repository
- variable: tfJobOperator.image.tag
default: "v0.3.0"
description: "The image tag of the TensorFlow Job Operator"
type: string
label: TensorFlow Job Operator Image Tag
- variable: pytorchOperator.enabled
default: false
description: "Enable PyTorch - a deep learning framework."
type: boolean
required: true
label: Enable PyTorch Operator
group: "PyTorch Settings"
show_subquestion_if: true
subquestions:
- variable: pytorchOperator.image.repository
default: "gcr.io/kubeflow-images-public/pytorch-operator"
description: "Docker image of the PyTorch operator"
type: string
label: PyTorch Operator Image Repository
- variable: pytorchOperator.image.tag
default: "v0.3.0"
description: "The image tag of PyTorch operator"
type: string
label: PyTorch Operator Image Tag
- variable: chainer.enabled
default: false
description: "Enable Chainer operator - a flexible framework of neural networks"
type: boolean
required: true
label: Enable Chainer Operator
group: "Chainer Settings"
show_subquestion_if: true
subquestions:
- variable: chainer.image.repository
default: "gcr.io/kubeflow-images-public/chainer-operator"
description: "Docker image of the chainer operator"
type: string
label: Chainer Operator Image Repository
- variable: chainer.image.tag
default: "v0.3.0"
description: "The image tag of chainer operator"
type: string
label: Chainer Operator Image Tag
- variable: mxnetOperator.enabled
default: false
description: "Enable apache MXNet - a flexible and efficient library for deep learning."
type: boolean
required: true
label: Enable Apache MXNet Operator
group: "Apache MXNet Settings"
show_subquestion_if: true
subquestions:
- variable: mxnetOperator.image.repository
default: "mxjob/mxnet-operator"
description: "Docker image of the MXNet operator"
type: string
label: MXNet Operator Image Repository
- variable: mxnetOperator.image.tag
default: "v1"
description: "The image tag of MXNet operator"
type: string
label: MXNet Operator Image Tag
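For reference, the Katib persistence questions above map onto a values block roughly like the following sketch (keys taken from the variable names; defaults as listed):
```yaml
katib:
  vizierdb:
    persistence:
      enabled: false      # toggled by the question above
      size: 10Gi          # only consulted when enabled
      storageClass: ""    # empty -> cluster default StorageClass
      existingClaim: ""   # when empty, the chart's own "vizier-db" claim is used
```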
import json
import os
import string
import escapism
from kubespawner.spawner import KubeSpawner
from jhub_remote_user_authenticator.remote_user_auth import RemoteUserAuthenticator
from oauthenticator.github import GitHubOAuthenticator
SERVICE_ACCOUNT_SECRET_MOUNT = '/var/run/secrets/sa'
class KubeFormSpawner(KubeSpawner):
# relies on HTML5 for image datalist
def _options_form_default(self):
global registry, repoName
return '''
<table style="width: 100%;">
<tr>
<td style="width: 30%;"><label for='image'>Image</label></td>
<td style="width: 70%;"><input value="" list="image" name="image" placeholder='repo/image:tag' style="width: 100%;">
<datalist id="image">
<option value="{0}/{1}/tensorflow-1.4.1-notebook-cpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.4.1-notebook-gpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.5.1-notebook-cpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.5.1-notebook-gpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.6.0-notebook-cpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.6.0-notebook-gpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.7.0-notebook-cpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.7.0-notebook-gpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.8.0-notebook-cpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.8.0-notebook-gpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.9.0-notebook-cpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.9.0-notebook-gpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.10.1-notebook-cpu:v0.3.1">
<option value="{0}/{1}/tensorflow-1.10.1-notebook-gpu:v0.3.1">
</datalist>
</td>
</tr>
</table>
<div style="text-align: center; padding: 10px;">
<a id="toggle_advanced_options" style="margin: 20%; cursor: pointer; font-weight: bold;">Advanced</a>
</div>
<table id="advanced_fields" style="display: none; width: 100%; border-spacing: 0px 25px; border-collapse: separate;">
<tr>
<td><label for='cpu_guarantee'>CPU</label></td>
<td><input style="width: 100%;" name='cpu_guarantee' placeholder='200m, 1.0, 2.5, etc'></input></td>
</tr>
<tr>
<td><label for='mem_guarantee'>Memory</label></td>
<td><input style="width: 100%;" name='mem_guarantee' placeholder='100Mi, 1.5Gi'></input></td>
</tr>
<tr>
<td><label for='extra_resource_limits'>Extra Resource Limits</label></td>
<td><input style="width: 100%;" name='extra_resource_limits' placeholder='{{nvidia.com/gpu: 3}}'></input></td>
</tr>
</table>
<script type="text/javascript">
$('#toggle_advanced_options').on('click', function(e){{
$('#advanced_fields').toggle();
}});
</script>
'''.format(registry, repoName)
def options_from_form(self, formdata):
options = {}
options['image'] = formdata.get('image', [''])[0].strip()
options['cpu_guarantee'] = formdata.get(
'cpu_guarantee', [''])[0].strip()
options['mem_guarantee'] = formdata.get(
'mem_guarantee', [''])[0].strip()
options['extra_resource_limits'] = formdata.get(
'extra_resource_limits', [''])[0].strip()
return options
@property
def singleuser_image_spec(self):
global cloud
if cloud == 'ack':
image = 'registry.aliyuncs.com/kubeflow-images-public/tensorflow-notebook-cpu:v0.2.1'
else:
image = 'gcr.io/kubeflow-images-public/tensorflow-1.8.0-notebook-cpu:v0.3.1'
if self.user_options.get('image'):
image = self.user_options['image']
return image
image_spec = singleuser_image_spec
@property
def cpu_guarantee(self):
cpu = '500m'
if self.user_options.get('cpu_guarantee'):
cpu = self.user_options['cpu_guarantee']
return cpu
@property
def mem_guarantee(self):
mem = '1Gi'
if self.user_options.get('mem_guarantee'):
mem = self.user_options['mem_guarantee']
return mem
@property
def extra_resource_limits(self):
extra = ''
if self.user_options.get('extra_resource_limits'):
extra = json.loads(self.user_options['extra_resource_limits'])
return extra
def get_env(self):
env = super(KubeFormSpawner, self).get_env()
gcp_secret_name = os.environ.get('GCP_SECRET_NAME')
if gcp_secret_name:
env['GOOGLE_APPLICATION_CREDENTIALS'] = '{}/{}.json'.format(SERVICE_ACCOUNT_SECRET_MOUNT, gcp_secret_name)
return env
# TODO(kkasravi): add unit test
def _parse_user_name(self, username):
safe_chars = set(string.ascii_lowercase + string.digits)
name = username.split(':')[-1]
legacy = ''.join([s if s in safe_chars else '-' for s in name.lower()])
safe = escapism.escape(name, safe=safe_chars, escape_char='-').lower()
return legacy, safe, name
def _expand_user_properties(self, template):
# override KubeSpawner method to remove prefix accounts.google: for iap
# and truncate to 63 characters
# Set servername based on whether named-server initialised
if self.name:
servername = '-{}'.format(self.name)
else:
servername = ''
legacy, safe, name = self._parse_user_name(self.user.name)
rname = template.format(
userid=self.user.id,
username=safe,
unescaped_username=name,
legacy_escape_username=legacy,
servername=servername
)[:63]
return rname
###################################################
# JupyterHub Options
###################################################
c.JupyterHub.ip = '0.0.0.0'
c.JupyterHub.hub_ip = '0.0.0.0'
# Don't try to clean up servers on exit - since in general for k8s, we want
# the hub to be able to restart without losing user containers
c.JupyterHub.cleanup_servers = False
###################################################
###################################################
# Spawner Options
###################################################
cloud = os.environ.get('CLOUD_NAME')
registry = os.environ.get('REGISTRY')
repoName = os.environ.get('REPO_NAME')
c.JupyterHub.spawner_class = KubeFormSpawner
# Set both singleuser_image_spec and image_spec because
# singleuser_image_spec will be deprecated in a future release
c.KubeSpawner.singleuser_image_spec = '{0}/{1}/tensorflow-notebook'.format(registry, repoName)
c.KubeSpawner.image_spec = '{0}/{1}/tensorflow-notebook'.format(registry, repoName)
c.KubeSpawner.cmd = 'start-singleuser.sh'
c.KubeSpawner.args = ['--allow-root']
# GPU images are very large (~15GB), so we need a large timeout.
c.KubeSpawner.start_timeout = 60 * 30
# Increase timeout to 5 minutes to avoid HTTP 500 errors on JupyterHub
c.KubeSpawner.http_timeout = 60 * 5
# Volume setup
c.KubeSpawner.singleuser_uid = 1000
c.KubeSpawner.singleuser_fs_gid = 100
c.KubeSpawner.singleuser_working_dir = '/home/jovyan'
volumes = []
volume_mounts = []
# Allow environment vars to override uid and gid.
# This allows local host path mounts to be read/writable
env_uid = os.environ.get('NOTEBOOK_UID')
if env_uid:
c.KubeSpawner.singleuser_uid = int(env_uid)
env_gid = os.environ.get('NOTEBOOK_GID')
if env_gid:
c.KubeSpawner.singleuser_fs_gid = int(env_gid)
access_local_fs = os.environ.get('ACCESS_LOCAL_FS')
if access_local_fs == 'true':
def modify_pod_hook(spawner, pod):
pod.spec.containers[0].lifecycle = {
'postStart' : {
'exec' : {
'command' : ['ln', '-s', '/mnt/local-notebooks', '/home/jovyan/local-notebooks' ]
}
}
}
return pod
c.KubeSpawner.modify_pod_hook = modify_pod_hook
###################################################
# Persistent volume options
###################################################
# Using persistent storage requires a default storage class.
# TODO(jlewi): Verify this works on minikube.
# see https://github.com/kubeflow/kubeflow/pull/22#issuecomment-350500944
pvc_mount = os.environ.get('NOTEBOOK_PVC_MOUNT')
if pvc_mount and pvc_mount != 'null':
c.KubeSpawner.user_storage_pvc_ensure = True
c.KubeSpawner.storage_pvc_ensure = True
# How much disk space do we want?
c.KubeSpawner.user_storage_capacity = '10Gi'
c.KubeSpawner.storage_capacity = '10Gi'
c.KubeSpawner.pvc_name_template = 'claim-{username}{servername}'
volumes.append(
{
'name': 'volume-{username}{servername}',
'persistentVolumeClaim': {
'claimName': 'claim-{username}{servername}'
}
}
)
volume_mounts.append(
{
'mountPath': pvc_mount,
'name': 'volume-{username}{servername}'
}
)
c.KubeSpawner.volumes = volumes
c.KubeSpawner.volume_mounts = volume_mounts
# Set both service_account and singleuser_service_account because
# singleuser_service_account will be deprecated in a future release
c.KubeSpawner.service_account = 'jupyter-notebook'
c.KubeSpawner.singleuser_service_account = 'jupyter-notebook'
# Authenticator
if os.environ.get('KF_AUTHENTICATOR') == 'iap':
c.JupyterHub.authenticator_class = 'jhub_remote_user_authenticator.remote_user_auth.RemoteUserAuthenticator'
c.RemoteUserAuthenticator.header_name = 'x-goog-authenticated-user-email'
else:
c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'
if os.environ.get('DEFAULT_JUPYTERLAB', 'false').lower() == 'true':
c.KubeSpawner.default_url = '/lab'
# PVCs
pvcs = os.environ.get('KF_PVC_LIST')
if pvcs and pvcs != 'null':
for pvc in pvcs.split(','):
volumes.append({
'name': pvc,
'persistentVolumeClaim': {
'claimName': pvc
}
})
volume_mounts.append({
'name': pvc,
'mountPath': '/mnt/' + pvc
})
gcp_secret_name = os.environ.get('GCP_SECRET_NAME')
if gcp_secret_name:
volumes.append({
'name': gcp_secret_name,
'secret': {
'secretName': gcp_secret_name,
}
})
volume_mounts.append({
'name': gcp_secret_name,
'mountPath': SERVICE_ACCOUNT_SECRET_MOUNT
})
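Pulling the `os.environ.get` lookups above together: a hedged sketch of the container environment this script expects (variable names come from the script itself; the values shown are illustrative, and the chart's tf-hub StatefulSet later in this diff sets most of them):
```yaml
env:
  - name: CLOUD_NAME
    value: "null"                  # "ack" switches the default notebook image
  - name: REGISTRY
    value: gcr.io
  - name: REPO_NAME
    value: kubeflow-images-public
  - name: NOTEBOOK_PVC_MOUNT
    value: /home/jovyan            # "null" disables per-user PVCs
  - name: KF_AUTHENTICATOR
    value: "null"                  # "iap" selects RemoteUserAuthenticator
  - name: DEFAULT_JUPYTERLAB
    value: "false"
  - name: KF_PVC_LIST
    value: "null"                  # comma-separated PVC names mounted under /mnt
```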
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kubeflow.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubeflow.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kubeflow.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
ksonnet.io/component: ambassador
name: ambassador
namespace: kubeflow
spec:
replicas: {{ .Values.ambassador.replicas }}
template:
metadata:
labels:
service: ambassador
namespace: kubeflow
spec:
containers:
- name: ambassador
env:
- name: AMBASSADOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: AMBASSADOR_SINGLE_NAMESPACE
value: "true"
image: "{{ .Values.ambassador.image.repository }}:{{ .Values.ambassador.image.tag }}"
livenessProbe:
httpGet:
path: /ambassador/v0/check_alive
port: 8877
initialDelaySeconds: 30
periodSeconds: 30
readinessProbe:
httpGet:
path: /ambassador/v0/check_ready
port: 8877
initialDelaySeconds: 30
periodSeconds: 30
resources:
limits:
cpu: 1
memory: 400Mi
requests:
cpu: 200m
memory: 100Mi
- name: statsd
image: "{{ .Values.ambassador.statsdImage.repository }}:{{ .Values.ambassador.statsdImage.tag }}"
- name: statsd-sink
image: "{{ .Values.ambassador.exporterImage.repository }}:{{ .Values.ambassador.exporterImage.tag }}"
restartPolicy: Always
serviceAccountName: ambassador
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
ksonnet.io/component: ambassador
name: ambassador
namespace: kubeflow
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
labels:
ksonnet.io/component: ambassador
name: ambassador
namespace: kubeflow
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- update
- patch
- get
- list
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
labels:
ksonnet.io/component: ambassador
name: ambassador
namespace: kubeflow
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ambassador
subjects:
- kind: ServiceAccount
name: ambassador
namespace: kubeflow
apiVersion: v1
kind: Service
metadata:
labels:
ksonnet.io/component: ambassador
service: ambassador
{{- if eq .Values.ambassador.service.type "Rancher-Proxy" }}
kubernetes.io/cluster-service: "true"
{{- end }}
name: ambassador
namespace: kubeflow
spec:
ports:
- name: ambassador
port: 80
targetPort: 80
{{- if .Values.ambassador.service.nodePort }}
nodePort: {{ .Values.ambassador.service.nodePort }}
{{- end }}
selector:
service: ambassador
{{- if eq .Values.ambassador.service.type "Rancher-Proxy" }}
type: ClusterIP
{{- else }}
type: {{ .Values.ambassador.service.type }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
labels:
ksonnet.io/component: ambassador
service: ambassador-admin
name: ambassador-admin
namespace: kubeflow
spec:
ports:
- name: ambassador-admin
port: 8877
targetPort: 8877
selector:
service: ambassador
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
annotations:
getambassador.io/config: |-
---
apiVersion: ambassador/v0
kind: Mapping
name: k8s-dashboard-ui-mapping
prefix: /k8s/ui/
rewrite: /
tls: true
service: kubernetes-dashboard.kube-system
labels:
ksonnet.io/component: ambassador
name: k8s-dashboard
namespace: kubeflow
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9102"
prometheus.io/scrape: "true"
labels:
ksonnet.io/component: ambassador
service: ambassador
name: statsd-sink
namespace: kubeflow
spec:
ports:
- name: statsd-sink
port: 9102
protocol: TCP
targetPort: 9102
selector:
service: ambassador
type: ClusterIP
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
ksonnet.io/component: centraldashboard
name: centraldashboard
namespace: kubeflow
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: centraldashboard
ksonnet.io/component: centraldashboard
name: centraldashboard
namespace: kubeflow
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
- pods/log
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
apiVersion: v1
kind: Service
metadata:
annotations:
getambassador.io/config: |-
---
apiVersion: ambassador/v0
kind: Mapping
name: centralui-mapping
prefix: /
rewrite: /
service: centraldashboard.kubeflow
labels:
app: centraldashboard
ksonnet.io/component: centraldashboard
name: centraldashboard
namespace: kubeflow
spec:
ports:
- port: 80
targetPort: 8082
selector:
app: centraldashboard
sessionAffinity: None
type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: centraldashboard
ksonnet.io/component: centraldashboard
name: centraldashboard
namespace: kubeflow
spec:
template:
metadata:
labels:
app: centraldashboard
spec:
containers:
- image: "{{ .Values.centraldashboard.image.repository }}:{{ .Values.centraldashboard.image.tag }}"
name: centraldashboard
ports:
- containerPort: 8082
serviceAccountName: centraldashboard
{{- if .Values.chainerOperator.enabled }}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
ksonnet.io/component: chainer-operator
annotations:
"helm.sh/hook": crd-install
"helm.sh/hook-delete-policy": before-hook-creation
name: chainerjobs.kubeflow.org
spec:
group: kubeflow.org
names:
categories:
- all
kind: ChainerJob
plural: chainerjobs
shortNames:
- chj
- chjs
- chjob
- chjobs
singular: chainerjob
scope: Namespaced
version: v1alpha1
{{- end }}
{{- if .Values.chainerOperator.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: chainer-operator
ksonnet.io/component: chainer-operator
name: chainer-operator
namespace: kubeflow
spec:
replicas: 1
selector:
matchLabels:
app: chainer-operator
template:
metadata:
labels:
app: chainer-operator
spec:
containers:
- args:
- -v
- "2"
- -stderrthreshold
- INFO
image: "{{ .Values.chainerOperator.image.repository }}:{{ .Values.chainerOperator.image.tag }}"
imagePullPolicy: Always
name: chainer-operator
serviceAccountName: chainer-operator
{{- end }}
{{- if .Values.chainerOperator.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
ksonnet.io/component: chainer-operator
name: chainer-operator
rules:
- apiGroups:
- ""
resources:
- configmaps
- serviceaccounts
verbs:
- create
- update
- list
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
verbs:
- create
- update
- list
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- get
- create
- list
- update
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- apiGroups:
- kubeflow.org
resources:
- chainerjobs
verbs:
- '*'
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
ksonnet.io/component: chainer-operator
name: chainer-operator
namespace: kubeflow
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
ksonnet.io/component: chainer-operator
name: chainer-operator
namespace: kubeflow
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: chainer-operator
subjects:
- kind: ServiceAccount
name: chainer-operator
namespace: kubeflow
{{- end }}
{{- if .Values.jupyterhub.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
ksonnet.io/component: jupyterhub
name: jupyterhub-config
namespace: kubeflow
data:
{{ (.Files.Glob "scripts/jupyterhub_config.py").AsConfig | indent 2 }}
{{- end }}
{{- if .Values.jupyterhub.enabled }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: jupyter-hub
ksonnet.io/component: jupyterhub
name: jupyter-hub
namespace: kubeflow
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
ksonnet.io/component: jupyterhub
name: jupyter-notebook
namespace: kubeflow
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
labels:
ksonnet.io/component: jupyterhub
name: jupyter-notebook-role
namespace: kubeflow
rules:
- apiGroups:
- ""
resources:
- pods
- services
verbs:
- '*'
- apiGroups:
- ""
- apps
- extensions
resources:
- deployments
- replicasets
verbs:
- '*'
- apiGroups:
- kubeflow.org
resources:
- '*'
verbs:
- '*'
- apiGroups:
- batch
resources:
- jobs
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
labels:
ksonnet.io/component: jupyterhub
name: jupyter-role
namespace: kubeflow
rules:
- apiGroups:
- ""
resources:
- pods
- persistentvolumeclaims
verbs:
- get
- watch
- list
- create
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- get
- watch
- list
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
labels:
ksonnet.io/component: jupyterhub
name: jupyter-notebook-role
namespace: kubeflow
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: jupyter-notebook-role
subjects:
- kind: ServiceAccount
name: jupyter-notebook
namespace: kubeflow
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
labels:
ksonnet.io/component: jupyterhub
name: jupyter-role
namespace: kubeflow
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: jupyter-role
subjects:
- kind: ServiceAccount
name: jupyter-hub
namespace: kubeflow
{{- end }}
{{- if .Values.jupyterhub.enabled }}
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: tf-hub
ksonnet.io/component: jupyterhub
name: tf-hub-0
namespace: kubeflow
spec:
clusterIP: None
ports:
- name: hub
port: 8000
selector:
app: tf-hub
---
apiVersion: v1
kind: Service
metadata:
annotations:
getambassador.io/config: |-
---
apiVersion: ambassador/v0
kind: Mapping
name: tf-hub-lb-hub-mapping
prefix: /hub/
rewrite: /hub/
timeout_ms: 300000
service: tf-hub-lb.kubeflow
use_websocket: true
---
apiVersion: ambassador/v0
kind: Mapping
name: tf-hub-lb-user-mapping
prefix: /user/
rewrite: /user/
timeout_ms: 300000
service: tf-hub-lb.kubeflow
use_websocket: true
labels:
app: tf-hub-lb
ksonnet.io/component: jupyterhub
name: tf-hub-lb
namespace: kubeflow
spec:
ports:
- name: hub
port: 80
targetPort: 8000
selector:
app: tf-hub
type: {{ .Values.jupyterhub.service.type }}
{{- end }}
{{- if .Values.jupyterhub.enabled }}
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
labels:
ksonnet.io/component: jupyterhub
name: tf-hub
namespace: kubeflow
spec:
replicas: 1
serviceName: ""
template:
metadata:
labels:
app: tf-hub
spec:
containers:
- command:
- jupyterhub
- -f
- /etc/config/jupyterhub_config.py
env:
- name: NOTEBOOK_PVC_MOUNT
value: /home/jovyan
- name: CLOUD_NAME
value: "null"
- name: REGISTRY
value: gcr.io
- name: REPO_NAME
value: kubeflow-images-public
- name: KF_AUTHENTICATOR
value: "null"
- name: DEFAULT_JUPYTERLAB
value: "false"
- name: KF_PVC_LIST
value: "null"
image: "{{ .Values.jupyterhub.image.repository }}:{{ .Values.jupyterhub.image.tag }}"
name: tf-hub
ports:
- containerPort: 8000
- containerPort: 8081
volumeMounts:
- mountPath: /etc/config
name: config-volume
serviceAccountName: jupyter-hub
volumes:
- configMap:
name: jupyterhub-config
name: config-volume
updateStrategy:
type: RollingUpdate
{{- end }}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
ksonnet.io/component: katib
name: metricscollector-template
namespace: kubeflow
data:
defaultMetricsCollectorTemplate.yaml: "apiVersion: batch/v1beta1\nkind: CronJob\nmetadata:\n
\ name: {{.WorkerId}}\n namespace: {{.NameSpace}} \nspec:\n schedule: \"*/1
* * * *\"\n successfulJobsHistoryLimit: 1\n failedJobsHistoryLimit: 1\n jobTemplate:\n
\ spec:\n template:\n spec:\n serviceAccountName: metrics-collector\n
\ containers:\n - name: {{.WorkerId}}\n image: katib/metrics-collector\n
\ args:\n - \"./metricscollector\"\n - \"-s\"\n
\ - \"{{.StudyId}}\"\n - \"-t\"\n - \"{{.TrialId}}\"\n
\ - \"-w\"\n - \"{{.WorkerId}}\"\n - \"-n\"\n
\ - \"{{.NameSpace}}\"\n restartPolicy: Never\n"
---
apiVersion: v1
kind: ConfigMap
metadata:
labels:
ksonnet.io/component: katib
name: worker-template
namespace: kubeflow
data:
defaultWorkerTemplate.yaml: |
apiVersion: batch/v1
namespace: kubeflow
kind: Job
metadata:
name: {{.WorkerId}}
spec:
template:
spec:
containers:
- name: {{.WorkerId}}
image: alpine
restartPolicy: Never
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
ksonnet.io/component: katib
annotations:
"helm.sh/hook": crd-install
"helm.sh/hook-delete-policy": before-hook-creation
name: studyjobs.kubeflow.org
spec:
group: kubeflow.org
names:
kind: StudyJob
plural: studyjobs
singular: studyjob
version: v1alpha1
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: studyjob-controller
ksonnet.io/component: katib
name: studyjob-controller
namespace: kubeflow
spec:
replicas: 1
selector:
matchLabels:
app: studyjob-controller
template:
metadata:
labels:
app: studyjob-controller
spec:
containers:
- image: "{{ .Values.katib.studyJobControllerImage.repository }}:{{ .Values.katib.studyJobControllerImage.tag }}"
imagePullPolicy: Always
name: studyjob-controller
volumeMounts:
- mountPath: /worker-template
name: worker-template
- mountPath: /metricscollector-template
name: metricscollector-template
serviceAccountName: studyjob-controller
volumes:
- configMap:
name: worker-template
name: worker-template
- configMap:
name: metricscollector-template
name: metricscollector-template
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: vizier
component: core
ksonnet.io/component: katib
name: vizier-core
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
app: vizier
component: core
name: vizier-core
spec:
containers:
- args:
- ./vizier-manager
- -w
- kubernetes
- -i
- k-cluster.example.net
image: "{{ .Values.katib.vizierCoreImage.repository }}:{{ .Values.katib.vizierCoreImage.tag }}"
# image: gcr.io/kubeflow-images-public/katib/vizier-core:v0.1.2-alpha-45-g3dce496
name: vizier-core
ports:
- containerPort: 6789
name: api
serviceAccountName: vizier-core
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: vizier
component: db
ksonnet.io/component: katib
name: vizier-db
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
app: vizier
component: db
name: vizier-db
spec:
containers:
- args:
- --datadir
- /var/lib/mysql/datadir
env:
- name: MYSQL_ROOT_PASSWORD
value: test
- name: MYSQL_ALLOW_EMPTY_PASSWORD
value: "true"
- name: MYSQL_DATABASE
value: vizier
image: "{{ .Values.katib.vizierDbImage.repository }}:{{ .Values.katib.vizierDbImage.tag }}"
name: vizier-db
ports:
- containerPort: 3306
name: dbapi
volumeMounts:
- mountPath: /var/lib/mysql
name: vizier-db
volumes:
- name: vizier-db
{{- if .Values.katib.vizierdb.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.katib.vizierdb.persistence.existingClaim | default ("vizier-db") }}
{{- else }}
emptyDir: {}
{{- end }}
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: vizier
component: suggestion-bayesianoptimization
ksonnet.io/component: katib
name: vizier-suggestion-bayesianoptimization
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
app: vizier
component: suggestion-bayesianoptimization
name: vizier-suggestion-bayesianoptimization
spec:
containers:
- image: "{{ .Values.katib.suggestionBayesianOptimizationImage.repository }}:{{ .Values.katib.suggestionBayesianOptimizationImage.tag }}"
name: vizier-suggestion-bayesianoptimization
ports:
- containerPort: 6789
name: api
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: vizier
component: suggestion-grid
ksonnet.io/component: katib
name: vizier-suggestion-grid
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
app: vizier
component: suggestion-grid
name: vizier-suggestion-grid
spec:
containers:
- image: "{{ .Values.katib.suggestionGridImage.repository }}:{{ .Values.katib.suggestionGridImage.tag }}"
# - image: gcr.io/kubeflow-images-public/katib/suggestion-grid:v0.1.2-alpha-45-g3dce496
name: vizier-suggestion-grid
ports:
- containerPort: 6789
name: api
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: vizier
component: suggestion-hyperband
ksonnet.io/component: katib
name: vizier-suggestion-hyperband
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
app: vizier
component: suggestion-hyperband
name: vizier-suggestion-hyperband
spec:
containers:
- image: "{{ .Values.katib.suggestionHyperbandImage.repository }}:{{ .Values.katib.suggestionHyperbandImage.tag }}"
# - image: gcr.io/kubeflow-images-public/katib/suggestion-hyperband:v0.1.2-alpha-45-g3dce496
name: vizier-suggestion-hyperband
ports:
- containerPort: 6789
name: api
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: vizier
component: suggestion-random
ksonnet.io/component: katib
name: vizier-suggestion-random
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
app: vizier
component: suggestion-random
name: vizier-suggestion-random
spec:
containers:
- image: "{{ .Values.katib.suggestionRandomImage.repository }}:{{ .Values.katib.suggestionRandomImage.tag }}"
# - image: gcr.io/kubeflow-images-public/katib/suggestion-random:v0.1.2-alpha-45-g3dce496
name: vizier-suggestion-random
ports:
- containerPort: 6789
name: api
apiVersion: v1
kind: Service
metadata:
labels:
app: modeldb
component: backend
ksonnet.io/component: katib
name: modeldb-backend
namespace: kubeflow
spec:
ports:
- name: api
port: 6543
protocol: TCP
selector:
app: modeldb
component: backend
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app: modeldb
component: db
ksonnet.io/component: katib
name: modeldb-db
namespace: kubeflow
spec:
ports:
- name: dbapi
port: 27017
protocol: TCP
selector:
app: modeldb
component: db
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
annotations:
getambassador.io/config: |-
---
apiVersion: ambassador/v0
kind: Mapping
name: modeldb-mapping
prefix: /katib/
rewrite: /katib/
method: GET
service: modeldb-frontend.kubeflow:3000
labels:
app: modeldb
component: frontend
ksonnet.io/component: katib
name: modeldb-frontend
namespace: kubeflow
spec:
ports:
- name: api
port: 3000
protocol: TCP
selector:
app: modeldb
component: frontend
type: ClusterIP
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: modeldb
component: backend
ksonnet.io/component: katib
name: modeldb-backend
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
app: modeldb
component: backend
name: modeldb-backend
spec:
containers:
- args:
- modeldb-db
image: "{{ .Values.katib.modeldbImage.repository }}:{{ .Values.katib.modeldbImage.tag }}"
name: modeldb-backend
ports:
- containerPort: 6543
name: api
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: modeldb
component: db
ksonnet.io/component: katib
name: modeldb-db
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
app: modeldb
component: db
name: modeldb-db
spec:
containers:
- image: "{{ .Values.katib.modeldbDatabaseImage.repository }}:{{ .Values.katib.modeldbDatabaseImage.tag }}"
name: modeldb-db
ports:
- containerPort: 27017
name: dbapi
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: modeldb
component: frontend
ksonnet.io/component: katib
name: modeldb-frontend
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
app: modeldb
component: frontend
name: modeldb-frontend
spec:
containers:
- args:
- modeldb-backend
env:
- name: ROOT_PATH
value: /katib
# image: gcr.io/kubeflow-images-public/katib/katib-frontend:v0.1.2-alpha-45-g3dce496
image: "{{ .Values.katib.modeldbFrontendImage.repository }}:{{ .Values.katib.modeldbFrontendImage.tag }}"
imagePullPolicy: IfNotPresent
name: modeldb-frontend
ports:
- containerPort: 3000
name: webapi
{{- if and .Values.katib.vizierdb.persistence.enabled (not .Values.katib.vizierdb.persistence.existingClaim) }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
app: vizier
component: db
ksonnet.io/component: katib
name: vizier-db
namespace: kubeflow
spec:
accessModes:
- {{ .Values.katib.vizierdb.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.katib.vizierdb.persistence.size | quote }}
{{- if .Values.katib.vizierdb.persistence.storageClass }}
{{- if (eq "-" .Values.katib.vizierdb.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.katib.vizierdb.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
ksonnet.io/component: katib
name: metrics-collector
rules:
- apiGroups:
- ""
resources:
- pods
- pods/log
- pods/status
verbs:
- '*'
- apiGroups:
- batch
resources:
- jobs
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
ksonnet.io/component: katib
name: studyjob-controller
rules:
- apiGroups:
- ""
resources:
- configmaps
- serviceaccounts
verbs:
- create
- update
- list
- watch
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- apiGroups:
- kubeflow.org
resources:
- studyjobs
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
ksonnet.io/component: katib
name: vizier-core
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/*
- pods/log
- pods/status
- services
- persistentvolumes
- persistentvolumes/status
- persistentvolumeclaims
- persistentvolumeclaims/status
verbs:
- '*'
- apiGroups:
- batch
resources:
- jobs
- jobs/status
verbs:
- '*'
- apiGroups:
- extensions
resources:
- ingresses
- ingresses/status
- deployments
- deployments/status
verbs:
- '*'
- apiGroups:
- ""
resources:
- services
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
ksonnet.io/component: katib
name: vizier-core
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: vizier-core
subjects:
- kind: ServiceAccount
name: vizier-core
namespace: kubeflow
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
ksonnet.io/component: katib
name: metrics-collector
namespace: kubeflow
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
ksonnet.io/component: katib
name: studyjob-controller
namespace: kubeflow
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
ksonnet.io/component: katib
name: vizier-core
namespace: kubeflow
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
ksonnet.io/component: katib
name: metrics-collector
namespace: kubeflow
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metrics-collector
subjects:
- kind: ServiceAccount
name: metrics-collector
namespace: kubeflow
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
ksonnet.io/component: katib
name: studyjob-controller
namespace: kubeflow
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: studyjob-controller
subjects:
- kind: ServiceAccount
name: studyjob-controller
namespace: kubeflow
apiVersion: v1
kind: Service
metadata:
labels:
app: vizier
component: core
ksonnet.io/component: katib
name: vizier-core
namespace: kubeflow
spec:
ports:
- name: api
{{- if .Values.katib.vizier.service.nodePort }}
nodePort: {{ .Values.katib.vizier.service.nodePort }}
{{- end }}
port: 6789
protocol: TCP
selector:
app: vizier
component: core
type: {{ .Values.katib.vizier.service.type }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: vizier
component: db
ksonnet.io/component: katib
name: vizier-db
namespace: kubeflow
spec:
ports:
- name: dbapi
port: 3306
protocol: TCP
selector:
app: vizier
component: db
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app: vizier
component: suggestion-bayesianoptimization
ksonnet.io/component: katib
name: vizier-suggestion-bayesianoptimization
namespace: kubeflow
spec:
ports:
- name: api
port: 6789
protocol: TCP
selector:
app: vizier
component: suggestion-bayesianoptimization
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app: vizier
component: suggestion-grid
ksonnet.io/component: katib
name: vizier-suggestion-grid
namespace: kubeflow
spec:
ports:
- name: api
port: 6789
protocol: TCP
selector:
app: vizier
component: suggestion-grid
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app: vizier
component: suggestion-hyperband
ksonnet.io/component: katib
name: vizier-suggestion-hyperband
namespace: kubeflow
spec:
ports:
- name: api
port: 6789
protocol: TCP
selector:
app: vizier
component: suggestion-hyperband
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app: vizier
component: suggestion-random
ksonnet.io/component: katib
name: vizier-suggestion-random
namespace: kubeflow
spec:
ports:
- name: api
port: 6789
protocol: TCP
selector:
app: vizier
component: suggestion-random
type: ClusterIP
{{- if .Values.mxnetOperator.enabled }}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
ksonnet.io/component: mxnet-operator
annotations:
"helm.sh/hook": crd-install
"helm.sh/hook-delete-policy": before-hook-creation
name: mxjobs.kubeflow.org
spec:
group: kubeflow.org
names:
kind: MXJob
plural: mxjobs
singular: mxjob
version: v1alpha1
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: mxnet-operator
ksonnet.io/component: mxnet-operator
name: mxnet-operator
rules:
- apiGroups:
- kubeflow.org
resources:
- mxjobs
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- '*'
- apiGroups:
- batch
resources:
- jobs
verbs:
- '*'
- apiGroups:
- ""
resources:
- configmaps
- pods
- services
- endpoints
- persistentvolumeclaims
- events
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: mxnet-operator
ksonnet.io/component: mxnet-operator
name: mxnet-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: mxnet-operator
subjects:
- kind: ServiceAccount
name: mxnet-operator
namespace: kubeflow
---
apiVersion: v1
data:
controller_config_file.yaml: |-
{
}
kind: ConfigMap
metadata:
labels:
ksonnet.io/component: mxnet-operator
name: mxnet-operator-config
namespace: kubeflow
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: mxnet-operator
ksonnet.io/component: mxnet-operator
name: mxnet-operator
namespace: kubeflow
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
ksonnet.io/component: mxnet-operator
name: mxnet-operator
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
name: mxnet-operator
spec:
containers:
- command:
- /opt/mlkube/mxnet-operator
- --alsologtostderr
- -v=1
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
image: "{{ .Values.mxnetOperator.image.repository }}:{{ .Values.mxnetOperator.image.tag }}"
imagePullPolicy: Always
name: mxnet-operator
volumeMounts:
- mountPath: /etc/config
name: config-volume
serviceAccountName: mxnet-operator
volumes:
- configMap:
name: mxnet-operator-config
name: config-volume
{{- end }}
{{- if .Values.pytorchOperator.enabled }}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
ksonnet.io/component: pytorch-operator
annotations:
"helm.sh/hook": crd-install
"helm.sh/hook-delete-policy": before-hook-creation
name: pytorchjobs.kubeflow.org
spec:
group: kubeflow.org
names:
kind: PyTorchJob
plural: pytorchjobs
singular: pytorchjob
validation:
openAPIV3Schema:
properties:
spec:
properties:
pytorchReplicaSpecs:
properties:
Master:
properties:
replicas:
maximum: 1
minimum: 1
type: integer
Worker:
properties:
replicas:
minimum: 1
type: integer
version: v1alpha2
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: pytorch-operator
ksonnet.io/component: pytorch-operator
name: pytorch-operator
rules:
- apiGroups:
- kubeflow.org
resources:
- pytorchjobs
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- '*'
- apiGroups:
- batch
resources:
- jobs
verbs:
- '*'
- apiGroups:
- ""
resources:
- configmaps
- pods
- services
- endpoints
- persistentvolumeclaims
- events
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: pytorch-operator
ksonnet.io/component: pytorch-operator
name: pytorch-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: pytorch-operator
subjects:
- kind: ServiceAccount
name: pytorch-operator
namespace: kubeflow
---
apiVersion: v1
data:
controller_config_file.yaml: |-
{
}
kind: ConfigMap
metadata:
labels:
ksonnet.io/component: pytorch-operator
name: pytorch-operator-config
namespace: kubeflow
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: pytorch-operator
ksonnet.io/component: pytorch-operator
name: pytorch-operator
namespace: kubeflow
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
ksonnet.io/component: pytorch-operator
name: pytorch-operator
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
name: pytorch-operator
spec:
containers:
- command:
- /pytorch-operator.v2
- --alsologtostderr
- -v=1
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
image: "{{ .Values.pytorchOperator.image.repository }}:{{ .Values.pytorchOperator.image.tag }}"
name: pytorch-operator
volumeMounts:
- mountPath: /etc/config
name: config-volume
serviceAccountName: pytorch-operator
volumes:
- configMap:
name: pytorch-operator-config
name: config-volume
{{- end }}
{{- if .Values.tfJobOperator.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
labels:
ksonnet.io/component: tf-job-operator
name: tf-job-operator-config
namespace: kubeflow
data:
controller_config_file.yaml: |-
{
"grpcServerFilePath": "/opt/mlkube/grpc_tensorflow_server/grpc_tensorflow_server.py"
}
{{- end }}
{{- if .Values.tfJobOperator.enabled }}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
ksonnet.io/component: tf-job-operator
annotations:
"helm.sh/hook": crd-install
"helm.sh/hook-delete-policy": before-hook-creation
name: tfjobs.kubeflow.org
spec:
version: v1alpha2
group: kubeflow.org
names:
kind: TFJob
plural: tfjobs
singular: tfjob
validation:
openAPIV3Schema:
properties:
spec:
properties:
tfReplicaSpecs:
properties:
Chief:
properties:
replicas:
maximum: 1
minimum: 1
type: integer
PS:
properties:
replicas:
minimum: 1
type: integer
Worker:
properties:
replicas:
minimum: 1
type: integer
{{- end }}
{{- if .Values.tfJobOperator.enabled }}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
ksonnet.io/component: tf-job-operator
name: tf-job-dashboard
namespace: kubeflow
spec:
template:
metadata:
labels:
name: tf-job-dashboard
spec:
containers:
- command:
- /opt/tensorflow_k8s/dashboard/backend
env:
- name: KUBEFLOW_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: "{{ .Values.tfJobOperator.image.repository }}:{{ .Values.tfJobOperator.image.tag }}"
name: tf-job-dashboard
ports:
- containerPort: 8080
serviceAccountName: tf-job-dashboard
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
ksonnet.io/component: tf-job-operator
name: tf-job-operator-v1alpha2
namespace: kubeflow
spec:
replicas: 1
template:
metadata:
labels:
name: tf-job-operator
spec:
containers:
- command:
- /opt/kubeflow/tf-operator.v2
- --alsologtostderr
- -v=1
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
image: "{{ .Values.tfJobOperator.image.repository }}:{{ .Values.tfJobOperator.image.tag }}"
name: tf-job-operator
volumeMounts:
- mountPath: /etc/config
name: config-volume
serviceAccountName: tf-job-operator
volumes:
- configMap:
name: tf-job-operator-config
name: config-volume
{{- end }}
{{- if .Values.tfJobOperator.enabled }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: tf-job-dashboard
ksonnet.io/component: tf-job-operator
name: tf-job-dashboard
rules:
- apiGroups:
- tensorflow.org
- kubeflow.org
resources:
- tfjobs
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- '*'
- apiGroups:
- batch
resources:
- jobs
verbs:
- '*'
- apiGroups:
- ""
resources:
- configmaps
- pods
- services
- endpoints
- persistentvolumeclaims
- events
- pods/log
- namespaces
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: tf-job-operator
ksonnet.io/component: tf-job-operator
name: tf-job-operator
rules:
- apiGroups:
- tensorflow.org
- kubeflow.org
resources:
- tfjobs
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- '*'
- apiGroups:
- batch
resources:
- jobs
verbs:
- '*'
- apiGroups:
- ""
resources:
- configmaps
- pods
- services
- endpoints
- persistentvolumeclaims
- events
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: tf-job-dashboard
ksonnet.io/component: tf-job-operator
name: tf-job-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tf-job-dashboard
subjects:
- kind: ServiceAccount
name: tf-job-dashboard
namespace: kubeflow
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: tf-job-operator
ksonnet.io/component: tf-job-operator
name: tf-job-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tf-job-operator
subjects:
- kind: ServiceAccount
name: tf-job-operator
namespace: kubeflow
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: tf-job-dashboard
ksonnet.io/component: tf-job-operator
name: tf-job-dashboard
namespace: kubeflow
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: tf-job-operator
ksonnet.io/component: tf-job-operator
name: tf-job-operator
namespace: kubeflow
{{- end }}
{{- if .Values.tfJobOperator.enabled }}
apiVersion: v1
kind: Service
metadata:
annotations:
getambassador.io/config: |-
---
apiVersion: ambassador/v0
kind: Mapping
name: tfjobs-ui-mapping
prefix: /tfjobs/
rewrite: /tfjobs/
service: tf-job-dashboard.kubeflow
labels:
ksonnet.io/component: tf-job-operator
name: tf-job-dashboard
namespace: kubeflow
spec:
ports:
- port: 80
targetPort: 8080
selector:
name: tf-job-dashboard
type: ClusterIP
{{- end }}
# Default values for kubeflow.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
ambassador:
replicas: 3
image:
repository: quay.io/datawire/ambassador
tag: 0.40.2
statsdImage:
repository: quay.io/datawire/statsd
tag: 0.38.0
exporterImage:
repository: prom/statsd-exporter
tag: v0.8.0
service:
type: ClusterIP
centraldashboard:
image:
repository: gcr.io/kubeflow-images-public/centraldashboard
tag: v0.3.0
katib:
modeldbImage:
repository: gcr.io/kubeflow-images-public/modeldb-backend
tag: v0.2.0
modeldbDatabaseImage:
repository: mongo
tag: 3.4
modeldbFrontendImage:
repository: katib/katib-frontend
tag: latest
studyJobControllerImage:
repository: katib/studyjob-controller
tag: v0.3
vizierCoreImage:
repository: katib/vizier-core
tag: v0.3
vizierDbImage:
repository: mysql
tag: 8.0.3
suggestionBayesianOptimizationImage:
repository: gcr.io/kubeflow-images-public/katib/suggestion-bayesianoptimization
tag: v0.1.2-alpha-45-g3dce496
suggestionGridImage:
repository: katib/suggestion-grid
tag: v0.3
suggestionHyperbandImage:
repository: katib/suggestion-hyperband
tag: v0.3
suggestionRandomImage:
repository: katib/suggestion-random
tag: v0.3
vizierdb:
persistence:
enabled: true
## vizier-db data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 10Gi
vizier:
service:
type: ClusterIP
# nodePort: 30678
jupyterhub:
enabled: true
image:
repository: gcr.io/kubeflow/jupyterhub-k8s
tag: v20180531-3bb991b1
service:
type: ClusterIP
tfJobOperator:
enabled: true
image:
repository: gcr.io/kubeflow-images-public/tf_operator
tag: v0.3.0
chainerOperator:
enabled: false
image:
repository: gcr.io/kubeflow-images-public/chainer-operator
tag: v0.3.0
mxnetOperator:
enabled: false
image:
repository: mxjob/mxnet-operator
tag: v1
pytorchOperator:
enabled: false
image:
repository: gcr.io/kubeflow-images-public/pytorch-operator
tag: v0.3.0
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
apiVersion: v1
appVersion: 1.0.9
description: nfs-server-provisioner is an out-of-tree dynamic provisioner for Kubernetes. You can use it to quickly & easily deploy shared storage that works almost anywhere.
name: nfs-provisioner
icon: file://../nfs-logo.png
version: 0.2.1
maintainers:
- name: kiall
email: kiall@macinnes.ie
home: https://github.com/kubernetes/charts/tree/master/stable/nfs-server-provisioner
sources:
- https://github.com/kubernetes-incubator/external-storage/tree/master/nfs
keywords:
- nfs
- storage
approvers:
- kiall
reviewers:
- kiall
# NFS Server Provisioner
[NFS Server Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs)
is an out-of-tree dynamic provisioner for Kubernetes. You can use it to quickly
& easily deploy shared storage that works almost anywhere.
This chart will deploy the Kubernetes [external-storage projects](https://github.com/kubernetes-incubator/external-storage)
`nfs` provisioner. This provisioner includes a built in NFS server, and is not intended for connecting to a pre-existing
NFS server. If you have a pre-existing NFS Server, please consider using the [NFS Client Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client)
instead.
## Introduction
This chart bootstraps a [nfs-server-provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs)
deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh)
package manager.
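To install the chart, run `helm install` with a release name of your choice (`my-release` below is only an example):
```console
$ helm install stable/nfs-server-provisioner --name my-release
```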
## Configuration
The following table lists the configurable parameters of the nfs-server-provisioner chart and
their default values.
| Parameter | Description | Default |
|:-------------------------------|:----------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------|
| `imagePullSecrets` | Specify image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
| `image.repository` | The image repository to pull from | `quay.io/kubernetes_incubator/nfs-provisioner` |
| `image.tag` | The image tag to pull from | `v1.0.9` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `service.type` | service type | `ClusterIP` |
| `service.nfsPort` | TCP port on which the nfs-server-provisioner NFS service is exposed | `2049` |
| `service.mountdPort` | TCP port on which the nfs-server-provisioner mountd service is exposed | `20048` |
| `service.rpcbindPort` | TCP port on which the nfs-server-provisioner RPC service is exposed | `51413` |
| `service.nfsNodePort` | if `service.type` is `NodePort` and this is non-empty, sets the nfs-server-provisioner node port of the NFS service | `nil` |
| `service.mountdNodePort` | if `service.type` is `NodePort` and this is non-empty, sets the nfs-server-provisioner node port of the mountd service | `nil` |
| `service.rpcbindNodePort` | if `service.type` is `NodePort` and this is non-empty, sets the nfs-server-provisioner node port of the RPC service | `nil` |
| `persistence.enabled` | Enable config persistence using PVC | `false` |
| `persistence.storageClass` | PVC Storage Class for config volume | `nil` |
| `persistence.accessMode` | PVC Access Mode for config volume | `ReadWriteOnce` |
| `persistence.size` | PVC Storage Request for config volume | `1Gi` |
| `storageClass.create` | Enable creation of a StorageClass to consume this nfs-server-provisioner instance | `true` |
| `storageClass.provisionerName` | The provisioner name for the storageclass | `cluster.local/{release-name}-{chart-name}` |
| `storageClass.defaultClass` | Whether to set the created StorageClass as the clusters default StorageClass | `false` |
| `storageClass.name` | The name to assign the created StorageClass | `nfs` |
| `storageClass.parameters` | Parameters for StorageClass | `mountOptions: vers=4.1` |
| `storageClass.reclaimPolicy` | ReclaimPolicy field of the class, which can be either Delete or Retain | `Delete` |
| `resources` | Resource limits for nfs-server-provisioner pod | `{}` |
| `nodeSelector` | Map of node labels for pod assignment | `{}` |
| `tolerations` | List of node taints to tolerate | `[]` |
| `affinity` | Map of node/pod affinities | `{}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example:
```console
$ helm install stable/nfs-server-provisioner --name my-release \
    --set=image.tag=v1.0.8,resources.limits.cpu=200m
```
Alternatively, a YAML file that specifies the values for the above parameters
can be provided while installing the chart. For example,
```console
$ helm install stable/nfs-server-provisioner --name my-release -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml) as an example
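As a sketch, a values file that pins the image tag and enables persistence (the specific values are illustrative, drawn from the parameter table above) could look like:
```yaml
# values.yaml -- illustrative overrides only
image:
  tag: v1.0.9
persistence:
  enabled: true
  storageClass: "standard"
  size: 10Gi
```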
## Persistence
The nfs-server-provisioner image stores its configuration data and, importantly, **the dynamic
volumes it manages**, under the `/export` path of the container.
The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/)
at this location. The volume can be created using dynamic volume
provisioning. However, **it is highly recommended** to explicitly specify
a storage class to use rather than accept the cluster's default, or to pre-create
a volume for each replica.
If this chart is deployed with more than 1 replica while `storageClass.defaultClass=true`
and `persistence.storageClass` is left unset, then the 2nd+ replicas will end up using the 1st
replica to provision storage - which is likely never a desired outcome.
## Recommended Persistence Configuration Examples
The following is a recommended configuration example when another storage class
exists to provide persistence:
```yaml
persistence:
  enabled: true
  storageClass: "standard"
  size: 200Gi

storageClass:
  defaultClass: true
```
On many clusters, the cloud provider integration will create a "standard" storage
class which will create a volume (e.g. a Google Compute Engine Persistent Disk or
Amazon EBS volume) to provide persistence.
---
The following is a recommended configuration example when another storage class
does not exist to provide persistence:
```yaml
persistence:
  enabled: true
  storageClass: "-"
  size: 200Gi

storageClass:
  defaultClass: true
```
In this configuration, a `PersistentVolume` must be created for each replica
to use. Installing the Helm chart, and then inspecting the `PersistentVolumeClaim`'s
created will provide the necessary names for your `PersistentVolume`'s to bind to.
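For instance, assuming the release was installed into the `kube-system` namespace used by the example manifest below:
```console
$ kubectl get persistentvolumeclaims --namespace kube-system
```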
An example of the necessary `PersistentVolume`:
```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: data-nfs-server-provisioner-0
spec:
  capacity:
    storage: 200Gi
  accessModes:
    - ReadWriteOnce
  gcePersistentDisk:
    fsType: "ext4"
    pdName: "data-nfs-server-provisioner-0"
  claimRef:
    namespace: kube-system
    name: data-nfs-server-provisioner-0
```
---
The following is a recommended configuration example for running on bare metal with a hostPath volume:
```yaml
persistence:
  enabled: true
  storageClass: "-"
  size: 200Gi

storageClass:
  defaultClass: true

nodeSelector:
  kubernetes.io/hostname: {node-name}
```
In this configuration, a `PersistentVolume` must be created for each replica
to use. Installing the Helm chart, and then inspecting the `PersistentVolumeClaim`'s
created will provide the necessary names for your `PersistentVolume`'s to bind to.
An example of the necessary `PersistentVolume`:
```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: data-nfs-server-provisioner-0
spec:
  capacity:
    storage: 200Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /srv/volumes/data-nfs-server-provisioner-0
  claimRef:
    namespace: kube-system
    name: data-nfs-server-provisioner-0
```
> **Warning**: `hostPath` volumes cannot be migrated between machines by Kubernetes; as such,
in this example we have restricted the `nfs-server-provisioner` pod to run on a single node. This
is unsuitable for production deployments.
# NFS Server Provisioner
[NFS Server Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs)
is an out-of-tree dynamic provisioner for Kubernetes. You can use it to quickly
& easily deploy shared storage that works almost anywhere.
This chart will deploy the Kubernetes [external-storage projects](https://github.com/kubernetes-incubator/external-storage)
`nfs` provisioner. This provisioner includes a built in NFS server, and is not intended for connecting to a pre-existing
NFS server. If you have a pre-existing NFS Server, please consider using the [NFS Client Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client)
instead.
categories:
- storage
labels:
io.rancher.certified: experimental
questions:
- variable: defaultImage
default: true
description: "Use default Docker image"
label: Use Default Image
type: boolean
group: "Container Images"
show_subquestion_if: false
subquestions:
- variable: image.repository
default: "quay.io/kubernetes_incubator/nfs-provisioner"
description: "Docker image name"
type: string
label: NFS Image Name
- variable: image.tag
default: "v1.0.9"
description: "NFS image tag"
type: string
label: Image Tag
- variable: storageClass.create
default: true
description: "Creating the StorageClass"
type: boolean
required: true
label: Creating the StorageClass
group: "StorageClass Setting"
show_subquestion_if: true
subquestions:
- variable: storageClass.defaultClass
default: false
description: "Set StorageClass as the default StorageClass"
type: boolean
label: Set StorageClass as the default StorageClass
- variable: storageClass.reclaimPolicy
default: "Delete"
description: "ReclaimPolicy of the Created StorageClass"
type: enum
label: ReclaimPolicy of the Created StorageClass
options:
- "Delete"
- "Retain"
- variable: storageClass.allowVolumeExpansion
default: true
description: "AllowVolumeExpansion shows whether the storage class allow volume expand"
type: boolean
label: AllowVolumeExpansion Shows Whether The StorageClass Allow Volume Expand
- variable: service.type
default: "ClusterIP"
description: "The type of service to create for the nfs-provisioner"
type: enum
label: nfs-provisioner Service Type
group: "Service Settings"
required: true
options:
- "ClusterIP"
- "NodePort"
show_subquestion_if: "NodePort"
subquestions:
- variable: service.nfsNodePort
default: ""
description: "Sepcify the nodePort of the NFS service"
type: int
label: NodePort of the NFS service
min: 30000
max: 32767
- variable: service.mountdNodePort
default: ""
description: "Sepcify the nodePort of the mountd service"
type: int
label: NodePort of the mountd service
min: 30000
max: 32767
- variable: service.rpcbindNodePort
default: ""
description: "Sepcify the nodePort of the RPC service"
type: int
label: NodePort of the RCP service
min: 30000
max: 32767
# persistent volume settings
- variable: persistence.enabled
default: false
description: "Enable persistent volume for the nfs-server-provisioner"
type: boolean
required: true
label: Persistent Volume Enabled for nfs-server-provisioner
show_subquestion_if: true
group: "Persistent Storage"
subquestions:
- variable: persistence.size
default: "20Gi"
description: "nfs-server-provisionner Persistent Volume Size"
type: string
label: nfs-server-provisionner Volume Size
- variable: persistence.storageClass
default: ""
description: "If undefined or null, uses the default StorageClass. Default to null"
type: storageclass
label: Default StorageClass for nfs-server-provisionner
The NFS Provisioner service has now been installed.
{{ if .Values.storageClass.create -}}
A storage class named '{{ .Values.storageClass.name }}' has now been created
and is available to provision dynamic volumes.
You can use this storageclass by creating a `PersistentVolumeClaim` with the
correct storageClassName attribute. For example:
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-dynamic-volume-claim
spec:
storageClassName: "{{ .Values.storageClass.name }}"
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
{{ else -}}
A storage class has NOT been created. You may create a custom `StorageClass`
resource with a `provisioner` attribute of `{{ template "nfs-provisioner.provisionerName" . }}`.
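For example (a minimal sketch; the class name is up to you):
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs
provisioner: {{ template "nfs-provisioner.provisionerName" . }}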
{{ end -}}
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "nfs-provisioner.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "nfs-provisioner.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "nfs-provisioner.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "nfs-provisioner.provisionerName" -}}
{{- if .Values.storageClass.provisionerName -}}
{{- printf .Values.storageClass.provisionerName -}}
{{- else -}}
cluster.local/{{ template "nfs-provisioner.fullname" . -}}
{{- end -}}
{{- end -}}
{{ if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "nfs-provisioner.fullname" . }}
labels:
app: {{ template "nfs-provisioner.name" . }}
chart: {{ template "nfs-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get"]
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["nfs-provisioner"]
verbs: ["use"]
{{- end -}}
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: {{ template "nfs-provisioner.name" . }}
chart: {{ template "nfs-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "nfs-provisioner.fullname" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "nfs-provisioner.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "nfs-provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}
apiVersion: v1
kind: Service
metadata:
name: {{ template "nfs-provisioner.fullname" . }}
labels:
app: {{ template "nfs-provisioner.name" . }}
chart: {{ template "nfs-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.nfsPort }}
targetPort: nfs
protocol: TCP
name: nfs
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nfsNodePort))) }}
nodePort: {{ .Values.service.nfsNodePort }}
{{- end }}
- port: {{ .Values.service.mountdPort }}
targetPort: mountd
protocol: TCP
name: mountd
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.mountdNodePort))) }}
nodePort: {{ .Values.service.mountdNodePort }}
{{- end }}
- port: {{ .Values.service.rpcbindPort }}
targetPort: rpcbind-tcp
protocol: TCP
name: rpcbind-tcp
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.rpcbindNodePort))) }}
nodePort: {{ .Values.service.rpcbindNodePort }}
{{- end }}
- port: {{ .Values.service.rpcbindPort }}
targetPort: rpcbind-udp
protocol: UDP
name: rpcbind-udp
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.rpcbindNodePort))) }}
nodePort: {{ .Values.service.rpcbindNodePort }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
selector:
app: {{ template "nfs-provisioner.name" . }}
release: {{ .Release.Name }}
{{- if .Values.rbac.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "nfs-provisioner.name" . }}
chart: {{ template "nfs-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "nfs-provisioner.fullname" . }}
{{- end -}}
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
name: {{ template "nfs-provisioner.fullname" . }}
labels:
app: {{ template "nfs-provisioner.name" . }}
chart: {{ template "nfs-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
# TODO: Investigate how/if nfs-provisioner can be scaled out beyond 1 replica
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ template "nfs-provisioner.name" . }}
release: {{ .Release.Name }}
serviceName: {{ template "nfs-provisioner.fullname" . }}
template:
metadata:
labels:
app: {{ template "nfs-provisioner.name" . }}
chart: {{ template "nfs-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
# NOTE: This is 10 seconds longer than the default nfs-provisioner --grace-period value of 90sec
terminationGracePeriodSeconds: 100
serviceAccountName: {{ if .Values.rbac.create }}{{ template "nfs-provisioner.fullname" . }}{{ else }}{{ .Values.rbac.serviceAccountName | quote }}{{ end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: nfs
containerPort: 2049
protocol: TCP
- name: mountd
containerPort: 20048
protocol: TCP
- name: rpcbind-tcp
containerPort: 111
protocol: TCP
- name: rpcbind-udp
containerPort: 111
protocol: UDP
securityContext:
capabilities:
add:
- DAC_READ_SEARCH
- SYS_RESOURCE
args:
- "-provisioner={{ template "nfs-provisioner.provisionerName" . }}"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: SERVICE_NAME
value: {{ template "nfs-provisioner.fullname" . }}
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: data
mountPath: /export
{{- with .Values.resources }}
resources:
{{ toYaml . | indent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- if not .Values.persistence.enabled }}
volumes:
- name: data
emptyDir: {}
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ {{ .Values.persistence.accessMode | quote }} ]
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: {{ .Values.persistence.storageClass | quote }}
{{- end }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- end }}
{{ if .Values.storageClass.create -}}
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: {{ template "nfs-provisioner.fullname" . }}
labels:
app: {{ template "nfs-provisioner.name" . }}
chart: {{ template "nfs-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
{{- if .Values.storageClass.defaultClass }}
annotations:
storageclass.kubernetes.io/is-default-class: "true"
{{- end }}
allowVolumeExpansion: {{ .Values.storageClass.allowVolumeExpansion }}
provisioner: {{ template "nfs-provisioner.provisionerName" . }}
reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
{{- with .Values.storageClass.parameters }}
parameters:
{{ toYaml . | indent 2 }}
{{- end }}
{{ end -}}
# Default values for nfs-provisioner.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
# imagePullSecrets:
image:
repository: quay.io/kubernetes_incubator/nfs-provisioner
tag: v1.0.9
pullPolicy: IfNotPresent
service:
type: ClusterIP
nfsPort: 2049
mountdPort: 20048
rpcbindPort: 51413
# nfsNodePort:
# mountdNodePort:
# rpcbindNodePort:
externalIPs: []
persistence:
enabled: false
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 1Gi
## For creating the StorageClass automatically:
storageClass:
create: true
## Set a provisioner name. If unset, a name will be generated.
# provisionerName:
## Set StorageClass as the default StorageClass
## Ignored if storageClass.create is false
defaultClass: false
## Set a StorageClass name
## Ignored if storageClass.create is false
# name: nfs
## StorageClass parameters
parameters:
mountOptions: vers=4.1
## ReclaimPolicy field of the class, which can be either Delete or Retain
reclaimPolicy: Delete
## Whether the created StorageClass allows volume expansion
allowVolumeExpansion: true
## For RBAC support:
rbac:
create: true
## Ignored if rbac.create is true
##
serviceAccountName: default
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
@@ -2,4 +2,9 @@ apiVersion: v1
 description: nfs-provisioner is an out-of-tree dynamic provisioner for Kubernetes.
 name: nfs-provisioner
 version: 0.1.2
-icon: https://wiki.amahi.org/images/a/a9/Nfs-logo.png
+icon: file://../nfs-logo.png
+appVersion: 1.0.8
+maintainers:
+- name: kiall
+  email: kiall@macinnes.ie
+home: https://github.com/kubernetes/charts/tree/master/stable/nfs-server-provisioner