Unverified commit 4dab21d8 by Denise, committed by GitHub

Merge pull request #205 from rancher/dev

updates for openebs, storageos and harness
parents 4f46758c 844b165d
apiVersion: v1
name: harness-delegate
version: 1.0.0
appVersion: 1.0.0
description: Deploys harness delegate
home: https://harness.io/
icon: https://harness.io/wp-content/uploads/2019/07/logo-black.png
keywords:
- harness.io
- harness delegate
maintainers:
- email: anshul@harness.io
name: anshul-harness
- email: puneet.saraswat@harness.io
name: puneetsar
# Harness Delegate
The [Harness Delegate](https://docs.harness.io/article/de9t8iiynt-harness-architecture) is a service that you run in your local network or VPC to connect Harness Manager with your artifact servers, infrastructure, collaboration providers, and verification providers.
## Introduction
This chart creates a [Harness Delegate](https://docs.harness.io/article/h9tkwmkrm7-delegate-installation) deployment on a [Kubernetes](http://kubernetes.io) cluster, using the [Helm](https://helm.sh) package manager.
## Installing the Chart
To add a Harness Helm repo named `harness`:
```console
$ helm repo add harness https://app.harness.io/storage/harness-download/harness-helm-charts/
```
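After adding the repo, you can refresh the local chart cache and confirm the chart is visible (standard Helm 2 commands; shown as a sketch):
```console
$ helm repo update
$ helm search harness-delegate
```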
The chart requires some account-specific information. You can download the account-specific `delegate-helm-values.yaml` file by going to Harness Manager > Setup > Harness Delegates > Download Delegate > Helm Values YAML.
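For reference, that file supplies the account-specific keys declared in this chart's `values.yaml` (shown later in this commit); a minimal sketch with placeholder values:
```yaml
# delegate-helm-values.yaml (sketch; use the real values downloaded from Harness Manager)
accountId: "<your-account-id>"
accountSecret: "<your-account-secret>"
accountIdShort: "<6-character-account-id>"
delegateName: harness-delegate
```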
To install the chart using the release name `my-release` and the `delegate-helm-values.yaml`:
```console
$ helm install --name my-release harness/harness-delegate -f delegate-helm-values.yaml
```
The above command deploys the Harness Delegate on the Kubernetes cluster.
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
$ helm del --purge my-release
```
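To confirm the release has been removed (Helm 2 lists releases matching a name filter):
```console
$ helm ls --all my-release
```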
## Useful commands
Get pod names:
```console
kubectl get pods -n harness-delegate
```
See startup logs:
```console
kubectl logs <pod-name> -n harness-delegate -f
```
Run a shell in a pod:
```console
kubectl exec <pod-name> -n harness-delegate -it -- bash
```
```
# Harness Delegate
The [Harness Delegate](https://docs.harness.io/article/de9t8iiynt-harness-architecture) is a service that you run in your local network or VPC to connect Harness Manager with your artifact servers, infrastructure, collaboration providers, and verification providers.
## Introduction
This chart creates a [Harness Delegate](https://docs.harness.io/article/h9tkwmkrm7-delegate-installation) deployment on a [Kubernetes](http://kubernetes.io) cluster, using the [Helm](https://helm.sh) package manager.
categories:
- CI/CD
labels:
io.rancher.certified: partner
questions:
- variable: accountId
default: ""
description: "Account ID to which the Harness Delegate will connect"
type: string
required: true
label: Account ID
group: "Account Details"
- variable: accountSecret
default: ""
type: string
required: true
label: Account Secret
group: "Account Details"
- variable: accountIdShort
default: ""
description: "6-character identifier for the account"
type: string
required: true
label: Short Account ID
group: "Account Details"
# Delegate Configuration
- variable: delegateName
default: "harness-delegate"
description: "Name of the Harness Delegate"
type: string
required: true
label: Delegate Name
group: "Delegate Configuration"
- variable: clusterWideRbacScope
default: true
description: "Role-based access control: Set to True for cluster-wide, or False for namespace-wide"
type: boolean
required: true
label: RBAC scope
group: "Delegate Configuration"
# Advanced Server Settings
- variable: advancedOptions
default: false
label: Show Advanced Server Configurations
type: boolean
show_subquestion_if: true
group: "Advanced Server Options"
subquestions:
- variable: managerHostAndPort
default: "https://app.harness.io"
description: "URL of the Harness server"
type: string
label: Harness Server
- variable: watcherStorageUrl
default: "https://app.harness.io/storage/wingswatchers"
type: string
label: Watcher Storage URL
- variable: watcherCheckLocation
default: "watcherprod.txt"
description: "Watcher file name"
type: string
label: Watcher File
- variable: delegateStorageUrl
default: "https://app.harness.io/storage/wingsdelegates"
type: string
label: Delegate Storage URL
- variable: delegateCheckLocation
default: "delegateprod.txt"
description: "Delegate file name"
type: string
label: Delegate File
- variable: delegateProfile
default: ""
description: "ID of the Delegate profile that must run when the Delegate launches"
type: string
label: Delegate Profile ID
- variable: helmDesiredVersion
default: ""
description: "Helm version to be installed in the Delegate"
type: string
label: Helm version
# Advanced Proxy Settings
- variable: advancedProxyOptions
default: false
label: Show Advanced Proxy Configurations
type: boolean
show_subquestion_if: true
group: "Advanced Proxy Options"
subquestions:
- variable: proxyManager
default: "true"
description: "Set to True if the Harness Delegate should go through the proxy"
type: boolean
label: Proxy Manager
- variable: pollForTasks
default: "false"
description: "Set to True if the proxy does not support WebSocket Protocol (wss)"
type: boolean
label: Poll For Tasks
- variable: proxyHost
default: ""
description: "URL of the proxy host"
type: string
label: Proxy Host
- variable: proxyPort
default: ""
type: string
label: Proxy Port
- variable: proxyUser
default: ""
type: string
label: Proxy Username
- variable: proxyPassword
default: ""
type: string
label: Proxy Password
- variable: proxyScheme
default: ""
description: "Select http or https"
type: enum
label: Proxy Scheme
options:
- ""
- "http"
- "https"
- variable: noProxy
default: ""
description: "Enter a comma-separated list of suffixes for which the proxy is not required (.example.com, <specifichost>, ...). Do not insert leading wildcards"
type: string
label: No Proxy Domains
apiVersion: v1
kind: ServiceAccount
metadata:
name: harness-admin-sa
---
{{- if .Values.clusterWideRbacScope}}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: {{ .Values.delegateName }}-cluster-admin
subjects:
- kind: ServiceAccount
name: harness-admin-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
{{- else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: {{ .Values.delegateName }}-admin
rules:
- apiGroups: ["", "extensions", "apps"]
resources: ["deployments", "replicasets", "pods", "configmaps", "secrets", "services"]
verbs: ["*"]
- apiGroups: ["", "extensions", "apps"]
resources: ["events", "namespaces", "resourcequotas", "limitranges"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: {{ .Values.delegateName }}-admin
subjects:
- kind: ServiceAccount
name: harness-admin-sa
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Values.delegateName }}-admin
{{- end}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.delegateName }}-configmap
data:
ACCOUNT_ID: {{ required "A valid .Values.accountId entry required!" .Values.accountId }}
ACCOUNT_SECRET: {{ required "A valid .Values.accountSecret entry required!" .Values.accountSecret }}
MANAGER_HOST_AND_PORT: {{ .Values.managerHostAndPort }}
WATCHER_STORAGE_URL: {{ .Values.watcherStorageUrl }}
WATCHER_CHECK_LOCATION: {{ .Values.watcherCheckLocation }}
DELEGATE_STORAGE_URL: {{ .Values.delegateStorageUrl }}
DELEGATE_CHECK_LOCATION: {{ .Values.delegateCheckLocation }}
DEPLOY_MODE: "KUBERNETES"
DELEGATE_NAME: {{ .Values.delegateName | quote }}
DELEGATE_PROFILE: {{ .Values.delegateProfile | quote }}
PROXY_HOST: {{ .Values.proxyHost | quote }}
PROXY_PORT: {{ .Values.proxyPort | quote }}
PROXY_SCHEME: {{ .Values.proxyScheme | quote }}
NO_PROXY: {{ .Values.noProxy | quote }}
PROXY_MANAGER: {{ .Values.proxyManager | quote }}
POLL_FOR_TASKS: {{ .Values.pollForTasks | quote }}
HELM_DESIRED_VERSION: {{ .Values.helmDesiredVersion | quote }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.delegateName }}-secret
type: Opaque
data:
# Enter base64 encoded username and password, if needed
PROXY_USER: {{ .Values.proxyUser | quote }}
PROXY_PASSWORD: {{ .Values.proxyPassword | quote }}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
harness.io/name: {{ .Values.delegateName }}
name: {{ .Values.delegateName }}-{{ required "A valid .Values.accountIdShort entry required!" .Values.accountIdShort }}
spec:
replicas: 1
selector:
matchLabels:
harness.io/name: {{ .Values.delegateName }}
serviceName: ""
template:
metadata:
labels:
harness.io/name: {{ .Values.delegateName }}
spec:
serviceAccountName: harness-admin-sa
containers:
- image: {{ .Values.image }}
imagePullPolicy: Always
name: harness-delegate-instance
resources:
{{ toYaml .Values.resources | indent 10 }}
envFrom:
- configMapRef:
name: {{ .Values.delegateName }}-configmap
- secretRef:
name: {{ .Values.delegateName }}-secret
restartPolicy: Always
# Default values for harness-delegate chart.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
# These are the required values that are needed for harness-delegate
# chart deployment. You can download these account specific values by
# going to Harness > Setup > Installations page
# Account Id to which the delegate will be connecting
accountId: ""
# Account Secret
accountSecret: ""
# Short 6 character identifier of the account
accountIdShort: ""
# These are the prepopulated default values that are needed for
# harness-delegate chart deployment
# Delegate name
delegateName: harness-delegate
# If the RBAC scope is at cluster or namespace level
clusterWideRbacScope: true
# For connecting to the Harness production SaaS environment, leave these
# values as they are. For on-prem installations they will be overridden;
# you can download them from the Harness > Setup > Installations page
managerHostAndPort: https://app.harness.io
watcherStorageUrl: https://app.harness.io/storage/wingswatchers
watcherCheckLocation: watcherprod.txt
delegateStorageUrl: https://app.harness.io/storage/wingsdelegates
delegateCheckLocation: delegateprod.txt
# Delegate image to be deployed
image: harness/delegate:latest
# Delegate resource limits
resources:
limits:
cpu: "1"
memory: "8Gi"
requests:
cpu: "500m"
memory: "6Gi"
# If the harness delegate should go through proxy then set it to true
proxyManager: "true"
# If the proxy doesn't support web socket (wss) protocol then set it
# to true
pollForTasks: "false"
# Proxy settings if the delegate will be running behind proxy
proxyHost: ""
proxyPort: ""
proxyUser: ""
proxyPassword: ""
# Allowed values are http or https
proxyScheme: ""
# Enter a comma separated list of suffixes for which proxy is not
# required. Do not use leading wildcards (.company.com,specifichost)
# (optional):
noProxy: ""
# Id of the delegate profile that needs to run when the delegate is
# coming up
delegateProfile: ""
# Helm version to be installed in delegate
helmDesiredVersion: ""
apiVersion: v1
-version: 1.0.0
+version: 1.1.0
name: openebs
-appVersion: 1.0.0
+appVersion: 1.1.0
description: Containerized Storage for Containers
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/openebs/icon/color/openebs-icon-color.png
home: http://www.openebs.io/
@@ -40,49 +40,52 @@ The following table lists the configurable parameters of the OpenEBS chart and t
| `rbac.create` | Enable RBAC Resources | `true` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `apiserver.image` | Image for API Server | `quay.io/openebs/m-apiserver` |
-| `apiserver.imageTag` | Image Tag for API Server | `1.0.0` |
+| `apiserver.imageTag` | Image Tag for API Server | `1.1.0` |
| `apiserver.replicas` | Number of API Server Replicas | `1` |
| `apiserver.sparse.enabled` | Create Sparse Pool based on Sparsefile | `false` |
| `provisioner.image` | Image for Provisioner | `quay.io/openebs/openebs-k8s-provisioner` |
-| `provisioner.imageTag` | Image Tag for Provisioner | `1.0.0` |
+| `provisioner.imageTag` | Image Tag for Provisioner | `1.1.0` |
| `provisioner.replicas` | Number of Provisioner Replicas | `1` |
| `localProvisioner.image` | Image for localProvisioner | `quay.io/openebs/provisioner-localpv` |
-| `localProvisioner.imageTag` | Image Tag for localProvisioner | `1.0.0` |
+| `localProvisioner.imageTag` | Image Tag for localProvisioner | `1.1.0` |
| `localProvisioner.replicas` | Number of localProvisioner Replicas | `1` |
| `localProvisioner.basePath` | BasePath for hostPath volumes on Nodes | `/var/openebs/local` |
| `webhook.image` | Image for admission server | `quay.io/openebs/admission-server` |
-| `webhook.imageTag` | Image Tag for admission server | `1.0.0` |
+| `webhook.imageTag` | Image Tag for admission server | `1.1.0` |
| `webhook.replicas` | Number of admission server Replicas | `1` |
| `snapshotOperator.provisioner.image` | Image for Snapshot Provisioner | `quay.io/openebs/snapshot-provisioner` |
-| `snapshotOperator.provisioner.imageTag` | Image Tag for Snapshot Provisioner | `1.0.0` |
+| `snapshotOperator.provisioner.imageTag` | Image Tag for Snapshot Provisioner | `1.1.0` |
| `snapshotOperator.controller.image` | Image for Snapshot Controller | `quay.io/openebs/snapshot-controller` |
-| `snapshotOperator.controller.imageTag` | Image Tag for Snapshot Controller | `1.0.0` |
+| `snapshotOperator.controller.imageTag` | Image Tag for Snapshot Controller | `1.1.0` |
| `snapshotOperator.replicas` | Number of Snapshot Operator Replicas | `1` |
| `ndm.image` | Image for Node Disk Manager | `quay.io/openebs/node-disk-manager-amd64` |
-| `ndm.imageTag` | Image Tag for Node Disk Manager | `v0.4.0` |
+| `ndm.imageTag` | Image Tag for Node Disk Manager | `v0.4.1` |
| `ndm.sparse.path` | Directory where Sparse files are created | `/var/openebs/sparse` |
| `ndm.sparse.size` | Size of the sparse file in bytes | `10737418240` |
| `ndm.sparse.count` | Number of sparse files to be created | `1` |
| `ndm.filters.excludeVendors` | Exclude devices with specified vendor | `CLOUDBYT,OpenEBS` |
-| `ndm.filters.excludePaths` | Exclude devices with specified path patterns | `loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md` |
| `ndm.filters.includePaths` | Include devices with specified path patterns | `""` |
+| `ndm.filters.excludePaths` | Exclude devices with specified path patterns | `loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md` |
+| `ndm.probes.enableSeachest` | Enable Seachest probe for NDM | `false` |
| `ndmOperator.image` | Image for NDM Operator | `quay.io/openebs/node-disk-operator-amd64`|
-| `ndmOperator.imageTag` | Image Tag for NDM Operator | `v0.4.0` |
+| `ndmOperator.imageTag` | Image Tag for NDM Operator | `v0.4.1` |
| `jiva.image` | Image for Jiva | `quay.io/openebs/jiva` |
-| `jiva.imageTag` | Image Tag for Jiva | `1.0.0` |
+| `jiva.imageTag` | Image Tag for Jiva | `1.1.0` |
| `jiva.replicas` | Number of Jiva Replicas | `3` |
| `cstor.pool.image` | Image for cStor Pool | `quay.io/openebs/cstor-pool` |
-| `cstor.pool.imageTag` | Image Tag for cStor Pool | `1.0.0` |
+| `cstor.pool.imageTag` | Image Tag for cStor Pool | `1.1.0` |
| `cstor.poolMgmt.image` | Image for cStor Pool Management | `quay.io/openebs/cstor-pool-mgmt` |
-| `cstor.poolMgmt.imageTag` | Image Tag for cStor Pool Management | `1.0.0` |
+| `cstor.poolMgmt.imageTag` | Image Tag for cStor Pool Management | `1.1.0` |
| `cstor.target.image` | Image for cStor Target | `quay.io/openebs/cstor-istgt` |
-| `cstor.target.imageTag` | Image Tag for cStor Target | `1.0.0` |
+| `cstor.target.imageTag` | Image Tag for cStor Target | `1.1.0` |
| `cstor.volumeMgmt.image` | Image for cStor Volume Management | `quay.io/openebs/cstor-volume-mgmt` |
-| `cstor.volumeMgmt.imageTag` | Image Tag for cStor Volume Management | `1.0.0` |
+| `cstor.volumeMgmt.imageTag` | Image Tag for cStor Volume Management | `1.1.0` |
| `policies.monitoring.image` | Image for Prometheus Exporter | `quay.io/openebs/m-exporter` |
-| `policies.monitoring.imageTag` | Image Tag for Prometheus Exporter | `1.0.0` |
+| `policies.monitoring.imageTag` | Image Tag for Prometheus Exporter | `1.1.0` |
| `analytics.enabled` | Enable sending stats to Google Analytics | `true` |
| `analytics.pingInterval` | Duration(hours) between sending ping stat | `24h` |
+| `defaultStorageConfig.enabled` | Enable default storage class installation | `true` |
| `HealthCheck.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `HealthCheck.periodSeconds` | How often to perform the liveness probe | `60` |
@@ -18,7 +18,7 @@ questions:
type: string
label: API Server Image
- variable: apiserver.imageTag
-default: "1.0.0"
+default: "1.1.0"
description: "The image tag of API Server image"
type: string
label: Image Tag For OpenEBS API Server Image
@@ -28,7 +28,7 @@ questions:
type: string
label: Provisioner Image
- variable: provisioner.imageTag
-default: "1.0.0"
+default: "1.1.0"
description: "The image tag of Provisioner image"
type: string
label: Image Tag For Provisioner Image
@@ -38,7 +38,7 @@ questions:
type: string
label: Snapshot Controller Image
- variable: snapshotOperator.controller.imageTag
-default: "1.0.0"
+default: "1.1.0"
description: "The image tag of Snapshot Controller image"
type: string
label: Image Tag For OpenEBS Snapshot Controller Image
@@ -48,7 +48,7 @@ questions:
type: string
label: Snapshot Provisioner Image
- variable: snapshotOperator.provisioner.imageTag
-default: "1.0.0"
+default: "1.1.0"
description: "The image tag of Snapshot Provisioner image"
type: string
label: Image Tag For OpenEBS Snapshot Provisioner Image
@@ -58,7 +58,7 @@ questions:
type: string
label: Node Disk Manager Image
- variable: ndm.imageTag
-default: "v0.4.0"
+default: "v0.4.1"
description: "The image tag of NDM image"
type: string
label: Image Tag For Node Disk Manager Image
@@ -68,7 +68,7 @@ questions:
type: string
label: Node Disk Operator Image
- variable: ndo.imageTag
-default: "v0.4.0"
+default: "v0.4.1"
description: "The image tag of NDO image"
type: string
label: Image Tag For Node Disk Manager Image
@@ -78,7 +78,7 @@ questions:
type: string
label: Jiva Storage Engine Image
- variable: jiva.imageTag
-default: "1.0.0"
+default: "1.1.0"
description: "The image tag of Jiva image"
type: string
label: Image Tag For OpenEBS Jiva Storage Engine Image
@@ -88,7 +88,7 @@ questions:
type: string
label: cStor Storage Engine Pool Image
- variable: cstor.pool.imageTag
-default: "1.0.0"
+default: "1.1.0"
description: "The image tag of cStor Storage Engine Pool image"
type: string
label: Image Tag For OpenEBS cStor Storage Engine Pool Image
@@ -98,7 +98,7 @@ questions:
type: string
label: cStor Storage Engine Pool Management Image
- variable: cstor.poolMgmt.imageTag
-default: "1.0.0"
+default: "1.1.0"
description: "The image tag of cStor Storage Engine Pool Management image"
type: string
label: Image Tag For OpenEBS cStor Storage Engine Pool Management Image
@@ -108,7 +108,7 @@ questions:
type: string
label: cStor Storage Engine Target Image
- variable: cstor.target.imageTag
-default: "1.0.0"
+default: "1.1.0"
description: "The image tag of cStor Storage Engine Target image"
type: string
label: Image Tag For OpenEBS cStor Storage Engine Target Image
@@ -118,7 +118,7 @@ questions:
type: string
label: cStor Storage Engine Target Management Image
- variable: cstor.volumeMgmt.imageTag
-default: "1.0.0"
+default: "1.1.0"
description: "The image tag of cStor Storage Engine Target Management image"
type: string
label: Image Tag For OpenEBS cStor Storage Engine Target Management Image
@@ -129,7 +129,7 @@ questions:
label: Monitoring Exporter Image
show_if: "policies.monitoring.enabled=true&&defaultImage=false"
- variable: policies.monitoring.imageTag
-default: "1.0.0"
+default: "1.1.0"
description: "The image tag of OpenEBS Exporter"
type: string
label: Image Tag For OpenEBS Exporter Image
@@ -23,7 +23,7 @@ rules:
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
-verbs: [ "get", "list", "create", "update", "delete"]
+verbs: [ "get", "list", "create", "update", "delete", "patch"]
- apiGroups: ["*"]
resources: [ "disks", "blockdevices", "blockdeviceclaims"]
verbs: ["*" ]
@@ -34,11 +34,14 @@ rules:
resources: [ "castemplates", "runtasks"]
verbs: ["*" ]
- apiGroups: ["*"]
-resources: [ "cstorpools", "cstorpools/finalizers", "cstorvolumereplicas", "cstorvolumes"]
+resources: [ "cstorpools", "cstorpools/finalizers", "cstorvolumereplicas", "cstorvolumes", "cstorvolumeclaims"]
verbs: ["*" ]
- apiGroups: ["*"]
resources: [ "cstorbackups", "cstorrestores", "cstorcompletedbackups"]
verbs: ["*" ]
+- apiGroups: ["*"]
+resources: [ "upgradetasks"]
+verbs: ["*" ]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
{{- end }}
@@ -10,6 +10,7 @@ metadata:
component: apiserver
name: maya-apiserver
openebs.io/component-name: maya-apiserver
+openebs.io/version: {{ .Values.release.version }}
spec:
replicas: {{ .Values.apiserver.replicas }}
selector:
@@ -51,6 +52,10 @@ spec:
value: "{{ .Values.apiserver.sparse.enabled }}"
- name: OPENEBS_IO_CSTOR_POOL_SPARSE_DIR
value: "{{ .Values.ndm.sparse.path }}"
+- name: OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG
+value: "{{ .Values.defaultStorageConfig.enabled }}"
+- name: OPENEBS_IO_CSTOR_TARGET_DIR
+value: "{{ .Values.ndm.sparse.path }}"
# OPENEBS_NAMESPACE provides the namespace of this deployment as an
# environment variable
- name: OPENEBS_NAMESPACE
@@ -14,8 +14,11 @@ metadata:
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: admission-webhook
+openebs.io/component-name: admission-webhook
webhooks:
+# failurePolicy Fail means that an error calling the webhook causes the admission to fail.
- name: admission-webhook.openebs.io
+failurePolicy: Fail
clientConfig:
service:
name: admission-server-svc
@@ -42,6 +45,7 @@ metadata:
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
+openebs.io/component-name: admission-webhook
type: Opaque
data:
{{- if .Values.webhook.generateTLS }}
@@ -12,14 +12,14 @@ serviceAccount:
release:
# "openebs.io/version" label for control plane components
-version: "1.0.0"
+version: "1.1.0"
image:
pullPolicy: IfNotPresent
apiserver:
image: "quay.io/openebs/m-apiserver"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
replicas: 1
ports:
externalPort: 5656
@@ -33,9 +33,12 @@ apiserver:
initialDelaySeconds: 30
periodSeconds: 60
+defaultStorageConfig:
+enabled: "true"
provisioner:
image: "quay.io/openebs/openebs-k8s-provisioner"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
replicas: 1
nodeSelector: {}
tolerations: []
@@ -46,7 +49,7 @@ provisioner:
localprovisioner:
image: "quay.io/openebs/provisioner-localpv"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
helperImage: "quay.io/openebs/openebs-tools"
helperImageTag: "3.8"
replicas: 1
@@ -61,10 +64,10 @@ localprovisioner:
snapshotOperator:
controller:
image: "quay.io/openebs/snapshot-controller"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
provisioner:
image: "quay.io/openebs/snapshot-provisioner"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
replicas: 1
upgradeStrategy: "Recreate"
nodeSelector: {}
@@ -76,7 +79,7 @@ snapshotOperator:
ndm:
image: "quay.io/openebs/node-disk-manager-amd64"
-imageTag: "v0.4.0"
+imageTag: "v0.4.1"
sparse:
path: "/var/openebs/sparse"
size: "10737418240"
@@ -85,6 +88,8 @@ ndm:
excludeVendors: "CLOUDBYT,OpenEBS"
includePaths: ""
excludePaths: "loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md"
+probes:
+enableSeachest: false
nodeSelector: {}
healthCheck:
initialDelaySeconds: 30
@@ -92,7 +97,7 @@ ndm:
ndmOperator:
image: "quay.io/openebs/node-disk-operator-amd64"
-imageTag: "v0.4.0"
+imageTag: "v0.4.1"
replicas: 1
upgradeStrategy: Recreate
nodeSelector: {}
@@ -106,7 +111,7 @@ ndmOperator:
webhook:
image: "quay.io/openebs/admission-server"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
generateTLS: true
replicas: 1
nodeSelector: {}
@@ -115,28 +120,28 @@ webhook:
jiva:
image: "quay.io/openebs/jiva"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
replicas: 3
cstor:
pool:
image: "quay.io/openebs/cstor-pool"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
poolMgmt:
image: "quay.io/openebs/cstor-pool-mgmt"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
target:
image: "quay.io/openebs/cstor-istgt"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
volumeMgmt:
image: "quay.io/openebs/cstor-volume-mgmt"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
policies:
monitoring:
enabled: true
image: "quay.io/openebs/m-exporter"
-imageTag: "1.0.0"
+imageTag: "1.1.0"
analytics:
enabled: true
apiVersion: v1
version: 1.0.0
name: openebs
appVersion: 1.0.0
description: Containerized Storage for Containers
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/openebs/icon/color/openebs-icon-color.png
home: http://www.openebs.io/
keywords:
- cloud-native-storage
- block-storage
- iSCSI
- storage
sources:
- https://github.com/openebs/openebs
maintainers:
- name: kmova
email: kiran.mova@openebs.io
- name: prateekpandey14
email: prateek.pandey@openebs.io
OpenEBS
=======
[OpenEBS](https://github.com/openebs/openebs) is an open source storage platform that provides persistent and containerized block storage for DevOps and container environments.
OpenEBS can be deployed on any Kubernetes cluster - in the cloud, on-premises, or on a developer laptop (minikube). OpenEBS itself is deployed as just another container on your cluster, and enables storage services that can be designated on a per-pod, application, cluster or container level.
Introduction
------------
This chart bootstraps OpenEBS deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.10+ with RBAC enabled
- iSCSI PV support in the underlying infrastructure
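Because volumes are attached over iSCSI, each node needs an iSCSI initiator before workloads can mount OpenEBS volumes. The exact packages vary by distribution; on Ubuntu, for example, this is typically:
```
sudo apt-get update
sudo apt-get install open-iscsi
sudo systemctl enable --now iscsid
```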
## Installing OpenEBS
```
helm install --namespace openebs stable/openebs
```
## Installing OpenEBS with the release name `my-release`:
```
helm install --name my-release --namespace openebs stable/openebs
```
## To uninstall/delete the `my-release` deployment:
```
helm ls --all
helm delete my-release
```
## Configuration
The following table lists the configurable parameters of the OpenEBS chart and their default values.
| Parameter | Description | Default |
| ----------------------------------------| --------------------------------------------- | ----------------------------------------- |
| `rbac.create` | Enable RBAC Resources | `true` |
| `image.pullPolicy` | Container pull policy | `IfNotPresent` |
| `apiserver.image` | Image for API Server | `quay.io/openebs/m-apiserver` |
| `apiserver.imageTag` | Image Tag for API Server | `1.0.0` |
| `apiserver.replicas` | Number of API Server Replicas | `1` |
| `apiserver.sparse.enabled` | Create Sparse Pool based on Sparsefile | `false` |
| `provisioner.image` | Image for Provisioner | `quay.io/openebs/openebs-k8s-provisioner` |
| `provisioner.imageTag` | Image Tag for Provisioner | `1.0.0` |
| `provisioner.replicas` | Number of Provisioner Replicas | `1` |
| `localProvisioner.image` | Image for localProvisioner | `quay.io/openebs/provisioner-localpv` |
| `localProvisioner.imageTag` | Image Tag for localProvisioner | `1.0.0` |
| `localProvisioner.replicas` | Number of localProvisioner Replicas | `1` |
| `localProvisioner.basePath` | BasePath for hostPath volumes on Nodes | `/var/openebs/local` |
| `webhook.image` | Image for admission server | `quay.io/openebs/admission-server` |
| `webhook.imageTag` | Image Tag for admission server | `1.0.0` |
| `webhook.replicas` | Number of admission server Replicas | `1` |
| `snapshotOperator.provisioner.image` | Image for Snapshot Provisioner | `quay.io/openebs/snapshot-provisioner` |
| `snapshotOperator.provisioner.imageTag` | Image Tag for Snapshot Provisioner | `1.0.0` |
| `snapshotOperator.controller.image` | Image for Snapshot Controller | `quay.io/openebs/snapshot-controller` |
| `snapshotOperator.controller.imageTag` | Image Tag for Snapshot Controller | `1.0.0` |
| `snapshotOperator.replicas` | Number of Snapshot Operator Replicas | `1` |
| `ndm.image` | Image for Node Disk Manager | `quay.io/openebs/node-disk-manager-amd64` |
| `ndm.imageTag` | Image Tag for Node Disk Manager | `v0.4.0` |
| `ndm.sparse.path` | Directory where Sparse files are created | `/var/openebs/sparse` |
| `ndm.sparse.size` | Size of the sparse file in bytes | `10737418240` |
| `ndm.sparse.count` | Number of sparse files to be created | `1` |
| `ndm.filters.excludeVendors` | Exclude devices with specified vendor | `CLOUDBYT,OpenEBS` |
| `ndm.filters.excludePaths` | Exclude devices with specified path patterns | `loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md` |
| `ndm.filters.includePaths` | Include devices with specified path patterns | `""` |
| `ndmOperator.image` | Image for NDM Operator | `quay.io/openebs/node-disk-operator-amd64`|
| `ndmOperator.imageTag` | Image Tag for NDM Operator | `v0.4.0` |
| `jiva.image` | Image for Jiva | `quay.io/openebs/jiva` |
| `jiva.imageTag` | Image Tag for Jiva | `1.0.0` |
| `jiva.replicas` | Number of Jiva Replicas | `3` |
| `cstor.pool.image` | Image for cStor Pool | `quay.io/openebs/cstor-pool` |
| `cstor.pool.imageTag` | Image Tag for cStor Pool | `1.0.0` |
| `cstor.poolMgmt.image` | Image for cStor Pool Management | `quay.io/openebs/cstor-pool-mgmt` |
| `cstor.poolMgmt.imageTag` | Image Tag for cStor Pool Management | `1.0.0` |
| `cstor.target.image` | Image for cStor Target | `quay.io/openebs/cstor-istgt` |
| `cstor.target.imageTag` | Image Tag for cStor Target | `1.0.0` |
| `cstor.volumeMgmt.image` | Image for cStor Volume Management | `quay.io/openebs/cstor-volume-mgmt` |
| `cstor.volumeMgmt.imageTag` | Image Tag for cStor Volume Management | `1.0.0` |
| `policies.monitoring.image` | Image for Prometheus Exporter | `quay.io/openebs/m-exporter` |
| `policies.monitoring.imageTag` | Image Tag for Prometheus Exporter | `1.0.0` |
| `analytics.enabled` | Enable sending stats to Google Analytics | `true` |
| `analytics.pingInterval` | Duration(hours) between sending ping stat | `24h` |
| `HealthCheck.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `HealthCheck.periodSeconds` | How often to perform the liveness probe | `60` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
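For example, to install with anonymous analytics disabled and three Jiva replicas (both parameters are from the table above; the values are illustrative):
```shell
helm install --name my-release --namespace openebs \
  --set analytics.enabled=false,jiva.replicas=3 stable/openebs
```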
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```shell
helm install --name my-release -f values.yaml stable/openebs
```
> **Tip**: You can use the default [values.yaml](values.yaml)
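To start from the chart defaults, you can dump them to a local file and edit it before installing (standard Helm 2 command):
```shell
helm inspect values stable/openebs > values.yaml
```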
# OpenEBS
OpenEBS is an open source storage platform that provides persistent, container-attached, cloud-native block storage for DevOps and Kubernetes environments.
OpenEBS allows you to treat your persistent workload containers, such as DBs on containers, just like other containers. OpenEBS itself is deployed as just another container on your host and enables storage services that can be designated on a per pod, application, cluster or container level, including:
- Data persistence across nodes, dramatically reducing time spent rebuilding Cassandra rings for example.
- Synchronization of data across availability zones and cloud providers.
- Use of commodity hardware plus a container engine to deliver so-called container-attached block storage.
- Integration with Kubernetes, so developer and application intent flows into OpenEBS configurations automatically.
- Management of tiering to and from S3 and other targets.
categories:
- storage
namespace: openebs
labels:
io.rancher.certified: partner
questions:
- variable: defaultImage
default: "true"
description: "Use default OpenEBS images"
label: Use Default Image
type: boolean
show_subquestion_if: false
group: "Container Images"
subquestions:
- variable: apiserver.image
default: "quay.io/openebs/m-apiserver"
description: "Default API Server image for OpenEBS"
type: string
label: API Server Image
- variable: apiserver.imageTag
default: "1.0.0"
description: "The image tag of API Server image"
type: string
label: Image Tag For OpenEBS API Server Image
- variable: provisioner.image
default: "quay.io/openebs/openebs-k8s-provisioner"
description: "Default K8s Provisioner image for OpenEBS"
type: string
label: Provisioner Image
- variable: provisioner.imageTag
default: "1.0.0"
description: "The image tag of Provisioner image"
type: string
label: Image Tag For Provisioner Image
- variable: snapshotOperator.controller.image
default: "quay.io/openebs/snapshot-controller"
description: "Default Snapshot Controller image for OpenEBS"
type: string
label: Snapshot Controller Image
- variable: snapshotOperator.controller.imageTag
default: "1.0.0"
description: "The image tag of Snapshot Controller image"
type: string
label: Image Tag For OpenEBS Snapshot Controller Image
- variable: snapshotOperator.provisioner.image
default: "quay.io/openebs/snapshot-provisioner"
description: "Default Snapshot Provisioner image for OpenEBS"
type: string
label: Snapshot Provisioner Image
- variable: snapshotOperator.provisioner.imageTag
default: "1.0.0"
description: "The image tag of Snapshot Provisioner image"
type: string
label: Image Tag For OpenEBS Snapshot Provisioner Image
- variable: ndm.image
default: "quay.io/openebs/node-disk-manager-amd64"
description: "Default NDM image"
type: string
label: Node Disk Manager Image
- variable: ndm.imageTag
default: "v0.4.0"
description: "The image tag of NDM image"
type: string
label: Image Tag For Node Disk Manager Image
- variable: ndo.image
default: "quay.io/openebs/node-disk-operator-amd64"
description: "Default NDO image"
type: string
label: Node Disk Operator Image
- variable: ndo.imageTag
default: "v0.4.0"
description: "The image tag of NDO image"
type: string
label: Image Tag For Node Disk Manager Image
- variable: jiva.image
default: "quay.io/openebs/jiva"
description: "Default Jiva Storage Engine image for OpenEBS"
type: string
label: Jiva Storage Engine Image
- variable: jiva.imageTag
default: "1.0.0"
description: "The image tag of Jiva image"
type: string
label: Image Tag For OpenEBS Jiva Storage Engine Image
- variable: cstor.pool.image
default: "quay.io/openebs/cstor-pool"
description: "Default cStor Storage Engine Pool image for OpenEBS"
type: string
label: cStor Storage Engine Pool Image
- variable: cstor.pool.imageTag
default: "1.0.0"
description: "The image tag of cStor Storage Engine Pool image"
type: string
label: Image Tag For OpenEBS cStor Storage Engine Pool Image
- variable: cstor.poolMgmt.image
default: "quay.io/openebs/cstor-pool-mgmt"
description: "Default cStor Storage Engine Pool Management image for OpenEBS"
type: string
label: cStor Storage Engine Pool Management Image
- variable: cstor.poolMgmt.imageTag
default: "1.0.0"
description: "The image tag of cStor Storage Engine Pool Management image"
type: string
label: Image Tag For OpenEBS cStor Storage Engine Pool Management Image
- variable: cstor.target.image
default: "quay.io/openebs/cstor-istgt"
description: "Default cStor Storage Engine Target image for OpenEBS"
type: string
label: cStor Storage Engine Target Image
- variable: cstor.target.imageTag
default: "1.0.0"
description: "The image tag of cStor Storage Engine Target image"
type: string
label: Image Tag For OpenEBS cStor Storage Engine Target Image
- variable: cstor.volumeMgmt.image
default: "quay.io/openebs/cstor-volume-mgmt"
description: "Default cStor Storage Engine Target Management image for OpenEBS"
type: string
label: cStor Storage Engine Target Management Image
- variable: cstor.volumeMgmt.imageTag
default: "1.0.0"
description: "The image tag of cStor Storage Engine Target Management image"
type: string
label: Image Tag For OpenEBS cStor Storage Engine Target Management Image
- variable: policies.monitoring.image
default: "quay.io/openebs/m-exporter"
description: "Default OpeneEBS Volume and pool Exporter image"
type: string
label: Monitoring Exporter Image
show_if: "policies.monitoring.enabled=true&&defaultImage=false"
- variable: policies.monitoring.imageTag
default: "1.0.0"
description: "The image tag of OpenEBS Exporter"
type: string
label: Image Tag For OpenEBS Exporter Image
show_if: "policies.monitoring.enabled=true&&defaultImage=false"
- variable: ndm.filters.excludeVendors
default: 'CLOUDBYT\,OpenEBS'
type: string
description: "Configure NDM to filter disks from following vendors"
label: Filter Disks belonging to vendors
group: "NDM Disk Filter by Vendor "
- variable: ndm.filters.excludePaths
default: 'loop\,fd0\,sr0\,/dev/ram\,/dev/dm-\,/dev/md'
type: string
description: "Configure NDM to filter disks from following paths"
label: Filter Disks belonging to paths
group: "NDM Disk Filter by Path"
- variable: ndm.sparse.enabled
default: "true"
description: "Create a cStor Pool on Sparse Disks"
label: Create cStor Pool on Sparse Disks
type: boolean
show_subquestion_if: true
group: "NDM Sparse Disk Settings"
subquestions:
- variable: ndm.sparse.size
default: "10737418240"
description: "Default Size of Sparse Disk"
type: string
label: Sparse Disk Size in bytes
- variable: ndm.sparse.count
default: "1"
description: "Number of Sparse Disks"
type: string
label: Number of Sparse Disks
- variable: ndm.sparse.path
default: "/var/openebs/sparse"
description: "Directory where Sparse Disks should be created"
type: string
label: Directory for Sparse Disks
- variable: defaultPorts
default: "true"
description: "Use default Communication Ports"
label: Use Default Ports
type: boolean
show_subquestion_if: false
group: "Communication Ports"
subquestions:
- variable: apiserver.ports.externalPort
default: 5656
description: "Default External Port for OpenEBS API Server"
type: int
min: 0
max: 9999
label: OpenEBS API Server External Port
- variable: apiserver.ports.internalPort
default: 5656
description: "Default Internal Port for OpenEBS API Server"
type: int
min: 0
max: 9999
label: OpenEBS API Server Internal Port
- variable: policies.monitoring.enabled
default: true
description: "Enable prometheus monitoring"
type: boolean
label: Enable Prometheus Monitoring
group: "Monitoring Settings"
- variable: analytics.enabled
default: true
description: "Enable sending anonymous statistics to OpenEBS Google Analytics"
type: boolean
label: Enable updating OpenEBS with usage details
group: "Anonymous Analytics"
OpenEBS has been installed. Check its status by running:
$ kubectl get pods -n {{ .Release.Namespace }}
For dynamically creating OpenEBS Volumes, you can either create a new StorageClass or
use one of the default storage classes provided by OpenEBS.
Use `kubectl get sc` to see the list of installed OpenEBS StorageClasses. A sample
PVC spec using the `openebs-jiva-default` StorageClass is given below:
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: demo-vol-claim
spec:
storageClassName: openebs-jiva-default
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5G
---
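To try the sample claim above, save it to a file (the file name below is illustrative) and apply it:

$ kubectl apply -f demo-vol-claim.yaml
$ kubectl get pvc demo-vol-claim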
For more information, please visit http://docs.openebs.io/.
Please note that OpenEBS uses iSCSI to connect applications to the
OpenEBS Volumes, so your nodes should have the iSCSI initiator installed.
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "openebs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "openebs.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "openebs.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "openebs.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "openebs.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "openebs.fullname" . }}
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups: ["*"]
resources: ["nodes", "nodes/proxy"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["namespaces", "services", "pods", "deployments", "events", "endpoints", "configmaps", "jobs"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"]
verbs: ["*"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
resources: ["volumesnapshots", "volumesnapshotdatas"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: [ "get", "list", "create", "update", "delete"]
- apiGroups: ["*"]
resources: [ "disks", "blockdevices", "blockdeviceclaims"]
verbs: ["*" ]
- apiGroups: ["*"]
resources: [ "storagepoolclaims", "storagepoolclaims/finalizers","storagepools"]
verbs: ["*" ]
- apiGroups: ["*"]
resources: [ "castemplates", "runtasks"]
verbs: ["*" ]
- apiGroups: ["*"]
resources: [ "cstorpools", "cstorpools/finalizers", "cstorvolumereplicas", "cstorvolumes"]
verbs: ["*" ]
- apiGroups: ["*"]
resources: [ "cstorbackups", "cstorrestores", "cstorcompletedbackups"]
verbs: ["*" ]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
{{- end }}
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "openebs.fullname" . }}
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "openebs.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "openebs.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}
# This is the node-disk-manager related config.
# It can be used to customize the disks probes and filters
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "openebs.fullname" . }}-ndm-config
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: ndm-config
data:
# udev-probe is the default or primary probe, which should be enabled for ndm to run
# filterconfigs contains the configs of filters, in the form of include
# and exclude comma-separated strings
node-disk-manager.config: |
probeconfigs:
- key: udev-probe
name: udev probe
state: true
- key: seachest-probe
name: seachest probe
state: true
- key: smart-probe
name: smart probe
state: true
filterconfigs:
- key: os-disk-exclude-filter
name: os disk exclude filter
state: true
exclude: "/,/etc/hosts,/boot"
- key: vendor-filter
name: vendor filter
state: true
include: ""
exclude: "{{ .Values.ndm.filters.excludeVendors }}"
- key: path-filter
name: path filter
state: true
include: "{{ .Values.ndm.filters.includePaths }}"
exclude: "{{ .Values.ndm.filters.excludePaths }}"
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: {{ template "openebs.fullname" . }}-ndm
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: ndm
spec:
updateStrategy:
type: "RollingUpdate"
selector:
matchLabels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
component: ndm
template:
metadata:
labels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
component: ndm
openebs.io/component-name: ndm
name: openebs-ndm
openebs.io/version: {{ .Values.release.version }}
spec:
serviceAccountName: {{ template "openebs.serviceAccountName" . }}
hostNetwork: true
containers:
- name: {{ template "openebs.name" . }}-ndm
image: "{{ .Values.ndm.image }}:{{ .Values.ndm.imageTag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
privileged: true
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# pass hostname as env variable using downward API to the NDM container
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- if .Values.ndm.sparse }}
{{- if .Values.ndm.sparse.path }}
# specify the directory where the sparse files need to be created.
# if not specified, then sparse files will not be created.
- name: SPARSE_FILE_DIR
value: "{{ .Values.ndm.sparse.path }}"
{{- end }}
{{- if .Values.ndm.sparse.size }}
# Size(bytes) of the sparse file to be created.
- name: SPARSE_FILE_SIZE
value: "{{ .Values.ndm.sparse.size }}"
{{- end }}
{{- if .Values.ndm.sparse.count }}
# Specify the number of sparse files to be created
- name: SPARSE_FILE_COUNT
value: "{{ .Values.ndm.sparse.count }}"
{{- end }}
{{- end }}
livenessProbe:
exec:
command:
- pgrep
- ".*ndm"
initialDelaySeconds: {{ .Values.ndm.healthCheck.initialDelaySeconds }}
periodSeconds: {{ .Values.ndm.healthCheck.periodSeconds }}
volumeMounts:
- name: config
mountPath: /host/node-disk-manager.config
subPath: node-disk-manager.config
readOnly: true
- name: udev
mountPath: /run/udev
- name: procmount
mountPath: /host/proc
readOnly: true
{{- if .Values.ndm.sparse }}
{{- if .Values.ndm.sparse.path }}
- name: sparsepath
mountPath: {{ .Values.ndm.sparse.path }}
{{- end }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "openebs.fullname" . }}-ndm-config
- name: udev
hostPath:
path: /run/udev
type: Directory
# mount /proc (to access mount file of process 1 of host) inside container
# to read mount-point of disks and partitions
- name: procmount
hostPath:
path: /proc
type: Directory
{{- if .Values.ndm.sparse }}
{{- if .Values.ndm.sparse.path }}
- name: sparsepath
hostPath:
path: {{ .Values.ndm.sparse.path }}
{{- end }}
{{- end }}
# By default the node-disk-manager will be run on all kubernetes nodes
# If you would like to limit this to only some nodes, say the nodes
# that have storage attached, you could label those node and use
# nodeSelector.
#
# e.g. label the storage nodes with - "openebs.io/nodegroup"="storage-node"
# kubectl label node <node-name> "openebs.io/nodegroup"="storage-node"
#nodeSelector:
# "openebs.io/nodegroup": "storage-node"
{{- if .Values.ndm.nodeSelector }}
nodeSelector:
{{ toYaml .Values.ndm.nodeSelector | indent 8 }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "openebs.fullname" . }}-admission-server
labels:
app: admission-webhook
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: admission-webhook
spec:
replicas: {{ .Values.webhook.replicas }}
selector:
matchLabels:
app: admission-webhook
template:
metadata:
labels:
app: admission-webhook
name: admission-webhook
openebs.io/version: {{ .Values.release.version }}
openebs.io/component-name: admission-webhook
spec:
{{- if .Values.webhook.nodeSelector }}
nodeSelector:
{{ toYaml .Values.webhook.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.webhook.tolerations }}
tolerations:
{{ toYaml .Values.webhook.tolerations | indent 8 }}
{{- end }}
{{- if .Values.webhook.affinity }}
affinity:
{{ toYaml .Values.webhook.affinity | indent 8 }}
{{- end }}
serviceAccountName: {{ template "openebs.serviceAccountName" . }}
containers:
- name: admission-webhook
image: "{{ .Values.webhook.image }}:{{ .Values.webhook.imageTag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- -tlsCertFile=/etc/webhook/certs/cert.pem
- -tlsKeyFile=/etc/webhook/certs/key.pem
- -alsologtostderr
- -v=8
- 2>&1
volumeMounts:
- name: webhook-certs
mountPath: /etc/webhook/certs
readOnly: true
volumes:
- name: webhook-certs
secret:
secretName: admission-server-certs
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "openebs.fullname" . }}-localpv-provisioner
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
spec:
replicas: {{ .Values.provisioner.replicas }}
selector:
matchLabels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
component: localpv-provisioner
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
openebs.io/version: {{ .Values.release.version }}
spec:
serviceAccountName: {{ template "openebs.serviceAccountName" . }}
containers:
- name: {{ template "openebs.name" . }}-localpv-provisioner
image: "{{ .Values.localprovisioner.image }}:{{ .Values.localprovisioner.imageTag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://10.128.0.12:8080"
# OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
# OPENEBS_NAMESPACE is the namespace that this provisioner will
# lookup to find maya api service
- name: OPENEBS_NAMESPACE
value: "{{ .Release.Namespace }}"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# OPENEBS_IO_BASE_PATH is the environment variable that provides the
# default base path on the node where host-path PVs will be provisioned.
- name: OPENEBS_IO_ENABLE_ANALYTICS
value: "{{ .Values.analytics.enabled }}"
- name: OPENEBS_IO_BASE_PATH
value: "{{ .Values.localprovisioner.basePath }}"
- name: OPENEBS_IO_HELPER_IMAGE
value: "{{ .Values.localprovisioner.helperImage }}:{{ .Values.localprovisioner.helperImageTag }}"
- name: OPENEBS_IO_INSTALLER_TYPE
value: "charts-helm"
livenessProbe:
exec:
command:
- pgrep
- ".*localpv"
initialDelaySeconds: {{ .Values.localprovisioner.healthCheck.initialDelaySeconds }}
periodSeconds: {{ .Values.localprovisioner.healthCheck.periodSeconds }}
{{- if .Values.localprovisioner.nodeSelector }}
nodeSelector:
{{ toYaml .Values.localprovisioner.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.localprovisioner.tolerations }}
tolerations:
{{ toYaml .Values.localprovisioner.tolerations | indent 8 }}
{{- end }}
{{- if .Values.localprovisioner.affinity }}
affinity:
{{ toYaml .Values.localprovisioner.affinity | indent 8 }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "openebs.fullname" . }}-apiserver
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: apiserver
name: maya-apiserver
openebs.io/component-name: maya-apiserver
spec:
replicas: {{ .Values.apiserver.replicas }}
selector:
matchLabels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
component: apiserver
name: maya-apiserver
openebs.io/component-name: maya-apiserver
openebs.io/version: {{ .Values.release.version }}
spec:
serviceAccountName: {{ template "openebs.serviceAccountName" . }}
containers:
- name: {{ template "openebs.name" . }}-apiserver
image: "{{ .Values.apiserver.image }}:{{ .Values.apiserver.imageTag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: {{ .Values.apiserver.ports.internalPort }}
env:
# OPENEBS_IO_KUBE_CONFIG enables maya api service to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for maya api server version 0.5.2 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
# OPENEBS_IO_K8S_MASTER enables maya api service to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for maya api server version 0.5.2 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://172.28.128.3:8080"
# OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL decides whether default cstor sparse pool should be
# configured as a part of openebs installation.
# If "true" a default cstor sparse pool will be configured, if "false" it will not be configured.
- name: OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL
value: "{{ .Values.apiserver.sparse.enabled }}"
- name: OPENEBS_IO_CSTOR_POOL_SPARSE_DIR
value: "{{ .Values.ndm.sparse.path }}"
# OPENEBS_NAMESPACE provides the namespace of this deployment as an
# environment variable
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as
# environment variable
- name: OPENEBS_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
# OPENEBS_MAYA_POD_NAME provides the name of this pod as
# environment variable
- name: OPENEBS_MAYA_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPENEBS_IO_JIVA_CONTROLLER_IMAGE
value: "{{ .Values.jiva.image }}:{{ .Values.jiva.imageTag }}"
- name: OPENEBS_IO_JIVA_REPLICA_IMAGE
value: "{{ .Values.jiva.image }}:{{ .Values.jiva.imageTag }}"
- name: OPENEBS_IO_JIVA_REPLICA_COUNT
value: "{{ .Values.jiva.replicas }}"
- name: OPENEBS_IO_CSTOR_TARGET_IMAGE
value: "{{ .Values.cstor.target.image }}:{{ .Values.cstor.target.imageTag }}"
- name: OPENEBS_IO_CSTOR_POOL_IMAGE
value: "{{ .Values.cstor.pool.image }}:{{ .Values.cstor.pool.imageTag }}"
- name: OPENEBS_IO_CSTOR_POOL_MGMT_IMAGE
value: "{{ .Values.cstor.poolMgmt.image }}:{{ .Values.cstor.poolMgmt.imageTag }}"
- name: OPENEBS_IO_CSTOR_VOLUME_MGMT_IMAGE
value: "{{ .Values.cstor.volumeMgmt.image }}:{{ .Values.cstor.volumeMgmt.imageTag }}"
- name: OPENEBS_IO_VOLUME_MONITOR_IMAGE
value: "{{ .Values.policies.monitoring.image }}:{{ .Values.policies.monitoring.imageTag }}"
- name: OPENEBS_IO_CSTOR_POOL_EXPORTER_IMAGE
value: "{{ .Values.policies.monitoring.image }}:{{ .Values.policies.monitoring.imageTag }}"
# OPENEBS_IO_ENABLE_ANALYTICS if set to true sends anonymous usage
# events to Google Analytics
- name: OPENEBS_IO_ENABLE_ANALYTICS
value: "{{ .Values.analytics.enabled }}"
# OPENEBS_IO_ANALYTICS_PING_INTERVAL can be used to specify the duration (in hours)
# for periodic ping events sent to Google Analytics. Default is 24 hours.
- name: OPENEBS_IO_ANALYTICS_PING_INTERVAL
value: "{{ .Values.analytics.pingInterval }}"
- name: OPENEBS_IO_INSTALLER_TYPE
value: "charts-helm"
livenessProbe:
exec:
command:
- /usr/local/bin/mayactl
- version
initialDelaySeconds: {{ .Values.apiserver.healthCheck.initialDelaySeconds }}
periodSeconds: {{ .Values.apiserver.healthCheck.periodSeconds }}
{{- if .Values.apiserver.nodeSelector }}
nodeSelector:
{{ toYaml .Values.apiserver.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.apiserver.tolerations }}
tolerations:
{{ toYaml .Values.apiserver.tolerations | indent 8 }}
{{- end }}
{{- if .Values.apiserver.affinity }}
affinity:
{{ toYaml .Values.apiserver.affinity | indent 8 }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "openebs.fullname" . }}-provisioner
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: provisioner
spec:
replicas: {{ .Values.provisioner.replicas }}
selector:
matchLabels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
component: provisioner
name: openebs-provisioner
openebs.io/component-name: openebs-provisioner
openebs.io/version: {{ .Values.release.version }}
spec:
serviceAccountName: {{ template "openebs.serviceAccountName" . }}
containers:
- name: {{ template "openebs.name" . }}-provisioner
image: "{{ .Values.provisioner.image }}:{{ .Values.provisioner.imageTag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://10.128.0.12:8080"
# OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
# OPENEBS_NAMESPACE is the namespace in which this provisioner will
# look up the maya api service
- name: OPENEBS_NAMESPACE
value: "{{ .Release.Namespace }}"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name
# to which the provisioner should forward volume create/delete requests.
# If not present, "maya-apiserver-service" will be used for lookup.
# This is supported for openebs provisioner version 0.5.3-RC1 onwards
- name: OPENEBS_MAYA_SERVICE_NAME
value: "{{ template "openebs.fullname" . }}-apiservice"
# The following values will be set as annotations to the PV object.
# Refer: https://github.com/openebs/external-storage/pull/15
#- name: OPENEBS_MONITOR_URL
# value: "{{ .Values.provisioner.monitorUrl }}"
#- name: OPENEBS_MONITOR_VOLKEY
# value: "{{ .Values.provisioner.monitorVolumeKey }}"
#- name: MAYA_PORTAL_URL
# value: "{{ .Values.provisioner.mayaPortalUrl }}"
livenessProbe:
exec:
command:
- pgrep
- ".*openebs"
initialDelaySeconds: {{ .Values.provisioner.healthCheck.initialDelaySeconds }}
periodSeconds: {{ .Values.provisioner.healthCheck.periodSeconds }}
{{- if .Values.provisioner.nodeSelector }}
nodeSelector:
{{ toYaml .Values.provisioner.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.provisioner.tolerations }}
tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 }}
{{- end }}
{{- if .Values.provisioner.affinity }}
affinity:
{{ toYaml .Values.provisioner.affinity | indent 8 }}
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "openebs.fullname" . }}-snapshot-operator
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: snapshot-operator
spec:
replicas: {{ .Values.snapshotOperator.replicas }}
selector:
matchLabels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
strategy:
type: {{ .Values.snapshotOperator.upgradeStrategy }}
template:
metadata:
labels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
component: snapshot-operator
name: openebs-snapshot-operator
openebs.io/version: {{ .Values.release.version }}
openebs.io/component-name: openebs-snapshot-operator
spec:
serviceAccountName: {{ template "openebs.serviceAccountName" . }}
containers:
- name: {{ template "openebs.name" . }}-snapshot-controller
image: "{{ .Values.snapshotOperator.controller.image }}:{{ .Values.snapshotOperator.controller.imageTag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
# OPENEBS_IO_K8S_MASTER enables openebs snapshot controller to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for openebs snapshot controller version 0.6-RC1 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://10.128.0.12:8080"
# OPENEBS_IO_KUBE_CONFIG enables openebs snapshot controller to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for openebs snapshot controller version 0.6-RC1 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
# OPENEBS_NAMESPACE is the namespace in which this snapshot controller will
# look up the maya api service
- name: OPENEBS_NAMESPACE
value: "{{ .Release.Namespace }}"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name
# to which the snapshot controller should forward volume snapshot requests.
# If not present, "maya-apiserver-service" will be used for lookup.
# This is supported for openebs snapshot controller version 0.6-RC1 onwards
- name: OPENEBS_MAYA_SERVICE_NAME
value: "{{ template "openebs.fullname" . }}-apiservice"
livenessProbe:
exec:
command:
- pgrep
- ".*controller"
initialDelaySeconds: {{ .Values.snapshotOperator.healthCheck.initialDelaySeconds }}
periodSeconds: {{ .Values.snapshotOperator.healthCheck.periodSeconds }}
- name: {{ template "openebs.name" . }}-snapshot-provisioner
image: "{{ .Values.snapshotOperator.provisioner.image }}:{{ .Values.snapshotOperator.provisioner.imageTag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
# OPENEBS_IO_K8S_MASTER enables openebs snapshot provisioner to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for openebs snapshot provisioner version 0.6-RC1 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://10.128.0.12:8080"
# OPENEBS_IO_KUBE_CONFIG enables openebs snapshot provisioner to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for openebs snapshot provisioner version 0.6-RC1 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
# OPENEBS_NAMESPACE is the namespace in which this snapshot provisioner will
# look up the maya api service
- name: OPENEBS_NAMESPACE
value: "{{ .Release.Namespace }}"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name
# to which the snapshot provisioner should forward volume snapshot PV requests.
# If not present, "maya-apiserver-service" will be used for lookup.
# This is supported for openebs snapshot provisioner version 0.6-RC1 onwards
- name: OPENEBS_MAYA_SERVICE_NAME
value: "{{ template "openebs.fullname" . }}-apiservice"
livenessProbe:
exec:
command:
- pgrep
- ".*provisioner"
initialDelaySeconds: {{ .Values.snapshotOperator.healthCheck.initialDelaySeconds }}
periodSeconds: {{ .Values.snapshotOperator.healthCheck.periodSeconds }}
{{- if .Values.snapshotOperator.nodeSelector }}
nodeSelector:
{{ toYaml .Values.snapshotOperator.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.snapshotOperator.tolerations }}
tolerations:
{{ toYaml .Values.snapshotOperator.tolerations | indent 8 }}
{{- end }}
{{- if .Values.snapshotOperator.affinity }}
affinity:
{{ toYaml .Values.snapshotOperator.affinity | indent 8 }}
{{- end }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "openebs.fullname" . }}-ndm-operator
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: ndm-operator
openebs.io/component-name: ndm-operator
name: ndm-operator
spec:
replicas: {{ .Values.ndmOperator.replicas }}
strategy:
type: {{ .Values.ndmOperator.upgradeStrategy }}
selector:
matchLabels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
component: ndm-operator
name: ndm-operator
openebs.io/component-name: ndm-operator
openebs.io/version: {{ .Values.release.version }}
spec:
serviceAccountName: {{ template "openebs.serviceAccountName" . }}
containers:
- name: {{ template "openebs.fullname" . }}-ndm-operator
image: "{{ .Values.ndmOperator.image }}:{{ .Values.ndmOperator.imageTag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
readinessProbe:
exec:
command:
- stat
- /tmp/operator-sdk-ready
initialDelaySeconds: {{ .Values.ndmOperator.readinessCheck.initialDelaySeconds }}
periodSeconds: {{ .Values.ndmOperator.readinessCheck.periodSeconds }}
failureThreshold: {{ .Values.ndmOperator.readinessCheck.failureThreshold }}
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: "node-disk-operator"
- name: CLEANUP_JOB_IMAGE
value: "{{ .Values.ndmOperator.cleanupImage }}:{{ .Values.ndmOperator.cleanupImageTag }}"
{{- if .Values.ndmOperator.nodeSelector }}
nodeSelector:
{{ toYaml .Values.ndmOperator.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.ndmOperator.tolerations }}
tolerations:
{{ toYaml .Values.ndmOperator.tolerations | indent 8 }}
{{- end }}
apiVersion: v1
kind: Service
metadata:
name: admission-server-svc
labels:
app: admission-webhook
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
ports:
- port: 443
targetPort: 443
selector:
app: admission-webhook
apiVersion: v1
kind: Service
metadata:
name: {{ template "openebs.fullname" . }}-apiservice
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
ports:
- name: api
port: {{ .Values.apiserver.ports.externalPort }}
targetPort: {{ .Values.apiserver.ports.internalPort }}
protocol: TCP
selector:
app: {{ template "openebs.name" . }}
release: {{ .Release.Name }}
component: apiserver
sessionAffinity: None
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "openebs.serviceAccountName" . }}
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end }}
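{{/*
Generate a self-signed CA and a serving certificate for the admission
webhook Service (admission-server-svc), each valid for 3650 days; the
certificate covers the Service's in-cluster DNS names.
*/}}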
{{- $ca := genCA "admission-server-ca" 3650 }}
{{- $cn := printf "admission-server-svc" }}
{{- $altName1 := printf "admission-server-svc.%s" .Release.Namespace }}
{{- $altName2 := printf "admission-server-svc.%s.svc" .Release.Namespace }}
{{- $cert := genSignedCert $cn nil (list $altName1 $altName2) 3650 $ca }}
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: openebs-validation-webhook-cfg
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: admission-webhook
webhooks:
- name: admission-webhook.openebs.io
clientConfig:
service:
name: admission-server-svc
namespace: {{ .Release.Namespace }}
path: "/validate"
{{- if .Values.webhook.generateTLS }}
caBundle: {{ b64enc $ca.Cert }}
{{- else }}
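# Static, pre-generated fallback CA bundle used when webhook.generateTLS is false.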
caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURpekNDQW5PZ0F3SUJBZ0lKQUk5NG9wdWdKb1drTUEwR0NTcUdTSWIzRFFFQkN3VUFNRnd4Q3pBSkJnTlYKQkFZVEFuaDRNUW93Q0FZRFZRUUlEQUY0TVFvd0NBWURWUVFIREFGNE1Rb3dDQVlEVlFRS0RBRjRNUW93Q0FZRApWUVFMREFGNE1Rc3dDUVlEVlFRRERBSmpZVEVRTUE0R0NTcUdTSWIzRFFFSkFSWUJlREFlRncweE9UQXpNREl3Ck56TXlOREZhRncweU1EQXpNREV3TnpNeU5ERmFNRnd4Q3pBSkJnTlZCQVlUQW5oNE1Rb3dDQVlEVlFRSURBRjQKTVFvd0NBWURWUVFIREFGNE1Rb3dDQVlEVlFRS0RBRjRNUW93Q0FZRFZRUUxEQUY0TVFzd0NRWURWUVFEREFKagpZVEVRTUE0R0NTcUdTSWIzRFFFSkFSWUJlRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBT0pxNmI2dnI0cDMzM3FRaHJQbmNCVFVIUE1ESnJtaEYvOU44NjZodzFvOGZLclFwNkJmRkcvZEQ0N2gKVGcvWnJ0U2VHT0NoRjFxSEk1dGp3SlVEeGphSUM3U0FkZGpxb1pJUGFoT1pjVlpxZE1POVVFTlFUbktIRXczVQpCUjJUaHdydi9QTTRxZitUazdRa1J6Y2VJQXg1VS9lbUlEV2t4NEk3RlRYQk1XT1hGUTNoRlFtWFppZHpHN21mCnZJTlhYN0krOHR3QVM0alNSdGhxYjVUTzMwYmpxQTFzY0RRdXlZU2R6OVg5TGw1WU1QSUtSZHpnYUR1d1Q5QkQKZjNxT1VqazN6M1FZd0IvWmowaXJtQlpKejJla0V3a1QxbWlyUHF2NTA5QVJ5V1U2QUlSSTN6dnB6S2tWeFJUaApmcUROa1M5SmRRV1Q3RW9vN2lITmRtZlhOYmtDQXdFQUFhTlFNRTR3SFFZRFZSME9CQllFRk1ORzZGeGlMYWFmCjFld2w1RDd1SXJiK0UrSE9NQjhHQTFVZEl3UVlNQmFBRk1ORzZGeGlMYWFmMWV3bDVEN3VJcmIrRStIT01Bd0cKQTFVZEV3UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFHQnYxeC92OWRnWU1ZY1h5TU9MUUNENgpVZWNsS3YzSFRTVGUybXZQcTZoTW56K0ExOGF6RWhPU0xONHZuQUNSd2pzRmVobWIrWk9wMVlYWDkzMi9OckRxCk1XUmh1bENiblFndjlPNVdHWXBDQUR1dnBBMkwyT200aU50S0FucUpGNm5ubHI1UFdQZnVJelB1eVlvQUpKRDkKSFpZRjVwa2hac0EwdDlUTDFuUmdPbFY4elZ0eUg2TTVDWm5nSEpjWG9CWlVvSlBvcGJsc3BpUnh6dzBkMUU0SgpUVmVHaXZFa0RJNFpFYTVuTzZyTUZzcXJ1L21ydVQwN1FCaWd5ZzlEY3h0QU5TUTczQUhOemNRUWpZMWg3L2RiCmJ6QXQ2aWxNZXZKc2lpVFlGYjRPb0dIVW53S2tTQUJuazFNQW5oUUhvYUNuS2dXZE1vU3orQWVuYkhzYXJSMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
{{- end }}
rules:
- operations: [ "CREATE", "DELETE" ]
apiGroups: ["*"]
apiVersions: ["*"]
resources: ["persistentvolumeclaims"]
---
apiVersion: v1
kind: Secret
metadata:
name: admission-server-certs
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "openebs.name" . }}
chart: {{ template "openebs.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
type: Opaque
data:
{{- if .Values.webhook.generateTLS }}
cert.pem: {{ b64enc $cert.Cert }}
key.pem: {{ b64enc $cert.Key }}
{{- else }}
cert.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ3VENDQXRXZ0F3SUJBZ0lVYk84NS9JR0ZXYTA2Vm11WVdTWjdxaTUybmRRd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1hERUxNQWtHQTFVRUJoTUNlSGd4Q2pBSUJnTlZCQWdNQVhneENqQUlCZ05WQkFjTUFYZ3hDakFJQmdOVgpCQW9NQVhneENqQUlCZ05WQkFzTUFYZ3hDekFKQmdOVkJBTU1BbU5oTVJBd0RnWUpLb1pJaHZjTkFRa0JGZ0Y0Ck1CNFhEVEU1TURNd01qQTNNek13TUZvWERUSXdNRE13TVRBM01qYzFNbG93S3pFcE1DY0dBMVVFQXhNZ1lXUnQKYVhOemFXOXVMWE5sY25abGNpMXpkbU11YjNCbGJtVmljeTV6ZG1Nd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQQpBNElCRHdBd2dnRUtBb0lCQVFERk5MRE1xKzd6eFZidDNPcnFhaVUyOFB6K25ZeFRCblA0NVhFWGFjSUpPWG1aClM1c2ZjMjM3WVNWS0I5Tlp4cXNYT08wcXpWb0xtNlZ0UDJjREpWZGZIVUQ0QXBZSC94UVBVTktrcFg3K0NVTFEKZ3VBNWowOXozdkFaeDJidXBTaXFFdE1mVldqNkh5V0Jyd2FuZW9IaVVXVVdpbmtnUXpCQzR1SWtiRkE2djYrZwp4ZzAwS09TY2NFRWY3eU5McjBvejBKVHRpRm1aS1pVVVBwK3N3WTRpRTZ3RER5bVVnTmY4SW8wUEExVkQ1TE9vCkFwQ0l2WDJyb1RNd3VkR1VrZUc1VTA2OWIrMWtQMEJsUWdDZk9TQTBmZEN3Snp0aWE1aHpaUlVIWGxFOVArN0kKekgyR0xXeHh1aHJPTlFmT25HcVRiUE13UmowekZIdmcycUo1azJ2VkFnTUJBQUdqZ2Rjd2dkUXdEZ1lEVlIwUApBUUgvQkFRREFnV2dNQk1HQTFVZEpRUU1NQW9HQ0NzR0FRVUZCd01CTUF3R0ExVWRFd0VCL3dRQ01BQXdIUVlEClZSME9CQllFRklnOVFSOSsyVW12THQwQXY4MlYwZml0bU81WE1COEdBMVVkSXdRWU1CYUFGTU5HNkZ4aUxhYWYKMWV3bDVEN3VJcmIrRStIT01GOEdBMVVkRVFSWU1GYUNGR0ZrYldsemMybHZiaTF6WlhKMlpYSXRjM1pqZ2h4aApaRzFwYzNOcGIyNHRjMlZ5ZG1WeUxYTjJZeTV2Y0dWdVpXSnpnaUJoWkcxcGMzTnBiMjR0YzJWeWRtVnlMWE4yCll5NXZjR1Z1WldKekxuTjJZekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBSlpJRzd2d0RYaWxhWUFCS1Brc0oKZVJtdml4ZnYybTRVTVdzdlBKVVVJTXhHbzhtc1J6aWhBRjVuTExzaURKRDl4MjhraXZXaGUwbWE4aWVHYjY5Sgp1U1N4bys0OStaV3NVaTB3UlRDMi9ZWGlkWS9xNDU2c1g4ck9qQURDZlFUcFpYc2ZyekVWa2Q4NE0zdU5GTmhnCnMyWmxJMnNDTWljYXExNWxIWEh3akFkY2FqZit1VklwOXNHUElsMUhmZFcxWVFLc0NoU3dhdi80NUZJcFlMSVYKM3hiS2ZIbmh2czhJck5ZbTVIenAvVVdvcFN1Tm5tS1IwWGo3cXpGcllUYzV3eHZ3VVZrKzVpZFFreWMwZ0RDcApGbkFVdEdmaUVUQnBhU3pISjQ4STZqUFpneVE0NzlZMmRxRUtXcWtyc0RkZ2tVcXlnNGlQQ0YwWC9YVU9YU3VGClNnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeFRTd3pLdnU4OFZXN2R6cTZtb2xOdkQ4L3AyTVV3WnorT1Z4RjJuQ0NUbDVtVXViCkgzTnQrMkVsU2dmVFdjYXJGemp0S3MxYUM1dWxiVDluQXlWWFh4MUErQUtXQi84VUQxRFNwS1YrL2dsQzBJTGcKT1k5UGM5N3dHY2RtN3FVb3FoTFRIMVZvK2g4bGdhOEdwM3FCNGxGbEZvcDVJRU13UXVMaUpHeFFPcit2b01ZTgpOQ2prbkhCQkgrOGpTNjlLTTlDVTdZaFptU21WRkQ2ZnJNR09JaE9zQXc4cGxJRFgvQ0tORHdOVlErU3pxQUtRCmlMMTlxNkV6TUxuUmxKSGh1Vk5PdlcvdFpEOUFaVUlBbnprZ05IM1FzQ2M3WW11WWMyVVZCMTVSUFQvdXlNeDkKaGkxc2Nib2F6alVIenB4cWsyenpNRVk5TXhSNzROcWllWk5yMVFJREFRQUJBb0lCQVFDcXRIT2VsKzRlUWVKLwp3RTN4WUxTYUhIMURnZWxvTFJ2U2hmb2hSRURjYjA0ZExsODNHRnBKMGN2UGkzcWVLZVVNRXhEcGpoeTJFNk5kCk1CYmhtRDlMYkMxREFpb1EvZkxGVnpjZm9zcU02RU5YN3hKZGdQcEwyTjJKMHh2ODFDYWhJZTV6SHlIaDhYZ3MKQysvOHBZVXMvVHcrQ052VTI1UTVNZUNEbXViUUVuemJqQ3lIQm5SVmw1dVF6bk8zWEt2NEVyejdBT1BBWmFJTQozYmNFNC83c1JGczM4SE1aMVZTZ2JxUi9rM1N5SEFzNXhNWHVtY0hMMTBkK0FVK21BQ0svUThpdWJHMm9kNnJiCko3S0RONmFuUzRPZk4zZ3RtaEppN3ZsTjJVL3JycHdnblI0d3Y0bmV4U1ZlamYzQU9iaU9jNnYzZ0xJbXJ2Q3oKNzFETDFPaTVBb0dCQU9HeFp2RWFUSFFnNFdaQVJZbXlGZEtZeXY2MURDc1JycElmUlh3Q1YrcnBZTFM2NlV4SQprWHJISlNreWFqTjNTOXVsZUtUTXRWaU5wY2JCcjVNZ0lOaFFvdThRc2dpZlZHWFJGQ3d0OXJ3MGNDbEc1Y2pCClZ3bUQzYWFBTGR5WVQvbHc4dnk1Zndqc1hFZHd1OEQ2cC9rd0ZzMmlwZWQ4QVFPUVZlQ1dPeXF6QW9HQkFOK3YKL2VxKzZ5NHhPZ2ZtQ01KcHJ0THBBN1J0M3FsU0JKbEw3RkNsQXRCeUUxazBPTVIrZTdhSDBVTDdYWVR4YlBLOApBYnRZR3lzWDkydGM3RHlaU0k0cDFjUHhvcHdzNkt3N0RYZUt0YTNnVkRmSXVuZ3haR25XWjk2WmNjcEhyVzgyCnl5OTk5dTQ2WE1tQWZwSzEvbGxjdGdLem5FUVp5ZkhEUmlWdVVQTlhBb0dCQUxkMGxORDNKNTVkKzlvNTlFeHgKVGZ2WjUyZ1Rrc2lQbnU5NEsrc1puSTEvRnZUUjJrSC8yd0dLVDFLbGdGNUZZb3d3ZlZpNGJkQ0ZrM04walZ0eQppa0JMaTZYNFZEOWVCQ1NmUjE2Q0hrWHQraDRUVzBWTW80dEFmVE9TamJUNnVrZHc0Sk05MVYxVGc4OHVlKy9wCjBCQm1YcUxZeXpMWFFadTcvNUtIaTZDeEFvR0FaTWV2R0E5eWVEcFhrZTF6THR4Y2xzdkREb3lkMEIyUzB0cGgKR3lodEx5cm1Tcjk3Z0JRWWV2R1FONlIyeXduVzh6bi9jYi9OWmNvRGdFeTZac2NNNkhneXhuaGNzZzZOdWVOVgpPdkcwenlVTjdLQTBXeWl0dS8yTWlMOExoSDVzeG5taWE4Qk4rNkV4NHR0UXE1cnhnS09Eb1kzNHJyb0x3VEFnCnI0YVhWRHNDZ1lBYnRwZXhvNTJ4VmJkTzZCL3B5RUU2cEJCS1FkK3hiVkJNMDZwUzArSlFudSt5SVBmeXFhekwKbGdYTEhBSm01bU9Sb2RFRHk0WlVJRkM5RmhraGcrV0ZzSHJCOXpGU1IrZFc2Uzg1eFA4ZGxHVE42S2cydXJNQQowNTRCQUh4RWhPNU9QblNqT0VHSmQwYTdGQmc1UlkxN0RRQlFxV25SZENURHlDWmU0OStLcWc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
{{- end }}
# Default values for openebs.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
create: true
name:
release:
# "openebs.io/version" label for control plane components
version: "1.0.0"
image:
pullPolicy: IfNotPresent
apiserver:
image: "quay.io/openebs/m-apiserver"
imageTag: "1.0.0"
replicas: 1
ports:
externalPort: 5656
internalPort: 5656
sparse:
enabled: "false"
nodeSelector: {}
tolerations: []
affinity: {}
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
provisioner:
image: "quay.io/openebs/openebs-k8s-provisioner"
imageTag: "1.0.0"
replicas: 1
nodeSelector: {}
tolerations: []
affinity: {}
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
localprovisioner:
image: "quay.io/openebs/provisioner-localpv"
imageTag: "1.0.0"
helperImage: "quay.io/openebs/openebs-tools"
helperImageTag: "3.8"
replicas: 1
basePath: "/var/openebs/local"
nodeSelector: {}
tolerations: []
affinity: {}
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
snapshotOperator:
controller:
image: "quay.io/openebs/snapshot-controller"
imageTag: "1.0.0"
provisioner:
image: "quay.io/openebs/snapshot-provisioner"
imageTag: "1.0.0"
replicas: 1
upgradeStrategy: "Recreate"
nodeSelector: {}
tolerations: []
affinity: {}
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
ndm:
image: "quay.io/openebs/node-disk-manager-amd64"
imageTag: "v0.4.0"
sparse:
path: "/var/openebs/sparse"
size: "10737418240"
count: "1"
filters:
excludeVendors: "CLOUDBYT,OpenEBS"
includePaths: ""
excludePaths: "loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md"
nodeSelector: {}
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
ndmOperator:
image: "quay.io/openebs/node-disk-operator-amd64"
imageTag: "v0.4.0"
replicas: 1
upgradeStrategy: Recreate
nodeSelector: {}
tolerations: []
readinessCheck:
initialDelaySeconds: 4
periodSeconds: 10
failureThreshold: 1
cleanupImage: "quay.io/openebs/linux-utils"
cleanupImageTag: "3.9"
webhook:
image: "quay.io/openebs/admission-server"
imageTag: "1.0.0"
generateTLS: true
replicas: 1
nodeSelector: {}
tolerations: []
affinity: {}
jiva:
image: "quay.io/openebs/jiva"
imageTag: "1.0.0"
replicas: 3
cstor:
pool:
image: "quay.io/openebs/cstor-pool"
imageTag: "1.0.0"
poolMgmt:
image: "quay.io/openebs/cstor-pool-mgmt"
imageTag: "1.0.0"
target:
image: "quay.io/openebs/cstor-istgt"
imageTag: "1.0.0"
volumeMgmt:
image: "quay.io/openebs/cstor-volume-mgmt"
imageTag: "1.0.0"
policies:
monitoring:
enabled: true
image: "quay.io/openebs/m-exporter"
imageTag: "1.0.0"
analytics:
enabled: true
# Specify in hours the duration after which a ping event needs to be sent.
pingInterval: "24h"
apiVersion: v1
appVersion: "1.3.0"
description: Cloud Native storage for containers
name: storageos-operator
version: 0.2.11
tillerVersion: ">=2.10.0"
keywords:
- storage
- block-storage
- volume
- operator
home: https://storageos.com
icon: https://storageos.com/wp-content/themes/storageOS/images/logo.svg
sources:
- https://github.com/storageos
maintainers:
- name: croomes
email: simon.croome@storageos.com
- name: darkowlzz
email: sunny.gogoi@storageos.com
MIT License
Copyright (c) 2019 StorageOS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# StorageOS Operator Helm Chart
> **Note**: This is the recommended chart to use for installing StorageOS. It
installs the StorageOS Operator, and then installs a StorageOS cluster with a
minimal configuration. Other Helm charts
([storageoscluster-operator](https://github.com/storageos/charts/tree/master/stable/storageoscluster-operator)
and
[storageos](https://github.com/storageos/charts/tree/master/stable/storageos))
will be deprecated.
[StorageOS](https://storageos.com) is a software-based storage platform
designed for cloud-native applications. By deploying StorageOS on your
Kubernetes cluster, local storage from cluster nodes is aggregated into a
distributed pool, and persistent volumes created from it using the native
Kubernetes volume driver are available instantly to pods wherever they move in
the cluster.
Features such as replication, encryption and caching help protect data and
maximise performance.
This chart installs a StorageOS Cluster Operator which helps deploy and
configure a StorageOS cluster on kubernetes.
## Prerequisites
- Helm 2.10+
- Kubernetes 1.9+.
- Privileged mode containers (enabled by default)
- Kubernetes 1.9 only:
- Feature gate: MountPropagation=true. This can be done by appending
`--feature-gates MountPropagation=true` to the kube-apiserver and kubelet
services (see the sketch below).
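A minimal sketch of one way to do this on a systemd-managed node; the drop-in
path and the `KUBELET_EXTRA_ARGS` variable below are illustrative and vary by
distribution and provisioning tool:

```console
# Illustrative only: drop-in location and variable name vary by distro.
$ cat <<'EOF' | sudo tee /etc/systemd/system/kubelet.service.d/20-mount-propagation.conf
[Service]
Environment="KUBELET_EXTRA_ARGS=--feature-gates MountPropagation=true"
EOF
$ sudo systemctl daemon-reload && sudo systemctl restart kubelet
```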
Refer to the [StorageOS prerequisites
docs](https://docs.storageos.com/docs/prerequisites/overview) for more
information.
## Installing the chart
```console
# Add storageos charts repo.
$ helm repo add storageos https://charts.storageos.com
# Install the chart in a namespace.
$ helm install storageos/storageos-operator --namespace storageos-operator
```
This will install the StorageOSCluster operator in the `storageos-operator`
namespace and deploy StorageOS with a minimal configuration.
> **Tip**: List all releases using `helm list`
## Creating a StorageOS cluster manually
The Helm chart supports a subset of StorageOSCluster custom resource parameters.
For advanced configurations, you may wish to create the cluster resource
manually and only use the Helm chart to install the Operator.
To disable auto-provisioning the cluster with the Helm chart, set
`cluster.create` to false:
```yaml
cluster:
...
create: false
```
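Alternatively, pass the override on the command line:

```console
$ helm install storageos/storageos-operator \
    --namespace storageos-operator \
    --set cluster.create=false
```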
Create a secret to store the StorageOS cluster's API address and credentials:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: "storageos-api"
namespace: "default"
labels:
app: "storageos"
type: "kubernetes.io/storageos"
data:
# echo -n '<secret>' | base64
apiAddress: c3RvcmFnZW9zOjU3MDU=
apiUsername: c3RvcmFnZW9z
apiPassword: c3RvcmFnZW9z
```
Create a `StorageOSCluster` custom resource and reference the secret above in
the `secretRefName` and `secretRefNamespace` fields.
```yaml
apiVersion: "storageos.com/v1"
kind: "StorageOSCluster"
metadata:
name: "example-storageos"
namespace: "default"
spec:
secretRefName: "storageos-api"
secretRefNamespace: "default"
```
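Assuming the two manifests above are saved locally (the file names here are
illustrative), apply them with kubectl:

```console
$ kubectl apply -f storageos-api-secret.yaml
$ kubectl apply -f storageos-cluster.yaml
```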
Once the `StorageOSCluster` configuration is applied, the StorageOSCluster
operator will create a StorageOS cluster in the `storageos` namespace by
default.
Most installations will want to use the default [CSI](https://kubernetes-csi.github.io/docs/)
driver. To use the [Native Driver](https://kubernetes.io/docs/concepts/storage/volumes/#storageos)
instead, disable CSI in the `StorageOSCluster` resource shown above:
```yaml
spec:
...
csi:
enable: false
...
```
Learn more about advanced configuration options
[here](https://github.com/storageos/cluster-operator/blob/master/README.md#storageoscluster-resource-configuration).
To check cluster status, run:
```bash
$ kubectl get storageoscluster
NAME READY STATUS AGE
example-storageos 3/3 Running 4m
```
All the events related to this cluster are logged as part of the cluster object
and can be viewed by describing the object.
```bash
$ kubectl describe storageoscluster example-storageos
Name: example-storageos
Namespace: default
Labels: <none>
...
...
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning ChangedStatus 1m (x2 over 1m) storageos-operator 0/3 StorageOS nodes are functional
Normal ChangedStatus 35s storageos-operator 3/3 StorageOS nodes are functional. Cluster healthy
```
## Configuration
The following table lists the configurable parameters of the StorageOSCluster
Operator chart and their default values.
Parameter | Description | Default
--------- | ----------- | -------
`operator.image.repository` | StorageOS Operator container image repository | `storageos/cluster-operator`
`operator.image.tag` | StorageOS Operator container image tag | `1.3.0`
`operator.image.pullPolicy` | StorageOS Operator container image pull policy | `IfNotPresent`
`podSecurityPolicy.enabled` | If true, create & use PodSecurityPolicy resources | `false`
`podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}`
`cluster.create` | If true, auto-create the StorageOS cluster | `true`
`cluster.name` | Name of the storageos deployment | `storageos`
`cluster.namespace` | Namespace to install the StorageOS cluster into | `kube-system`
`cluster.secretRefName` | Name of the secret containing StorageOS API credentials | `storageos-api`
`cluster.admin.username` | Username to authenticate to the StorageOS API with | `storageos`
`cluster.admin.password` | Password to authenticate to the StorageOS API with |
`cluster.sharedDir` | The path shared into the kubelet container when running kubelet in a container |
`cluster.kvBackend.embedded` | Use StorageOS embedded etcd | `true`
`cluster.kvBackend.address` | List of etcd targets, in the form ip[:port], separated by commas |
`cluster.kvBackend.backend` | Key-Value store backend name | `etcd`
`cluster.kvBackend.tlsSecretName` | Name of the secret containing kv backend tls cert |
`cluster.kvBackend.tlsSecretNamespace` | Namespace of the secret containing kv backend tls cert |
`cluster.nodeSelectorTerm.key` | Key of the node selector term used for pod placement |
`cluster.nodeSelectorTerm.value` | Value of the node selector term used for pod placement |
`cluster.toleration.key` | Key of the pod toleration parameter |
`cluster.toleration.value` | Value of the pod toleration parameter |
`cluster.disableTelemetry` | If true, no telemetry data will be collected from the cluster | `false`
`cluster.images.node.repository` | StorageOS Node container image repository | `storageos/node`
`cluster.images.node.tag` | StorageOS Node container image tag | `1.3.0`
`cluster.csi.enable` | If true, CSI driver is enabled | `true`
`cluster.csi.deploymentStrategy` | Whether CSI helpers should be deployed as a `deployment` or `statefulset` | `deployment`
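Any of these parameters can be overridden at install time with `--set`, or
collected in a YAML file passed via `-f`; for example:

```console
$ helm install storageos/storageos-operator \
    --namespace storageos-operator \
    --set cluster.disableTelemetry=true \
    --set cluster.namespace=kube-system
```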
## Deleting a StorageOS Cluster
Deleting the `StorageOSCluster` custom resource object deletes the StorageOS
cluster and all of its associated resources.
In the above example,
```bash
kubectl delete storageoscluster example-storageos
```
would delete the custom resource and the cluster.
## Uninstalling the Chart
To uninstall/delete the StorageOS cluster operator deployment:
```bash
helm delete --purge <release-name>
```
Learn more about configuring the StorageOS Operator on
[GitHub](https://github.com/storageos/cluster-operator).
# StorageOS Operator
[StorageOS](https://storageos.com) is a cloud native, software-defined storage
platform that transforms commodity server or cloud based disk capacity into
enterprise-class persistent storage for containers. StorageOS is ideal for
deploying databases, message buses, and other mission-critical stateful
solutions, where rapid recovery and fault tolerance are essential.
The StorageOS Operator installs and manages StorageOS within a cluster.
Cluster nodes may contribute local or attached disk-based storage into a
distributed pool, which is then available to all cluster members via a
global namespace.
By default, a minimal configuration of StorageOS is installed. To set advanced
configurations, disable the default installation of StorageOS and create a
custom StorageOSCluster resource
([documentation](https://docs.storageos.com/docs/reference/cluster-operator/examples)).
> **Note**: The StorageOS Operator must be installed in the System Project
with Cluster Role.
podSecurityPolicy:
enabled: true
cluster:
# Disable cluster creation in CI; only the operator should be installed.
create: false
categories:
- storage
labels:
io.rancher.certified: partner
questions:
- variable: k8sDistro
default: rancher
description: "Kubernetes Distribution"
show_if: false
# Operator image configuration.
- variable: defaultImage
default: true
description: "Use default Docker images"
label: Use Default Images
type: boolean
show_subquestion_if: false
group: "Container Images"
subquestions:
- variable: operator.image.pullPolicy
default: IfNotPresent
description: "Operator Image pull policy"
type: enum
label: Operator Image pull policy
options:
- IfNotPresent
- Always
- Never
- variable: operator.image.repository
default: "storageos/cluster-operator"
description: "StorageOS operator image name"
type: string
label: StorageOS Operator Image Name
- variable: operator.image.tag
default: "1.3.0"
description: "StorageOS Operator image tag"
type: string
label: StorageOS Operator Image Tag
# Default minimal cluster configuration.
- variable: cluster.create
default: true
type: boolean
description: "Install StorageOS cluster with minimal configurations"
label: "Install StorageOS cluster"
show_subquestion_if: true
group: "StorageOS Cluster"
subquestions:
# CSI configuration.
- variable: cluster.csi.enable
default: true
description: "Use Container Storage Interface (CSI) driver"
label: Use CSI Driver
type: boolean
# Cluster metadata.
- variable: cluster.name
default: "storageos"
description: "Name of the StorageOS cluster deployment"
type: string
label: Name
- variable: cluster.namespace
default: "kube-system"
description: "Namespace of the StorageOS cluster deployment. `kube-system` recommended to avoid pre-emption when node is under load."
type: string
label: Namespace
# Node container image.
- variable: cluster.images.node.repository
default: "storageos/node"
description: "StorageOS node container image name"
type: string
label: StorageOS Node Container Image Name
- variable: cluster.images.node.tag
default: "1.3.0"
description: "StorageOS Node container image tag"
type: string
label: StorageOS Node Container Image Tag
# Credentials.
- variable: cluster.admin.username
default: "admin"
description: "Username of the StorageOS administrator account"
type: string
label: Username
- variable: cluster.admin.password
default: ""
description: "Password of the StorageOS administrator account. If empty, a random password will be generated."
type: password
label: Password
# Telemetry.
- variable: cluster.disableTelemetry
default: false
type: boolean
description: "Disable telemetry data collection. See https://docs.storageos.com/docs/reference/telemetry for more information."
label: Disable Telemetry
# KV store backend.
- variable: cluster.kvBackend.embedded
default: true
type: boolean
description: "Use embedded KV store for testing. Select false to use external etcd for production deployments."
label: "Use embedded KV store"
- variable: cluster.kvBackend.address
default: "10.0.0.1:2379"
description: "List of etcd targets, in the form ip[:port], separated by commas. Prefer multiple direct endpoints over a single load-balanced endpoint. Only used if not using embedded KV store."
type: string
label: External etcd address(es)
show_if: "cluster.kvBackend.embedded=false"
- variable: cluster.kvBackend.tls
default: false
type: boolean
description: "Enable etcd TLS"
label: "TLS should be configured for external etcd to protect configuration data (Optional)."
show_if: "cluster.kvBackend.embedded=false"
- variable: cluster.kvBackend.tlsSecretName
required: false
default: ""
description: "Name of the secret that contains the etcd TLS certs. This secret is typically shared with etcd."
type: string
label: External etcd TLS secret name
show_if: "cluster.kvBackend.tls=true"
- variable: cluster.kvBackend.tlsSecretNamespace
required: false
default: ""
description: "Namespace of the secret that contains the etcd TLS certs. This secret is typically shared with etcd."
type: string
label: External etcd TLS secret namespace
show_if: "cluster.kvBackend.tls=true"
# Node Selector Term.
- variable: cluster.nodeSelectorTerm.key
required: false
default: ""
description: "Key of the node selector term match expression used to select the nodes to install StorageOS on, e.g. `node-role.kubernetes.io/worker`"
type: string
label: Node selector term key
- variable: cluster.nodeSelectorTerm.value
required: false
default: ""
description: "Value of the node selector term match expression used to select the nodes to install StorageOS on."
type: string
label: Node selector term value
# Pod tolerations.
- variable: cluster.toleration.key
required: false
default: ""
description: "Key of pod toleration with operator 'Equal' and effect 'NoSchedule'"
type: string
label: Pod toleration key
- variable: cluster.toleration.value
required: false
default: ""
description: "Value of pod toleration with operator 'Equal' and effect 'NoSchedule'"
type: string
label: Pod toleration value
# Shared Directory
- variable: cluster.sharedDir
required: false
default: "/var/lib/kubelet/plugins/kubernetes.io~storageos"
description: "Shared Directory should be set if running kubelet in a container. This should be the path shared into to kubelet container, typically: '/var/lib/kubelet/plugins/kubernetes.io~storageos'. If not set, defaults will be used."
type: string
label: Shared Directory
StorageOS Operator deployed.
If you disabled automatic cluster creation, you can deploy a StorageOS cluster
by creating a custom StorageOSCluster resource:
1. Create a secret containing StorageOS cluster credentials. This secret
contains the API username and password that will be used to authenticate to the
StorageOS cluster. Base64 encode the username and password that you want to use
for your StorageOS cluster.
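For example, running echo -n 'storageos' | base64 produces c3RvcmFnZW9z,
the value used for both fields below.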
apiVersion: v1
kind: Secret
metadata:
name: storageos-api
namespace: default
labels:
app: storageos
type: kubernetes.io/storageos
data:
# echo -n '<secret>' | base64
apiUsername: c3RvcmFnZW9z
apiPassword: c3RvcmFnZW9z
2. Create a StorageOS custom resource that references the secret created
above (storageos-api in the above example). When the resource is created, the
cluster will be deployed.
apiVersion: storageos.com/v1
kind: StorageOSCluster
metadata:
name: example-storageos
namespace: default
spec:
secretRefName: storageos-api
secretRefNamespace: default
csi:
enable: true
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "storageos.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "storageos.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "storageos.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "storageos.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "storageos.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- if .Values.cluster.create }}
# ClusterRole, ClusterRoleBinding and ServiceAccounts include hook-failed in
# their hook-delete-policy to make it easy to rerun the whole setup even after
# a failure; otherwise the rerun fails with an existing-resource error.
# The before-hook-creation delete policy ensures that any leftover resources
# from a previous run get deleted when run again.
# The Job resources will not be deleted, to help investigate the failure.
# Since the resources created by the operator are not managed by the chart,
# each of them must be individually deleted in separate jobs.
apiVersion: v1
kind: ServiceAccount
metadata:
name: storageos-cleanup
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": post-delete
"helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
"helm.sh/hook-weight": "1"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: storageos:cleanup
annotations:
"helm.sh/hook": post-delete
"helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
"helm.sh/hook-weight": "1"
rules:
# Using apiGroup "apps" for daemonsets fails and the permission error indicates
# that it's in group "extensions". Not sure if it's a Job specific behavior,
# because the daemonsets deployed by the operator use "apps" apiGroup.
- apiGroups:
- extensions
resources:
- daemonsets
- deployments
verbs:
- delete
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
- clusterroles
- clusterrolebindings
verbs:
- delete
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- delete
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- delete
- apiGroups:
- ""
resources:
- serviceaccounts
- secrets
- services
- configmaps
verbs:
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: storageos:cleanup
annotations:
"helm.sh/hook": post-delete
"helm.sh/hook-delete-policy": "hook-succeeded, hook-failed, before-hook-creation"
"helm.sh/hook-weight": "2"
subjects:
- name: storageos-cleanup
kind: ServiceAccount
namespace: {{ .Release.Namespace }}
roleRef:
name: storageos:cleanup
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
---
# Iterate through the Values.cleanup list and create jobs to delete all the
# unmanaged resources of the cluster.
{{- range .Values.cleanup }}
apiVersion: batch/v1
kind: Job
metadata:
name: "storageos-{{ .name }}-cleanup"
namespace: {{ .namespace }}
annotations:
"helm.sh/hook": post-delete
"helm.sh/hook-delete-policy": "hook-succeeded, before-hook-creation"
"helm.sh/hook-weight": "3"
spec:
template:
spec:
serviceAccountName: storageos-cleanup
containers:
- name: "storageos-{{ .name }}-cleanup"
image: bitnami/kubectl:1.14.1
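# Assembles: kubectl -n <cluster namespace> delete <resource> <names...> --ignore-not-found=true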
command:
- kubectl
- -n
- {{ $.Values.cluster.namespace }}
- delete
{{- range .command }}
- {{ . | quote }}
{{- end }}
- --ignore-not-found=true
restartPolicy: Never
backoffLimit: 4
---
{{- end }}
{{- end }}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: jobs.storageos.com
annotations:
"helm.sh/hook": crd-install
spec:
group: storageos.com
names:
kind: Job
listKind: JobList
plural: jobs
singular: job
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
apiVersion:
type: string
kind:
type: string
metadata: {}
spec:
properties:
image:
type: string
args: {}
mountPath:
type: string
hostPath:
type: string
completionWord:
type: string
labelSelector:
type: string
nodeSelectorTerms: {}
tolerations: {}
status:
properties:
completed:
type: boolean
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "storageos.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "storageos.name" . }}
chart: {{ template "storageos.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: 1
selector:
matchLabels:
app: {{ template "storageos.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "storageos.name" . }}
release: {{ .Release.Name }}
spec:
serviceAccountName: {{ template "storageos.serviceAccountName" . }}
containers:
- name: storageos-operator
image: "{{ .Values.operator.image.repository }}:{{ .Values.operator.image.tag }}"
imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
ports:
- containerPort: 60000
name: metrics
command:
- cluster-operator
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: OPERATOR_NAME
value: "cluster-operator"
{{- if .Values.podSecurityPolicy.enabled }}
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "storageos.fullname" . }}-psp
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "storageos.name" . }}
chart: {{ template "storageos.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
annotations:
{{- if .Values.podSecurityPolicy.annotations }}
{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }}
{{- end }}
spec:
volumes:
- '*'
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
{{- end }}
# Role for storageos operator
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: storageos:operator
labels:
app: {{ template "storageos.name" . }}
chart: {{ template "storageos.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
rules:
- apiGroups:
- storageos.com
resources:
- storageosclusters
- storageosupgrades
- jobs
verbs:
- "*"
- apiGroups:
- apps
resources:
- statefulsets
- daemonsets
- deployments
- replicasets
verbs:
- "*"
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- get
- update
- create
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- watch
- get
- update
- patch
- delete
- create
- apiGroups:
- ""
resources:
- events
- namespaces
- serviceaccounts
- secrets
- services
- persistentvolumeclaims
- persistentvolumes
- configmaps
- replicationcontrollers
- pods/binding
- endpoints
verbs:
- create
- patch
- get
- list
- delete
- watch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
- clusterroles
- clusterrolebindings
verbs:
- create
- delete
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
- volumeattachments
- csinodeinfos
verbs:
- create
- delete
- watch
- list
- get
- update
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- delete
- apiGroups:
- csi.storage.k8s.io
resources:
- csidrivers
verbs:
- create
- delete
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- list
- watch
# OpenShift specific rule.
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
verbs:
- create
- delete
- update
- get
- use
resourceNames:
- privileged
---
# Bind operator service account to storageos-operator role
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: storageos:operator
labels:
app: {{ template "storageos.name" . }}
chart: {{ template "storageos.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
subjects:
- kind: ServiceAccount
name: {{ template "storageos.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: storageos:operator
apiGroup: rbac.authorization.k8s.io
{{- if .Values.podSecurityPolicy.enabled }}
---
# ClusterRole for using pod security policy.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: storageos:psp-user
labels:
app: {{ template "storageos.name" . }}
chart: {{ template "storageos.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
rules:
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
resourceNames:
- {{ template "storageos.fullname" . }}-psp
---
# Bind pod security policy cluster role to the operator service account.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: storageos:psp-user
labels:
app: {{ template "storageos.name" . }}
chart: {{ template "storageos.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: storageos:psp-user
subjects:
- kind: ServiceAccount
name: {{ template "storageos.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- if .Values.cluster.create }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.cluster.secretRefName }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "storageos.name" . }}
chart: {{ template "storageos.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
type: "kubernetes.io/storageos"
data:
apiUsername: {{ default "" .Values.cluster.admin.username | b64enc | quote }}
{{ if .Values.cluster.admin.password }}
apiPassword: {{ default "" .Values.cluster.admin.password | b64enc | quote }}
{{ else }}
apiPassword: {{ randAlphaNum 10 | b64enc | quote }}
{{ end }}
# Add base64 encoded TLS cert and key below if ingress.tls is set to true.
# tls.crt:
# tls.key:
# Add base64 encoded creds below for CSI credentials.
# csiProvisionUsername:
# csiProvisionPassword:
# csiControllerPublishUsername:
# csiControllerPublishPassword:
# csiNodePublishUsername:
# csiNodePublishPassword:
{{- end }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "storageos.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "storageos.name" . }}
chart: {{ template "storageos.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.cluster.create }}
apiVersion: storageos.com/v1
kind: StorageOSCluster
metadata:
name: {{ .Values.cluster.name }}
namespace: {{ .Release.Namespace }}
spec:
namespace: {{ .Values.cluster.namespace }}
secretRefName: {{ .Values.cluster.secretRefName }}
secretRefNamespace: {{ .Release.Namespace }}
disableTelemetry: {{ .Values.cluster.disableTelemetry }}
{{- if .Values.k8sDistro }}
k8sDistro: {{ .Values.k8sDistro }}
{{- end }}
{{- if .Values.cluster.images.node.repository }}
images:
nodeContainer: "{{ .Values.cluster.images.node.repository }}:{{ .Values.cluster.images.node.tag }}"
{{- end }}
csi:
enable: {{ .Values.cluster.csi.enable }}
deploymentStrategy: {{ .Values.cluster.csi.deploymentStrategy }}
{{- if .Values.cluster.sharedDir }}
sharedDir: {{ .Values.cluster.sharedDir }}
{{- end }}
{{- if eq .Values.cluster.kvBackend.embedded false }}
kvBackend:
address: {{ .Values.cluster.kvBackend.address }}
backend: {{ .Values.cluster.kvBackend.backend }}
{{- end }}
{{- if .Values.cluster.kvBackend.tlsSecretName }}
tlsEtcdSecretRefName: {{ .Values.cluster.kvBackend.tlsSecretName }}
{{- end }}
{{- if .Values.cluster.kvBackend.tlsSecretNamespace }}
tlsEtcdSecretRefNamespace: {{ .Values.cluster.kvBackend.tlsSecretNamespace }}
{{- end }}
{{- if .Values.cluster.nodeSelectorTerm.key }}
nodeSelectorTerms:
- matchExpressions:
- key: {{ .Values.cluster.nodeSelectorTerm.key }}
operator: In
values:
- "{{ .Values.cluster.nodeSelectorTerm.value }}"
{{- end }}
{{- if .Values.cluster.toleration.key }}
tolerations:
- key: {{ .Values.cluster.toleration.key }}
operator: "Equal"
value: {{ .Values.cluster.toleration.value }}
effect: "NoSchedule"
{{- end }}
{{- end }}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: storageosclusters.storageos.com
annotations:
"helm.sh/hook": crd-install
spec:
group: storageos.com
names:
kind: StorageOSCluster
listKind: StorageOSClusterList
plural: storageosclusters
singular: storageoscluster
shortNames:
- stos
scope: Namespaced
version: v1
additionalPrinterColumns:
- name: Ready
type: string
description: Ready status of the storageos nodes.
JSONPath: .status.ready
- name: Status
type: string
description: Status of the whole cluster.
JSONPath: .status.phase
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
validation:
openAPIV3Schema:
properties:
apiVersion:
type: string
kind:
type: string
metadata: {}
spec:
properties:
join:
type: string
namespace:
type: string
k8sDistro:
type: string
disableFencing:
type: boolean
disableTelemetry:
type: boolean
disableTCMU:
type: boolean
forceTCMU:
type: boolean
disableScheduler:
type: boolean
images:
properties:
nodeContainer:
type: string
initContainer:
type: string
csiDriverRegistrarContainer:
type: string
csiExternalProvisionerContainer:
type: string
csiExternalAttacherContainer:
type: string
csiLivenessProbeContainer:
type: string
csi:
properties:
enable:
type: boolean
enableProvisionCreds:
type: boolean
enableControllerPublishCreds:
type: boolean
enableNodePublishCreds:
type: boolean
deploymentStrategy:
type: string
service:
properties:
name:
type: string
type:
type: string
externalPort:
type: integer
format: int32
internalPort:
type: integer
format: int32
secretRefName:
type: string
secretRefNamespace:
type: string
tlsEtcdSecretRefName:
type: string
tlsEtcdSecretRefNamespace:
type: string
sharedDir:
type: string
ingress:
properties:
enable:
type: boolean
hostname:
type: string
tls:
type: boolean
annotations: {}
kvBackend:
properties:
address:
type: string
backend:
type: string
pause:
type: boolean
debug:
type: boolean
nodeSelectorTerms: {}
tolerations: {}
resources:
properties:
limits: {}
requests: {}
status:
properties:
phase:
type: string
nodeHealthStatus: {}
nodes:
type: array
items:
type: string
ready:
type: string
members:
properties:
ready: {}
unready: {}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: storageosupgrades.storageos.com
annotations:
"helm.sh/hook": crd-install
spec:
group: storageos.com
names:
kind: StorageOSUpgrade
listKind: StorageOSUpgradeList
plural: storageosupgrades
singular: storageosupgrade
scope: Namespaced
version: v1
validation:
openAPIV3Schema:
properties:
apiVersion:
type: string
kind:
type: string
metadata: {}
spec:
properties:
newImage:
type: string
status:
properties:
completed:
type: boolean
# Default values for storageos.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
name: storageos-operator
k8sDistro: default
serviceAccount:
create: true
name: storageos-operator-sa
podSecurityPolicy:
enabled: false
annotations: {}
## Specify pod annotations
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
##
# seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
# seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
# apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
# operator-specific configuration parameters.
operator:
image:
repository: storageos/cluster-operator
tag: 1.3.0
pullPolicy: IfNotPresent
# cluster-specific configuration parameters.
cluster:
# set create to true if the operator should auto-create the StorageOS cluster.
create: true
# Name of the deployment.
name: storageos
# Namespace to install the StorageOS cluster into.
namespace: kube-system
# Name of the secret containing StorageOS API credentials.
secretRefName: storageos-api
# Default admin account.
admin:
# Username to authenticate to the StorageOS API with.
username: storageos
# Password to authenticate to the StorageOS API with. If empty, a random
# password will be generated and set in the secretRefName secret.
password:
# sharedDir should be set if running kubelet in a container. This should
# be the path shared into the kubelet container, typically:
# "/var/lib/kubelet/plugins/kubernetes.io~storageos". If not set, defaults
# will be used.
sharedDir:
# Key-Value store backend.
kvBackend:
embedded: true
address:
backend: etcd
tlsSecretName:
tlsSecretNamespace:
# Node selector terms to install StorageOS on.
nodeSelectorTerm:
key:
value:
# Pod toleration for the StorageOS pods.
toleration:
key:
value:
# To disable anonymous usage reporting across the cluster, set to true.
# Defaults to false. To help improve the product, data such as API usage and
# StorageOS configuration information is collected.
disableTelemetry: false
images:
# nodeContainer is the StorageOS node image to use, available from the
# [Docker Hub](https://hub.docker.com/r/storageos/node/).
node:
repository: storageos/node
tag: 1.3.0
csi:
enable: true
deploymentStrategy: deployment
# The following is used for cleaning up unmanaged cluster resources when
# auto-install is enabled.
cleanup:
- name: daemonset
command:
- "daemonset"
- "storageos-daemonset"
- name: statefulset
command:
- "statefulset"
- "storageos-statefulset"
- name: csi-helper
command:
- "deployment"
- "storageos-csi-helper"
- name: scheduler
command:
- "deployment"
- "storageos-scheduler"
- name: configmap
command:
- "configmap"
- "storageos-scheduler-config"
- "storageos-scheduler-policy"
- name: serviceaccount
command:
- "serviceaccount"
- "storageos-daemonset-sa"
- "storageos-statefulset-sa"
- name: role
command:
- "role"
- "storageos:key-management"
- name: rolebinding
command:
- "rolebinding"
- "storageos:key-management"
- name: secret
command:
- "secret"
- "init-secret"
- name: service
command:
- "service"
- "storageos"
- name: clusterrole
command:
- "clusterrole"
- "storageos:driver-registrar"
- "storageos:csi-attacher"
- "storageos:csi-provisioner"
- "storageos:pod-fencer"
- "storageos:scheduler-extender"
- name: clusterrolebinding
command:
- "clusterrolebinding"
- "storageos:csi-provisioner"
- "storageos:csi-attacher"
- "storageos:driver-registrar"
- "storageos:k8s-driver-registrar"
- "storageos:pod-fencer"
- "storageos:scheduler-extender"
- name: storageclass
command:
- "storageclass"
- "fast"