Commit f8057dbb by Frank Mai, committed by Craig Jellick

Sync up the root values.yaml with sub-charts

parent 896ade91
enabledRBAC: true
## Name of an already existing ServiceAccount to use
##
serviceAccountName: ""
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
## The name of a secret in the same kubernetes namespace which contains the Alertmanager config
## If defined this will be used instead of the `config` block values.
## The name of the secret must be alertmanager-{{ .Release.Name }} and its data must contain, at least, a key called `alertmanager.yaml`
@@ -15,27 +7,6 @@ apiGroup: "monitoring.coreos.com"
##
configFromSecret: ""
## Alertmanager configuration directives
## Ref: https://prometheus.io/docs/alerting/configuration/
##
config: {}
#
# An example config:
# global:
# resolve_timeout: 5m
# route:
# group_by: ['job']
# group_wait: 30s
# group_interval: 5m
# repeat_interval: 12h
# receiver: 'null'
# routes:
# - match:
# alertname: DeadMansSwitch
# receiver: 'null'
# receivers:
# - name: 'null'
## Alertmanager template files to include
#
templates: {}
@@ -56,20 +27,6 @@ templates: {}
# {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
# {{ end }}
## Alertmanager container image
##
image:
repository: quay.io/prometheus/alertmanager
tag: v0.15.2
proxy:
repository: nginx
tag: 1.15.8-alpine
## Node labels for Alertmanager pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelectors: []
## Tolerations for use with node taints
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
@@ -87,25 +44,6 @@ paused: false
##
replicaCount: 1
## Resource limits & requests
## Ref: https://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
core:
limits:
memory: 500Mi
cpu: 1000m
requests:
memory: 100Mi
cpu: 100m
proxy:
limits:
memory: 100Mi
cpu: 100m
requests:
memory: 50Mi
cpu: 50m
## How long to retain metrics
##
retention: 24h
@@ -129,17 +67,6 @@ storageSpec: {}
# storage: 50Gi
# selector: {}
## Easy way to create persistent data
##
persistence: {}
# enabled: true
# storageClass: gluster
# accessMode: "ReadWriteOnce"
# size: 50Gi
sidecarsSpec: []
# - name: sidecar
# image: registry/name:tag
securityContext: {}
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
## Custom endpoints
##
endpoints: []
ports:
metrics:
scheme: http
name: metrics
port: 9153
protocol: TCP
serviceSelectorLabels:
k8s-app: kube-dns
## Skip verification until we have resolved why the certificate validation
## for the kubelet on API server nodes fails.
##
insecureSkipVerify: true
\ No newline at end of file
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
## Custom endpoints
##
endpoints: []
ports:
metrics:
scheme: http
name: metrics
port: 10252
protocol: TCP
serviceSelectorLabels:
k8s-app: kube-controller-manager
## Skip verification until we have resolved why the certificate validation
## for the kubelet on API server nodes fails.
##
insecureSkipVerify: true
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
## Custom endpoints
##
endpoints: []
ports:
metrics:
dnsmasq:
scheme: http
name: dnsmasq-metrics
port: 10054
protocol: TCP
skydns:
scheme: http
name: skydns-metrics
port: 10055
protocol: TCP
serviceSelectorLabels:
k8s-app: kube-dns
## Skip verification until we have resolved why the certificate validation
## for the kubelet on API server nodes fails.
##
insecureSkipVerify: true
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
## Custom endpoints
##
endpoints: []
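## Illustrative values only (the IP addresses are assumptions): when etcd runs outside
## the cluster, its member addresses can be listed here as custom endpoints to scrape.
# endpoints:
#   - 10.0.0.10
#   - 10.0.0.11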
ports:
metrics:
scheme: https
name: metrics
port: 4001
protocol: TCP
serviceSelectorLabels:
k8s-app: etcd-server
## Skip verification until we have resolved why the certificate validation
## for the kubelet on API server nodes fails.
##
insecureSkipVerify: true
## TLS configuration for the service monitor; defaults to none, but the cert and key files are appended if passed
##
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
certFile: ""
keyFile: ""
\ No newline at end of file
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
## Custom endpoints
##
endpoints: []
ports:
metrics:
scheme: http
name: http-metrics
port: 10251
protocol: TCP
serviceSelectorLabels:
component: kube-scheduler
## Skip verification until we have resolved why the certificate validation
## for the kubelet on API server nodes fails.
##
insecureSkipVerify: true
enabledRBAC: true
## Name of an already existing ServiceAccount to use
##
serviceAccountName: ""
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
# Default values for kube-state-metrics.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: quay.io/coreos/kube-state-metrics
tag: v1.4.0
resources:
limits:
cpu: 100m
memory: 200Mi
requests:
cpu: 100m
memory: 130Mi
## Node Selector to constrain pods to run on particular nodes
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
nodeSelectors: []
## Tolerations for use with node taints
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
@@ -38,5 +8,3 @@ tolerations: []
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
securityContext: {}
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
serviceSelectorLabels:
k8s-app: kubelet
## Skip verification until we have resolved why the certificate validation
## for the kubelet on API server nodes fails.
##
insecureSkipVerify: true
## Set false when using GKE
##
https: true
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
ports:
metrics:
scheme: https
name: https
serviceSelectorLabels:
component: apiserver
provider: kubernetes
insecureSkipVerify: true
enabledRBAC: true
## Name of an already existing ServiceAccount to use
##
serviceAccountName: ""
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
## Port the exporter listens on and exposes on the host
##
ports:
metrics:
port: 9100
enabledHostNetwork: true
enabledHostPID: true
image:
repository: quay.io/prometheus/node-exporter
tag: v0.17.0
resources:
limits:
cpu: 200m
memory: 50Mi
requests:
cpu: 100m
memory: 30Mi
## Node Selector to constrain pods to run on particular nodes
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
nodeSelectors: []
collectors: {}
##
# Default collector settings
##
......
level: cluster
enabledRBAC: true
## Name of an already existing ServiceAccount to use
##
serviceAccountName: ""
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
## Node labels for Grafana pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelectors: []
## Tolerations for use with node taints
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
@@ -33,18 +18,6 @@ tolerations: []
# value: extra-var-value-2
extraVars:
## Grafana Docker image
##
image:
repository: grafana/grafana
tag: 5.3.0
tool:
repository: rancher/prometheus-auth
tag: v0.1.0
proxy:
repository: nginx
tag: 1.15.8-alpine
storageSpec: {}
# storageClassName: default
# accessModes:
@@ -54,14 +27,6 @@ storageSpec: {}
# storage: 2Gi
# selector: {}
## Easy way to create persistent data
##
persistence: {}
# enabled: true
# storageClass: gluster
# accessMode: "ReadWriteOnce"
# size: 50Gi
## Resource limits & requests
## Ref: https://kubernetes.io/docs/user-guide/compute-resources/
resources:
@@ -86,5 +51,3 @@ resources:
requests:
memory: 50Mi
cpu: 50m
prometheusDatasourceURL: ""
level: cluster
enabledRBAC: true
## Name of an already existing ServiceAccount to use
##
serviceAccountName: ""
serviceAccountNameOverride: ""
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com"
## Alertmanagers to which alerts will be sent
## Ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints
##
alertingEndpoints: []
# - name: ""
# namespace: ""
# port: http
# scheme: http
## External labels to add to any time series or alerts when communicating with external systems
##
externalLabels: {}
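## Illustrative value only (the label name and value are assumptions):
# externalLabels:
#   cluster: my-cluster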
@@ -29,39 +8,6 @@ externalLabels: {}
##
additionalRulesLabels: {}
## Prometheus container image
##
image:
repository: quay.io/prometheus/prometheus
tag: v2.4.3
auth:
repository: rancher/prometheus-auth
tag: v0.2.0
proxy:
repository: nginx
tag: 1.15.8-alpine
auth:
args:
- --proxy-url
- http://localhost:9090
- --listen-address
- $(POD_IP):9090
- --filter-reader-labels
- prometheus
- --filter-reader-labels
- prometheus_replica
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
## Node labels for Prometheus pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelectors: []
## Tolerations for use with node taints
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
@@ -71,12 +17,10 @@ tolerations: []
# value: "value"
# effect: "NoSchedule"
## If true, the Operator won't process any Prometheus configuration changes
##
paused: false
## Number of Prometheus replicas desired
##
replicaCount: 1
@@ -91,83 +35,10 @@ remoteRead: []
remoteWrite: []
# - url: http://remote1/push
## Resource limits & requests
## Ref: https://kubernetes.io/docs/user-guide/compute-resources/
resources:
inits:
limits:
memory: 50Mi
cpu: 50m
requests:
memory: 50Mi
cpu: 50m
core:
limits:
memory: 500Mi
cpu: 1000m
requests:
memory: 100Mi
cpu: 100m
proxy:
limits:
memory: 100Mi
cpu: 100m
requests:
memory: 50Mi
cpu: 50m
auth:
limits:
memory: 200Mi
cpu: 500m
requests:
memory: 100Mi
cpu: 100m
## How long to retain metrics
##
retention: 24h
## Namespaces to be selected for PrometheusRules discovery.
## If unspecified, only the namespace the Prometheus object is in is used.
ruleNamespaceSelector: {}
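## Illustrative value only (the label pair is an assumption): ruleNamespaceSelector is a
## label selector, so namespaces can be picked by label like this.
# ruleNamespaceSelector:
#   matchLabels:
#     team: frontend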
## Rules CRD selector
## Ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/design.md
##
## 1. If `matchLabels` is used, `PrometheusRules` must contain all the labels from
## `matchLabels` in order to be matched by Prometheus
## 2. If `matchExpressions` is used, `PrometheusRules` must contain at least one label
## from `matchExpressions` in order to be matched by Prometheus
## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
ruleSelector: {}
# ruleSelector: {
# matchExpressions: [{key: prometheus, operator: In, values: [example-rules, example-rules-2]}]
# }
### OR
# ruleSelector: {
# matchLabels: {role: example-rules}
# }
## List of Secrets in the same namespace as the Prometheus
## object, which shall be mounted into the Prometheus Pods.
## Ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
##
secrets: []
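## Illustrative value only (the secret name is an assumption): each listed Secret is
## mounted into the Prometheus pods by the operator.
# secrets:
#   - etcd-client-cert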
## Namespaces to be selected for ServiceMonitors discovery.
## If unspecified, only the namespace the Prometheus object is in is used.
serviceMonitorNamespaceSelector: {}
## ServiceMonitor CRD selector
## Ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/design.md
##
## 1. If `matchLabels` is used, `ServiceMonitors` must contain all the labels from
## `matchLabels` in order to be matched by Prometheus
## 2. If `matchExpressions` is used, `ServiceMonitors` must contain at least one label
## from `matchExpressions` in order to be matched by Prometheus
## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
serviceMonitorSelector: {}
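## An assumed example, mirroring the ruleSelector samples above (label key and value are
## illustrative):
# serviceMonitorSelector: {
#   matchLabels: {app: example-service-monitor}
# }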
logLevel: "info" logLevel: "info"
## Prometheus StorageSpec for persistent data ## Prometheus StorageSpec for persistent data
...@@ -181,14 +52,6 @@ storageSpec: {} ...@@ -181,14 +52,6 @@ storageSpec: {}
# storage: 50Gi # storage: 50Gi
# selector: {} # selector: {}
## Easy way to create persistent data
##
persistence: {}
# enabled: true
# storageClass: gluster
# accessMode: "ReadWriteOnce"
# size: 50Gi
## Prometheus AdditionalScrapeConfigs
## Ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
##
@@ -209,7 +72,3 @@ additionalAlertManagerConfigs: []
sidecarsSpec: []
# - name: sidecar
# image: registry/name:tag
additionalBindingClusterRoles: []
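## Illustrative value only (assumes these are names of extra ClusterRoles to bind; "view"
## is the stock Kubernetes ClusterRole of that name):
# additionalBindingClusterRoles:
#   - view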
securityContext: {}
enabled: false
nameOverride: "prometheus-operator"
enabledRBAC: true
## CRD apiGroup
##
apiGroup: "monitoring.coreos.com" apiGroup: "monitoring.coreos.com"
## Prometheus-operator image
##
image:
repository: quay.io/coreos/prometheus-operator
tag: v0.26.0
## Prometheus-config-reloader image to use for config and rule reloading
##
prometheusConfigReloader:
repository: quay.io/coreos/prometheus-config-reloader
tag: v0.26.0
## Configmap-reload image to use for reloading configmaps
##
configmapReload:
repository: quay.io/coreos/configmap-reload
tag: v0.0.1
## Node labels for prometheus-operator pod assignment
##
nodeSelectors: []
## Tolerations for use with node taints
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
logFormat: "logfmt"
logLevel: "info"
manageCRDs: false
withValidation: true
## Prometheus-operator resource limits & requests
## Ref: https://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
limits:
cpu: 200m
@@ -51,7 +18,12 @@ resources:
requests:
cpu: 100m
memory: 50Mi
nodeSelectors: []
tolerations: []
logFormat: "logfmt"
logLevel: "info"
manageCRDs: false
withValidation: true
## Name of an already existing ServiceAccount to use
##
serviceAccountName: ""
@@ -194,6 +166,8 @@ exporter-node:
collectors: {}
enabledHostNetwork: true
enabledHostPID: true
## Name of an already existing ServiceAccount to use
##
serviceAccountName: "" serviceAccountName: ""
exporter-kube-state: exporter-kube-state:
......