# deploy-templates/prometheus-postgres-exporter/values.yaml
# Default values for prometheus-postgres-exporter.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: quay.io/prometheuscommunity/postgres-exporter
tag: v0.10.0
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
service:
type: ClusterIP
port: 80
targetPort: 9187
name: http
labels: {}
annotations: {}
serviceMonitor:
# When set to true, a ServiceMonitor is used to configure scraping
enabled: true
# Set the namespace the ServiceMonitor should be deployed in
namespace: openshift-monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set the path to the exporter's telemetry (metrics) endpoint
# telemetryPath: /metrics
# Set labels for the ServiceMonitor; use this to define your scrape label for the Prometheus Operator
# labels:
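# For example (the "release: prometheus" pair below is an assumption; it must match
# the ServiceMonitor selector configured in your Prometheus Operator):
# labels:
#   release: prometheus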
# Set timeout for scrape
# timeout: 10s
# Set of labels to transfer from the Kubernetes Service onto the target
# targetLabels: []
# MetricRelabelConfigs to apply to samples before ingestion
# metricRelabelings: []
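# For example, to drop a single metric before ingestion (illustrative only; the
# metric name below is an assumption, adjust it to your needs):
# metricRelabelings:
#   - sourceLabels: [__name__]
#     regex: pg_stat_user_tables_seq_scan
#     action: drop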
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules: []
## These are just example rules; please adapt them to your needs.
## Make sure to constrain the rules to the current prometheus-postgres-exporter service.
# - alert: HugeReplicationLag
# expr: pg_replication_lag{service="{{ template "prometheus-postgres-exporter.fullname" . }}"} / 3600 > 1
# for: 1m
# labels:
# severity: critical
# annotations:
# description: replication for {{ template "prometheus-postgres-exporter.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
# summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances the chart runs in environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
rbac:
# Specifies whether RBAC resources should be created
create: true
# Specifies whether a PodSecurityPolicy should be created
pspEnabled: false
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Add annotations to the ServiceAccount, useful for EKS IAM Roles for Service Accounts or Google Workload Identity.
annotations: {}
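# For example, for EKS IAM Roles for Service Accounts (the role ARN below is a placeholder):
# annotations:
#   eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/postgres-exporter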
securityContext: {}
# The securityContext this Pod should use. See https://kubernetes.io/docs/concepts/policy/security-context/ for more.
# runAsUser: 65534
config:
datasource:
enabled: true
# Specify either datasource or datasourceSecret, not both
host: keycloak-postgresql
user: postgres
# Only one of password and passwordSecret can be specified
# password:
# Specify passwordSecret if the DB password is stored in a Secret.
passwordSecret:
# Secret name
name: keycloak-postgresql
# Password key inside secret
key: postgresql-password
port: "5432"
database: ''
sslmode: disable
datasourceSecret: {}
# Specifies whether the datasource should be sourced from a secret value in the format: postgresql://login:password@hostname:port/dbname?sslmode=disable
# Multiple Postgres databases can be configured with comma-separated postgres connection strings
# Secret name
# name:
# Connection string key inside secret
# key:
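# For example (the Secret name and key below are hypothetical; the Secret must be created separately):
# name: postgres-exporter-dsn
# key: DATA_SOURCE_NAME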
disableDefaultMetrics: false
disableSettingsMetrics: false
autoDiscoverDatabases: false
excludeDatabases: []
includeDatabases: []
constantLabels: {}
# Possible values: debug, info, warn, error, fatal
logLevel: ""
# These are the default queries that the exporter will run, extracted from: https://github.com/prometheus-community/postgres_exporter/blob/master/queries.yaml
queries: |-
pg_replication:
query: "SELECT CASE WHEN NOT pg_is_in_recovery() THEN 0 ELSE GREATEST (0, EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))) END AS lag"
master: true
metrics:
- lag:
usage: "GAUGE"
description: "Replication lag behind master in seconds"
pg_postmaster:
query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()"
master: true
metrics:
- start_time_seconds:
usage: "GAUGE"
description: "Time at which postmaster started"
pg_stat_user_tables:
query: |
SELECT
current_database() datname,
schemaname,
relname,
seq_scan,
seq_tup_read,
idx_scan,
idx_tup_fetch,
n_tup_ins,
n_tup_upd,
n_tup_del,
n_tup_hot_upd,
n_live_tup,
n_dead_tup,
n_mod_since_analyze,
COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum,
COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum,
COALESCE(last_analyze, '1970-01-01Z') as last_analyze,
COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze,
vacuum_count,
autovacuum_count,
analyze_count,
autoanalyze_count
FROM
pg_stat_user_tables
metrics:
- datname:
usage: "LABEL"
description: "Name of current database"
- schemaname:
usage: "LABEL"
description: "Name of the schema that this table is in"
- relname:
usage: "LABEL"
description: "Name of this table"
- seq_scan:
usage: "COUNTER"
description: "Number of sequential scans initiated on this table"
- seq_tup_read:
usage: "COUNTER"
description: "Number of live rows fetched by sequential scans"
- idx_scan:
usage: "COUNTER"
description: "Number of index scans initiated on this table"
- idx_tup_fetch:
usage: "COUNTER"
description: "Number of live rows fetched by index scans"
- n_tup_ins:
usage: "COUNTER"
description: "Number of rows inserted"
- n_tup_upd:
usage: "COUNTER"
description: "Number of rows updated"
- n_tup_del:
usage: "COUNTER"
description: "Number of rows deleted"
- n_tup_hot_upd:
usage: "COUNTER"
description: "Number of rows HOT updated (i.e., with no separate index update required)"
- n_live_tup:
usage: "GAUGE"
description: "Estimated number of live rows"
- n_dead_tup:
usage: "GAUGE"
description: "Estimated number of dead rows"
- n_mod_since_analyze:
usage: "GAUGE"
description: "Estimated number of rows changed since last analyze"
- last_vacuum:
usage: "GAUGE"
description: "Last time at which this table was manually vacuumed (not counting VACUUM FULL)"
- last_autovacuum:
usage: "GAUGE"
description: "Last time at which this table was vacuumed by the autovacuum daemon"
- last_analyze:
usage: "GAUGE"
description: "Last time at which this table was manually analyzed"
- last_autoanalyze:
usage: "GAUGE"
description: "Last time at which this table was analyzed by the autovacuum daemon"
- vacuum_count:
usage: "COUNTER"
description: "Number of times this table has been manually vacuumed (not counting VACUUM FULL)"
- autovacuum_count:
usage: "COUNTER"
description: "Number of times this table has been vacuumed by the autovacuum daemon"
- analyze_count:
usage: "COUNTER"
description: "Number of times this table has been manually analyzed"
- autoanalyze_count:
usage: "COUNTER"
description: "Number of times this table has been analyzed by the autovacuum daemon"
pg_statio_user_tables:
query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables"
metrics:
- datname:
usage: "LABEL"
description: "Name of current database"
- schemaname:
usage: "LABEL"
description: "Name of the schema that this table is in"
- relname:
usage: "LABEL"
description: "Name of this table"
- heap_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table"
- heap_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table"
- idx_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from all indexes on this table"
- idx_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in all indexes on this table"
- toast_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table's TOAST table (if any)"
- toast_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table's TOAST table (if any)"
- tidx_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table's TOAST table indexes (if any)"
- tidx_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table's TOAST table indexes (if any)"
pg_database:
query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size_bytes FROM pg_database"
master: true
cache_seconds: 30
metrics:
- datname:
usage: "LABEL"
description: "Name of the database"
- size_bytes:
usage: "GAUGE"
description: "Disk space used by the database"
pg_stat_activity_idle:
query: |
WITH
metrics AS (
SELECT
application_name,
SUM(EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change))::bigint)::float AS process_seconds_sum,
COUNT(*) AS process_seconds_count
FROM pg_stat_activity
WHERE state = 'idle'
GROUP BY application_name
),
buckets AS (
SELECT
application_name,
le,
SUM(
CASE WHEN EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - state_change)) <= le
THEN 1
ELSE 0
END
)::bigint AS bucket
FROM
pg_stat_activity,
UNNEST(ARRAY[1, 2, 5, 15, 30, 60, 90, 120, 300]) AS le
GROUP BY application_name, le
ORDER BY application_name, le
)
SELECT
application_name,
process_seconds_sum,
process_seconds_count,
ARRAY_AGG(le) AS process_seconds,
ARRAY_AGG(bucket) AS process_seconds_bucket
FROM metrics JOIN buckets USING (application_name)
GROUP BY 1, 2, 3
metrics:
- application_name:
usage: "LABEL"
description: "Application Name"
- process_seconds:
usage: "HISTOGRAM"
description: "Idle time of server processes"
nodeSelector: {}
tolerations: []
affinity: {}
annotations: {}
podLabels: {}
# Configurable health checks
livenessProbe:
initialDelaySeconds: 0
timeoutSeconds: 1
readinessProbe:
initialDelaySeconds: 0
timeoutSeconds: 1
# Init containers, e.g. for secrets creation before the exporter starts
initContainers: []
# - name:
# image:
# volumeMounts:
# - name: creds
# mountPath: /creds
# Additional sidecar containers, e.g. for a database proxy, such as Google's cloudsql-proxy
extraContainers: []
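# For example, a Cloud SQL Auth Proxy sidecar (a sketch; the image tag and the
# instance connection name below are placeholders to replace with your own):
# - name: cloudsql-proxy
#   image: gcr.io/cloudsql-docker/gce-proxy:1.33.2
#   command:
#     - /cloud_sql_proxy
#     - -instances=my-project:us-central1:my-instance=tcp:5432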
# Additional volumes, e.g. for secrets used in an extraContainer
extraVolumes: []
# Uncomment to mount custom ca-certificates
# - name: ssl-certs
# secret:
# defaultMode: 420
# items:
# - key: ca-certificates.crt
# path: ca-certificates.crt
# secretName: ssl-certs
# Additional volume mounts
extraVolumeMounts: []
# Uncomment to mount a custom ca-certificates file into the container
# - name: ssl-certs
# mountPath: /etc/ssl/certs/ca-certificates.crt
# subPath: ca-certificates.crt