feat: bigg #2070
Draft
buroa wants to merge 5 commits into main from buroa/bigg
Conversation
--- kubernetes/apps/kube-system/coredns/app Kustomization: kube-system/coredns HelmRelease: kube-system/coredns
+++ kubernetes/apps/kube-system/coredns/app Kustomization: kube-system/coredns HelmRelease: kube-system/coredns
@@ -17,10 +17,62 @@
retries: -1
interval: 1h
upgrade:
cleanupOnFail: true
remediation:
retries: 3
- valuesFrom:
- - kind: ConfigMap
- name: coredns-values-g9f489mcmc
+ values:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/control-plane
+ operator: Exists
+ fullnameOverride: coredns
+ image:
+ repository: mirror.gcr.io/coredns/coredns
+ k8sAppLabelOverride: kube-dns
+ replicaCount: 2
+ servers:
+ - plugins:
+ - name: errors
+ - configBlock: lameduck 5s
+ name: health
+ - name: ready
+ - configBlock: |-
+ pods verified
+ fallthrough in-addr.arpa ip6.arpa
+ name: kubernetes
+ parameters: cluster.local in-addr.arpa ip6.arpa
+ - name: autopath
+ parameters: '@kubernetes'
+ - name: forward
+ parameters: . /etc/resolv.conf
+ - configBlock: |-
+ prefetch 20
+ serve_stale
+ name: cache
+ - name: loop
+ - name: reload
+ - name: loadbalance
+ - name: prometheus
+ parameters: 0.0.0.0:9153
+ - configBlock: class error
+ name: log
+ port: 53
+ zones:
+ - scheme: dns://
+ use_tcp: true
+ zone: .
+ service:
+ clusterIP: 10.245.0.10
+ name: kube-dns
+ serviceAccount:
+ create: true
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ operator: Exists
--- kubernetes/apps/kube-system/coredns/app Kustomization: kube-system/coredns ConfigMap: kube-system/coredns-values-g9f489mcmc
+++ kubernetes/apps/kube-system/coredns/app Kustomization: kube-system/coredns ConfigMap: kube-system/coredns-values-g9f489mcmc
@@ -1,70 +0,0 @@
----
-apiVersion: v1
-data:
- values.yaml: |
- ---
- fullnameOverride: coredns
- image:
- repository: mirror.gcr.io/coredns/coredns
- replicaCount: 2
- k8sAppLabelOverride: kube-dns
- serviceAccount:
- create: true
- service:
- name: kube-dns
- clusterIP: 10.245.0.10
- servers:
- - zones:
- - zone: .
- scheme: dns://
- use_tcp: true
- port: 53
- plugins:
- - name: errors
- - name: health
- configBlock: |-
- lameduck 5s
- - name: ready
- - name: kubernetes
- parameters: cluster.local in-addr.arpa ip6.arpa
- configBlock: |-
- pods verified
- fallthrough in-addr.arpa ip6.arpa
- name: autopath
- parameters: "@kubernetes"
- - name: forward
- parameters: . /etc/resolv.conf
- - name: cache
- configBlock: |-
- prefetch 20
- serve_stale
- - name: loop
- - name: reload
- - name: loadbalance
- - name: prometheus
- parameters: 0.0.0.0:9153
- - name: log
- configBlock: |-
- class error
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: node-role.kubernetes.io/control-plane
- operator: Exists
- tolerations:
- - key: CriticalAddonsOnly
- operator: Exists
- - key: node-role.kubernetes.io/control-plane
- operator: Exists
- effect: NoSchedule
-kind: ConfigMap
-metadata:
- labels:
- app.kubernetes.io/name: coredns
- kustomize.toolkit.fluxcd.io/name: coredns
- kustomize.toolkit.fluxcd.io/namespace: kube-system
- name: coredns-values-g9f489mcmc
- namespace: kube-system
-
--- kubernetes/apps/networking/echo-server/app Kustomization: networking/echo-server HelmRelease: networking/echo-server
+++ kubernetes/apps/networking/echo-server/app Kustomization: networking/echo-server HelmRelease: networking/echo-server
@@ -24,51 +24,52 @@
values:
controllers:
echo-server:
containers:
app:
env:
+ HTTP_PORT: 80
LOG_IGNORE_PATH: /healthz
LOG_WITHOUT_NEWLINE: true
- PORT: 8080
PROMETHEUS_ENABLED: true
image:
repository: ghcr.io/mendhak/http-https-echo
tag: 37@sha256:f55000d9196bd3c853d384af7315f509d21ffb85de315c26e9874033b9f83e15
probes:
liveness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /healthz
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
readiness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /healthz
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
replicas: 2
strategy: RollingUpdate
defaultPodOptions:
+ hostUsers: false
securityContext:
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
route:
app:
@@ -79,12 +80,12 @@
namespace: kube-system
sectionName: https
service:
app:
ports:
http:
- port: 8080
+ port: 80
serviceMonitor:
app:
endpoints:
- port: http
--- kubernetes/apps/observability/keda/app Kustomization: observability/keda HelmRelease: observability/keda
+++ kubernetes/apps/observability/keda/app Kustomization: observability/keda HelmRelease: observability/keda
@@ -10,18 +10,16 @@
namespace: observability
spec:
chartRef:
kind: OCIRepository
name: keda
install:
- crds: CreateReplace
remediation:
retries: -1
interval: 1h
upgrade:
cleanupOnFail: true
- crds: CreateReplace
remediation:
retries: 3
values:
enableServiceLinks: false
--- kubernetes/apps/cert-manager/cert-manager/app Kustomization: cert-manager/cert-manager HelmRelease: cert-manager/cert-manager
+++ kubernetes/apps/cert-manager/cert-manager/app Kustomization: cert-manager/cert-manager HelmRelease: cert-manager/cert-manager
@@ -17,10 +17,17 @@
retries: -1
interval: 1h
upgrade:
cleanupOnFail: true
remediation:
retries: 3
- valuesFrom:
- - kind: ConfigMap
- name: cert-manager-values-hgg6hf7kh2
+ values:
+ crds:
+ enabled: true
+ dns01RecursiveNameservers: https://1.1.1.1:443/dns-query,https://1.0.0.1:443/dns-query
+ dns01RecursiveNameserversOnly: true
+ prometheus:
+ enabled: true
+ servicemonitor:
+ enabled: true
+ replicaCount: 1
--- kubernetes/apps/cert-manager/cert-manager/app Kustomization: cert-manager/cert-manager ConfigMap: cert-manager/cert-manager-values-hgg6hf7kh2
+++ kubernetes/apps/cert-manager/cert-manager/app Kustomization: cert-manager/cert-manager ConfigMap: cert-manager/cert-manager-values-hgg6hf7kh2
@@ -1,23 +0,0 @@
----
-apiVersion: v1
-data:
- values.yaml: |
- ---
- crds:
- enabled: true
- replicaCount: 1
- dns01RecursiveNameservers: https://1.1.1.1:443/dns-query,https://1.0.0.1:443/dns-query
- dns01RecursiveNameserversOnly: true
- prometheus:
- enabled: true
- servicemonitor:
- enabled: true
-kind: ConfigMap
-metadata:
- labels:
- app.kubernetes.io/name: cert-manager
- kustomize.toolkit.fluxcd.io/name: cert-manager
- kustomize.toolkit.fluxcd.io/namespace: cert-manager
- name: cert-manager-values-hgg6hf7kh2
- namespace: cert-manager
-
--- kubernetes/apps/kube-system/cilium/app Kustomization: kube-system/cilium HelmRelease: kube-system/cilium
+++ kubernetes/apps/kube-system/cilium/app Kustomization: kube-system/cilium HelmRelease: kube-system/cilium
@@ -17,10 +17,89 @@
retries: -1
interval: 1h
upgrade:
cleanupOnFail: true
remediation:
retries: 3
- valuesFrom:
- - kind: ConfigMap
- name: cilium-values-d62dd94ccf
+ values:
+ autoDirectNodeRoutes: true
+ bandwidthManager:
+ bbr: true
+ enabled: true
+ bgpControlPlane:
+ enabled: true
+ bpf:
+ datapathMode: netkit
+ masquerade: true
+ preallocateMaps: true
+ cgroup:
+ automount:
+ enabled: false
+ hostRoot: /sys/fs/cgroup
+ cluster:
+ id: 1
+ name: main
+ cni:
+ exclusive: false
+ dashboards:
+ enabled: true
+ enableIPv4BIGTCP: true
+ endpointRoutes:
+ enabled: true
+ envoy:
+ rollOutPods: true
+ gatewayAPI:
+ enableAlpn: true
+ enabled: true
+ xffNumTrustedHops: 1
+ hubble:
+ enabled: false
+ ipam:
+ mode: kubernetes
+ ipv4NativeRoutingCIDR: 10.244.0.0/16
+ k8sServiceHost: 127.0.0.1
+ k8sServicePort: 7445
+ kubeProxyReplacement: true
+ kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256
+ l2announcements:
+ enabled: true
+ loadBalancer:
+ algorithm: maglev
+ mode: dsr
+ localRedirectPolicy: true
+ operator:
+ dashboards:
+ enabled: true
+ prometheus:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+ replicas: 1
+ rollOutPods: true
+ prometheus:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+ trustCRDsExist: true
+ rollOutCiliumPods: true
+ routingMode: native
+ securityContext:
+ capabilities:
+ ciliumAgent:
+ - CHOWN
+ - KILL
+ - NET_ADMIN
+ - NET_RAW
+ - IPC_LOCK
+ - SYS_ADMIN
+ - SYS_RESOURCE
+ - PERFMON
+ - BPF
+ - DAC_OVERRIDE
+ - FOWNER
+ - SETGID
+ - SETUID
+ cleanCiliumState:
+ - NET_ADMIN
+ - SYS_ADMIN
+ - SYS_RESOURCE
--- kubernetes/apps/kube-system/cilium/app Kustomization: kube-system/cilium ConfigMap: kube-system/cilium-values-d62dd94ccf
+++ kubernetes/apps/kube-system/cilium/app Kustomization: kube-system/cilium ConfigMap: kube-system/cilium-values-d62dd94ccf
@@ -1,96 +0,0 @@
----
-apiVersion: v1
-data:
- values.yaml: |
- ---
- autoDirectNodeRoutes: true
- bandwidthManager:
- enabled: true
- bbr: true
- bpf:
- datapathMode: netkit
- masquerade: true
- preallocateMaps: true
- # tproxy: true
- bgpControlPlane:
- enabled: true
- cgroup:
- automount:
- enabled: false
- hostRoot: /sys/fs/cgroup
- cluster:
- id: 1
- name: main
- cni:
- exclusive: false
- dashboards:
- enabled: true
- enableIPv4BIGTCP: true
- endpointRoutes:
- enabled: true
- envoy:
- rollOutPods: true
- gatewayAPI:
- enabled: true
- enableAlpn: true
- xffNumTrustedHops: 1
- hubble:
- enabled: false
- ipam:
- mode: kubernetes
- ipv4NativeRoutingCIDR: 10.244.0.0/16
- k8sServiceHost: 127.0.0.1
- k8sServicePort: 7445
- kubeProxyReplacement: true
- kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256
- l2announcements:
- enabled: true
- loadBalancer:
- algorithm: maglev
- mode: dsr
- localRedirectPolicy: true
- operator:
- replicas: 1
- rollOutPods: true
- prometheus:
- enabled: true
- serviceMonitor:
- enabled: true
- dashboards:
- enabled: true
- prometheus:
- enabled: true
- serviceMonitor:
- enabled: true
- trustCRDsExist: true
- rollOutCiliumPods: true
- routingMode: native
- securityContext:
- capabilities:
- ciliumAgent:
- - CHOWN
- - KILL
- - NET_ADMIN
- - NET_RAW
- - IPC_LOCK
- - SYS_ADMIN
- - SYS_RESOURCE
- - PERFMON
- - BPF
- - DAC_OVERRIDE
- - FOWNER
- - SETGID
- - SETUID
- cleanCiliumState:
- - NET_ADMIN
- - SYS_ADMIN
- - SYS_RESOURCE
-kind: ConfigMap
-metadata:
- labels:
- app.kubernetes.io/name: cilium
- kustomize.toolkit.fluxcd.io/name: cilium
- kustomize.toolkit.fluxcd.io/namespace: kube-system
- name: cilium-values-d62dd94ccf
- namespace: kube-system
-
--- kubernetes/apps/observability/blackbox-exporter/app Kustomization: observability/blackbox-exporter HelmRelease: observability/blackbox-exporter
+++ kubernetes/apps/observability/blackbox-exporter/app Kustomization: observability/blackbox-exporter HelmRelease: observability/blackbox-exporter
@@ -23,27 +23,27 @@
values:
config:
modules:
http_2xx:
http:
follow_redirects: true
- preferred_ip_protocol: ipv4
+ preferred_ip_protocol: ip4
valid_http_versions:
- HTTP/1.1
- HTTP/2.0
prober: http
timeout: 5s
icmp:
icmp:
- preferred_ip_protocol: ipv4
+ preferred_ip_protocol: ip4
prober: icmp
timeout: 5s
tcp_connect:
prober: tcp
tcp:
- preferred_ip_protocol: ipv4
+ preferred_ip_protocol: ip4
timeout: 5s
fullnameOverride: blackbox-exporter
securityContext:
capabilities:
add:
- NET_RAW
--- kubernetes/apps/external-secrets/external-secrets/app Kustomization: external-secrets/external-secrets HelmRelease: external-secrets/external-secrets
+++ kubernetes/apps/external-secrets/external-secrets/app Kustomization: external-secrets/external-secrets HelmRelease: external-secrets/external-secrets
@@ -17,10 +17,27 @@
retries: -1
interval: 1h
upgrade:
cleanupOnFail: true
remediation:
retries: 3
- valuesFrom:
- - kind: ConfigMap
- name: external-secrets-values-h9g78hg67k
+ values:
+ certController:
+ image:
+ repository: ghcr.io/external-secrets/external-secrets
+ serviceMonitor:
+ enabled: true
+ interval: 1m
+ image:
+ repository: ghcr.io/external-secrets/external-secrets
+ leaderElect: true
+ replicaCount: 1
+ serviceMonitor:
+ enabled: true
+ interval: 1m
+ webhook:
+ image:
+ repository: ghcr.io/external-secrets/external-secrets
+ serviceMonitor:
+ enabled: true
+ interval: 1m
--- kubernetes/apps/external-secrets/external-secrets/app Kustomization: external-secrets/external-secrets ConfigMap: external-secrets/external-secrets-values-h9g78hg67k
+++ kubernetes/apps/external-secrets/external-secrets/app Kustomization: external-secrets/external-secrets ConfigMap: external-secrets/external-secrets-values-h9g78hg67k
@@ -1,34 +0,0 @@
----
-apiVersion: v1
-data:
- values.yaml: |
- ---
- installCRDs: true
- replicaCount: 1
- leaderElect: true
- image:
- repository: ghcr.io/external-secrets/external-secrets
- webhook:
- image:
- repository: ghcr.io/external-secrets/external-secrets
- serviceMonitor:
- enabled: true
- interval: 1m
- certController:
- image:
- repository: ghcr.io/external-secrets/external-secrets
- serviceMonitor:
- enabled: true
- interval: 1m
- serviceMonitor:
- enabled: true
- interval: 1m
-kind: ConfigMap
-metadata:
- labels:
- app.kubernetes.io/name: external-secrets
- kustomize.toolkit.fluxcd.io/name: external-secrets
- kustomize.toolkit.fluxcd.io/namespace: external-secrets
- name: external-secrets-values-h9g78hg67k
- namespace: external-secrets
-
--- kubernetes/apps/observability/karma/app Kustomization: observability/karma HelmRelease: observability/karma
+++ kubernetes/apps/observability/karma/app Kustomization: observability/karma HelmRelease: observability/karma
@@ -25,47 +25,47 @@
controllers:
karma:
containers:
app:
env:
ALERTMANAGER_URI: http://alertmanager-operated.observability.svc.cluster.local:9093
+ LISTEN_PORT: 80
image:
repository: ghcr.io/prymitive/karma
tag: v0.121@sha256:9f0ad820df1b1d0af562de3b3c545a52ddfce8d7492f434a2276e45f3a1f7e28
probes:
liveness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /health
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
readiness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /health
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
- replicas: 2
- strategy: RollingUpdate
defaultPodOptions:
+ hostUsers: false
securityContext:
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
route:
app:
@@ -76,12 +76,12 @@
namespace: kube-system
sectionName: https
service:
app:
ports:
http:
- port: 8080
+ port: 80
serviceMonitor:
app:
endpoints:
- port: http
--- kubernetes/apps/observability/kromgo/app Kustomization: observability/kromgo HelmRelease: observability/kromgo
+++ kubernetes/apps/observability/kromgo/app Kustomization: observability/kromgo HelmRelease: observability/kromgo
@@ -26,76 +26,81 @@
kromgo:
annotations:
reloader.stakater.com/auto: 'true'
containers:
app:
env:
- HEALTH_PORT: 8888
+ HEALTH_PORT: 8080
PROMETHEUS_URL: http://prometheus-operated.observability.svc.cluster.local:9090
- SERVER_PORT: 8080
+ SERVER_PORT: 80
image:
repository: ghcr.io/kashalls/kromgo
tag: v0.7.1@sha256:d8fca4ff9b696abc4ca019c76fa629c39e844e4d9435f4afac87a97b1eeae152
probes:
liveness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /-/ready
- port: 8888
+ port: 8080
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
readiness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /-/ready
- port: 8888
+ port: 8080
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
replicas: 2
strategy: RollingUpdate
defaultPodOptions:
+ hostUsers: false
securityContext:
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
persistence:
config:
globalMounts:
- path: /kromgo/config.yaml
readOnly: true
subPath: config.yaml
- name: kromgo-configmap
+ name: '{{ .Release.Name }}-configmap'
type: configMap
route:
app:
hostnames:
- '{{ .Release.Name }}.youmans.io'
parentRefs:
- name: external
namespace: kube-system
sectionName: https
+ rules:
+ - backendRefs:
+ - identifier: app
+ port: 80
service:
app:
ports:
health:
- port: 8888
+ port: 8080
http:
- port: 8080
+ port: 80
serviceMonitor:
app:
endpoints:
- port: health
--- kubernetes/apps/flux-system/flux-operator/app Kustomization: flux-system/flux-operator HelmRelease: flux-system/flux-operator
+++ kubernetes/apps/flux-system/flux-operator/app Kustomization: flux-system/flux-operator HelmRelease: flux-system/flux-operator
@@ -17,10 +17,10 @@
retries: -1
interval: 1h
upgrade:
cleanupOnFail: true
remediation:
retries: 3
- valuesFrom:
- - kind: ConfigMap
- name: flux-operator-values-fb7h5gm7k8
+ values:
+ serviceMonitor:
+ create: true
--- kubernetes/apps/flux-system/flux-operator/app Kustomization: flux-system/flux-operator ConfigMap: flux-system/flux-operator-values-fb7h5gm7k8
+++ kubernetes/apps/flux-system/flux-operator/app Kustomization: flux-system/flux-operator ConfigMap: flux-system/flux-operator-values-fb7h5gm7k8
@@ -1,16 +0,0 @@
----
-apiVersion: v1
-data:
- values.yaml: |
- ---
- serviceMonitor:
- create: true
-kind: ConfigMap
-metadata:
- labels:
- app.kubernetes.io/name: flux-operator
- kustomize.toolkit.fluxcd.io/name: flux-operator
- kustomize.toolkit.fluxcd.io/namespace: flux-system
- name: flux-operator-values-fb7h5gm7k8
- namespace: flux-system
-
--- kubernetes/apps/system-upgrade/system-upgrade-controller/app Kustomization: system-upgrade/system-upgrade-controller HelmRelease: system-upgrade/system-upgrade-controller
+++ kubernetes/apps/system-upgrade/system-upgrade-controller/app Kustomization: system-upgrade/system-upgrade-controller HelmRelease: system-upgrade/system-upgrade-controller
@@ -25,17 +25,14 @@
controllers:
system-upgrade-controller:
containers:
app:
env:
SYSTEM_UPGRADE_CONTROLLER_LEADER_ELECT: true
- SYSTEM_UPGRADE_CONTROLLER_NAME: system-upgrade-controller
- SYSTEM_UPGRADE_CONTROLLER_NAMESPACE:
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
+ SYSTEM_UPGRADE_CONTROLLER_NAME: '{{ .Release.Name }}'
+ SYSTEM_UPGRADE_CONTROLLER_NAMESPACE: '{{ .Release.Namespace }}'
SYSTEM_UPGRADE_CONTROLLER_NODE_NAME:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
SYSTEM_UPGRADE_JOB_PRIVILEGED: false
image:
@@ -44,16 +41,15 @@
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
- replicas: 2
serviceAccount:
name: system-upgrade-controller
- strategy: RollingUpdate
defaultPodOptions:
+ hostUsers: false
securityContext:
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
--- kubernetes/apps/external-secrets/onepassword/app Kustomization: external-secrets/onepassword HelmRelease: external-secrets/onepassword
+++ kubernetes/apps/external-secrets/onepassword/app Kustomization: external-secrets/onepassword HelmRelease: external-secrets/onepassword
@@ -33,36 +33,40 @@
OP_BUS_PORT: 11220
OP_HTTP_PORT: 80
OP_SESSION:
valueFrom:
secretKeyRef:
key: 1password-credentials.json
- name: onepassword-secret
+ name: '{{ .Release.Name }}-secret'
XDG_DATA_HOME: /config
image:
- repository: docker.io/1password/connect-api
- tag: 1.7.4@sha256:f97189814239381e6dd88577f2b0b838a64e006a460608455b3127c15b174601
+ repository: ghcr.io/1password/connect-api
+ tag: 1.7.4@sha256:7d2132985f2f05b7fe4cfaf76314adeeb5abb745f051685b1c561130ab22ade3
probes:
liveness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /heartbeat
port: 80
- initialDelaySeconds: 15
- periodSeconds: 30
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
readiness:
custom: true
enabled: true
spec:
+ failureThreshold: 3
httpGet:
path: /health
port: 80
- initialDelaySeconds: 15
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
@@ -72,44 +76,49 @@
OP_BUS_PORT: 11221
OP_HTTP_PORT: 8081
OP_SESSION:
valueFrom:
secretKeyRef:
key: 1password-credentials.json
- name: onepassword-secret
+ name: '{{ .Release.Name }}-secret'
XDG_DATA_HOME: /config
image:
- repository: docker.io/1password/connect-sync
- tag: 1.7.4@sha256:27e7ec47e1ad8eaa2f54764fa0736954a5119d0155dea3c923c481c89c5f964c
+ repository: ghcr.io/1password/connect-sync
+ tag: 1.7.4@sha256:b2b9beb06e40615c55f698e2efc06cad5bdb1f82e09e60d1aac6d7bf3d57ec43
probes:
liveness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /heartbeat
port: 8081
- initialDelaySeconds: 15
- periodSeconds: 30
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
readiness:
custom: true
enabled: true
spec:
+ failureThreshold: 3
httpGet:
path: /health
port: 8081
- initialDelaySeconds: 15
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
strategy: RollingUpdate
defaultPodOptions:
+ hostUsers: false
securityContext:
runAsGroup: 999
runAsNonRoot: true
runAsUser: 999
persistence:
config:
--- kubernetes/apps/flux-system/flux-instance/app Kustomization: flux-system/flux-instance HelmRelease: flux-system/flux-instance
+++ kubernetes/apps/flux-system/flux-instance/app Kustomization: flux-system/flux-instance HelmRelease: flux-system/flux-instance
@@ -17,10 +17,101 @@
retries: -1
interval: 1h
upgrade:
cleanupOnFail: true
remediation:
retries: 3
- valuesFrom:
- - kind: ConfigMap
- name: flux-instance-values-hh24bh95tg
+ values:
+ instance:
+ cluster:
+ networkPolicy: false
+ commonMetadata:
+ labels:
+ app.kubernetes.io/name: flux
+ components:
+ - source-controller
+ - kustomize-controller
+ - helm-controller
+ - notification-controller
+ distribution:
+ version: 2.6.4
+ kustomize:
+ patches:
+ - patch: |
+ - op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: --concurrent=10
+ - op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: --requeue-dependency=5s
+ target:
+ kind: Deployment
+ name: (kustomize-controller|helm-controller|source-controller)
+ - patch: |
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: all
+ spec:
+ template:
+ spec:
+ containers:
+ - name: manager
+ resources:
+ limits:
+ memory: 2Gi
+ target:
+ kind: Deployment
+ name: (kustomize-controller|helm-controller|source-controller)
+ - patch: |
+ - op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: --concurrent=20
+ - op: replace
+ path: /spec/template/spec/volumes/0
+ value:
+ name: temp
+ emptyDir:
+ medium: Memory
+ target:
+ kind: Deployment
+ name: kustomize-controller
+ - patch: |
+ - op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: --helm-cache-max-size=10
+ - op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: --helm-cache-ttl=60m
+ - op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: --helm-cache-purge-interval=5m
+ target:
+ kind: Deployment
+ name: source-controller
+ - patch: |
+ - op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: --feature-gates=OOMWatch=true
+ - op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: --oom-watch-memory-threshold=95
+ - op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: --oom-watch-interval=500ms
+ target:
+ kind: Deployment
+ name: helm-controller
+ - patch: |
+ - op: add
+ path: /spec/template/spec/containers/0/args/-
+ value: --feature-gates=DisableChartDigestTracking=true
+ target:
+ kind: Deployment
+ name: helm-controller
+ sync:
+ interval: 1h
+ kind: GitRepository
+ path: ./kubernetes/flux/cluster
+ ref: refs/heads/main
+ url: https://github.com/dotcomscripts/k8s-gitops
--- kubernetes/apps/flux-system/flux-instance/app Kustomization: flux-system/flux-instance ConfigMap: flux-system/flux-instance-values-hh24bh95tg
+++ kubernetes/apps/flux-system/flux-instance/app Kustomization: flux-system/flux-instance ConfigMap: flux-system/flux-instance-values-hh24bh95tg
@@ -1,114 +0,0 @@
----
-apiVersion: v1
-data:
- values.yaml: |
- ---
- instance:
- distribution:
- # renovate: datasource=github-releases depName=controlplaneio-fluxcd/distribution
- version: 2.6.4
- cluster:
- networkPolicy: false
- components:
- - source-controller
- - kustomize-controller
- - helm-controller
- - notification-controller
- sync:
- kind: GitRepository
- url: https://github.com/dotcomscripts/k8s-gitops
- ref: refs/heads/main
- path: ./kubernetes/flux/cluster
- interval: 1h
- commonMetadata:
- labels:
- app.kubernetes.io/name: flux
- kustomize:
- patches:
- - # Increase the number of workers
- patch: |
- - op: add
- path: /spec/template/spec/containers/0/args/-
- value: --concurrent=10
- - op: add
- path: /spec/template/spec/containers/0/args/-
- value: --requeue-dependency=5s
- target:
- kind: Deployment
- name: (kustomize-controller|helm-controller|source-controller)
- - # Increase the memory limits
- patch: |
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: all
- spec:
- template:
- spec:
- containers:
- - name: manager
- resources:
- limits:
- memory: 2Gi
- target:
- kind: Deployment
- name: (kustomize-controller|helm-controller|source-controller)
- - # Enable in-memory kustomize builds
- patch: |
- - op: add
- path: /spec/template/spec/containers/0/args/-
- value: --concurrent=20
- - op: replace
- path: /spec/template/spec/volumes/0
- value:
- name: temp
- emptyDir:
- medium: Memory
- target:
- kind: Deployment
- name: kustomize-controller
- - # Enable Helm repositories caching
- patch: |
- - op: add
- path: /spec/template/spec/containers/0/args/-
- value: --helm-cache-max-size=10
- - op: add
- path: /spec/template/spec/containers/0/args/-
- value: --helm-cache-ttl=60m
- - op: add
- path: /spec/template/spec/containers/0/args/-
- value: --helm-cache-purge-interval=5m
- target:
- kind: Deployment
- name: source-controller
- - # Flux near OOM detection for Helm
- patch: |
- - op: add
- path: /spec/template/spec/containers/0/args/-
- value: --feature-gates=OOMWatch=true
- - op: add
- path: /spec/template/spec/containers/0/args/-
- value: --oom-watch-memory-threshold=95
- - op: add
- path: /spec/template/spec/containers/0/args/-
- value: --oom-watch-interval=500ms
- target:
- kind: Deployment
- name: helm-controller
- - # Disable chart digest tracking
- patch: |
- - op: add
- path: /spec/template/spec/containers/0/args/-
- value: --feature-gates=DisableChartDigestTracking=true
- target:
- kind: Deployment
- name: helm-controller
-kind: ConfigMap
-metadata:
- labels:
- app.kubernetes.io/name: flux-instance
- kustomize.toolkit.fluxcd.io/name: flux-instance
- kustomize.toolkit.fluxcd.io/namespace: flux-system
- name: flux-instance-values-hh24bh95tg
- namespace: flux-system
-
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: actions-runner-system/actions-runner-controller
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: actions-runner-system/actions-runner-controller
@@ -8,18 +8,22 @@
name: actions-runner-controller
namespace: actions-runner-system
spec:
commonMetadata:
labels:
app.kubernetes.io/name: actions-runner-controller
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: actions-runner-controller
+ namespace: actions-runner-system
interval: 1h
path: ./kubernetes/apps/actions-runner-system/actions-runner-controller/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: actions-runner-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: actions-runner-system/actions-runner-controller-runners
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: actions-runner-system/actions-runner-controller-runners
@@ -13,20 +13,17 @@
app.kubernetes.io/name: actions-runner-controller-runners
dependsOn:
- name: actions-runner-controller
namespace: actions-runner-system
- name: democratic-csi
namespace: democratic-csi
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/actions-runner-system/actions-runner-controller/runners
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: actions-runner-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: cert-manager/cert-manager
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: cert-manager/cert-manager
@@ -8,18 +8,22 @@
name: cert-manager
namespace: cert-manager
spec:
commonMetadata:
labels:
app.kubernetes.io/name: cert-manager
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: cert-manager
+ namespace: cert-manager
interval: 1h
path: ./kubernetes/apps/cert-manager/cert-manager/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: cert-manager
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: cert-manager/cert-manager-issuers
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: cert-manager/cert-manager-issuers
@@ -11,14 +11,12 @@
commonMetadata:
labels:
app.kubernetes.io/name: cert-manager-issuers
dependsOn:
- name: cert-manager
namespace: cert-manager
- - name: onepassword-store
- namespace: external-secrets
healthCheckExprs:
- apiVersion: cert-manager.io/v1
current: status.conditions.filter(e, e.type == 'Ready').all(e, e.status == 'True')
failed: status.conditions.filter(e, e.type == 'Ready').all(e, e.status == 'False')
kind: ClusterIssuer
interval: 1h
@@ -28,8 +26,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: cert-manager
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: databases/cloudnative-pg
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: databases/cloudnative-pg
@@ -18,8 +18,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: databases
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: databases/cloudnative-pg-barman-cloud
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: databases/cloudnative-pg-barman-cloud
@@ -26,8 +26,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: databases
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: databases/cloudnative-pg-cluster
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: databases/cloudnative-pg-cluster
@@ -15,14 +15,12 @@
- name: cloudnative-pg
namespace: databases
- name: cloudnative-pg-barman-cloud
namespace: databases
- name: democratic-csi
namespace: democratic-csi
- - name: onepassword-store
- namespace: external-secrets
healthCheckExprs:
- apiVersion: postgresql.cnpg.io/v1
current: status.conditions.filter(e, e.type == 'Ready').all(e, e.status == 'True')
failed: status.conditions.filter(e, e.type == 'Ready').all(e, e.status == 'False')
kind: Cluster
interval: 1h
@@ -35,8 +33,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: databases
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: democratic-csi/democratic-csi
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: democratic-csi/democratic-csi
@@ -9,20 +9,24 @@
namespace: democratic-csi
spec:
commonMetadata:
labels:
app.kubernetes.io/name: democratic-csi
dependsOn:
- - name: snapshot-controller
+ - name: volsync
namespace: volsync-system
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: democratic-csi
+ namespace: democratic-csi
interval: 1h
path: ./kubernetes/apps/democratic-csi/democratic-csi/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: democratic-csi
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: external-secrets/external-secrets
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: external-secrets/external-secrets
@@ -18,8 +18,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: external-secrets
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: external-secrets/onepassword
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: external-secrets/onepassword
@@ -8,21 +8,22 @@
name: onepassword
namespace: external-secrets
spec:
commonMetadata:
labels:
app.kubernetes.io/name: onepassword
- dependsOn:
- - name: external-secrets
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: onepassword
namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/external-secrets/onepassword/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: external-secrets
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: external-secrets/onepassword-store
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: external-secrets/onepassword-store
@@ -26,8 +26,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: external-secrets
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: flux-system/flux-instance
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: flux-system/flux-instance
@@ -21,8 +21,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: flux-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: flux-system/flux-operator
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: flux-system/flux-operator
@@ -8,18 +8,22 @@
name: flux-operator
namespace: flux-system
spec:
commonMetadata:
labels:
app.kubernetes.io/name: flux-operator
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: flux-operator
+ namespace: flux-system
interval: 1h
path: ./kubernetes/apps/flux-system/flux-operator/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: flux-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/cilium
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/cilium
@@ -8,18 +8,22 @@
name: cilium
namespace: kube-system
spec:
commonMetadata:
labels:
app.kubernetes.io/name: cilium
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: cilium
+ namespace: kube-system
interval: 1h
path: ./kubernetes/apps/kube-system/cilium/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: kube-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/coredns
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/coredns
@@ -18,8 +18,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: kube-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/intel-device-plugin-operator
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/intel-device-plugin-operator
@@ -8,18 +8,22 @@
name: intel-device-plugin-operator
namespace: kube-system
spec:
commonMetadata:
labels:
app.kubernetes.io/name: intel-device-plugin-operator
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: intel-device-plugin-operator
+ namespace: kube-system
interval: 1h
path: ./kubernetes/apps/kube-system/intel-device-plugin/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: kube-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/intel-device-plugin-gpu
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/intel-device-plugin-gpu
@@ -21,8 +21,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: kube-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/metrics-server
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/metrics-server
@@ -18,8 +18,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: kube-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/reloader
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: kube-system/reloader
@@ -18,8 +18,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: kube-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/autobrr
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/autobrr
@@ -11,20 +11,17 @@
commonMetadata:
labels:
app.kubernetes.io/name: autobrr
dependsOn:
- name: cloudnative-pg-cluster
namespace: databases
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/media/autobrr/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/cross-seed
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/cross-seed
@@ -9,23 +9,17 @@
namespace: media
spec:
commonMetadata:
labels:
app.kubernetes.io/name: cross-seed
components:
- - ../../../../components/nfs-scaler
+ - ../../../../components/keda/nfs-scaler
- ../../../../components/volsync
dependsOn:
- name: democratic-csi
namespace: democratic-csi
- - name: onepassword-store
- namespace: external-secrets
- - name: qbittorrent
- namespace: media
- - name: volsync
- namespace: volsync-system
interval: 1h
path: ./kubernetes/apps/media/cross-seed/app
postBuild:
substitute:
APP: cross-seed
VOLSYNC_CAPACITY: 2Gi
@@ -34,8 +28,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/jellyseerr
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/jellyseerr
@@ -14,16 +14,12 @@
components:
- ../../../../components/gatus
- ../../../../components/volsync
dependsOn:
- name: democratic-csi
namespace: democratic-csi
- - name: onepassword-store
- namespace: external-secrets
- - name: volsync
- namespace: volsync-system
interval: 1h
path: ./kubernetes/apps/media/jellyseerr/app
postBuild:
substitute:
APP: jellyseerr
GATUS_PATH: /api/v1/status
@@ -33,8 +29,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/plex
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/plex
@@ -10,13 +10,13 @@
spec:
commonMetadata:
labels:
app.kubernetes.io/name: plex
components:
- ../../../../components/gatus
- - ../../../../components/nfs-scaler
+ - ../../../../components/keda/nfs-scaler
- ../../../../components/volsync
dependsOn:
- name: democratic-csi
namespace: democratic-csi
- name: intel-device-plugin-gpu
namespace: kube-system
@@ -35,8 +35,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/prowlarr
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/prowlarr
@@ -11,20 +11,17 @@
commonMetadata:
labels:
app.kubernetes.io/name: prowlarr
dependsOn:
- name: cloudnative-pg-cluster
namespace: databases
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/media/prowlarr/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/qbittorrent
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/qbittorrent
@@ -9,13 +9,13 @@
namespace: media
spec:
commonMetadata:
labels:
app.kubernetes.io/name: qbittorrent
components:
- - ../../../../components/nfs-scaler
+ - ../../../../components/keda/nfs-scaler
- ../../../../components/volsync
dependsOn:
- name: democratic-csi
namespace: democratic-csi
- name: volsync
namespace: volsync-system
@@ -30,8 +30,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/radarr
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/radarr
@@ -9,20 +9,18 @@
namespace: media
spec:
commonMetadata:
labels:
app.kubernetes.io/name: radarr
components:
- - ../../../../components/nfs-scaler
+ - ../../../../components/keda/nfs-scaler
dependsOn:
- name: cloudnative-pg-cluster
namespace: databases
- name: democratic-csi
namespace: democratic-csi
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/media/radarr/app
postBuild:
substitute:
APP: radarr
prune: true
@@ -30,8 +28,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/recyclarr
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/recyclarr
@@ -13,16 +13,12 @@
app.kubernetes.io/name: recyclarr
components:
- ../../../../components/volsync
dependsOn:
- name: democratic-csi
namespace: democratic-csi
- - name: onepassword-store
- namespace: external-secrets
- - name: volsync
- namespace: volsync-system
interval: 1h
path: ./kubernetes/apps/media/recyclarr/app
postBuild:
substitute:
APP: recyclarr
VOLSYNC_CAPACITY: 2Gi
@@ -31,8 +27,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/sonarr
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/sonarr
@@ -9,20 +9,18 @@
namespace: media
spec:
commonMetadata:
labels:
app.kubernetes.io/name: sonarr
components:
- - ../../../../components/nfs-scaler
+ - ../../../../components/keda/nfs-scaler
dependsOn:
- name: cloudnative-pg-cluster
namespace: databases
- name: democratic-csi
namespace: democratic-csi
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/media/sonarr/app
postBuild:
substitute:
APP: sonarr
prune: true
@@ -30,8 +28,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/tautulli
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/tautulli
@@ -29,8 +29,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/tqm
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: media/tqm
@@ -21,8 +21,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: media
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: networking/cloudflared
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: networking/cloudflared
@@ -8,21 +8,17 @@
name: cloudflared
namespace: networking
spec:
commonMetadata:
labels:
app.kubernetes.io/name: cloudflared
- dependsOn:
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/networking/cloudflared/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: networking
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: networking/echo-server
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: networking/echo-server
@@ -24,8 +24,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: networking
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: networking/external-dns-cloudflare
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: networking/external-dns-cloudflare
@@ -8,21 +8,17 @@
name: external-dns-cloudflare
namespace: networking
spec:
commonMetadata:
labels:
app.kubernetes.io/name: external-dns-cloudflare
- dependsOn:
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/networking/external-dns/cloudflare
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: networking
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: networking/external-dns-unifi
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: networking/external-dns-unifi
@@ -8,21 +8,17 @@
name: external-dns-unifi
namespace: networking
spec:
commonMetadata:
labels:
app.kubernetes.io/name: external-dns-unifi
- dependsOn:
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/networking/external-dns/unifi
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: networking
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: networking/smtp-relay
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: networking/smtp-relay
@@ -8,21 +8,17 @@
name: smtp-relay
namespace: networking
spec:
commonMetadata:
labels:
app.kubernetes.io/name: smtp-relay
- dependsOn:
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/networking/smtp-relay/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: networking
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/alloy
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/alloy
@@ -18,8 +18,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/blackbox-exporter
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/blackbox-exporter
@@ -8,18 +8,22 @@
name: blackbox-exporter
namespace: observability
spec:
commonMetadata:
labels:
app.kubernetes.io/name: blackbox-exporter
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: blackbox-exporter
+ namespace: observability
interval: 1h
path: ./kubernetes/apps/observability/blackbox-exporter/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/gatus
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/gatus
@@ -8,29 +8,17 @@
name: gatus
namespace: observability
spec:
commonMetadata:
labels:
app.kubernetes.io/name: gatus
- components:
- - ../../../../components/gatus
- dependsOn:
- - name: cloudnative-pg-cluster
- namespace: databases
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/observability/gatus/app
- postBuild:
- substitute:
- APP: gatus
- GATUS_PATH: /health
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/grafana
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/grafana
@@ -8,21 +8,17 @@
name: grafana
namespace: observability
spec:
commonMetadata:
labels:
app.kubernetes.io/name: grafana
- dependsOn:
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/observability/grafana/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/karma
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/karma
@@ -18,8 +18,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/keda
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/keda
@@ -18,8 +18,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/kromgo
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/kromgo
@@ -24,8 +24,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/kube-prometheus-stack
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/kube-prometheus-stack
@@ -11,20 +11,17 @@
commonMetadata:
labels:
app.kubernetes.io/name: kube-prometheus-stack
dependsOn:
- name: democratic-csi
namespace: democratic-csi
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/observability/kube-prometheus-stack/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/loki
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/loki
@@ -21,8 +21,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/silence-operator
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/silence-operator
@@ -8,18 +8,22 @@
name: silence-operator
namespace: observability
spec:
commonMetadata:
labels:
app.kubernetes.io/name: silence-operator
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: silence-operator
+ namespace: observability
interval: 1h
path: ./kubernetes/apps/observability/silence-operator/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/silence-operator-silences
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/silence-operator-silences
@@ -21,8 +21,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/smartctl-exporter
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/smartctl-exporter
@@ -18,8 +18,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/snmp-exporter
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/snmp-exporter
@@ -18,8 +18,7 @@
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/unpoller
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: observability/unpoller
@@ -8,21 +8,17 @@
name: unpoller
namespace: observability
spec:
commonMetadata:
labels:
app.kubernetes.io/name: unpoller
- dependsOn:
- - name: onepassword-store
- namespace: external-secrets
interval: 1h
path: ./kubernetes/apps/observability/unpoller/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: observability
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: system-upgrade/system-upgrade-controller
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: system-upgrade/system-upgrade-controller
@@ -8,18 +8,22 @@
name: system-upgrade-controller
namespace: system-upgrade
spec:
commonMetadata:
labels:
app.kubernetes.io/name: system-upgrade-controller
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: system-upgrade-controller
+ namespace: system-upgrade
interval: 1h
path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: system-upgrade
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: system-upgrade/system-upgrade-controller-plans
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: system-upgrade/system-upgrade-controller-plans
@@ -13,20 +13,15 @@
app.kubernetes.io/name: system-upgrade-controller-plans
dependsOn:
- name: system-upgrade-controller
namespace: system-upgrade
interval: 1h
path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/plans
- postBuild:
- substitute:
- KUBERNETES_VERSION: v1.34.0
- TALOS_VERSION: v1.11.0
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: system-upgrade
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: volsync-system/snapshot-controller
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: volsync-system/snapshot-controller
@@ -8,18 +8,22 @@
name: snapshot-controller
namespace: volsync-system
spec:
commonMetadata:
labels:
app.kubernetes.io/name: snapshot-controller
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: snapshot-controller
+ namespace: volsync-system
interval: 1h
path: ./kubernetes/apps/volsync-system/snapshot-controller/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: volsync-system
timeout: 5m
- wait: true
--- kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: volsync-system/volsync
+++ kubernetes/apps Kustomization: flux-system/cluster-apps Kustomization: volsync-system/volsync
@@ -11,18 +11,22 @@
commonMetadata:
labels:
app.kubernetes.io/name: volsync
dependsOn:
- name: snapshot-controller
namespace: volsync-system
+ healthChecks:
+ - apiVersion: helm.toolkit.fluxcd.io/v2
+ kind: HelmRelease
+ name: volsync
+ namespace: volsync-system
interval: 1h
path: ./kubernetes/apps/volsync-system/volsync/app
prune: true
retryInterval: 2m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
targetNamespace: volsync-system
timeout: 5m
- wait: true
--- kubernetes/apps/media/plex/app Kustomization: media/plex HelmRelease: media/plex
+++ kubernetes/apps/media/plex/app Kustomization: media/plex HelmRelease: media/plex
@@ -75,19 +75,18 @@
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
supplementalGroups:
- - 44
- 65537
persistence:
config:
- existingClaim: plex
+ existingClaim: '{{ .Release.Name }}'
config-cache:
- existingClaim: plex-cache
+ existingClaim: '{{ .Release.Name }}-cache'
globalMounts:
- path: /config/Library/Application Support/Plex Media Server/Cache
media:
globalMounts:
- readOnly: true
path: /volume1/PLEX
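The existingClaim values switch from literal names to Helm template strings; the app-template chart appears to pass these through tpl rendering, so '{{ .Release.Name }}' resolves to the release name at install time. Assuming that behaviour, the rendered persistence block for the plex release would look roughly like:

# Assumed rendering for release "plex"; illustration, not chart output from this PR.
persistence:
  config:
    existingClaim: plex
  config-cache:
    existingClaim: plex-cache
    globalMounts:
      - path: /config/Library/Application Support/Plex Media Server/Cache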
--- kubernetes/apps/media/tautulli/app Kustomization: media/tautulli HelmRelease: media/tautulli
+++ kubernetes/apps/media/tautulli/app Kustomization: media/tautulli HelmRelease: media/tautulli
@@ -21,61 +21,68 @@
cleanupOnFail: true
remediation:
retries: 3
values:
controllers:
tautulli:
+ annotations:
+ reloader.stakater.com/auto: 'true'
containers:
app:
env:
- TAUTULLI__PORT: 8181
+ TAUTULLI_HTTP_BASE_URL: https://{{ .Release.Name }}.k13.dev
+ TAUTULLI_HTTP_PORT: 80
TZ: America/New_York
+ envFrom:
+ - secretRef:
+ name: '{{ .Release.Name }}-secret'
image:
repository: ghcr.io/home-operations/tautulli
tag: 2.15.3@sha256:3e0eaca8c082ebe121a0ae9125bea1b4e2d177fca34ac8df4ec14a28e62f63a4
probes:
liveness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /status
- port: 8181
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
readiness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /status
- port: 8181
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
defaultPodOptions:
+ hostUsers: false
securityContext:
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
persistence:
config:
- existingClaim: tautulli
+ existingClaim: '{{ .Release.Name }}'
config-cache:
- existingClaim: tautulli-cache
+ existingClaim: '{{ .Release.Name }}-cache'
globalMounts:
- path: /config/cache
tmpfs:
advancedMounts:
tautulli:
app:
@@ -93,8 +100,8 @@
namespace: kube-system
sectionName: https
service:
app:
ports:
http:
- port: 8181
+ port: 80
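tautulli (and several other apps in this diff) now listen on port 80 while still running as UID 568. A non-root process can only bind below 1024 when the node or pod permits it, for example via the net.ipv4.ip_unprivileged_port_start sysctl or CAP_NET_BIND_SERVICE; how this cluster grants that is not visible in the diff. A hedged, standalone illustration of the pod-level option:

# Hypothetical pod securityContext (not part of this PR) lowering the
# unprivileged-port floor so a non-root container can listen on port 80.
securityContext:
  sysctls:
    - name: net.ipv4.ip_unprivileged_port_start
      value: "0"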
--- kubernetes/apps/media/tautulli/app Kustomization: media/tautulli ExternalSecret: media/tautulli
+++ kubernetes/apps/media/tautulli/app Kustomization: media/tautulli ExternalSecret: media/tautulli
@@ -0,0 +1,23 @@
+---
+apiVersion: external-secrets.io/v1
+kind: ExternalSecret
+metadata:
+ labels:
+ app.kubernetes.io/name: tautulli
+ kustomize.toolkit.fluxcd.io/name: tautulli
+ kustomize.toolkit.fluxcd.io/namespace: media
+ name: tautulli
+ namespace: media
+spec:
+ dataFrom:
+ - extract:
+ key: tautulli
+ secretStoreRef:
+ kind: ClusterSecretStore
+ name: onepassword
+ target:
+ name: tautulli-secret
+ template:
+ data:
+ TAUTULLI_API_KEY: '{{ .TAUTULLI_API_KEY }}'
+
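The new ExternalSecret extracts the tautulli item from the onepassword ClusterSecretStore and templates a single key into a Secret named tautulli-secret, which the HelmRelease above consumes through envFrom. Assuming the 1Password item carries a TAUTULLI_API_KEY field, the operator would materialise roughly:

# Illustration only; the real value is filled in from 1Password at reconcile time.
apiVersion: v1
kind: Secret
metadata:
  name: tautulli-secret
  namespace: media
stringData:
  TAUTULLI_API_KEY: <value of the extracted field>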
--- kubernetes/apps/media/qbittorrent/app Kustomization: media/qbittorrent HelmRelease: media/qbittorrent
+++ kubernetes/apps/media/qbittorrent/app Kustomization: media/qbittorrent HelmRelease: media/qbittorrent
@@ -25,37 +25,37 @@
controllers:
qbittorrent:
containers:
app:
env:
QBT_TORRENTING_PORT: 50413
- QBT_WEBUI_PORT: 8080
+ QBT_WEBUI_PORT: 80
TZ: America/New_York
image:
repository: ghcr.io/home-operations/qbittorrent
tag: 5.1.2@sha256:9dd0164cc23e9c937e0af27fd7c3f627d1df30c182cf62ed34d3f129c55dc0e8
probes:
liveness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /api/v2/app/version
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
readiness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /api/v2/app/version
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
startup:
enabled: true
spec:
@@ -75,21 +75,33 @@
runAsNonRoot: true
runAsUser: 568
supplementalGroups:
- 65536
persistence:
config:
- existingClaim: qbittorrent
+ existingClaim: '{{ .Release.Name }}'
media:
globalMounts:
- path: /media/downloads/torrents
subPath: downloads/torrents
path: /volume1/PLEX
server: nas.internal
type: nfs
- tmp:
+ qbrr:
+ globalMounts:
+ - readOnly: true
+ image: ghcr.io/buroa/qbrr:0.1.1@sha256:fe4a1d100ba896999253a5122d9c77cc4920779d618c23f765458cb6ec7383ca
+ type: image
+ tmpfs:
+ advancedMounts:
+ qbittorrent:
+ app:
+ - path: /config/qBittorrent/logs
+ subPath: logs
+ - path: /tmp
+ subPath: tmp
type: emptyDir
route:
app:
hostnames:
- '{{ .Release.Name }}.youmans.io'
- qb.youmans.io
@@ -97,19 +109,20 @@
- name: internal
namespace: kube-system
sectionName: https
rules:
- backendRefs:
- identifier: app
- port: 8080
+ port: 80
service:
app:
- forceRename: qbittorrent
+ forceRename: '{{ .Release.Name }}'
ports:
http:
- port: 8080
+ port: 80
+ primary: true
bittorrent:
annotations:
lbipam.cilium.io/ips: 10.0.2.82, ::ffff:10.0.2.82
ports:
bittorrent-tcp:
port: 50413
--- kubernetes/apps/media/jellyseerr/app Kustomization: media/jellyseerr HelmRelease: media/jellyseerr
+++ kubernetes/apps/media/jellyseerr/app Kustomization: media/jellyseerr HelmRelease: media/jellyseerr
@@ -21,69 +21,68 @@
cleanupOnFail: true
remediation:
retries: 3
values:
controllers:
jellyseerr:
- annotations:
- reloader.stakater.com/auto: 'true'
containers:
app:
env:
LOG_LEVEL: info
- PORT: 5055
+ PORT: 80
TZ: America/New_York
envFrom:
- secretRef:
- name: jellyseerr-secret
+ name: '{{ .Release.Name }}-secret'
image:
repository: ghcr.io/fallenbagel/jellyseerr
tag: 2.7.3@sha256:9cc9e9ee6cd5cf5a23feb45c37742ba34cfd6314d81d259cddb373a97ac92cdd
probes:
liveness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /api/v1/status
- port: 5055
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
readiness:
custom: true
enabled: true
spec:
failureThreshold: 3
httpGet:
path: /api/v1/status
- port: 5055
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
defaultPodOptions:
+ hostUsers: false
securityContext:
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
persistence:
config:
- existingClaim: jellyseerr
+ existingClaim: '{{ .Release.Name }}'
globalMounts:
- path: /app/config
config-cache:
- existingClaim: jellyseerr-cache
+ existingClaim: '{{ .Release.Name }}-cache'
globalMounts:
- path: /app/config/cache
tmpfs:
advancedMounts:
jellyseerr:
app:
@@ -102,8 +101,8 @@
namespace: kube-system
sectionName: https
service:
app:
ports:
http:
- port: 5055
+ port: 80
--- kubernetes/apps/observability/unpoller/app Kustomization: observability/unpoller HelmRelease: observability/unpoller
+++ kubernetes/apps/observability/unpoller/app Kustomization: observability/unpoller HelmRelease: observability/unpoller
@@ -28,42 +28,62 @@
reloader.stakater.com/auto: 'true'
containers:
app:
env:
TZ: America/New_York
UP_INFLUXDB_DISABLE: true
+ UP_PROMETHEUS_HTTP_LISTEN: 0.0.0.0:8080
UP_UNIFI_DEFAULT_ROLE: k8s-gitops
UP_UNIFI_DEFAULT_URL: https://unifi.internal
UP_UNIFI_DEFAULT_VERIFY_SSL: false
envFrom:
- secretRef:
- name: unpoller-secret
+ name: '{{ .Release.Name }}-secret'
image:
repository: ghcr.io/unpoller/unpoller
tag: v2.15.4@sha256:788a890f2dc5aef3e99ce430917221c43b4e084464d38bc6537a8c7294ef8770
probes:
liveness:
+ custom: true
enabled: true
+ spec:
+ failureThreshold: 3
+ httpGet:
+ path: /health
+ port: 8080
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
readiness:
+ custom: true
enabled: true
+ spec:
+ failureThreshold: 3
+ httpGet:
+ path: /health
+ port: 8080
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
defaultPodOptions:
+ hostUsers: false
securityContext:
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
service:
app:
ports:
http:
- port: 9130
--- HelmRelease: databases/barman-cloud Service: databases/barman-cloud
+++ HelmRelease: databases/barman-cloud Service: databases/barman-cloud
@@ -1,28 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
- name: barman-cloud
- labels:
- app.kubernetes.io/instance: barman-cloud
- app.kubernetes.io/managed-by: Helm
- app.kubernetes.io/name: barman-cloud
- app.kubernetes.io/service: barman-cloud
- cnpg.io/pluginName: barman-cloud.cloudnative-pg.io
- annotations:
- cnpg.io/pluginClientSecret: barman-cloud-client-tls
- cnpg.io/pluginPort: '9090'
- cnpg.io/pluginServerSecret: barman-cloud-server-tls
- namespace: databases
-spec:
- type: ClusterIP
- ports:
- - port: 9090
- targetPort: 9090
- protocol: TCP
- name: http
- selector:
- app.kubernetes.io/controller: barman-cloud
- app.kubernetes.io/instance: barman-cloud
- app.kubernetes.io/name: barman-cloud
-
--- HelmRelease: databases/barman-cloud Deployment: databases/barman-cloud
+++ HelmRelease: databases/barman-cloud Deployment: databases/barman-cloud
@@ -1,92 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: barman-cloud
- labels:
- app.kubernetes.io/controller: barman-cloud
- app.kubernetes.io/instance: barman-cloud
- app.kubernetes.io/managed-by: Helm
- app.kubernetes.io/name: barman-cloud
- namespace: databases
-spec:
- revisionHistoryLimit: 3
- replicas: 1
- strategy:
- type: Recreate
- selector:
- matchLabels:
- app.kubernetes.io/controller: barman-cloud
- app.kubernetes.io/name: barman-cloud
- app.kubernetes.io/instance: barman-cloud
- template:
- metadata:
- labels:
- app.kubernetes.io/controller: barman-cloud
- app.kubernetes.io/instance: barman-cloud
- app.kubernetes.io/name: barman-cloud
- spec:
- enableServiceLinks: false
- serviceAccountName: barman-cloud
- automountServiceAccountToken: true
- securityContext:
- runAsGroup: 568
- runAsNonRoot: true
- runAsUser: 568
- hostIPC: false
- hostNetwork: false
- hostPID: false
- dnsPolicy: ClusterFirst
- containers:
- - args:
- - operator
- - --leader-elect
- - --server-cert=/server/tls.crt
- - --server-key=/server/tls.key
- - --client-cert=/client/tls.crt
- - --server-address=:9090
- env:
- - name: SIDECAR_IMAGE
- value: ghcr.io/cloudnative-pg/plugin-barman-cloud-sidecar:v0.6.0
- image: ghcr.io/cloudnative-pg/plugin-barman-cloud:v0.6.0@sha256:2adabf02728307119a22c13abb9efc52371ad6d74106db6204c6dee5abe75fb8
- livenessProbe:
- failureThreshold: 3
- httpGet:
- path: /healthz
- port: 8081
- initialDelaySeconds: 0
- periodSeconds: 10
- timeoutSeconds: 1
- name: app
- readinessProbe:
- failureThreshold: 3
- httpGet:
- path: /readyz
- port: 8081
- initialDelaySeconds: 0
- periodSeconds: 10
- timeoutSeconds: 1
- resources:
- limits:
- memory: 128Mi
- requests:
- cpu: 10m
- securityContext:
- allowPrivilegeEscalation: false
- capabilities:
- drop:
- - ALL
- readOnlyRootFilesystem: true
- volumeMounts:
- - mountPath: /client
- name: client
- - mountPath: /server
- name: server
- volumes:
- - name: client
- secret:
- secretName: barman-cloud-client-tls
- - name: server
- secret:
- secretName: barman-cloud-server-tls
-
--- HelmRelease: media/qbittorrent Service: media/qbittorrent
+++ HelmRelease: media/qbittorrent Service: media/qbittorrent
@@ -9,14 +9,14 @@
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/service: qbittorrent
namespace: media
spec:
type: ClusterIP
ports:
- - port: 8080
- targetPort: 8080
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: qbittorrent
app.kubernetes.io/instance: qbittorrent
app.kubernetes.io/name: qbittorrent
--- HelmRelease: media/qbittorrent Deployment: media/qbittorrent
+++ HelmRelease: media/qbittorrent Deployment: media/qbittorrent
@@ -43,30 +43,30 @@
dnsPolicy: ClusterFirst
containers:
- env:
- name: QBT_TORRENTING_PORT
value: '50413'
- name: QBT_WEBUI_PORT
- value: '8080'
+ value: '80'
- name: TZ
value: America/New_York
image: ghcr.io/home-operations/qbittorrent:5.1.2@sha256:9dd0164cc23e9c937e0af27fd7c3f627d1df30c182cf62ed34d3f129c55dc0e8
livenessProbe:
failureThreshold: 3
httpGet:
path: /api/v2/app/version
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /api/v2/app/version
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
@@ -75,27 +75,37 @@
readOnlyRootFilesystem: true
startupProbe:
failureThreshold: 30
initialDelaySeconds: 0
periodSeconds: 10
tcpSocket:
- port: 8080
+ port: 80
timeoutSeconds: 1
volumeMounts:
- mountPath: /config
name: config
- mountPath: /media/downloads/torrents
name: media
subPath: downloads/torrents
+ - mountPath: /qbrr
+ name: qbrr
+ readOnly: true
+ - mountPath: /config/qBittorrent/logs
+ name: tmpfs
+ subPath: logs
- mountPath: /tmp
- name: tmp
+ name: tmpfs
+ subPath: tmp
volumes:
- name: config
persistentVolumeClaim:
claimName: qbittorrent
- name: media
nfs:
path: /volume1/PLEX
server: nas.internal
+ - image:
+ reference: ghcr.io/buroa/qbrr:0.1.1@sha256:fe4a1d100ba896999253a5122d9c77cc4920779d618c23f765458cb6ec7383ca
+ name: qbrr
- emptyDir: {}
- name: tmp
+ name: tmpfs
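The qbrr volume above uses the image volume source: the OCI artifact is pulled by the kubelet and mounted read-only at /qbrr, so the tool ships separately from the qbittorrent image. Depending on the Kubernetes version, this may require the ImageVolume feature gate. A standalone sketch of the same idea with hypothetical names:

# Minimal pod using an image-type volume (demo names, not from this PR).
apiVersion: v1
kind: Pod
metadata:
  name: image-volume-demo
spec:
  containers:
    - name: app
      image: docker.io/library/busybox:1.36
      command: ["sleep", "infinity"]
      volumeMounts:
        - name: tools
          mountPath: /tools
          readOnly: true
  volumes:
    - name: tools
      image:
        reference: ghcr.io/buroa/qbrr:0.1.1
        pullPolicy: IfNotPresent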
--- HelmRelease: media/qbittorrent HTTPRoute: media/qbittorrent
+++ HelmRelease: media/qbittorrent HTTPRoute: media/qbittorrent
@@ -21,9 +21,9 @@
rules:
- backendRefs:
- group: ''
kind: Service
name: qbittorrent
namespace: media
- port: 8080
+ port: 80
weight: 1
--- HelmRelease: media/prowlarr Service: media/prowlarr
+++ HelmRelease: media/prowlarr Service: media/prowlarr
@@ -9,14 +9,14 @@
app.kubernetes.io/name: prowlarr
app.kubernetes.io/service: prowlarr
namespace: media
spec:
type: ClusterIP
ports:
- - port: 9696
- targetPort: 9696
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: prowlarr
app.kubernetes.io/instance: prowlarr
app.kubernetes.io/name: prowlarr
--- HelmRelease: media/prowlarr Deployment: media/prowlarr
+++ HelmRelease: media/prowlarr Deployment: media/prowlarr
@@ -29,25 +29,22 @@
app.kubernetes.io/name: prowlarr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
+ fsGroup: 568
+ fsGroupChangePolicy: OnRootMismatch
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
- initContainers:
- - envFrom:
- - secretRef:
- name: prowlarr-secret
- image: ghcr.io/home-operations/postgres-init:17
- name: init-db
containers:
- env:
- name: PROWLARR__APP__INSTANCENAME
value: Prowlarr
- name: PROWLARR__APP__THEME
value: dark
@@ -55,49 +52,54 @@
value: External
- name: PROWLARR__AUTH__REQUIRED
value: DisabledForLocalAddresses
- name: PROWLARR__LOG__LEVEL
value: info
- name: PROWLARR__SERVER__PORT
- value: '9696'
+ value: '80'
- name: PROWLARR__UPDATE__BRANCH
value: develop
- name: TZ
value: America/New_York
envFrom:
- secretRef:
name: prowlarr-secret
image: ghcr.io/home-operations/prowlarr:2.0.5.5160@sha256:47cede4c9e57c53af75b8b6e1382576ede241b9d847733ba449d723ca8cb51c8
livenessProbe:
failureThreshold: 3
httpGet:
path: /ping
- port: 9696
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /ping
- port: 9696
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /config
+ name: config
+ - mountPath: /config/logs
name: tmpfs
- subPath: config
+ subPath: logs
- mountPath: /tmp
name: tmpfs
subPath: tmp
volumes:
+ - name: config
+ persistentVolumeClaim:
+ claimName: prowlarr
- emptyDir: {}
name: tmpfs
--- HelmRelease: media/prowlarr HTTPRoute: media/prowlarr
+++ HelmRelease: media/prowlarr HTTPRoute: media/prowlarr
@@ -20,9 +20,9 @@
rules:
- backendRefs:
- group: ''
kind: Service
name: prowlarr
namespace: media
- port: 9696
+ port: 80
weight: 1
--- HelmRelease: media/cross-seed Service: media/cross-seed
+++ HelmRelease: media/cross-seed Service: media/cross-seed
@@ -9,14 +9,14 @@
app.kubernetes.io/name: cross-seed
app.kubernetes.io/service: cross-seed
namespace: media
spec:
type: ClusterIP
ports:
- - port: 2468
- targetPort: 2468
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: cross-seed
app.kubernetes.io/instance: cross-seed
app.kubernetes.io/name: cross-seed
--- HelmRelease: media/cross-seed Deployment: media/cross-seed
+++ HelmRelease: media/cross-seed Deployment: media/cross-seed
@@ -46,33 +46,33 @@
containers:
- args:
- daemon
- -v
env:
- name: CROSS_SEED_PORT
- value: '2468'
+ value: '80'
- name: TZ
value: America/New_York
envFrom:
- secretRef:
name: cross-seed-secret
image: ghcr.io/cross-seed/cross-seed:6.13.2@sha256:9d55669b92af084ac3487fa717d3a87d7b46781b24a5f0903fff993e82715452
livenessProbe:
failureThreshold: 3
httpGet:
path: /api/ping
- port: 2468
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /api/ping
- port: 2468
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
--- HelmRelease: media/jellyseerr Service: media/jellyseerr
+++ HelmRelease: media/jellyseerr Service: media/jellyseerr
@@ -9,14 +9,14 @@
app.kubernetes.io/name: jellyseerr
app.kubernetes.io/service: jellyseerr
namespace: media
spec:
type: ClusterIP
ports:
- - port: 5055
- targetPort: 5055
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: jellyseerr
app.kubernetes.io/instance: jellyseerr
app.kubernetes.io/name: jellyseerr
--- HelmRelease: media/jellyseerr Deployment: media/jellyseerr
+++ HelmRelease: media/jellyseerr Deployment: media/jellyseerr
@@ -5,14 +5,12 @@
name: jellyseerr
labels:
app.kubernetes.io/controller: jellyseerr
app.kubernetes.io/instance: jellyseerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: jellyseerr
- annotations:
- reloader.stakater.com/auto: 'true'
namespace: media
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
@@ -37,39 +35,40 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: LOG_LEVEL
value: info
- name: PORT
- value: '5055'
+ value: '80'
- name: TZ
value: America/New_York
envFrom:
- secretRef:
name: jellyseerr-secret
image: ghcr.io/fallenbagel/jellyseerr:2.7.3@sha256:9cc9e9ee6cd5cf5a23feb45c37742ba34cfd6314d81d259cddb373a97ac92cdd
livenessProbe:
failureThreshold: 3
httpGet:
path: /api/v1/status
- port: 5055
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /api/v1/status
- port: 5055
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
--- HelmRelease: media/jellyseerr HTTPRoute: media/jellyseerr
+++ HelmRelease: media/jellyseerr HTTPRoute: media/jellyseerr
@@ -21,9 +21,9 @@
rules:
- backendRefs:
- group: ''
kind: Service
name: jellyseerr
namespace: media
- port: 5055
+ port: 80
weight: 1
--- HelmRelease: external-secrets/onepassword Deployment: external-secrets/onepassword
+++ HelmRelease: external-secrets/onepassword Deployment: external-secrets/onepassword
@@ -35,12 +35,13 @@
runAsGroup: 999
runAsNonRoot: true
runAsUser: 999
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: OP_BUS_PEERS
value: localhost:11221
- name: OP_BUS_PORT
@@ -51,26 +52,30 @@
valueFrom:
secretKeyRef:
key: 1password-credentials.json
name: onepassword-secret
- name: XDG_DATA_HOME
value: /config
- image: docker.io/1password/connect-api:1.7.4@sha256:f97189814239381e6dd88577f2b0b838a64e006a460608455b3127c15b174601
+ image: ghcr.io/1password/connect-api:1.7.4@sha256:7d2132985f2f05b7fe4cfaf76314adeeb5abb745f051685b1c561130ab22ade3
livenessProbe:
failureThreshold: 3
httpGet:
path: /heartbeat
port: 80
- initialDelaySeconds: 15
- periodSeconds: 30
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
name: api
readinessProbe:
+ failureThreshold: 3
httpGet:
path: /health
port: 80
- initialDelaySeconds: 15
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
@@ -88,26 +93,30 @@
valueFrom:
secretKeyRef:
key: 1password-credentials.json
name: onepassword-secret
- name: XDG_DATA_HOME
value: /config
- image: docker.io/1password/connect-sync:1.7.4@sha256:27e7ec47e1ad8eaa2f54764fa0736954a5119d0155dea3c923c481c89c5f964c
+ image: ghcr.io/1password/connect-sync:1.7.4@sha256:b2b9beb06e40615c55f698e2efc06cad5bdb1f82e09e60d1aac6d7bf3d57ec43
livenessProbe:
failureThreshold: 3
httpGet:
path: /heartbeat
port: 8081
- initialDelaySeconds: 15
- periodSeconds: 30
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
name: sync
readinessProbe:
+ failureThreshold: 3
httpGet:
path: /health
port: 8081
- initialDelaySeconds: 15
+ initialDelaySeconds: 0
+ periodSeconds: 10
+ timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
--- HelmRelease: media/recyclarr CronJob: media/recyclarr
+++ HelmRelease: media/recyclarr CronJob: media/recyclarr
@@ -36,12 +36,13 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
restartPolicy: Never
containers:
- args:
- sync
env:
@@ -68,12 +69,15 @@
- mountPath: /config/logs
name: tmpfs
subPath: logs
- mountPath: /config/repositories
name: tmpfs
subPath: repositories
+ - mountPath: /tmp
+ name: tmpfs
+ subPath: tmp
volumes:
- name: config
persistentVolumeClaim:
claimName: recyclarr
- configMap:
name: recyclarr-configmap
--- HelmRelease: media/plex Deployment: media/plex
+++ HelmRelease: media/plex Deployment: media/plex
@@ -33,13 +33,12 @@
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
supplementalGroups:
- - 44
- 65537
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
--- HelmRelease: observability/blackbox-exporter ConfigMap: observability/blackbox-exporter
+++ HelmRelease: observability/blackbox-exporter ConfigMap: observability/blackbox-exporter
@@ -11,23 +11,23 @@
data:
blackbox.yaml: |
modules:
http_2xx:
http:
follow_redirects: true
- preferred_ip_protocol: ipv4
+ preferred_ip_protocol: ip4
valid_http_versions:
- HTTP/1.1
- HTTP/2.0
prober: http
timeout: 5s
icmp:
icmp:
- preferred_ip_protocol: ipv4
+ preferred_ip_protocol: ip4
prober: icmp
timeout: 5s
tcp_connect:
prober: tcp
tcp:
- preferred_ip_protocol: ipv4
+ preferred_ip_protocol: ip4
timeout: 5s
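The blackbox-exporter change above corrects the protocol value: the exporter documents preferred_ip_protocol as ip4 or ip6, so ipv4 did not match the documented form. Corrected shape of one module:

# Documented spelling of the preference (ip4 / ip6).
modules:
  http_2xx:
    prober: http
    timeout: 5s
    http:
      preferred_ip_protocol: ip4
      follow_redirects: true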
--- HelmRelease: databases/cloudnative-pg ServiceAccount: databases/cloudnative-pg
+++ HelmRelease: databases/cloudnative-pg ServiceAccount: databases/cloudnative-pg
@@ -1,11 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: cloudnative-pg
- namespace: databases
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
-
--- HelmRelease: databases/cloudnative-pg ConfigMap: databases/cnpg-grafana-dashboard
+++ HelmRelease: databases/cloudnative-pg ConfigMap: databases/cnpg-grafana-dashboard
@@ -1,9200 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: cnpg-grafana-dashboard
- namespace: databases
- labels:
- grafana_dashboard: '1'
-data:
- cnp.json: |-
- {
- "__inputs": [
- {
- "name": "DS_PROMETHEUS",
- "label": "Prometheus",
- "description": "",
- "type": "datasource",
- "pluginId": "prometheus",
- "pluginName": "Prometheus"
- },
- {
- "name": "DS_EXPRESSION",
- "label": "Expression",
- "description": "",
- "type": "datasource",
- "pluginId": "__expr__"
- }
- ],
- "__elements": {},
- "__requires": [
- {
- "type": "datasource",
- "id": "__expr__",
- "version": "1.0.0"
- },
- {
- "type": "panel",
- "id": "alertlist",
- "name": "Alert list",
- "version": ""
- },
- {
- "type": "panel",
- "id": "bargauge",
- "name": "Bar gauge",
- "version": ""
- },
- {
- "type": "panel",
- "id": "gauge",
- "name": "Gauge",
- "version": ""
- },
- {
- "type": "grafana",
- "id": "grafana",
- "name": "Grafana",
- "version": "10.3.3"
- },
- {
- "type": "panel",
- "id": "heatmap",
- "name": "Heatmap",
- "version": ""
- },
- {
- "type": "datasource",
- "id": "prometheus",
- "name": "Prometheus",
- "version": "1.0.0"
- },
- {
- "type": "panel",
- "id": "stat",
- "name": "Stat",
- "version": ""
- },
- {
- "type": "panel",
- "id": "table",
- "name": "Table",
- "version": ""
- },
- {
- "type": "panel",
- "id": "text",
- "name": "Text",
- "version": ""
- },
- {
- "type": "panel",
- "id": "timeseries",
- "name": "Time series",
- "version": ""
- }
- ],
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": {
- "type": "datasource",
- "uid": "grafana"
- },
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "target": {
- "limit": 100,
- "matchAny": false,
- "tags": [],
- "type": "dashboard"
- },
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "fiscalYearStartMonth": 0,
- "graphTooltip": 1,
- "id": null,
- "links": [
- {
- "asDropdown": false,
- "icon": "external link",
- "includeVars": false,
- "keepTime": false,
- "tags": [
- "cloudnativepg"
- ],
- "targetBlank": false,
- "title": "Related Dashboards",
- "tooltip": "",
- "type": "dashboards",
- "url": ""
- }
- ],
- "liveNow": false,
- "panels": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "gridPos": {
- "h": 7,
- "w": 3,
- "x": 0,
- "y": 0
- },
- "id": 676,
- "options": {
- "alertInstanceLabelFilter": "{namespace=~\"$namespace\",pod=~\"$instances\"}",
- "alertName": "",
- "dashboardAlerts": false,
- "folder": "",
- "groupBy": [],
- "groupMode": "default",
- "maxItems": 20,
- "sortOrder": 1,
- "stateFilter": {
- "error": true,
- "firing": true,
- "noData": false,
- "normal": true,
- "pending": true
- },
- "viewMode": "list"
- },
- "title": "Alerts",
- "type": "alertlist"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "gridPos": {
- "h": 1,
- "w": 4,
- "x": 3,
- "y": 0
- },
- "id": 586,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "pluginVersion": "10.3.3",
- "title": "Health",
- "type": "text"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "gridPos": {
- "h": 1,
- "w": 12,
- "x": 7,
- "y": 0
- },
- "id": 336,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "pluginVersion": "10.3.3",
- "title": "Overview",
- "type": "text"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "gridPos": {
- "h": 1,
- "w": 2,
- "x": 19,
- "y": 0
- },
- "id": 352,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "pluginVersion": "10.3.3",
- "title": "Storage",
- "type": "text"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "gridPos": {
- "h": 1,
- "w": 3,
- "x": 21,
- "y": 0
- },
- "id": 354,
- "options": {
- "code": {
- "language": "plaintext",
- "showLineNumbers": false,
- "showMiniMap": false
- },
- "content": "",
- "mode": "markdown"
- },
- "pluginVersion": "10.3.3",
- "title": "Backups",
- "type": "text"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "description": "Cluster Replication Health represents the availability of replica servers available to replace the primary in case of a failure.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [
- {
- "options": {
- "0": {
- "color": "red",
- "index": 2,
- "text": "None"
- },
- "1": {
- "color": "orange",
- "index": 1,
- "text": "Degraded"
- }
- },
- "type": "value"
- },
- {
- "options": {
- "from": 2,
- "result": {
- "color": "green",
- "index": 0,
- "text": "Healthy"
- },
- "to": 999
- },
- "type": "range"
- }
- ],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- }
- ]
- },
- "unitScale": true
- },
- "overrides": []
- },
- "gridPos": {
- "h": 2,
- "w": 2,
- "x": 3,
- "y": 1
- },
- "id": 585,
- "options": {
- "colorMode": "background",
- "graphMode": "none",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "showPercentChange": false,
- "textMode": "auto",
- "wideLayout": true
- },
- "pluginVersion": "10.3.3",
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "code",
- "expr": "max(cnpg_pg_replication_streaming_replicas{namespace=~\"$namespace\", pod=~\"$instances\"} - cnpg_pg_replication_is_wal_receiver_up{namespace=~\"$namespace\", pod=~\"$instances\"})",
- "legendFormat": "Replication",
- "range": true,
- "refId": "A"
- }
- ],
- "type": "stat"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "description": "High lag indicates issue with replication. Network or storage interfaces may not have enough bandwidth to handle incoming traffic and replication at the same time.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [
- {
- "options": {
- "match": "null",
[Diff truncated by flux-local]
--- HelmRelease: databases/cloudnative-pg ConfigMap: databases/cnpg-controller-manager-config
+++ HelmRelease: databases/cloudnative-pg ConfigMap: databases/cnpg-controller-manager-config
@@ -1,12 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: cnpg-controller-manager-config
- namespace: databases
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
-data: {}
-
--- HelmRelease: databases/cloudnative-pg ConfigMap: databases/cnpg-default-monitoring
+++ HelmRelease: databases/cloudnative-pg ConfigMap: databases/cnpg-default-monitoring
@@ -1,493 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: cnpg-default-monitoring
- namespace: databases
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
- cnpg.io/reload: ''
-data:
- queries: |
- backends:
- query: |
- SELECT sa.datname
- , sa.usename
- , sa.application_name
- , states.state
- , COALESCE(sa.count, 0) AS total
- , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds
- FROM ( VALUES ('active')
- , ('idle')
- , ('idle in transaction')
- , ('idle in transaction (aborted)')
- , ('fastpath function call')
- , ('disabled')
- ) AS states(state)
- LEFT JOIN (
- SELECT datname
- , state
- , usename
- , COALESCE(application_name, '') AS application_name
- , COUNT(*)
- , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs
- FROM pg_catalog.pg_stat_activity
- GROUP BY datname, state, usename, application_name
- ) sa ON states.state = sa.state
- WHERE sa.usename IS NOT NULL
- metrics:
- - datname:
- usage: "LABEL"
- description: "Name of the database"
- - usename:
- usage: "LABEL"
- description: "Name of the user"
- - application_name:
- usage: "LABEL"
- description: "Name of the application"
- - state:
- usage: "LABEL"
- description: "State of the backend"
- - total:
- usage: "GAUGE"
- description: "Number of backends"
- - max_tx_duration_seconds:
- usage: "GAUGE"
- description: "Maximum duration of a transaction in seconds"
-
- backends_waiting:
- query: |
- SELECT count(*) AS total
- FROM pg_catalog.pg_locks blocked_locks
- JOIN pg_catalog.pg_locks blocking_locks
- ON blocking_locks.locktype = blocked_locks.locktype
- AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
- AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
- AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
- AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
- AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
- AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
- AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
- AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
- AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
- AND blocking_locks.pid != blocked_locks.pid
- JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
- WHERE NOT blocked_locks.granted
- metrics:
- - total:
- usage: "GAUGE"
- description: "Total number of backends that are currently waiting on other queries"
-
- pg_database:
- query: |
- SELECT datname
- , pg_catalog.pg_database_size(datname) AS size_bytes
- , pg_catalog.age(datfrozenxid) AS xid_age
- , pg_catalog.mxid_age(datminmxid) AS mxid_age
- FROM pg_catalog.pg_database
- WHERE datallowconn
- metrics:
- - datname:
- usage: "LABEL"
- description: "Name of the database"
- - size_bytes:
- usage: "GAUGE"
- description: "Disk space used by the database"
- - xid_age:
- usage: "GAUGE"
- description: "Number of transactions from the frozen XID to the current one"
- - mxid_age:
- usage: "GAUGE"
- description: "Number of multiple transactions (Multixact) from the frozen XID to the current one"
-
- pg_postmaster:
- query: |
- SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time
- FROM pg_catalog.pg_postmaster_start_time()
- metrics:
- - start_time:
- usage: "GAUGE"
- description: "Time at which postgres started (based on epoch)"
-
- pg_replication:
- query: "SELECT CASE WHEN (
- NOT pg_catalog.pg_is_in_recovery()
- OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())
- THEN 0
- ELSE GREATEST (0,
- EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))
- END AS lag,
- pg_catalog.pg_is_in_recovery() AS in_recovery,
- EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,
- (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas"
- metrics:
- - lag:
- usage: "GAUGE"
- description: "Replication lag behind primary in seconds"
- - in_recovery:
- usage: "GAUGE"
- description: "Whether the instance is in recovery"
- - is_wal_receiver_up:
- usage: "GAUGE"
- description: "Whether the instance wal_receiver is up"
- - streaming_replicas:
- usage: "GAUGE"
- description: "Number of streaming replicas connected to the instance"
-
- pg_replication_slots:
- query: |
- SELECT slot_name,
- slot_type,
- database,
- active,
- (CASE pg_catalog.pg_is_in_recovery()
- WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)
- ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)
- END) as pg_wal_lsn_diff
- FROM pg_catalog.pg_replication_slots
- WHERE NOT temporary
- metrics:
- - slot_name:
- usage: "LABEL"
- description: "Name of the replication slot"
- - slot_type:
- usage: "LABEL"
- description: "Type of the replication slot"
- - database:
- usage: "LABEL"
- description: "Name of the database"
- - active:
- usage: "GAUGE"
- description: "Flag indicating whether the slot is active"
- - pg_wal_lsn_diff:
- usage: "GAUGE"
- description: "Replication lag in bytes"
-
- pg_stat_archiver:
- query: |
- SELECT archived_count
- , failed_count
- , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival
- , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure
- , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time
- , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time
- , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn
- , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn
- , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
- FROM pg_catalog.pg_stat_archiver
- metrics:
- - archived_count:
- usage: "COUNTER"
- description: "Number of WAL files that have been successfully archived"
- - failed_count:
- usage: "COUNTER"
- description: "Number of failed attempts for archiving WAL files"
- - seconds_since_last_archival:
- usage: "GAUGE"
- description: "Seconds since the last successful archival operation"
- - seconds_since_last_failure:
- usage: "GAUGE"
- description: "Seconds since the last failed archival operation"
- - last_archived_time:
- usage: "GAUGE"
- description: "Epoch of the last time WAL archiving succeeded"
- - last_failed_time:
- usage: "GAUGE"
- description: "Epoch of the last time WAL archiving failed"
- - last_archived_wal_start_lsn:
- usage: "GAUGE"
- description: "Archived WAL start LSN"
- - last_failed_wal_start_lsn:
- usage: "GAUGE"
- description: "Last failed WAL LSN"
- - stats_reset_time:
- usage: "GAUGE"
- description: "Time at which these statistics were last reset"
-
- pg_stat_bgwriter:
- runonserver: "<17.0.0"
- query: |
- SELECT checkpoints_timed
- , checkpoints_req
- , checkpoint_write_time
- , checkpoint_sync_time
- , buffers_checkpoint
- , buffers_clean
- , maxwritten_clean
- , buffers_backend
- , buffers_backend_fsync
- , buffers_alloc
- FROM pg_catalog.pg_stat_bgwriter
- metrics:
- - checkpoints_timed:
- usage: "COUNTER"
- description: "Number of scheduled checkpoints that have been performed"
- - checkpoints_req:
- usage: "COUNTER"
- description: "Number of requested checkpoints that have been performed"
- - checkpoint_write_time:
- usage: "COUNTER"
- description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds"
- - checkpoint_sync_time:
- usage: "COUNTER"
- description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds"
- - buffers_checkpoint:
- usage: "COUNTER"
[Diff truncated by flux-local]
--- HelmRelease: databases/cloudnative-pg ClusterRole: databases/cloudnative-pg
+++ HelmRelease: databases/cloudnative-pg ClusterRole: databases/cloudnative-pg
@@ -1,237 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: cloudnative-pg
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
-rules:
-- apiGroups:
- - ''
- resources:
- - nodes
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - admissionregistration.k8s.io
- resources:
- - mutatingwebhookconfigurations
- - validatingwebhookconfigurations
- verbs:
- - get
- - patch
-- apiGroups:
- - postgresql.cnpg.io
- resources:
- - clusterimagecatalogs
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - ''
- resources:
- - configmaps
- - secrets
- - services
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - ''
- resources:
- - configmaps/status
- - secrets/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - ''
- resources:
- - events
- verbs:
- - create
- - patch
-- apiGroups:
- - ''
- resources:
- - persistentvolumeclaims
- - pods
- - pods/exec
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - watch
-- apiGroups:
- - ''
- resources:
- - pods/status
- verbs:
- - get
-- apiGroups:
- - ''
- resources:
- - serviceaccounts
- verbs:
- - create
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - apps
- resources:
- - deployments
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - batch
- resources:
- - jobs
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - watch
-- apiGroups:
- - coordination.k8s.io
- resources:
- - leases
- verbs:
- - create
- - get
- - update
-- apiGroups:
- - monitoring.coreos.com
- resources:
- - podmonitors
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - watch
-- apiGroups:
- - policy
- resources:
- - poddisruptionbudgets
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - postgresql.cnpg.io
- resources:
- - backups
- - clusters
- - databases
- - poolers
- - publications
- - scheduledbackups
- - subscriptions
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - postgresql.cnpg.io
- resources:
- - failoverquorums
- verbs:
- - create
- - delete
- - get
- - list
- - watch
-- apiGroups:
- - postgresql.cnpg.io
- resources:
- - backups/status
- - databases/status
- - publications/status
- - scheduledbackups/status
- - subscriptions/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - postgresql.cnpg.io
- resources:
- - imagecatalogs
- verbs:
- - get
- - list
- - watch
-- apiGroups:
- - postgresql.cnpg.io
- resources:
- - clusters/finalizers
- - poolers/finalizers
- verbs:
- - update
-- apiGroups:
- - postgresql.cnpg.io
- resources:
- - clusters/status
- - poolers/status
- - failoverquorums/status
- verbs:
- - get
- - patch
- - update
- - watch
-- apiGroups:
- - rbac.authorization.k8s.io
- resources:
- - rolebindings
- - roles
- verbs:
- - create
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - snapshot.storage.k8s.io
- resources:
- - volumesnapshots
- verbs:
- - create
- - get
- - list
- - patch
- - watch
-
--- HelmRelease: databases/cloudnative-pg ClusterRole: databases/cloudnative-pg-view
+++ HelmRelease: databases/cloudnative-pg ClusterRole: databases/cloudnative-pg-view
@@ -1,28 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: cloudnative-pg-view
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
-rules:
-- apiGroups:
- - postgresql.cnpg.io
- resources:
- - backups
- - clusters
- - databases
- - failoverquorums
- - poolers
- - publications
- - scheduledbackups
- - imagecatalogs
- - clusterimagecatalogs
- - subscriptions
- verbs:
- - get
- - list
- - watch
-
--- HelmRelease: databases/cloudnative-pg ClusterRole: databases/cloudnative-pg-edit
+++ HelmRelease: databases/cloudnative-pg ClusterRole: databases/cloudnative-pg-edit
@@ -1,30 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: cloudnative-pg-edit
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
-rules:
-- apiGroups:
- - postgresql.cnpg.io
- resources:
- - backups
- - clusters
- - databases
- - failoverquorums
- - poolers
- - publications
- - scheduledbackups
- - imagecatalogs
- - clusterimagecatalogs
- - subscriptions
- verbs:
- - create
- - delete
- - deletecollection
- - patch
- - update
-
--- HelmRelease: databases/cloudnative-pg ClusterRoleBinding: databases/cloudnative-pg
+++ HelmRelease: databases/cloudnative-pg ClusterRoleBinding: databases/cloudnative-pg
@@ -1,18 +0,0 @@
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: cloudnative-pg
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: cloudnative-pg
-subjects:
-- kind: ServiceAccount
- name: cloudnative-pg
- namespace: databases
-
--- HelmRelease: databases/cloudnative-pg Service: databases/cnpg-webhook-service
+++ HelmRelease: databases/cloudnative-pg Service: databases/cnpg-webhook-service
@@ -1,20 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
- name: cnpg-webhook-service
- namespace: databases
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
-spec:
- type: ClusterIP
- ports:
- - port: 443
- targetPort: webhook-server
- name: webhook-server
- selector:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
-
--- HelmRelease: databases/cloudnative-pg Deployment: databases/cloudnative-pg
+++ HelmRelease: databases/cloudnative-pg Deployment: databases/cloudnative-pg
@@ -1,103 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: cloudnative-pg
- namespace: databases
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
-spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- template:
- metadata:
- annotations:
- checksum/rbac: 3a1af6c0dcd69ad9d2da35797d7d33f534a3db5f3c3375b51b109386548a6060
- checksum/monitoring-config: ac78470f67c828fc2b43e34d4eb7d4712837509bec4e991a1e5dccd5bddfff47
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- spec:
- containers:
- - args:
- - controller
- - --leader-elect
- - --max-concurrent-reconciles=10
- - --config-map-name=cnpg-controller-manager-config
- - --webhook-port=9443
- command:
- - /manager
- env:
- - name: OPERATOR_IMAGE_NAME
- value: ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0
- - name: OPERATOR_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: MONITORING_QUERIES_CONFIGMAP
- value: cnpg-default-monitoring
- image: ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0
- imagePullPolicy: IfNotPresent
- livenessProbe:
- httpGet:
- path: /readyz
- port: 9443
- scheme: HTTPS
- initialDelaySeconds: 3
- name: manager
- ports:
- - containerPort: 8080
- name: metrics
- protocol: TCP
- - containerPort: 9443
- name: webhook-server
- protocol: TCP
- readinessProbe:
- httpGet:
- path: /readyz
- port: 9443
- scheme: HTTPS
- initialDelaySeconds: 3
- resources: {}
- securityContext:
- allowPrivilegeEscalation: false
- capabilities:
- drop:
- - ALL
- readOnlyRootFilesystem: true
- runAsGroup: 10001
- runAsUser: 10001
- seccompProfile:
- type: RuntimeDefault
- startupProbe:
- failureThreshold: 6
- httpGet:
- path: /readyz
- port: 9443
- scheme: HTTPS
- periodSeconds: 5
- volumeMounts:
- - mountPath: /controller
- name: scratch-data
- - mountPath: /run/secrets/cnpg.io/webhook
- name: webhook-certificates
- securityContext:
- runAsNonRoot: true
- seccompProfile:
- type: RuntimeDefault
- serviceAccountName: cloudnative-pg
- terminationGracePeriodSeconds: 10
- volumes:
- - emptyDir: {}
- name: scratch-data
- - name: webhook-certificates
- secret:
- defaultMode: 420
- optional: true
- secretName: cnpg-webhook-cert
-
--- HelmRelease: databases/cloudnative-pg MutatingWebhookConfiguration: databases/cnpg-mutating-webhook-configuration
+++ HelmRelease: databases/cloudnative-pg MutatingWebhookConfiguration: databases/cnpg-mutating-webhook-configuration
@@ -1,95 +0,0 @@
----
-apiVersion: admissionregistration.k8s.io/v1
-kind: MutatingWebhookConfiguration
-metadata:
- name: cnpg-mutating-webhook-configuration
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
-webhooks:
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: cnpg-webhook-service
- namespace: databases
- path: /mutate-postgresql-cnpg-io-v1-backup
- port: 443
- failurePolicy: Fail
- name: mbackup.cnpg.io
- rules:
- - apiGroups:
- - postgresql.cnpg.io
- apiVersions:
- - v1
- operations:
- - CREATE
- - UPDATE
- resources:
- - backups
- sideEffects: None
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: cnpg-webhook-service
- namespace: databases
- path: /mutate-postgresql-cnpg-io-v1-cluster
- port: 443
- failurePolicy: Fail
- name: mcluster.cnpg.io
- rules:
- - apiGroups:
- - postgresql.cnpg.io
- apiVersions:
- - v1
- operations:
- - CREATE
- - UPDATE
- resources:
- - clusters
- sideEffects: None
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: cnpg-webhook-service
- namespace: databases
- path: /mutate-postgresql-cnpg-io-v1-database
- port: 443
- failurePolicy: Fail
- name: mdatabase.cnpg.io
- rules:
- - apiGroups:
- - postgresql.cnpg.io
- apiVersions:
- - v1
- operations:
- - CREATE
- - UPDATE
- resources:
- - databases
- sideEffects: None
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: cnpg-webhook-service
- namespace: databases
- path: /mutate-postgresql-cnpg-io-v1-scheduledbackup
- port: 443
- failurePolicy: Fail
- name: mscheduledbackup.cnpg.io
- rules:
- - apiGroups:
- - postgresql.cnpg.io
- apiVersions:
- - v1
- operations:
- - CREATE
- - UPDATE
- resources:
- - scheduledbackups
- sideEffects: None
-
--- HelmRelease: databases/cloudnative-pg ValidatingWebhookConfiguration: databases/cnpg-validating-webhook-configuration
+++ HelmRelease: databases/cloudnative-pg ValidatingWebhookConfiguration: databases/cnpg-validating-webhook-configuration
@@ -1,116 +0,0 @@
----
-apiVersion: admissionregistration.k8s.io/v1
-kind: ValidatingWebhookConfiguration
-metadata:
- name: cnpg-validating-webhook-configuration
- labels:
- app.kubernetes.io/name: cloudnative-pg
- app.kubernetes.io/instance: cloudnative-pg
- app.kubernetes.io/managed-by: Helm
-webhooks:
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: cnpg-webhook-service
- namespace: databases
- path: /validate-postgresql-cnpg-io-v1-backup
- port: 443
- failurePolicy: Fail
- name: vbackup.cnpg.io
- rules:
- - apiGroups:
- - postgresql.cnpg.io
- apiVersions:
- - v1
- operations:
- - CREATE
- - UPDATE
- resources:
- - backups
- sideEffects: None
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: cnpg-webhook-service
- namespace: databases
- path: /validate-postgresql-cnpg-io-v1-cluster
- port: 443
- failurePolicy: Fail
- name: vcluster.cnpg.io
- rules:
- - apiGroups:
- - postgresql.cnpg.io
- apiVersions:
- - v1
- operations:
- - CREATE
- - UPDATE
- resources:
- - clusters
- sideEffects: None
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: cnpg-webhook-service
- namespace: databases
- path: /validate-postgresql-cnpg-io-v1-scheduledbackup
- port: 443
- failurePolicy: Fail
- name: vscheduledbackup.cnpg.io
- rules:
- - apiGroups:
- - postgresql.cnpg.io
- apiVersions:
- - v1
- operations:
- - CREATE
- - UPDATE
- resources:
- - scheduledbackups
- sideEffects: None
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: cnpg-webhook-service
- namespace: databases
- path: /validate-postgresql-cnpg-io-v1-database
- port: 443
- failurePolicy: Fail
- name: vdatabase.cnpg.io
- rules:
- - apiGroups:
- - postgresql.cnpg.io
- apiVersions:
- - v1
- operations:
- - CREATE
- - UPDATE
- resources:
- - databases
- sideEffects: None
-- admissionReviewVersions:
- - v1
- clientConfig:
- service:
- name: cnpg-webhook-service
- namespace: databases
- path: /validate-postgresql-cnpg-io-v1-pooler
- port: 443
- failurePolicy: Fail
- name: vpooler.cnpg.io
- rules:
- - apiGroups:
- - postgresql.cnpg.io
- apiVersions:
- - v1
- operations:
- - CREATE
- - UPDATE
- resources:
- - poolers
- sideEffects: None
-
--- HelmRelease: networking/external-dns-cloudflare Deployment: networking/external-dns-cloudflare
+++ HelmRelease: networking/external-dns-cloudflare Deployment: networking/external-dns-cloudflare
@@ -35,12 +35,13 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- args:
- --cloudflare-dns-records-per-page=1000
- --cloudflare-proxied
- --domain-filter=youmans.io
@@ -56,13 +57,13 @@
- --txt-owner-id=k8s
- --txt-prefix=k8s.
- --zone-id-filter=$(CF_ZONE_ID)
envFrom:
- secretRef:
name: external-dns-cloudflare-secret
- image: registry.k8s.io/external-dns/external-dns:v0.18.0@sha256:f90738b35be265d50141d5c21e6f6049c3da7cd761682c40214117a2951b80bc
+ image: registry.k8s.io/external-dns/external-dns:v0.19.0@sha256:f76114338104264f655b23138444481b20bb9d6125742c7240fac25936fe164e
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 7979
initialDelaySeconds: 0
--- HelmRelease: media/autobrr Service: media/autobrr
+++ HelmRelease: media/autobrr Service: media/autobrr
@@ -9,18 +9,18 @@
app.kubernetes.io/name: autobrr
app.kubernetes.io/service: autobrr
namespace: media
spec:
type: ClusterIP
ports:
- - port: 7474
- targetPort: 7474
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
- - port: 9094
- targetPort: 9094
+ - port: 8080
+ targetPort: 8080
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: autobrr
app.kubernetes.io/instance: autobrr
app.kubernetes.io/name: autobrr
--- HelmRelease: media/autobrr Deployment: media/autobrr
+++ HelmRelease: media/autobrr Deployment: media/autobrr
@@ -29,25 +29,22 @@
app.kubernetes.io/name: autobrr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
+ fsGroup: 568
+ fsGroupChangePolicy: OnRootMismatch
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
- initContainers:
- - envFrom:
- - secretRef:
- name: autobrr-secret
- image: ghcr.io/home-operations/postgres-init:17
- name: init-db
containers:
- env:
- name: AUTOBRR__CHECK_FOR_UPDATES
value: 'false'
- name: AUTOBRR__HOST
value: 0.0.0.0
@@ -55,49 +52,54 @@
value: INFO
- name: AUTOBRR__METRICS_ENABLED
value: 'true'
- name: AUTOBRR__METRICS_HOST
value: 0.0.0.0
- name: AUTOBRR__METRICS_PORT
- value: '9094'
+ value: '8080'
- name: AUTOBRR__PORT
- value: '7474'
+ value: '80'
- name: TZ
value: America/New_York
envFrom:
- secretRef:
name: autobrr-secret
image: ghcr.io/autobrr/autobrr:v1.65.0@sha256:494e821e7a9c9a1279d1541522a65ed06b03d0b66563e827e3d29b9a63e61ddc
livenessProbe:
failureThreshold: 3
httpGet:
path: /api/healthz/liveness
- port: 7474
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /api/healthz/liveness
- port: 7474
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
+ - mountPath: /config
+ name: config
- mountPath: /config/log
name: tmpfs
subPath: log
- mountPath: /tmp
name: tmpfs
subPath: tmp
volumes:
+ - name: config
+ persistentVolumeClaim:
+ claimName: autobrr
- emptyDir: {}
name: tmpfs
--- HelmRelease: media/autobrr HTTPRoute: media/autobrr
+++ HelmRelease: media/autobrr HTTPRoute: media/autobrr
@@ -20,9 +20,9 @@
rules:
- backendRefs:
- group: ''
kind: Service
name: autobrr
namespace: media
- port: 7474
+ port: 80
weight: 1
--- HelmRelease: networking/echo-server Service: networking/echo-server
+++ HelmRelease: networking/echo-server Service: networking/echo-server
@@ -9,14 +9,14 @@
app.kubernetes.io/name: echo-server
app.kubernetes.io/service: echo-server
namespace: networking
spec:
type: ClusterIP
ports:
- - port: 8080
- targetPort: 8080
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: echo-server
app.kubernetes.io/instance: echo-server
app.kubernetes.io/name: echo-server
--- HelmRelease: networking/echo-server Deployment: networking/echo-server
+++ HelmRelease: networking/echo-server Deployment: networking/echo-server
@@ -33,38 +33,39 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- env:
+ - name: HTTP_PORT
+ value: '80'
- name: LOG_IGNORE_PATH
value: /healthz
- name: LOG_WITHOUT_NEWLINE
value: 'true'
- - name: PORT
- value: '8080'
- name: PROMETHEUS_ENABLED
value: 'true'
image: ghcr.io/mendhak/http-https-echo:37@sha256:f55000d9196bd3c853d384af7315f509d21ffb85de315c26e9874033b9f83e15
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
--- HelmRelease: networking/echo-server HTTPRoute: networking/echo-server
+++ HelmRelease: networking/echo-server HTTPRoute: networking/echo-server
@@ -20,9 +20,9 @@
rules:
- backendRefs:
- group: ''
kind: Service
name: echo-server
namespace: networking
- port: 8080
+ port: 80
weight: 1
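Most Deployments in this diff also gain hostUsers: false, which asks the kubelet to run the pod in a user namespace so in-container UIDs are remapped to unprivileged host UIDs. A minimal standalone sketch of the relevant pod-spec fields is below; the pod name and image are placeholders, and the cluster must have user-namespace support enabled for the field to take effect.

# Minimal illustration only; metadata and image are placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: userns-demo
spec:
  hostUsers: false            # run the pod in a user namespace (requires kubelet/runtime support)
  containers:
    - name: app
      image: registry.example.com/app:latest
      securityContext:
        runAsNonRoot: true
        runAsUser: 568
        runAsGroup: 568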
--- HelmRelease: networking/external-dns-unifi Deployment: networking/external-dns-unifi
+++ HelmRelease: networking/external-dns-unifi Deployment: networking/external-dns-unifi
@@ -35,12 +35,13 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
initContainers:
- env:
- name: UNIFI_HOST
value: https://unifi.internal
envFrom:
@@ -82,13 +83,13 @@
- --provider=webhook
- --registry=txt
- --source=gateway-httproute
- --source=service
- --txt-owner-id=k8s
- --txt-prefix=k8s.
- image: registry.k8s.io/external-dns/external-dns:v0.18.0@sha256:f90738b35be265d50141d5c21e6f6049c3da7cd761682c40214117a2951b80bc
+ image: registry.k8s.io/external-dns/external-dns:v0.19.0@sha256:f76114338104264f655b23138444481b20bb9d6125742c7240fac25936fe164e
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 7979
initialDelaySeconds: 0
--- HelmRelease: networking/cloudflared Service: networking/cloudflared
+++ HelmRelease: networking/cloudflared Service: networking/cloudflared
@@ -9,14 +9,14 @@
app.kubernetes.io/name: cloudflared
app.kubernetes.io/service: cloudflared
namespace: networking
spec:
type: ClusterIP
ports:
- - port: 2000
- targetPort: 2000
+ - port: 8080
+ targetPort: 8080
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: cloudflared
app.kubernetes.io/instance: cloudflared
app.kubernetes.io/name: cloudflared
--- HelmRelease: networking/cloudflared Deployment: networking/cloudflared
+++ HelmRelease: networking/cloudflared Deployment: networking/cloudflared
@@ -35,22 +35,23 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- args:
- tunnel
- run
env:
- name: NO_AUTOUPDATE
value: 'true'
- name: TUNNEL_METRICS
- value: 0.0.0.0:2000
+ value: 0.0.0.0:8080
- name: TUNNEL_ORIGIN_ENABLE_HTTP2
value: 'true'
- name: TUNNEL_POST_QUANTUM
value: 'true'
- name: TUNNEL_TRANSPORT_PROTOCOL
value: quic
@@ -59,22 +60,22 @@
name: cloudflared-secret
image: docker.io/cloudflare/cloudflared:2025.8.1@sha256:b77d84e8704db38db22c22661cf7e56468c526e3a6a5fe9c8b7c151452fa1472
livenessProbe:
failureThreshold: 3
httpGet:
path: /ready
- port: 2000
+ port: 8080
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /ready
- port: 2000
+ port: 8080
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
--- HelmRelease: observability/karma Service: observability/karma
+++ HelmRelease: observability/karma Service: observability/karma
@@ -9,14 +9,14 @@
app.kubernetes.io/name: karma
app.kubernetes.io/service: karma
namespace: observability
spec:
type: ClusterIP
ports:
- - port: 8080
- targetPort: 8080
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: karma
app.kubernetes.io/instance: karma
app.kubernetes.io/name: karma
--- HelmRelease: observability/karma Deployment: observability/karma
+++ HelmRelease: observability/karma Deployment: observability/karma
@@ -8,15 +8,15 @@
app.kubernetes.io/instance: karma
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: karma
namespace: observability
spec:
revisionHistoryLimit: 3
- replicas: 2
+ replicas: 1
strategy:
- type: RollingUpdate
+ type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: karma
app.kubernetes.io/name: karma
app.kubernetes.io/instance: karma
template:
@@ -33,32 +33,35 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: ALERTMANAGER_URI
value: http://alertmanager-operated.observability.svc.cluster.local:9093
+ - name: LISTEN_PORT
+ value: '80'
image: ghcr.io/prymitive/karma:v0.121@sha256:9f0ad820df1b1d0af562de3b3c545a52ddfce8d7492f434a2276e45f3a1f7e28
livenessProbe:
failureThreshold: 3
httpGet:
path: /health
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /health
- port: 8080
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
--- HelmRelease: observability/karma HTTPRoute: observability/karma
+++ HelmRelease: observability/karma HTTPRoute: observability/karma
@@ -20,9 +20,9 @@
rules:
- backendRefs:
- group: ''
kind: Service
name: karma
namespace: observability
- port: 8080
+ port: 80
weight: 1
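karma (and system-upgrade-controller further down) also drop from two replicas under RollingUpdate to a single replica under Recreate, which terminates the old pod before the replacement starts. A minimal sketch of that stanza, with placeholder names and image:

# Placeholder Deployment fragment showing the replicas/strategy shape used above.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: single-replica-app
spec:
  replicas: 1
  strategy:
    type: Recreate            # old pod is removed before the new one is scheduled
  selector:
    matchLabels:
      app: single-replica-app
  template:
    metadata:
      labels:
        app: single-replica-app
    spec:
      containers:
        - name: app
          image: registry.example.com/app:latest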
--- HelmRelease: observability/gatus Deployment: observability/gatus
+++ HelmRelease: observability/gatus Deployment: observability/gatus
@@ -35,26 +35,15 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
initContainers:
- - envFrom:
- - secretRef:
- name: gatus-secret
- image: ghcr.io/home-operations/postgres-init:17
- name: init-db
- volumeMounts:
- - mountPath: /config
- name: config
- - mountPath: /config/config.yaml
- name: config-file
- readOnly: true
- subPath: config.yaml
- env:
- name: FOLDER
value: /config
- name: LABEL
value: gatus.io/enabled
- name: METHOD
@@ -82,15 +71,12 @@
- name: GATUS_DELAY_START_SECONDS
value: '5'
- name: GATUS_WEB_PORT
value: '80'
- name: TZ
value: America/New_York
- envFrom:
- - secretRef:
- name: gatus-secret
image: ghcr.io/twin/gatus:v5.23.2@sha256:041514059279f102d8e549a7c7c9f813ae9a0bf505c6d7c37aea9201af0bec3a
livenessProbe:
failureThreshold: 3
httpGet:
path: /health
port: 80
--- HelmRelease: networking/smtp-relay Deployment: networking/smtp-relay
+++ HelmRelease: networking/smtp-relay Deployment: networking/smtp-relay
@@ -35,12 +35,13 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: SMTP_RELAY_METRICS_PORT
value: '8080'
- name: SMTP_RELAY_SERVER_PORT
@@ -70,20 +71,20 @@
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
+ - mountPath: /cache
+ name: cache
- mountPath: /data/maddy.conf
name: config
readOnly: true
subPath: maddy.conf
- - mountPath: /tmp
- name: tmp
volumes:
+ - emptyDir:
+ medium: Memory
+ name: cache
- configMap:
name: smtp-relay-configmap
name: config
- - emptyDir:
- medium: Memory
- name: tmp
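The smtp-relay change swaps the /tmp scratch mount for a /cache mount, still backed by a memory-medium emptyDir, so maddy's cache lives in RAM and is discarded with the pod. A minimal sketch of such a volume, with the pod, container, and image names as placeholders:

# Sketch of a RAM-backed scratch volume; names and image are placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: ram-cache-demo
spec:
  containers:
    - name: app
      image: registry.example.com/maddy:latest
      volumeMounts:
        - mountPath: /cache
          name: cache
  volumes:
    - name: cache
      emptyDir:
        medium: Memory        # tmpfs-backed; contents vanish when the pod is deleted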
--- HelmRelease: media/radarr Service: media/radarr
+++ HelmRelease: media/radarr Service: media/radarr
@@ -9,14 +9,14 @@
app.kubernetes.io/name: radarr
app.kubernetes.io/service: radarr
namespace: media
spec:
type: ClusterIP
ports:
- - port: 7878
- targetPort: 7878
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: radarr
app.kubernetes.io/instance: radarr
app.kubernetes.io/name: radarr
--- HelmRelease: media/radarr Deployment: media/radarr
+++ HelmRelease: media/radarr Deployment: media/radarr
@@ -40,23 +40,12 @@
supplementalGroups:
- 65536
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
- initContainers:
- - envFrom:
- - secretRef:
- name: radarr-secret
- image: ghcr.io/home-operations/postgres-init:17
- name: init-db
- volumeMounts:
- - mountPath: /config
- name: config
- - mountPath: /media
- name: media
containers:
- env:
- name: RADARR__APP__INSTANCENAME
value: Radarr
- name: RADARR__APP__THEME
value: dark
@@ -64,59 +53,64 @@
value: External
- name: RADARR__AUTH__REQUIRED
value: DisabledForLocalAddresses
- name: RADARR__LOG__LEVEL
value: info
- name: RADARR__SERVER__PORT
- value: '7878'
+ value: '80'
- name: RADARR__UPDATE__BRANCH
value: develop
- name: TZ
value: America/New_York
envFrom:
- secretRef:
name: radarr-secret
image: ghcr.io/home-operations/radarr:5.27.5.10184@sha256:af67ce80302e21f7228e2da671943c48b27a25faf27e19c4fcb7ad33c1c0e3c0
livenessProbe:
failureThreshold: 3
httpGet:
path: /ping
- port: 7878
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /ping
- port: 7878
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /config
name: config
+ - mountPath: /config/MediaCover
+ name: config-cache
- mountPath: /media
name: media
- mountPath: /config/logs
name: tmpfs
subPath: logs
- mountPath: /tmp
name: tmpfs
subPath: tmp
volumes:
- name: config
persistentVolumeClaim:
claimName: radarr
+ - name: config-cache
+ persistentVolumeClaim:
+ claimName: radarr-cache
- name: media
nfs:
path: /volume1/PLEX
server: nas.internal
- emptyDir: {}
name: tmpfs
--- HelmRelease: media/radarr HTTPRoute: media/radarr
+++ HelmRelease: media/radarr HTTPRoute: media/radarr
@@ -20,9 +20,9 @@
rules:
- backendRefs:
- group: ''
kind: Service
name: radarr
namespace: media
- port: 7878
+ port: 80
weight: 1
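radarr and sonarr both gain a second PVC mounted at /config/MediaCover, so the bulky cover-art cache lands on its own volume while the main /config claim stays lean. A minimal sketch of the cache claim follows; the size and storage class are assumptions, since only the claim name and mount appear in this diff.

# Assumed size; the actual claim spec is not part of this rendered diff.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: radarr-cache
  namespace: media
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi           # assumption: the real size is defined elsewhere in the repo

Because the cache volume's mountPath sits under /config, it is layered over the MediaCover subdirectory of the existing config mount inside the container.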
--- HelmRelease: media/tautulli Service: media/tautulli
+++ HelmRelease: media/tautulli Service: media/tautulli
@@ -9,14 +9,14 @@
app.kubernetes.io/name: tautulli
app.kubernetes.io/service: tautulli
namespace: media
spec:
type: ClusterIP
ports:
- - port: 8181
- targetPort: 8181
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: tautulli
app.kubernetes.io/instance: tautulli
app.kubernetes.io/name: tautulli
--- HelmRelease: media/tautulli Deployment: media/tautulli
+++ HelmRelease: media/tautulli Deployment: media/tautulli
@@ -5,12 +5,14 @@
name: tautulli
labels:
app.kubernetes.io/controller: tautulli
app.kubernetes.io/instance: tautulli
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tautulli
+ annotations:
+ reloader.stakater.com/auto: 'true'
namespace: media
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
@@ -35,34 +37,40 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- env:
- - name: TAUTULLI__PORT
- value: '8181'
+ - name: TAUTULLI_HTTP_BASE_URL
+ value: https://tautulli.youmans.io
+ - name: TAUTULLI_HTTP_PORT
+ value: '80'
- name: TZ
value: America/New_York
+ envFrom:
+ - secretRef:
+ name: tautulli-secret
image: ghcr.io/home-operations/tautulli:2.15.3@sha256:3e0eaca8c082ebe121a0ae9125bea1b4e2d177fca34ac8df4ec14a28e62f63a4
livenessProbe:
failureThreshold: 3
httpGet:
path: /status
- port: 8181
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /status
- port: 8181
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
--- HelmRelease: media/tautulli HTTPRoute: media/tautulli
+++ HelmRelease: media/tautulli HTTPRoute: media/tautulli
@@ -20,9 +20,9 @@
rules:
- backendRefs:
- group: ''
kind: Service
name: tautulli
namespace: media
- port: 8181
+ port: 80
weight: 1
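tautulli picks up a tautulli-secret via envFrom plus the reloader.stakater.com/auto annotation, which, assuming Stakater Reloader runs in the cluster, rolls the Deployment whenever a referenced Secret or ConfigMap changes. A minimal sketch of that pattern; the Secret key and value are hypothetical and not taken from this PR.

# Placeholder example of the Reloader pattern; the secret contents are invented for illustration.
apiVersion: v1
kind: Secret
metadata:
  name: tautulli-secret
  namespace: media
stringData:
  TAUTULLI_API_KEY: example-only       # hypothetical key; real values are not in this diff
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tautulli
  namespace: media
  annotations:
    reloader.stakater.com/auto: "true" # Reloader restarts the pods when the Secret changes
spec:
  replicas: 1
  selector:
    matchLabels:
      app: tautulli
  template:
    metadata:
      labels:
        app: tautulli
    spec:
      containers:
        - name: app
          image: ghcr.io/home-operations/tautulli:2.15.3
          envFrom:
            - secretRef:
                name: tautulli-secret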
--- HelmRelease: observability/kromgo Service: observability/kromgo
+++ HelmRelease: observability/kromgo Service: observability/kromgo
@@ -9,18 +9,18 @@
app.kubernetes.io/name: kromgo
app.kubernetes.io/service: kromgo
namespace: observability
spec:
type: ClusterIP
ports:
- - port: 8888
- targetPort: 8888
+ - port: 8080
+ targetPort: 8080
protocol: TCP
name: health
- - port: 8080
- targetPort: 8080
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: kromgo
app.kubernetes.io/instance: kromgo
app.kubernetes.io/name: kromgo
--- HelmRelease: observability/kromgo Deployment: observability/kromgo
+++ HelmRelease: observability/kromgo Deployment: observability/kromgo
@@ -35,36 +35,37 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: HEALTH_PORT
- value: '8888'
+ value: '8080'
- name: PROMETHEUS_URL
value: http://prometheus-operated.observability.svc.cluster.local:9090
- name: SERVER_PORT
- value: '8080'
+ value: '80'
image: ghcr.io/kashalls/kromgo:v0.7.1@sha256:d8fca4ff9b696abc4ca019c76fa629c39e844e4d9435f4afac87a97b1eeae152
livenessProbe:
failureThreshold: 3
httpGet:
path: /-/ready
- port: 8888
+ port: 8080
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /-/ready
- port: 8888
+ port: 8080

initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
--- HelmRelease: observability/kromgo HTTPRoute: observability/kromgo
+++ HelmRelease: observability/kromgo HTTPRoute: observability/kromgo
@@ -20,9 +20,9 @@
rules:
- backendRefs:
- group: ''
kind: Service
name: kromgo
namespace: observability
- port: 8888
+ port: 80
weight: 1
--- HelmRelease: media/sonarr Service: media/sonarr
+++ HelmRelease: media/sonarr Service: media/sonarr
@@ -9,14 +9,14 @@
app.kubernetes.io/name: sonarr
app.kubernetes.io/service: sonarr
namespace: media
spec:
type: ClusterIP
ports:
- - port: 8989
- targetPort: 8989
+ - port: 80
+ targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: sonarr
app.kubernetes.io/instance: sonarr
app.kubernetes.io/name: sonarr
--- HelmRelease: media/sonarr Deployment: media/sonarr
+++ HelmRelease: media/sonarr Deployment: media/sonarr
@@ -40,23 +40,12 @@
supplementalGroups:
- 65536
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
- initContainers:
- - envFrom:
- - secretRef:
- name: sonarr-secret
- image: ghcr.io/home-operations/postgres-init:17
- name: init-db
- volumeMounts:
- - mountPath: /config
- name: config
- - mountPath: /media
- name: media
containers:
- env:
- name: SONARR__APP__INSTANCENAME
value: Sonarr
- name: SONARR__APP__THEME
value: dark
@@ -64,59 +53,64 @@
value: External
- name: SONARR__AUTH__REQUIRED
value: DisabledForLocalAddresses
- name: SONARR__LOG__LEVEL
value: info
- name: SONARR__SERVER__PORT
- value: '8989'
+ value: '80'
- name: SONARR__UPDATE__BRANCH
value: develop
- name: TZ
value: America/New_York
envFrom:
- secretRef:
name: sonarr-secret
image: ghcr.io/home-operations/sonarr:4.0.15.2940@sha256:ca6c735014bdfb04ce043bf1323a068ab1d1228eea5bab8305ca0722df7baf78
livenessProbe:
failureThreshold: 3
httpGet:
path: /ping
- port: 8989
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
httpGet:
path: /ping
- port: 8989
+ port: 80
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /config
name: config
+ - mountPath: /config/MediaCover
+ name: config-cache
- mountPath: /media
name: media
- mountPath: /config/logs
name: tmpfs
subPath: logs
- mountPath: /tmp
name: tmpfs
subPath: tmp
volumes:
- name: config
persistentVolumeClaim:
claimName: sonarr
+ - name: config-cache
+ persistentVolumeClaim:
+ claimName: sonarr-cache
- name: media
nfs:
path: /volume1/PLEX
server: nas.internal
- emptyDir: {}
name: tmpfs
--- HelmRelease: media/sonarr HTTPRoute: media/sonarr
+++ HelmRelease: media/sonarr HTTPRoute: media/sonarr
@@ -20,9 +20,9 @@
rules:
- backendRefs:
- group: ''
kind: Service
name: sonarr
namespace: media
- port: 8989
+ port: 80
weight: 1
--- HelmRelease: system-upgrade/system-upgrade-controller Deployment: system-upgrade/system-upgrade-controller
+++ HelmRelease: system-upgrade/system-upgrade-controller Deployment: system-upgrade/system-upgrade-controller
@@ -8,15 +8,15 @@
app.kubernetes.io/instance: system-upgrade-controller
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: system-upgrade-controller
namespace: system-upgrade
spec:
revisionHistoryLimit: 3
- replicas: 2
+ replicas: 1
strategy:
- type: RollingUpdate
+ type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: system-upgrade-controller
app.kubernetes.io/name: system-upgrade-controller
app.kubernetes.io/instance: system-upgrade-controller
template:
@@ -33,23 +33,22 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: SYSTEM_UPGRADE_CONTROLLER_LEADER_ELECT
value: 'true'
- name: SYSTEM_UPGRADE_CONTROLLER_NAME
value: system-upgrade-controller
- name: SYSTEM_UPGRADE_CONTROLLER_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
+ value: system-upgrade
- name: SYSTEM_UPGRADE_CONTROLLER_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: SYSTEM_UPGRADE_JOB_PRIVILEGED
value: 'false'
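The controller previously read its namespace through the downward API; the new rendering hard-codes it to system-upgrade. Both forms are valid env definitions, roughly as sketched below with placeholder names:

# Both forms side by side in a throwaway Pod; names and image are placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: env-demo
  namespace: system-upgrade
spec:
  containers:
    - name: app
      image: registry.example.com/app:latest
      env:
        - name: NAMESPACE_FROM_DOWNWARD_API
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace   # resolved by the kubelet at pod start
        - name: NAMESPACE_STATIC
          value: system-upgrade               # baked into the manifest; must be updated by hand

The downward-API form tracks the namespace automatically if the release is ever installed elsewhere, while the literal value keeps the rendered manifest fully static.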
--- HelmRelease: observability/unpoller Service: observability/unpoller
+++ HelmRelease: observability/unpoller Service: observability/unpoller
@@ -9,14 +9,14 @@
app.kubernetes.io/name: unpoller
app.kubernetes.io/service: unpoller
namespace: observability
spec:
type: ClusterIP
ports:
- - port: 9130
- targetPort: 9130
+ - port: 8080
+ targetPort: 8080
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: unpoller
app.kubernetes.io/instance: unpoller
app.kubernetes.io/name: unpoller
--- HelmRelease: observability/unpoller Deployment: observability/unpoller
+++ HelmRelease: observability/unpoller Deployment: observability/unpoller
@@ -35,43 +35,48 @@
runAsGroup: 568
runAsNonRoot: true
runAsUser: 568
hostIPC: false
hostNetwork: false
hostPID: false
+ hostUsers: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: America/New_York
- name: UP_INFLUXDB_DISABLE
value: 'true'
+ - name: UP_PROMETHEUS_HTTP_LISTEN
+ value: 0.0.0.0:8080
- name: UP_UNIFI_DEFAULT_ROLE
value: k8s-gitops
- name: UP_UNIFI_DEFAULT_URL
value: https://unifi.internal
- name: UP_UNIFI_DEFAULT_VERIFY_SSL
value: 'false'
envFrom:
- secretRef:
name: unpoller-secret
image: ghcr.io/unpoller/unpoller:v2.15.4@sha256:788a890f2dc5aef3e99ce430917221c43b4e084464d38bc6537a8c7294ef8770
livenessProbe:
failureThreshold: 3
+ httpGet:
+ path: /health
+ port: 8080
initialDelaySeconds: 0
periodSeconds: 10
- tcpSocket:
- port: 9130
timeoutSeconds: 1
name: app
readinessProbe:
failureThreshold: 3
+ httpGet:
+ path: /health
+ port: 8080
initialDelaySeconds: 0
periodSeconds: 10
- tcpSocket:
- port: 9130
timeoutSeconds: 1
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
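Finally, unpoller moves its exporter from port 9130 to 0.0.0.0:8080 via UP_PROMETHEUS_HTTP_LISTEN and swaps the plain TCP probes for HTTP checks against /health on the same port. A minimal sketch of the probe shape, with the pod wrapper as a placeholder:

# Probe fragment mirroring the change above; the pod name is a placeholder.
apiVersion: v1
kind: Pod
metadata:
  name: unpoller-probe-demo
spec:
  containers:
    - name: app
      image: ghcr.io/unpoller/unpoller:v2.15.4
      env:
        - name: UP_PROMETHEUS_HTTP_LISTEN
          value: 0.0.0.0:8080            # exporter listen address; probes must target this port
      livenessProbe:
        httpGet:
          path: /health
          port: 8080
        periodSeconds: 10
      readinessProbe:
        httpGet:
          path: /health
          port: 8080
        periodSeconds: 10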
Labels: area/bootstrap, area/github, area/kubernetes, area/talos, area/taskfile