194 Commits

Author SHA1 Message Date
Renovate Bot
987097baba chore(deps): update helm release nextcloud to v8.9.1 2026-02-15 00:02:50 +00:00
Daniël Groothuis
9073ff9094 chore(osx): Added OSX container 2026-02-02 11:52:14 +01:00
Daniël Groothuis
52b3ee5eaa chore(osx): Added OSX container 2026-02-02 11:51:30 +01:00
Daniël Groothuis
078148732b chore(osx): Added OSX container 2026-02-02 11:47:26 +01:00
Daniël Groothuis
5a5caf45ee chore(osx): Added OSX container 2026-02-02 11:46:29 +01:00
Daniël Groothuis
2080d3d913 chore(osx): Added OSX container 2026-02-02 11:43:49 +01:00
Daniël Groothuis
797dc958d2 chore(osx): Added OSX container 2026-02-02 11:39:46 +01:00
bbfc8bbb27 Merge pull request 'chore(deps): update dependency argoproj/argo-cd to v3.2.6' (#35) from renovate/argoproj-argo-cd-3.x into main
Reviewed-on: #35
2026-01-24 16:00:25 +00:00
a5f0a6b081 Merge pull request 'chore(deps): update helm release penpot to v0.32.0' (#37) from renovate/penpot-0.x into main
Reviewed-on: #37
2026-01-24 15:58:56 +00:00
1bf0be751b Merge pull request 'chore(deps): update helm release gitea to v12.5.0' (#39) from renovate/gitea-12.x into main
Reviewed-on: #39
2026-01-24 15:55:26 +00:00
91ecd3b4c1 Update clusters/artemis/apps/kustomization.yaml 2026-01-24 15:46:50 +00:00
f5b3b5efe5 Update manifests/artemis/immich/volumeClaims.yaml 2026-01-24 15:42:58 +00:00
657c26e122 Update manifests/artemis/immich/values.yaml 2026-01-24 15:41:53 +00:00
49aa5f32f0 Update clusters/artemis/apps/external-secrets/application.yaml 2026-01-24 15:36:11 +00:00
a186c62acd Update manifests/artemis/mailu/kustomization.yaml 2026-01-24 15:14:42 +00:00
3818186562 Update manifests/artemis/mailu/kustomization.yaml 2026-01-24 15:12:53 +00:00
e2517be2b6 Update manifests/artemis/gitea/values.yaml 2026-01-24 15:07:09 +00:00
Renovate Bot
db123ab04f chore(deps): update helm release gitea to v12.5.0 2026-01-24 15:06:33 +00:00
Renovate Bot
2a206af9f6 chore(deps): update dependency argoproj/argo-cd to v3.2.6 2026-01-23 00:02:35 +00:00
Renovate Bot
1f12d004d7 chore(deps): update helm release penpot to v0.32.0 2026-01-07 21:39:10 +00:00
a9dc9a5cb7 Merge pull request 'chore(deps): update helm release external-secrets to v1' (#28) from renovate/external-secrets-1.x into main
Reviewed-on: #28
2025-12-22 09:36:51 +00:00
bbe1014a37 Merge pull request 'chore(deps): update actions/checkout action to v6' (#31) from renovate/actions-checkout-6.x into main
Reviewed-on: #31
2025-12-22 09:35:16 +00:00
8ffb57e1b7 Merge pull request 'chore(deps): update helm release penpot to v0.30.0' (#30) from renovate/penpot-0.x into main
Reviewed-on: #30
2025-12-22 09:32:41 +00:00
09961b68f9 Merge pull request 'chore(deps): update helm release immich to v0.10.3' (#29) from renovate/immich-0.x into main
Reviewed-on: #29
2025-12-22 09:03:04 +00:00
1352394ca5 Merge pull request 'chore(deps): update helm release mailu to v2.6.3' (#25) from renovate/mailu-2.x into main
Reviewed-on: #25
2025-12-22 08:53:36 +00:00
4fd96cf953 Merge pull request 'chore(deps): update dependency argoproj/argo-cd to v3.2.2' (#33) from renovate/argoproj-argo-cd-3.x into main
Reviewed-on: #33
2025-12-22 08:51:20 +00:00
Renovate Bot
680e0822a6 chore(deps): update helm release external-secrets to v1 2025-12-22 08:07:23 +00:00
Renovate Bot
3c0f9a713d chore(deps): update helm release mailu to v2.6.3 2025-12-22 08:07:11 +00:00
Renovate Bot
f9e156e522 chore(deps): update dependency argoproj/argo-cd to v3.2.2 2025-12-22 08:07:01 +00:00
Renovate Bot
5861f677a4 chore(deps): update actions/checkout action to v6 2025-11-21 00:01:59 +00:00
Renovate Bot
aa92ad14ef chore(deps): update helm release penpot to v0.30.0 2025-11-15 00:02:32 +00:00
Renovate Bot
fd2df98297 chore(deps): update helm release immich to v0.10.3 2025-11-15 00:02:27 +00:00
Daniël Groothuis
4a17940c9f chore(osx): Added OSX container 2025-11-11 17:37:01 +01:00
Daniël Groothuis
01c5b31cbb chore(osx): Added OSX container 2025-11-11 17:32:41 +01:00
Daniël Groothuis
83a0e6b8ee chore(mailu): Reverted rate limit 2025-11-10 15:10:25 +01:00
Daniël Groothuis
0e40fc2ca4 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 22:49:02 +01:00
Daniël Groothuis
36548f1ec5 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 22:47:10 +01:00
Daniël Groothuis
c8b5e32163 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 22:41:27 +01:00
Daniël Groothuis
14b0561828 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 22:40:44 +01:00
Daniël Groothuis
de8b6e0001 Merge remote-tracking branch 'origin/main' 2025-11-06 22:32:25 +01:00
Daniël Groothuis
e99d6cd772 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 22:32:12 +01:00
9201b1ddc1 Merge pull request 'chore(deps): update helm release nextcloud to v8.5.2' (#27) from renovate/nextcloud-8.x into main
Reviewed-on: #27
2025-11-06 21:08:08 +00:00
Daniël Groothuis
2b31072b1d chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 21:54:13 +01:00
Daniël Groothuis
3c9c55b4d3 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 21:14:36 +01:00
Renovate Bot
afae9ae15b chore(deps): update helm release nextcloud to v8.5.2 2025-11-06 00:02:42 +00:00
Daniël Groothuis
0eda7b4ad2 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:56:30 +01:00
Daniël Groothuis
e3587553d7 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:50:48 +01:00
Daniël Groothuis
4c5dd7ae3d chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:43:31 +01:00
Daniël Groothuis
ee3048f478 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:37:49 +01:00
Daniël Groothuis
179eb7a6dc chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:37:01 +01:00
Daniël Groothuis
b1103e3136 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:36:25 +01:00
Daniël Groothuis
2cedf7f2b9 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:34:21 +01:00
Daniël Groothuis
c1a59cb710 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:30:04 +01:00
Daniël Groothuis
845ba4ead0 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:28:46 +01:00
Daniël Groothuis
001138f965 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:28:03 +01:00
Daniël Groothuis
5391a06e24 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:24:09 +01:00
Daniël Groothuis
e7b62d426c chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:17:08 +01:00
Daniël Groothuis
a64bdf2ed0 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:14:17 +01:00
Daniël Groothuis
4f5acfc9a6 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:13:31 +01:00
Daniël Groothuis
c4754ea41a chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:00:46 +01:00
Daniël Groothuis
134581bcce chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 20:54:00 +01:00
Daniël Groothuis
3f3d99e8d0 chore(backstage): Updated catalogs 2025-11-05 20:35:40 +01:00
Daniël Groothuis
5e57066ccb chore(backstage): Updated catalogs 2025-11-05 20:34:39 +01:00
Daniël Groothuis
3e13ddb1cb feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 16:09:37 +01:00
Daniël Groothuis
16fcec670e feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:55:08 +01:00
Daniël Groothuis
c16d485a54 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:52:37 +01:00
Daniël Groothuis
cf69895b68 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:44:31 +01:00
Daniël Groothuis
e85a70957f feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:34:47 +01:00
Daniël Groothuis
82e626be39 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:29:50 +01:00
Daniël Groothuis
aa4425cf19 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:26:49 +01:00
Daniël Groothuis
06b192f780 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:19:14 +01:00
7c2a40f2ac Merge pull request 'chore(deps): update dependency argoproj/argo-cd to v3.2.0' (#26) from renovate/argoproj-argo-cd-3.x into main
Reviewed-on: #26
2025-11-05 08:08:38 +00:00
Renovate Bot
c3068f9693 chore(deps): update dependency argoproj/argo-cd to v3.2.0 2025-11-05 00:02:07 +00:00
f76ec5a53b Update renovate.json 2025-11-03 14:27:12 +00:00
d28610a28a revert 9e2961f09c
revert Merge pull request 'chore(deps): update helm release mailu to v2.5.1' (#14) from renovate/mailu-2.x into main

Reviewed-on: #14
2025-11-03 14:23:02 +00:00
9e2961f09c Merge pull request 'chore(deps): update helm release mailu to v2.5.1' (#14) from renovate/mailu-2.x into main
Reviewed-on: #14
2025-11-03 14:21:31 +00:00
a1fba4a308 Merge pull request 'chore(deps): update helm release vaultwarden to v0.34.4' (#16) from renovate/vaultwarden-0.x into main
Reviewed-on: #16
2025-11-03 10:40:14 +00:00
0fbfd6f5f0 Merge pull request 'chore(deps): update helm release vault to v0.31.0' (#15) from renovate/vault-0.x into main
Reviewed-on: #15
2025-11-03 10:17:25 +00:00
8e550f98c5 Merge pull request 'chore(deps): update dependency argoproj/argo-cd to v3' (#24) from renovate/argoproj-argo-cd-3.x into main
Reviewed-on: #24
2025-11-03 10:14:32 +00:00
Renovate Bot
75ab95d9b1 chore(deps): update dependency argoproj/argo-cd to v3 2025-11-03 10:13:36 +00:00
7159dc0b20 Merge pull request 'chore(deps): update dependency argoproj/argo-cd to v2.14.20' (#21) from renovate/argoproj-argo-cd-2.x into main
Reviewed-on: #21
2025-11-03 10:04:50 +00:00
5e7c1acbef Update manifests/artemis/nextcloud/values.yaml 2025-11-02 18:32:39 +00:00
f735ec9b22 Update manifests/artemis/uptime-kuma/deployment.yaml 2025-11-02 17:50:40 +00:00
cea23020dc Merge pull request 'chore(deps): update actions/checkout action to v5' (#22) from renovate/actions-checkout-5.x into main
Reviewed-on: #22
2025-11-02 17:49:29 +00:00
Renovate Bot
9b1fc474ad chore(deps): update actions/checkout action to v5 2025-11-02 16:30:41 +00:00
Renovate Bot
8cd8dbc54d chore(deps): update dependency argoproj/argo-cd to v2.14.20 2025-11-02 16:30:31 +00:00
7b141bb89b Update renovate.json 2025-11-02 16:17:18 +00:00
4d523486b5 Update manifests/artemis/gitea/values.yaml 2025-11-02 16:11:57 +00:00
21bb310576 Update manifests/artemis/gitea/values.yaml 2025-11-02 16:09:45 +00:00
820c6703cc Update manifests/artemis/gitea/values.yaml 2025-11-02 16:03:30 +00:00
Daniël Groothuis
a217a2e5fc feat(nextcloud): Added first draft for nextcloud deployment 2025-11-01 23:27:59 +01:00
Daniël Groothuis
357d494073 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-01 23:17:23 +01:00
Daniël Groothuis
d15ff6c2c0 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-01 23:16:54 +01:00
Daniël Groothuis
a5a8c0912a feat(nextcloud): Added first draft for nextcloud deployment 2025-11-01 23:15:54 +01:00
Daniël Groothuis
18e368be40 feat(owncloud): Added first draft for owncloud deployment 2025-11-01 22:57:21 +01:00
Daniël Groothuis
eec40a680c feat(owncloud): Added first draft for owncloud deployment 2025-11-01 22:52:41 +01:00
Daniël Groothuis
c20e5f2d34 feat(owncloud): Added first draft for owncloud deployment 2025-11-01 20:25:59 +01:00
Daniël Groothuis
9cac63a132 feat(owncloud): Added first draft for owncloud deployment 2025-11-01 20:23:42 +01:00
Daniël Groothuis
512186fa1c feat(owncloud): Added first draft for owncloud deployment 2025-11-01 20:19:26 +01:00
Daniël Groothuis
d0574f0a9f feat(owncloud): Added first draft for owncloud deployment 2025-11-01 20:17:32 +01:00
4e56529d45 Update manifests/artemis/gitea/values.yaml
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-11-01 15:52:44 +00:00
4744de9f44 Update manifests/artemis/gitea/values.yaml
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m0s
2025-11-01 15:34:17 +00:00
Renovate Bot
28b69c85eb chore(deps): update helm release vaultwarden to v0.34.4
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m19s
2025-11-01 15:24:24 +00:00
Renovate Bot
87d63496a7 chore(deps): update helm release vault to v0.31.0
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 58s
2025-11-01 15:17:31 +00:00
Renovate Bot
acafcd1841 chore(deps): update helm release mailu to v2.5.1 2025-11-01 15:17:27 +00:00
6c952fc9c0 Update renovate.json
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 56s
2025-11-01 15:16:01 +00:00
a5a80e8949 Update renovate.json
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 57s
2025-11-01 15:07:15 +00:00
e2eed7bdaa Merge pull request 'chore(deps): update helm release external-secrets to v0.20.4' (#12) from renovate/external-secrets-0.x into main
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m13s
Reviewed-on: #12
2025-11-01 15:02:44 +00:00
Renovate Bot
f4c7340216 chore(deps): update helm release external-secrets to v0.20.4
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m11s
2025-11-01 00:01:42 +00:00
600999a08f Merge pull request 'chore(deps): update helm release cloudnative-pg to v0.26.1' (#7) from renovate/cloudnative-pg-0.x into main
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 58s
Reviewed-on: #7
2025-10-31 17:07:58 +00:00
13407630d5 Update renovate.json
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m3s
2025-10-31 17:01:23 +00:00
3026b1ef33 Update renovate.json
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 56s
2025-10-31 16:56:29 +00:00
Renovate Bot
526b8073ba chore(deps): update helm release cloudnative-pg to v0.26.1
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 59s
2025-10-31 16:48:50 +00:00
8044148153 Add renovate.json
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-31 16:43:06 +00:00
Daniël Groothuis
fbb9dc6803 feat(digital-garden): Added digital garden deployment
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m37s
2025-10-31 17:15:28 +01:00
Daniël Groothuis
2b52a58b7a feat(digital-garden): Added digital garden deployment
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-31 17:15:00 +01:00
Daniël Groothuis
146f0aba8b feat(digital-garden): Added digital garden deployment
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-31 17:12:27 +01:00
Daniël Groothuis
70c6c62d90 feat(digital-garden): Added digital garden deployment 2025-10-31 17:03:21 +01:00
Daniël Groothuis
630efbeaaf feat(digital-garden): Added digital garden deployment 2025-10-31 17:01:55 +01:00
Daniël Groothuis
b3753b3400 chore(kener): Removed Kener as it doesn't bring any value 2025-10-30 11:29:34 +01:00
Daniël Groothuis
f0c1a554c8 chore(kener): Added kustomize file 2025-10-30 10:54:00 +01:00
Daniël Groothuis
5a28daec87 chore(kener): Added kustomize file 2025-10-30 10:48:39 +01:00
Daniël Groothuis
c47e7ed3d0 chore(kener): First implementation of Kener 2025-10-30 10:47:16 +01:00
Daniël Groothuis
b8e858f21f chore(backstage): Removed backstage 2025-10-30 10:08:48 +01:00
Daniël Groothuis
1db12d6e31 chore(backstage): Added ingress for backstage 2025-10-29 09:33:12 +01:00
Daniël Groothuis
2db587a457 chore(backstage): Added database and secrets for backstage 2025-10-29 08:59:57 +01:00
Daniël Groothuis
ad65e98c58 chore(backstage): Added database and secrets for backstage 2025-10-29 08:39:07 +01:00
Daniël Groothuis
3562ec6e05 chore(backstage): Added database and secrets for backstage 2025-10-29 08:27:26 +01:00
Daniël Groothuis
a22925d95e chore(backstage): Added database and secrets for backstage 2025-10-28 21:55:29 +01:00
Daniël Groothuis
718c581ccd chore(backstage): Added database and secrets for backstage 2025-10-28 21:55:13 +01:00
Daniël Groothuis
9f2393a478 chore(backstage): Added database and secrets for backstage 2025-10-28 21:52:46 +01:00
Daniël Groothuis
d16efcde3c chore(backstage): Added database and secrets for backstage 2025-10-28 21:45:36 +01:00
Daniël Groothuis
8603742901 chore(backstage): Added database and secrets for backstage 2025-10-28 21:34:13 +01:00
Daniël Groothuis
f6e4f44984 chore(gitea-runners): Updated values for Dind support 2025-10-28 20:45:16 +01:00
Daniël Groothuis
c8bb379ffe chore(gitea-runners): Updated values for Dind support 2025-10-28 20:06:48 +01:00
Daniël Groothuis
4ddfe4e8b7 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:34:50 +01:00
Daniël Groothuis
a4996f29b2 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 16s
2025-10-28 19:29:03 +01:00
Daniël Groothuis
cbe66f6fd1 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 1m18s
2025-10-28 19:20:05 +01:00
Daniël Groothuis
4ddb948f6b chore(gitea-runners): Updated values for Dind support
Some checks are pending
Validate K8s manifests / validate-manifests (push) Waiting to run
2025-10-28 19:19:21 +01:00
Daniël Groothuis
8d4331a0c6 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:06:24 +01:00
Daniël Groothuis
0f0171ad32 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:05:19 +01:00
Daniël Groothuis
984141b037 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:03:47 +01:00
Daniël Groothuis
9380bc3b04 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:02:09 +01:00
Daniël Groothuis
b4b9d0427b chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:00:34 +01:00
Daniël Groothuis
239ed7b214 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:55:35 +01:00
Daniël Groothuis
6b5efb494b chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:53:42 +01:00
Daniël Groothuis
bbe5488871 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:51:23 +01:00
Daniël Groothuis
13364cd31e chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:48:30 +01:00
Daniël Groothuis
24fef18693 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:47:30 +01:00
Daniël Groothuis
ee65613844 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:41:42 +01:00
Daniël Groothuis
a6fee6c9c9 chore(gitea-runners): Updated values for Dind support
Some checks are pending
Validate K8s manifests / validate-manifests (push) Waiting to run
2025-10-28 18:39:01 +01:00
Daniël Groothuis
8a7008aca6 chore(gitea-runners): Updated values for Dind support
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 2m54s
2025-10-28 18:16:31 +01:00
Daniël Groothuis
8fa081ad5a chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:12:40 +01:00
Daniël Groothuis
724cd8c964 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:09:49 +01:00
Daniël Groothuis
a4e9e566bf chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:06:59 +01:00
Daniël Groothuis
765afc3bfb chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:03:36 +01:00
Daniël Groothuis
258ba64bcc chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 1m36s
2025-10-28 17:58:33 +01:00
Daniël Groothuis
5e64d08f93 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 1m17s
2025-10-28 16:35:06 +01:00
Daniël Groothuis
bdefefd39d chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 16:31:06 +01:00
Daniël Groothuis
69d83f786f chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 37s
2025-10-28 16:25:15 +01:00
Daniël Groothuis
78e0807f5f chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 38s
2025-10-28 15:16:13 +01:00
Daniël Groothuis
3ee97913b5 chore(cleanup): Removed secret definition 2025-10-28 13:04:59 +01:00
Daniël Groothuis
a19daebdff chore(gitea): Updated values for SMTP support
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 55s
2025-10-28 11:57:57 +01:00
Daniël Groothuis
f28411a448 chore(gitea): Updated values for SMTP support
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m6s
2025-10-28 11:52:44 +01:00
Daniël Groothuis
ebb97c96df chore(gitea): Updated values for SMTP support
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 55s
2025-10-28 11:25:52 +01:00
Daniël Groothuis
b56703b2d2 chore(gitea): Updated values for SMTP support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 11:24:27 +01:00
Daniël Groothuis
efb2e7a7b1 chore(gitea): Updated values for SMTP support
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 55s
2025-10-28 11:11:43 +01:00
Daniël Groothuis
3f204a8719 chore(penpot): Updated values for OIDC
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m59s
2025-10-27 19:33:53 +01:00
Daniël Groothuis
ee67994d1c chore(penpot): Updated values for OIDC
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m43s
2025-10-27 19:14:45 +01:00
Daniël Groothuis
fef3391adb chore(penpot): Updated values for OIDC
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-27 19:13:59 +01:00
Daniël Groothuis
c764510be8 chore(penpot): Updated values for OIDC
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 58s
2025-10-27 19:09:43 +01:00
Daniël Groothuis
dff6e22868 chore(penpot): Added OIDC configuration for SSO
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 2m8s
2025-10-27 19:04:41 +01:00
Daniël Groothuis
e6131c2561 chore(maint): Added shell script to monitor disk space
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m53s
2025-10-25 22:37:23 +02:00
Daniël Groothuis
c6c81e8e6f chore(immich): First immich deployment
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m9s
2025-10-25 20:05:46 +02:00
Daniël Groothuis
e9e9207dc6 chore(immich): First immich deployment
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-25 20:04:09 +02:00
Daniël Groothuis
96513f659c chore(immich): First immich deployment
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m6s
2025-10-25 20:02:23 +02:00
Daniël Groothuis
fd175b27cf chore(immich): First immich deployment
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 58s
2025-10-25 18:26:31 +02:00
Daniël Groothuis
f47c9d0804 chore(immich): First immich deployment
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 2m4s
2025-10-25 18:08:18 +02:00
Daniël Groothuis
87fbc47560 chore(maint): Updated readme
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 53s
2025-10-24 11:03:32 +02:00
Daniël Groothuis
82341b1996 chore(maint): Updated readme
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m3s
2025-10-24 10:59:03 +02:00
Daniël Groothuis
9f84351cf8 chore(maint): Updated readme
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 49s
2025-10-24 10:57:55 +02:00
Daniël Groothuis
ee2cac3e2a chore(maint): Updated readme
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 53s
2025-10-24 10:48:23 +02:00
Daniël Groothuis
1e426eeb3b chore(penpot): Changed values to include postgres and valkey
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 58s
2025-10-24 10:38:36 +02:00
Daniël Groothuis
feab63c773 chore(penpot): Added first deployment
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 2m11s
2025-10-24 10:34:42 +02:00
Daniël Groothuis
e97a0bd896 chore(ntfy): Added basic auth to ntfy
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m2s
2025-10-23 23:52:02 +02:00
Daniël Groothuis
61eaea7178 chore(ntfy): Added basic auth to ntfy
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 8s
Validate K8s manifests / scan-secrets (push) Successful in 54s
2025-10-23 23:38:57 +02:00
Daniël Groothuis
ee3bd7104a chore(ntfy): Added basic auth to ntfy
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 4s
Validate K8s manifests / scan-secrets (push) Successful in 56s
2025-10-23 23:20:27 +02:00
Daniël Groothuis
99d6d935c4 chore(ntfy): Added basic auth to ntfy
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 4s
Validate K8s manifests / scan-secrets (push) Has been cancelled
2025-10-23 23:18:35 +02:00
Daniël Groothuis
98c1b69853 chore(ntfy): Added Ntfy for Notifications
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 3s
Validate K8s manifests / scan-secrets (push) Successful in 48s
2025-10-23 23:06:41 +02:00
Daniël Groothuis
a8964422f7 chore(ntfy): Added Ntfy for Notifications
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 3s
Validate K8s manifests / scan-secrets (push) Successful in 3m13s
2025-10-23 23:03:06 +02:00
Daniël Groothuis
096f1e3204 chore(gitea): Edited readme
Some checks are pending
Validate K8s manifests / scan-secrets (push) Waiting to run
Validate K8s manifests / validate-manifests (push) Successful in 10s
2025-10-23 19:26:39 +02:00
Daniël Groothuis
be866e9354 chore(gitea): Added secrets scanning
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 4s
Validate K8s manifests / scan-secrets (push) Successful in 1m17s
2025-10-23 19:20:22 +02:00
Daniël Groothuis
597166b103 chore(gitea): fixed linting errors
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 7s
2025-10-23 19:12:34 +02:00
Daniël Groothuis
1789e422fe chore(gitea): updated yamllint config
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 7s
2025-10-23 19:09:45 +02:00
79 changed files with 3399 additions and 108 deletions


@@ -1,16 +0,0 @@
name: Validate K8s manifests
run-name: ${{ gitea.actor }} is validating K8s manifests
on: [push]
jobs:
  validate-manifests:
    runs-on: ubuntu-latest
    steps:
      - name: 📥Checkout
        uses: actions/checkout@v3
      - name: 🚀Validating the manifests
        uses: frenck/action-yamllint@v1.5.0
        with:
          config: ".yamllint"
          path: "manifests/"
          warnings: false


@@ -0,0 +1,24 @@
name: Validate K8s manifests
run-name: ${{ gitea.actor }} is validating K8s manifests
on: [push]
jobs:
  validate-manifests:
    runs-on: ubuntu-latest
    steps:
      - name: 📥Checkout
        uses: actions/checkout@v6
      - name: 🚀Validating the manifests
        uses: frenck/action-yamllint@v1.5.0
        with:
          config: ".yamllint"
          path: "manifests/"
          warnings: false
      - name: 🤐Secret Scanning
        uses: onboardbase/securelog-scan@main
        with:
          mask: "true" # mask secret values in the output, e.g. sk_******
          verify: "true" # verify potential secrets against their service provider
      - name: Completed 🎉
        run: |-
          curl -u "${{ secrets.NTFY_USER }}:${{ secrets.NTFY_PASSWORD }}" -d "DGSE Cluster deployments have been successfully updated" -H "Title: DGSE Cloud" -H "X-Tags: white_check_mark" https://notifications.dgse.cloud/runners
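The validate-manifests job just wraps yamllint, so the same check can be reproduced locally before pushing; a minimal sketch, assuming yamllint is installed and the command is run from the repository root:

# Run the same lint as CI, with the repo's config and target path
yamllint -c .yamllint manifests/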


@@ -28,7 +28,7 @@ rules:
  indentation: enable
  key-duplicates: enable
  key-ordering: disable
  line-length: warning
  line-length: disable
  new-line-at-end-of-file: enable
  new-lines: enable
  octal-values: disable


@@ -1,3 +1,4 @@
![Production status](https://git.dgse.cloud/DGSE/kubernetes/actions/workflows/production.yaml/badge.svg?style=flat-square)
# Kubernetes GitOps with ArgoCD for DGSE Cloud
This repository contains the Kubernetes manifests and configurations for deploying and managing applications on the DGSE Cloud using GitOps with ArgoCD.
@@ -26,6 +27,14 @@ The repository is organized as follows:
- **Mailu**: The mail server for sending and receiving emails.
- **PocketID**: The identity provider for managing user authentication and authorization.
### Miscellaneous Applications
- **Penpot**: The open-source design and prototyping tool.
- **Ntfy**: The notification service for sending alerts and notifications.
### Public Websites
- **Groothuis.io**: Personal development notes and wiki
- **DanielGroothuis.com**: Personal website and blog
### File Locations
`/clusters/artemis/apps` hosts all ArgoCD applications and projects for the Artemis cluster.
@@ -44,3 +53,18 @@ chmod +x ./init-app.sh
This will prompt you to enter the name of the application and the cluster you want to add it to. The script will then create the necessary files and directories for the application.
It'll add a `.placeholder` file where the manifests will live.
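The script itself is not part of this diff; as a rough illustration, a scaffolding script matching the behaviour described above could look like this (hypothetical sketch, not the repository's actual `init-app.sh`):

```bash
#!/usr/bin/env bash
# Hypothetical sketch of init-app.sh: scaffolds the directories for a new app.
set -euo pipefail

read -r -p "Application name: " app
read -r -p "Cluster name: " cluster

# ArgoCD Application/AppProject definitions live under clusters/<cluster>/apps/<app>
mkdir -p "clusters/${cluster}/apps/${app}"
# The Kubernetes manifests themselves live under manifests/<cluster>/<app>
mkdir -p "manifests/${cluster}/${app}"
# Placeholder file so the (still empty) manifests directory can be committed
touch "manifests/${cluster}/${app}/.placeholder"

echo "Scaffolded ${app} for cluster ${cluster}"
```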
## Important URLs
- [ArgoCD](https://cd.dgse.cloud) (SSO Authentication required)
- [Gitea](https://git.dgse.cloud) (SSO Authentication required)
- [Vault](https://sealed.dgse.cloud) (Token Based Authentication required)
- [Vaultwarden](https://vault.dgse.cloud) (Basic Authentication required)
- [Mailu](https://mail.dgse.cloud) (Basic Authentication required)
- [PocketID](https://auth.dgse.cloud) (SSO Authentication required)
- [Uptime Kuma](https://uptime.dgse.cloud) (Basic Authentication required)
- [Ntfy](https://notifications.dgse.cloud) (Basic Authentication required)
- [Penpot](https://penpot.dgse.cloud) (SSO Authentication required)
## Important Notes
- When adding a new application, keep in mind that after a push to main, ArgoCD will automatically sync the resources to the cluster.
- When using secrets, use Vault to store them and reference them in the manifests, as sketched below.
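To make that last note concrete, here is a minimal sketch of a manifest referencing a Vault-stored secret through the External Secrets operator deployed in this cluster (v1 API of the 1.x chart pinned above). The store name `vault-backend`, the namespace, and the Vault path are illustrative assumptions, not values from this repository:

```yaml
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: example-app-credentials    # hypothetical name
  namespace: example-app           # hypothetical namespace
spec:
  refreshInterval: 1h
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault-backend            # assumed name of the Vault-backed store
  target:
    name: example-app-credentials  # the Kubernetes Secret the operator creates
  data:
    - secretKey: password
      remoteRef:
        key: example-app           # assumed Vault KV path
        property: password
```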

catalog-info.yaml (new file)

@@ -0,0 +1,26 @@
---
apiVersion: backstage.io/v1alpha1
kind: Domain
metadata:
  name: dgse-cloud
  description: "Infrastructure for DGSE Cloud services."
spec:
  owner: dgse-cloud
---
apiVersion: backstage.io/v1alpha1
kind: Location
metadata:
  name: artemis-cluster
  description: A collection of all entities running on the Artemis cluster
spec:
  targets:
    - ./clusters/artemis/catalog.yaml
    - ./clusters/artemis/apps/argocd/catalog.yaml
    - ./clusters/artemis/apps/cnpg/catalog.yaml
    - ./clusters/artemis/apps/digital-garden/catalog.yaml
    - ./clusters/artemis/apps/external-secrets/catalog.yaml
    - ./clusters/artemis/apps/gitea/catalog.yaml
    - ./clusters/artemis/apps/gitea-runners/catalog.yaml
    - ./clusters/artemis/apps/immich/catalog.yaml
    - ./clusters/artemis/apps/mailu/catalog.yaml
    - ./clusters/artemis/apps/nextcloud/catalog.yaml


@@ -0,0 +1,18 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: argocd
  description: "ArgoCD is a declarative, GitOps continuous delivery tool for Kubernetes."
  links:
    - url: https://cd.dgse.cloud
      title: Dashboard
      icon: dashboard
  annotations:
    argocd/app-name: argocd
    argocd/app-namespace: argocd
spec:
  type: service
  lifecycle: production
  owner: owners
  system: artemis-cluster


@@ -0,0 +1,14 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: cnpg
  description: "CloudNativePG is a Kubernetes operator that manages PostgreSQL databases in a cloud-native way."
  annotations:
    argocd/app-name: cnpg
    argocd/app-namespace: cnpg-system
spec:
  type: service
  lifecycle: production
  owner: owners
  system: artemis-cluster


@@ -0,0 +1,17 @@
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: digital-garden
spec:
  description: My digital garden
  sourceRepos:
    - '*'
  sourceNamespaces:
    - '*'
  destinations:
    - namespace: 'digital-garden'
      server: '*'
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'


@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: digital-garden
  namespace: digital-garden
  labels:
    platform.dgse.cloud/cluster: artemis
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: digital-garden
  source:
    repoURL: 'https://git.dgse.cloud/DGSE/kubernetes.git'
    path: manifests/artemis/digital-garden
    targetRevision: main
  destination:
    namespace: digital-garden
    name: in-cluster
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
    automated:
      prune: true
      selfHeal: true


@@ -0,0 +1,18 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: digital-garden
  description: "A collection of notes, essays, and other writing that is published on the web."
  links:
    - url: https://groothuis.io
      title: Public Website
      icon: web
  annotations:
    argocd/app-name: digital-garden
    argocd/app-namespace: digital-garden
spec:
  type: website
  lifecycle: production
  owner: owners
  system: artemis-cluster


@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- application.yaml


@@ -18,6 +18,7 @@ spec:
    name: in-cluster
  syncPolicy:
    syncOptions:
      - ServerSideApply=true
      - CreateNamespace=true
    automated:
      prune: true


@@ -0,0 +1,14 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: external-secrets
  description: "Vault Secrets Operator to sync secrets from Vault to Kubernetes"
  annotations:
    argocd/app-name: external-secrets
    argocd/app-namespace: external-secrets
spec:
  type: service
  lifecycle: production
  owner: owners
  system: artemis-cluster


@@ -0,0 +1,16 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: gitea-runners
  description: "Gitea Action Runners"
  annotations:
    argocd/app-name: gitea-runners
    argocd/app-namespace: gitea-runners
spec:
  type: service
  lifecycle: production
  owner: owners
  system: artemis-cluster
  dependencyOf:
    - component:gitea


@@ -0,0 +1,20 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: gitea
  description: "Self-hosted Git Server"
  links:
    - url: https://git.dgse.cloud
      title: Git Server
      icon: web
  annotations:
    argocd/app-name: gitea
    argocd/app-namespace: gitea
spec:
  type: service
  lifecycle: production
  owner: owners
  system: artemis-cluster
  dependsOn:
    - Component:gitea-runners


@@ -0,0 +1,17 @@
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: immich
spec:
  description: Cloud Media Server
  sourceRepos:
    - '*'
  sourceNamespaces:
    - '*'
  destinations:
    - namespace: 'immich'
      server: '*'
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'


@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: immich
  namespace: immich
  labels:
    platform.dgse.cloud/cluster: artemis
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: immich
  source:
    repoURL: 'https://git.dgse.cloud/DGSE/kubernetes.git'
    path: manifests/artemis/immich
    targetRevision: main
  destination:
    namespace: immich
    name: in-cluster
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
    automated:
      prune: true
      selfHeal: true


@@ -0,0 +1,18 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: immich
  description: "Self-hosted photo and video backup solution directly from your mobile phone."
  links:
    - url: https://photos.dgse.cloud
      title: Photos
      icon: web
  annotations:
    argocd/app-name: immich
    argocd/app-namespace: immich
spec:
  type: service
  lifecycle: production
  owner: owners
  system: artemis-cluster


@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- application.yaml


@@ -0,0 +1,17 @@
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: kaneo
spec:
  description: Project Management
  sourceRepos:
    - '*'
  sourceNamespaces:
    - '*'
  destinations:
    - namespace: 'kaneo'
      server: '*'
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'


@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kaneo
  namespace: kaneo
  labels:
    platform.dgse.cloud/cluster: artemis
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: kaneo
  source:
    repoURL: 'https://git.dgse.cloud/DGSE/kubernetes.git'
    path: manifests/artemis/kaneo
    targetRevision: main
  destination:
    namespace: kaneo
    name: in-cluster
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
    automated:
      prune: true
      selfHeal: true


@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- application.yaml


@@ -13,3 +13,7 @@ resources:
- pocket-id
- vaultwarden
- mailu
- penpot
- immich
- digital-garden
- kaneo


@@ -0,0 +1,18 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: mailu
  description: "Self-hosted mail server"
  links:
    - url: https://mail.dgse.cloud
      title: Mail Server
      icon: web
  annotations:
    argocd/app-name: mailu
    argocd/app-namespace: mailu
spec:
  type: service
  lifecycle: production
  owner: owners
  system: artemis-cluster


@@ -0,0 +1,17 @@
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: nextcloud
spec:
  description: Self Hosted Cloud
  sourceRepos:
    - '*'
  sourceNamespaces:
    - '*'
  destinations:
    - namespace: 'nextcloud'
      server: '*'
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'


@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: nextcloud
  namespace: nextcloud
  labels:
    platform.dgse.cloud/cluster: artemis
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: nextcloud
  source:
    repoURL: 'https://git.dgse.cloud/DGSE/kubernetes.git'
    path: manifests/artemis/nextcloud
    targetRevision: main
  destination:
    namespace: nextcloud
    name: in-cluster
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
    automated:
      prune: true
      selfHeal: true


@@ -0,0 +1,19 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: nextcloud
  description: "Self-hosted file sync and collaboration platform."
  links:
    - url: https://nextcloud.dgse.cloud
      title: Nextcloud
      icon: web
  annotations:
    argocd/app-name: nextcloud
    argocd/app-namespace: nextcloud
    backstage.io/techdocs-ref: dir:.
spec:
  type: service
  lifecycle: production
  owner: owners
  system: artemis-cluster


@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- application.yaml


@@ -0,0 +1,17 @@
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: ntfy
spec:
  description: Notification server
  sourceRepos:
    - '*'
  sourceNamespaces:
    - '*'
  destinations:
    - namespace: 'ntfy'
      server: '*'
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'


@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: ntfy
  namespace: ntfy
  labels:
    platform.dgse.cloud/cluster: artemis
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: ntfy
  source:
    repoURL: 'https://git.dgse.cloud/DGSE/kubernetes.git'
    path: manifests/artemis/ntfy
    targetRevision: main
  destination:
    namespace: ntfy
    name: in-cluster
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
    automated:
      prune: true
      selfHeal: true


@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- application.yaml


@@ -0,0 +1,17 @@
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: penpot
spec:
  description: Design tool
  sourceRepos:
    - '*'
  sourceNamespaces:
    - '*'
  destinations:
    - namespace: 'penpot'
      server: '*'
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'


@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: penpot
  namespace: penpot
  labels:
    platform.dgse.cloud/cluster: artemis
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: penpot
  source:
    repoURL: 'https://git.dgse.cloud/DGSE/kubernetes.git'
    path: manifests/artemis/penpot
    targetRevision: main
  destination:
    namespace: penpot
    name: in-cluster
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
    automated:
      prune: true
      selfHeal: true


@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- application.yaml


@@ -0,0 +1,9 @@
---
apiVersion: backstage.io/v1alpha1
kind: System
metadata:
  name: artemis-cluster
  description: "The Artemis cluster is a Kubernetes cluster hosting all infra for DGSE Cloud."
spec:
  owner: owners
  domain: dgse-cloud

diskspace.sh (new executable file)

@@ -0,0 +1,144 @@
#!/usr/bin/env bash
# filename: diskspace.sh
set -euo pipefail

SERVERS=(
  "controller.dgse.cloud"
  "worker-0.dgse.cloud"
  "worker-1.dgse.cloud"
)

SSH_OPTS=(
  -o BatchMode=no
  -o ConnectTimeout=8
  -o StrictHostKeyChecking=accept-new
  -o ServerAliveInterval=10
  -o ServerAliveCountMax=2
)

# Filesystem types and mount points that should not count towards capacity
EXCLUDE_TYPES_REGEX='^(tmpfs|devtmpfs|overlay|squashfs|proc|sysfs|cgroup2?|pstore|rpc_pipefs|nsfs|bpf)$'
EXCLUDE_MOUNTS_REGEX='^/(dev|proc|sys|run)(/|$)'

# Convert a df size value (e.g. "1.5G", "512M") to whole MiB
to_mib() {
  local val num u
  val="$(echo "$1" | awk '{print toupper($0)}')"
  if [[ "$val" =~ ^([0-9]*\.?[0-9]+)([KMGTPE]?)(I?B)?$ ]]; then
    num="${BASH_REMATCH[1]}"
    u="${BASH_REMATCH[2]}"
    case "$u" in
      "") printf "%.0f\n" "$num" ;;
      K) awk -v n="$num" 'BEGIN{printf "%.0f\n", n/1024}' ;;
      M) printf "%.0f\n" "$num" ;;
      G) awk -v n="$num" 'BEGIN{printf "%.0f\n", n*1024}' ;;
      T) awk -v n="$num" 'BEGIN{printf "%.0f\n", n*1024*1024}' ;;
      P) awk -v n="$num" 'BEGIN{printf "%.0f\n", n*1024*1024*1024}' ;;
      E) awk -v n="$num" 'BEGIN{printf "%.0f\n", n*1024*1024*1024*1024}' ;;
      *) echo 0 ;;
    esac
  else
    echo 0
  fi
}

# Render a MiB count as a human-readable MiB/GiB/TiB string
mib_pretty() {
  local mib="$1"
  if (( mib < 1024 )); then
    printf "%d MiB" "$mib"
  elif (( mib < 1024*1024 )); then
    awk -v m="$mib" 'BEGIN{printf "%.2f GiB", m/1024}'
  else
    awk -v m="$mib" 'BEGIN{printf "%.2f TiB", m/1048576}'
  fi
}

ssh_run() {
  local user="$1" pass="$2" host="$3" cmd="$4"
  if command -v sshpass >/dev/null 2>&1; then
    SSHPASS="$pass" sshpass -e ssh "${SSH_OPTS[@]}" "${user}@${host}" "$cmd"
  else
    ssh "${SSH_OPTS[@]}" "${user}@${host}" "$cmd"
  fi
}

check_host() {
  local user="$1" pass="$2" host="$3"
  local cmd='df -hPT || df -hP'
  local output
  if ! output="$(ssh_run "$user" "$pass" "$host" "$cmd" 2>&1)"; then
    echo "ERROR: Failed to connect or run df on $host" >&2
    echo "$output" >&2
    # Emit zero so the caller can still aggregate safely
    echo "__TOTAL__ $host 0 0"
    return 1
  fi
  local total_mib=0
  local avail_mib=0
  local lines=0
  while IFS= read -r line; do
    [[ "$line" =~ ^Filesystem[[:space:]] ]] && continue
    read -r fs type size used avail usep mount <<<"$line" || true
    if [[ -z "$mount" ]]; then
      # df -hP (without -T) has one column less; reparse and mark type unknown
      read -r fs size used avail usep mount <<<"$line" || true
      type="unknown"
    fi
    [[ -z "$fs" || -z "$size" || -z "$avail" || -z "$mount" ]] && continue
    [[ "$type" =~ $EXCLUDE_TYPES_REGEX ]] && continue
    [[ "$mount" =~ $EXCLUDE_MOUNTS_REGEX ]] && continue
    local size_mib avail_mib_i
    size_mib="$(to_mib "$size")"
    avail_mib_i="$(to_mib "$avail")"
    (( size_mib <= 0 )) && continue
    total_mib=$(( total_mib + size_mib ))
    avail_mib=$(( avail_mib + avail_mib_i ))
    lines=$(( lines + 1 ))
  done <<< "$output"
  echo "Host: $host"
  if (( lines == 0 )); then
    echo "  No eligible filesystems found (might be a minimal container or all mounts excluded)."
    echo
  else
    echo "  Total capacity:   $(mib_pretty "$total_mib")"
    echo "  Remaining (free): $(mib_pretty "$avail_mib")"
    echo
  fi
  # Emit a parsable summary line; aggregation happens in main()
  echo "__TOTAL__ $host $total_mib $avail_mib"
}

main() {
  local username
  read -r -p "SSH username: " username
  echo
  local overall_total=0
  local overall_avail=0
  local ok_hosts=0
  # Call check_host for each host and parse the emitted summary lines.
  # Process substitution (not a pipe) keeps this while loop in the current
  # shell, so the aggregate counters survive the loop.
  while IFS= read -r line; do
    if [[ "$line" == "__TOTAL__ "* ]]; then
      read -r _ host total avail <<<"$line"
      overall_total=$(( overall_total + total ))
      overall_avail=$(( overall_avail + avail ))
      (( total > 0 || avail > 0 )) && ok_hosts=$(( ok_hosts + 1 ))
    else
      # Pass through the human-readable lines
      echo "$line"
    fi
  done < <(
    for host in "${SERVERS[@]}"; do
      check_host "$username" "none" "$host" || true
    done
  )
  echo "Summary:"
  echo "  Hosts processed:           $ok_hosts/${#SERVERS[@]}"
  echo "  Combined total capacity:   $(mib_pretty "$overall_total")"
  echo "  Combined remaining (free): $(mib_pretty "$overall_avail")"
}

main "$@"

docs/index.md (new file)

@@ -0,0 +1 @@
# NextCloud


@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: backstage-argocd-server-access
subjects:
  - kind: ServiceAccount
    name: backstage-argocd
    namespace: argocd
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argocd-server


@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: backstage-argocd
  namespace: argocd


@@ -15,9 +15,7 @@ spec:
        - name: argocd-server
          port: 80
    - kind: Rule
      match: >
        Host(`cd.dgse.cloud`) &&
        Headers(`Content-Type`, `application/grpc`)
      match: Host(`cd.dgse.cloud`) && Headers(`Content-Type`, `application/grpc`)
      priority: 11
      services:
        - name: argocd-server


@@ -5,9 +5,11 @@ metadata:
  name: argocd
resources:
- github.com/argoproj/argo-cd/manifests/cluster-install?ref=v2.14.15
- github.com/argoproj/argo-cd/manifests/cluster-install?ref=v3.2.6
- ingressRoute.yaml
- certificate.yaml
- backstage-sa.yaml
- backstage-rbac.yaml
patches:
- target:
@@ -45,7 +47,9 @@ patches:
    metadata:
      name: argocd-cm
    data:
      admin.enabled: "false"
      accounts.admin: "apiKey, login"
      accounts.backstage.enabled: "true"
      admin.enabled: "true"
      kustomize.buildOptions: --enable-helm
      url: https://cd.dgse.cloud
      oidc.config: |
@@ -94,8 +98,7 @@ patches:
      name: argocd-server
    rules:
      - apiGroups: ["argoproj.io"]
        resources: ["applications", "applications/status",
          "applications/finalizers"]
        resources: ["applications", "applications/status", "applications/finalizers"]
        verbs: ["get", "list", "watch", "update", "patch", "delete"]
      - apiGroups: ["argoproj.io"]
        resources: ["appprojects"]
@@ -119,3 +122,23 @@ patches:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: argocd-server
# Map Backstage SA to Argo CD role:admin (full Argo CD permissions)
- target:
    kind: ConfigMap
    name: argocd-rbac-cm
  patch: |-
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: argocd-rbac-cm
      namespace: argocd
    data:
      policy.csv: |
        g, argocd_admins, role:admin
        p, argocd_users, applications, list, *, allow
        p, argocd_users, applications, sync, *, allow
        p, argocd_users, applications, refresh, *, allow
        p, argocd_users, applications, get, *, allow
        g, system:serviceaccount:argocd:backstage-argocd, role:admin
        p, system:serviceaccount:argocd:backstage-argocd, applications, *, */*, allow
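For Backstage to actually call the Argo CD API with the role granted above, it still needs a bearer token. Two plausible ways to mint one, depending on how authentication is wired up; both commands are standard, but neither setup step is shown in this diff:

# Option 1: a token for the Kubernetes ServiceAccount (kubectl >= 1.24), matching
# the system:serviceaccount:argocd:backstage-argocd subject in policy.csv above.
kubectl create token backstage-argocd -n argocd --duration=8760h

# Option 2: an Argo CD API token for the local "backstage" account; this assumes the
# account is also granted the apiKey capability (accounts.backstage: "apiKey"),
# which this diff does not show.
argocd account generate-token --account backstage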


@@ -7,6 +7,6 @@ metadata:
helmCharts:
- name: cloudnative-pg
  repo: https://cloudnative-pg.github.io/charts
  version: 0.26.0
  version: 0.26.1
  releaseName: cnpg
  namespace: cnpg-system


@@ -0,0 +1,22 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: digital-garden
spec:
  replicas: 1
  selector:
    matchLabels:
      app: digital-garden
  template:
    metadata:
      labels:
        app: digital-garden
    spec:
      containers:
        - name: digital-garden
          image: 'git.dgse.cloud/dgroothuis/garden:latest'
          ports:
            - containerPort: 8080
      imagePullSecrets:
        - name: regcred


@@ -0,0 +1,24 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: digital-garden-ingress
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  rules:
    - host: www.groothuis.io
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: digital-garden-svc
                port:
                  number: 8080
  tls:
    - hosts:
        - groothuis.io
        - www.groothuis.io
      secretName: letsencrypt


@@ -0,0 +1,9 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- service.yaml
- ingress.yaml
- www-redirect.yaml
- deployment.yaml


@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: Service
metadata:
  name: digital-garden-svc
spec:
  selector:
    app: digital-garden
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080


@@ -0,0 +1,43 @@
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: digital-garden-www-redirect
spec:
  entryPoints:
    - websecure
  routes:
    - kind: Rule
      match: Host(`groothuis.io`)
      middlewares:
        - name: redirect-to-www
      services:
        - kind: TraefikService
          name: noop@internal
    - kind: Rule
      match: Host(`danielgroothuis.com`) || Host(`www.danielgroothuis.com`)
      middlewares:
        - name: redirect-to-groothuis-io
      services:
        - kind: TraefikService
          name: noop@internal
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: redirect-to-www
spec:
  redirectRegex:
    permanent: true
    regex: "^https?://(?:www\\.)?(.+)"
    replacement: "https://www.${1}"
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: redirect-to-groothuis-io
spec:
  redirectRegex:
    permanent: false
    regex: "^https?://(?:www\\.)?(.+)"
    replacement: "https://www.groothuis.io"


@@ -10,6 +10,6 @@ resources:
helmCharts:
- name: external-secrets
  repo: https://charts.external-secrets.io/
  version: 0.18.1
  version: 1.2.0
  releaseName: external-secrets
  namespace: external-secrets


@@ -4,10 +4,7 @@ kind: Kustomization
metadata:
  name: gitea-runners
helmCharts:
- name: actions
  repo: https://dl.gitea.com/charts/
  version: 0.0.1
  releaseName: gitea-actions
  namespace: gitea-runners
  valuesFile: values.yaml
resources:
- runner-artemis-1.yaml
- runner-artemis-2.yaml
- runner-artemis-3.yaml


@@ -0,0 +1,53 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitea-runner-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: local-path
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitea-act-runner-dind
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gitea-act-runner-dind
  template:
    metadata:
      labels:
        app: gitea-act-runner-dind
    spec:
      containers:
        - name: runner
          image: vegardit/gitea-act-runner:dind-latest
          securityContext:
            privileged: true
          env:
            - name: GITEA_RUNNER_NAME
              value: "artemis-1"
            - name: GITEA_INSTANCE_URL
              value: "https://git.dgse.cloud"
            - name: GITEA_RUNNER_REGISTRATION_TOKEN
              valueFrom:
                secretKeyRef:
                  name: gitea-runner-token
                  key: token
          volumeMounts:
            - name: data
              mountPath: /data
            - name: docker-storage
              mountPath: /var/lib/docker
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: gitea-runner-data-pvc
        - name: docker-storage
          emptyDir: {}


@@ -0,0 +1,53 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitea-runner-data-pvc-2
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: local-path
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitea-act-runner-dind-2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gitea-act-runner-dind-2
  template:
    metadata:
      labels:
        app: gitea-act-runner-dind-2
    spec:
      containers:
        - name: runner
          image: vegardit/gitea-act-runner:dind-latest
          securityContext:
            privileged: true
          env:
            - name: GITEA_RUNNER_NAME
              value: "artemis-2"
            - name: GITEA_INSTANCE_URL
              value: "https://git.dgse.cloud"
            - name: GITEA_RUNNER_REGISTRATION_TOKEN
              valueFrom:
                secretKeyRef:
                  name: gitea-runner-token
                  key: token
          volumeMounts:
            - name: data
              mountPath: /data
            - name: docker-storage
              mountPath: /var/lib/docker
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: gitea-runner-data-pvc-2
        - name: docker-storage
          emptyDir: {}


@@ -0,0 +1,53 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitea-runner-data-pvc-3
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: local-path
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitea-act-runner-dind-3
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gitea-act-runner-dind-3
  template:
    metadata:
      labels:
        app: gitea-act-runner-dind-3
    spec:
      containers:
        - name: runner
          image: vegardit/gitea-act-runner:dind-latest
          securityContext:
            privileged: true
          env:
            - name: GITEA_RUNNER_NAME
              value: "artemis-3"
            - name: GITEA_INSTANCE_URL
              value: "https://git.dgse.cloud"
            - name: GITEA_RUNNER_REGISTRATION_TOKEN
              valueFrom:
                secretKeyRef:
                  name: gitea-runner-token
                  key: token
          volumeMounts:
            - name: data
              mountPath: /data
            - name: docker-storage
              mountPath: /var/lib/docker
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: gitea-runner-data-pvc-3
        - name: docker-storage
          emptyDir: {}


@@ -1,95 +1,101 @@
# Configure Gitea Actions
## @section Gitea Actions
#
## @param enabled Create an act runner StatefulSet.
## @param init.image.repository The image used for the init containers
## @param init.image.tag The image tag used for the init containers
## @param statefulset.replicas the amount of (replica) runner pods deployed
## @param statefulset.timezone is the timezone that will be set in the act_runner image
## @param statefulset.annotations Act runner annotations
## @param statefulset.labels Act runner labels
## @param statefulset.resources Act runner resources
## @param statefulset.nodeSelector NodeSelector for the statefulset
## @param statefulset.tolerations Tolerations for the statefulset
## @param statefulset.affinity Affinity for the statefulset
## @param statefulset.extraVolumes Extra volumes for the statefulset
## @param statefulset.actRunner.repository The Gitea act runner image
## @param statefulset.actRunner.tag The Gitea act runner tag
## @param statefulset.actRunner.pullPolicy The Gitea act runner pullPolicy
## @param statefulset.actRunner.extraVolumeMounts Allows mounting extra volumes in the act runner container
## @param statefulset.actRunner.config [default: Too complex. See values.yaml] Act runner custom configuration. See [Act Runner documentation](https://docs.gitea.com/usage/actions/act-runner#configuration) for details.
## @param statefulset.dind.repository The Docker-in-Docker image
## @param statefulset.dind.tag The Docker-in-Docker image tag
## @param statefulset.dind.pullPolicy The Docker-in-Docker pullPolicy
## @param statefulset.dind.extraVolumeMounts Allows mounting extra volumes in the Docker-in-Docker container
## @param statefulset.dind.extraEnvs Allows adding custom environment variables, such as `DOCKER_IPTABLES_LEGACY`
## @param statefulset.persistence.size Size for persistence to store act runner data
## @param existingSecret Secret that contains the token
## @param existingSecretKey Secret key
## @param giteaRootURL URL the act_runner registers and connect with
# values.yaml — Gitea Actions runner with DinD over shared Unix socket (no TCP, no TLS)
enabled: true
statefulset:
replicas: 3
timezone: Etc/UTC
annotations: {}
labels: {}
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
extraVolumes: []
# Share only the docker.sock file between dind and act-runner
extraVolumes:
- name: docker-socket
emptyDir: {}
actRunner:
repository: gitea/act_runner
tag: 0.2.13
pullPolicy: IfNotPresent
extraVolumeMounts: []
# See full example here: https://gitea.com/gitea/act_runner/src/branch/main/internal/pkg/config/config.example.yaml
# Runner talks to DinD via unix socket; ensure no TLS is used
extraEnvs:
- name: DOCKER_HOST
value: unix:///var/run/docker.sock
- name: DOCKER_BUILDKIT
value: "1"
- name: DOCKER_TLS_CERTDIR
value: ""
- name: DOCKER_TLS_VERIFY
value: ""
+# Mount only the socket path, not the entire /var/run (prevents "is a directory" errors)
+extraVolumeMounts:
+- name: docker-socket
+mountPath: /var/run/docker.sock
+subPath: docker.sock
# Act runner config
config: |
log:
-level: debug
+level: info
cache:
enabled: false
runner:
labels:
- "artemis"
- "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://docker.gitea.com/runner-images:ubuntu-20.04"
container:
-require_docker: true
+privileged: true # required for DinD
+require_docker: true # fail if docker is not reachable
docker_timeout: 300s
dind:
repository: docker
tag: 28.3.3-dind
pullPolicy: IfNotPresent
-extraVolumeMounts: []
# If the container keeps crashing in your environment, you might have to add the `DOCKER_IPTABLES_LEGACY` environment variable.
# See https://github.com/docker-library/docker/issues/463#issuecomment-1881909456
# Disable TLS and DO NOT advertise TCP; use only unix socket
extraEnvs:
-[]
-# - name: "DOCKER_IPTABLES_LEGACY"
-# value: "1"
+- name: DOCKER_TLS_CERTDIR
+value: ""
+# If your nodes require legacy iptables:
+# - name: DOCKER_IPTABLES_LEGACY
+# value: "1"
+# Share only the docker.sock file
+extraVolumeMounts:
+- name: docker-socket
+mountPath: /var/run/docker.sock
+subPath: docker.sock
# Explicitly bind dockerd to the unix socket; do not bind tcp://0.0.0.0:2375
command: ["dockerd"]
args:
- "--host=unix:///var/run/docker.sock"
- "--storage-driver=overlay2"
# Optional: persistence for DinD image layers (default will mount /var/lib/docker inside dind)
persistence:
size: 1Gi
init:
image:
repository: busybox
# Overrides the image tag whose default is the chart appVersion.
tag: "1.37.0"
## Specify an existing token secret
##
# Runner registration token
existingSecret: "gitea-runner-token"
existingSecretKey: "token"
## Specify the root URL of the Gitea instance
# Root URL of your Gitea
giteaRootURL: "https://git.dgse.cloud"
## @section Global
#
## @param global.imageRegistry global image registry override
## @param global.storageClass global storage class override
global:
imageRegistry: ""
storageClass: ""

View File

@@ -7,7 +7,7 @@ metadata:
helmCharts:
- name: gitea
repo: https://dl.gitea.com/charts/
-version: 12.4.0
+version: 12.5.0
releaseName: gitea
namespace: gitea
valuesFile: values.yaml

View File

@@ -444,9 +444,9 @@ gitea:
# PASSWORD_COMPLEXITY: spec
## @param gitea.additionalConfigSources Additional configuration from secret or configmap
-additionalConfigSources: []
# - secret:
# secretName: gitea-app-ini-oauth
+additionalConfigSources:
+- secret:
+secretName: gitea-conf-smtp-secret
# - configMap:
# name: gitea-app-ini-plaintext
@@ -643,6 +643,13 @@ postgresql-ha:
repmgrPassword: changeme2
postgresPassword: changeme1
password: changeme4
resources:
limits:
cpu: 2000m
memory: 2Gi
requests:
cpu: 1500m
memory: 2Gi
## @param postgresql-ha.pgpool.adminPassword pgpool adminPassword
## @param postgresql-ha.pgpool.image.repository Image repository, eg. `bitnamilegacy/pgpool`.
@@ -652,6 +659,13 @@ postgresql-ha:
image:
repository: bitnamilegacy/pgpool
srCheckPassword: changeme4
resources:
limits:
cpu: 1000m
memory: 2Gi
requests:
cpu: 250m
memory: 1Gi
## @param postgresql-ha.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
service:

View File

@@ -0,0 +1,30 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: immich-postgres
namespace: immich
spec:
imageName: ghcr.io/tensorchord/cloudnative-pgvecto.rs:16.5-v0.3.0@sha256:be3f025d79aa1b747817f478e07e71be43236e14d00d8a9eb3914146245035ba
instances: 1
postgresql:
shared_preload_libraries:
- "vectors.so"
managed:
roles:
- name: immich
superuser: true
login: true
bootstrap:
initdb:
database: immich
owner: immich
secret:
name: immich-postgres-user
postInitSQL:
- CREATE EXTENSION IF NOT EXISTS "vectors";
- CREATE EXTENSION IF NOT EXISTS "cube" CASCADE;
- CREATE EXTENSION IF NOT EXISTS "earthdistance" CASCADE;
storage:
size: 4Gi
storageClass: local-path

View File

@@ -0,0 +1,18 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
name: immich
resources:
- secret.yaml
- db-cluster.yaml
- volumeClaims.yaml
helmCharts:
- name: immich
repo: https://immich-app.github.io/immich-charts
version: 0.10.3
releaseName: immich
namespace: immich
valuesFile: values.yaml

View File

@@ -0,0 +1,33 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: immich
spec:
refreshInterval: 1h
secretStoreRef:
name: platform
kind: ClusterSecretStore
target:
name: immich-postgres-user
data:
- secretKey: DB_USERNAME
remoteRef:
key: immich/db
property: DB_USERNAME
- secretKey: DB_DATABASE_NAME
remoteRef:
key: immich/db
property: DB_DATABASE_NAME
- secretKey: DB_PASSWORD
remoteRef:
key: immich/db
property: DB_PASSWORD
- secretKey: username
remoteRef:
key: immich/db
property: username
- secretKey: password
remoteRef:
key: immich/db
property: password

View File

@@ -0,0 +1,113 @@
controllers:
main:
containers:
main:
image:
tag: v2.0.0
env:
REDIS_HOSTNAME: '{{ printf "%s-valkey" .Release.Name }}'
IMMICH_MACHINE_LEARNING_URL: '{{ printf "http://%s-machine-learning:3003" .Release.Name }}'
DB_HOSTNAME: "{{ .Release.Name }}-postgres-rw.immich.svc.cluster.local"
DB_USERNAME:
secretKeyRef:
name: immich-postgres-user
key: username
DB_DATABASE_NAME:
secretKeyRef:
name: immich-postgres-user
key: DB_DATABASE_NAME
DB_PASSWORD:
secretKeyRef:
name: immich-postgres-user
key: password
immich:
metrics:
# Enabling this will create the service monitors needed to monitor immich with the prometheus operator
enabled: false
persistence:
# Main data store for all photos shared between different components.
library:
# Automatically creating the library volume is not supported by this chart
# You have to specify an existing PVC to use
existingClaim: immich-library-pvc
# configuration is immich-config.json converted to yaml
# ref: https://immich.app/docs/install/config-file/
#
configuration: {}
# trash:
# enabled: false
# days: 30
# storageTemplate:
# enabled: true
# template: "{{y}}/{{y}}-{{MM}}-{{dd}}/{{filename}}"
# Dependencies
valkey:
enabled: true
controllers:
main:
containers:
main:
image:
repository: docker.io/valkey/valkey
tag: 9.0-alpine@sha256:73f3aa08d95879525aa0dc84ebf6b82b59b898a24e8067a37d6250c31ee2c4d4
pullPolicy: IfNotPresent
persistence:
data:
enabled: true
size: 1Gi
# Optional: Set this to persistentVolumeClaim to keep job queues persistent
type: emptyDir
accessMode: ReadWriteOnce
# storageClass: your-class
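# For example, to keep job queues across pod restarts (a sketch; the
# storageClass value is an assumption based on the classes used elsewhere here):
# type: persistentVolumeClaim
# storageClass: local-path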
# Immich components
server:
enabled: true
controllers:
main:
containers:
main:
image:
repository: ghcr.io/immich-app/immich-server
pullPolicy: IfNotPresent
ingress:
main:
enabled: true
className: traefik
annotations:
# proxy-body-size is set to 0 to remove the body limit on file uploads
nginx.ingress.kubernetes.io/proxy-body-size: "0"
cert-manager.io/cluster-issuer: letsencrypt
hosts:
- host: photos.dgse.cloud
paths:
- path: "/"
service:
identifier: main
tls:
- hosts:
- photos.dgse.cloud
secretName: immich-tls
machine-learning:
enabled: false
controllers:
main:
containers:
main:
image:
repository: ghcr.io/immich-app/immich-machine-learning
pullPolicy: IfNotPresent
env:
TRANSFORMERS_CACHE: /cache
HF_XET_CACHE: /cache/huggingface-xet
MPLCONFIGDIR: /cache/matplotlib-config
persistence:
cache:
enabled: true
size: 10Gi
existingClaim: immich-ml-pvc

View File

@@ -0,0 +1,24 @@
# ---
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
# name: immich-ml-pvc
# spec:
# storageClassName: local-path
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-library-pvc
spec:
storageClassName: local-path
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 250Gi

View File

@@ -0,0 +1,16 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
name: kaneo
#resources:
# - secret.yaml
helmCharts:
- name: charts/kaneo
repo: https://github.com/usekaneo/kaneo
version: 0.1.0
releaseName: kaneo
namespace: kaneo
valuesFile: values.yaml

View File

@@ -0,0 +1,183 @@
# Global values
nameOverride: ""
fullnameOverride: ""
replicaCount: 1
# Autoscaling configuration
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 10
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Pod configuration
podAnnotations: {}
podSecurityContext: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Service account configuration
serviceAccount:
create: true
annotations: {}
name: ""
# PostgreSQL database configuration
postgresql:
# Set to true to deploy PostgreSQL as part of this chart
enabled: true
image:
repository: postgres
tag: 16-alpine
pullPolicy: IfNotPresent
# Database configuration
auth:
database: kaneo
username: kaneo_user
password: kaneo_password
# Use existing secret for database credentials (optional)
existingSecret: ""
secretKeys:
adminPasswordKey: postgres-password
userPasswordKey: password
# Persistence for PostgreSQL data
persistence:
enabled: true
size: 8Gi
storageClass: ""
accessMode: ReadWriteOnce
# PostgreSQL service configuration
service:
type: ClusterIP
port: 5432
# Resources for PostgreSQL
resources: {}
# resources:
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 128Mi
# API backend configuration
api:
image:
repository: ghcr.io/usekaneo/api
tag: latest
pullPolicy: IfNotPresent
securityContext: {}
service:
type: ClusterIP
port: 1337
targetPort: 1337
# Resources are optional and disabled by default
resources: {}
# resources:
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 128Mi
# Environment variables for the API
env:
jwtAccess: appelflap
existingSecret:
enabled: false
name: ""
key: jwt-access
disableRegistration: false
# Database configuration
database:
# Use external PostgreSQL (set postgresql.enabled to false)
# Important: when using external postgres, make sure you have set up the db user correctly:
# CREATE DATABASE kaneo;
# CREATE USER kaneo_user WITH PASSWORD 'your_password';
# GRANT ALL PRIVILEGES ON DATABASE kaneo TO kaneo_user;
# \c kaneo;
# GRANT USAGE ON SCHEMA public TO kaneo_user;
# GRANT CREATE ON SCHEMA public TO kaneo_user;
# ALTER SCHEMA public OWNER TO kaneo_user;
external:
enabled: false
host: ""
port: 5432
database: kaneo
username: kaneo_user
password: ""
# Use existing secret for external database credentials in the form of a uri, e.g.: "postgresql://user:pass@host:port/db"
existingSecret:
enabled: false
name: ""
passwordKey: postgres_uri
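# For example, such a secret could be created out of band (a sketch; the
# secret name and credentials below are placeholders):
#   kubectl -n kaneo create secret generic kaneo-db-uri \
#     --from-literal=postgres_uri="postgresql://kaneo_user:changeme@db.example.com:5432/kaneo"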
livenessProbe:
httpGet:
path: /me
port: api
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /me
port: api
initialDelaySeconds: 5
periodSeconds: 10
# Web frontend configuration
web:
image:
repository: ghcr.io/usekaneo/web
tag: latest
pullPolicy: IfNotPresent
# Environment variables for the Web
env:
# Optional: Override the default API URL (http://localhost:1337)
# The /api path will be automatically appended to the URL
# Make sure this URL matches the ingress host
# apiUrl: "https://kaneo.example.com"
apiUrl: ""
securityContext: {}
service:
type: ClusterIP
port: 80
targetPort: 80
# Resources are optional and disabled by default
resources: {}
# resources:
# limits:
# cpu: 300m
# memory: 256Mi
# requests:
# cpu: 100m
# memory: 128Mi
livenessProbe:
httpGet:
path: /
port: web
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /
port: web
initialDelaySeconds: 5
periodSeconds: 10
# Ingress configuration
ingress:
enabled: true
className: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
hosts:
# Use the same host in the web env variable apiUrl (with http:// or https://)
- host: projects.dgse.cloud
paths:
- path: /?(.*)
pathType: ImplementationSpecific
service: web
port: 80
- path: /api/?(.*)
pathType: ImplementationSpecific
service: api
port: 1337
tls:
- projects.dgse.cloud

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: mailu
repo: https://mailu.github.io/helm-charts/
-version: 2.1.2
+version: 2.6.3
releaseName: mailu
namespace: mailu
valuesFile: values.yaml

View File

@@ -166,7 +166,7 @@ limits:
ipv6Mask: 56
user: 100/day
exemptionLength: 86400
exemption: "10.42.0.0/16"
exemption: "10.42.4.105"
# Configuration to reduce outgoing spam in case of a compromised account. See the documentation for further information: https://mailu.io/1.9/configuration.html?highlight=MESSAGE_RATELIMIT
## @param limits.messageRatelimit.value Sets the `MESSAGE_RATELIMIT` environment variable in the `admin` pod

View File

@@ -0,0 +1,16 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
name: nextcloud
resources:
- secret.yaml
helmCharts:
- name: nextcloud
repo: https://nextcloud.github.io/helm/
version: 8.9.1
releaseName: nextcloud
namespace: nextcloud
valuesFile: values.yaml

View File

@@ -0,0 +1,33 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: nextcloud-secret
spec:
refreshInterval: 1h
secretStoreRef:
name: platform
kind: ClusterSecretStore
target:
name: nextcloud-secret
data:
- secretKey: nextcloud-username
remoteRef:
key: nextcloud
property: nextcloud-username
- secretKey: nextcloud-password
remoteRef:
key: nextcloud
property: nextcloud-password
- secretKey: smtp-password
remoteRef:
key: nextcloud
property: smtp-password
- secretKey: smtp-username
remoteRef:
key: nextcloud
property: smtp-username
- secretKey: smtp-host
remoteRef:
key: nextcloud
property: smtp-host

View File

@@ -0,0 +1,981 @@
## ref: https://hub.docker.com/r/library/nextcloud/tags/
##
image:
repository: nextcloud
flavor: apache
# default is generated by flavor and appVersion
tag:
pullPolicy: IfNotPresent
# pullSecrets:
# - myRegistryKeySecretName
nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
podLabels: {}
deploymentAnnotations: {}
deploymentLabels: {}
# Number of replicas to be deployed
replicaCount: 1
## Allowing use of ingress controllers
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
enabled: true
className: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
tls:
- secretName: nextcloud-tls
hosts:
- nextcloud.dgse.cloud
labels: {}
path: /
pathType: Prefix
# Allow configuration of lifecycle hooks
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
lifecycle: {}
# lifecycle:
# postStartCommand: []
# preStopCommand: []
phpClientHttpsFix:
enabled: false
protocol: https
nextcloud:
host: nextcloud.dgse.cloud
# username: admin
# password: changeme
## Use an existing secret
existingSecret:
enabled: true
secretName: nextcloud-secret
usernameKey: nextcloud-username
passwordKey: nextcloud-password
tokenKey: ""
smtpUsernameKey: smtp-username
smtpPasswordKey: smtp-password
smtpHostKey: smtp-host
update: 0
# If the web server is not binding the default port, you can define it here
containerPort: 80
datadir: /var/www/html/data
persistence:
subPath:
# if set, we'll template this list to the NEXTCLOUD_TRUSTED_DOMAINS env var
trustedDomains: []
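# For example, to template NEXTCLOUD_TRUSTED_DOMAINS for this deployment's host:
# trustedDomains:
#   - nextcloud.dgse.cloud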
## SMTP configuration
mail:
enabled: false
# the user we send email as
fromAddress: user
# the domain we send email from
domain: domain.com
smtp:
host: domain.com
secure: ssl
port: 465
authtype: LOGIN
name: user
password: pass
## Primary ObjectStore options
# see: https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html#configuring-object-storage-as-primary-storage
objectStore:
# https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
s3:
enabled: false
# ignored if nextcloud.objectstore.s3.existingSecret is not an empty string
accessKey: ""
# ignored if nextcloud.objectstore.s3.existingSecret is not an empty string
secretKey: ""
# use legacy auth method
legacyAuth: false
# s3 endpoint to use; only required if you're not using AWS
host: ""
# use TLS/SSL for S3 connections
ssl: true
# default port that can be changed based on your object store, e.g. for minio, you can use 9000
port: "443"
# this is the default in the nextcloud docs
region: "eu-west-1"
# required if using s3, the name of the bucket you'd like to use
bucket: ""
# object prefix in bucket
prefix: ""
# set to true if you are not using DNS for your buckets.
usePathStyle: false
# autocreate the bucket
autoCreate: false
# optional parameter: you probably want to keep this as the default
storageClass: "STANDARD"
# server side encryption key. learn more: https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html#s3-sse-c-encryption-support
sse_c_key: ""
# use an existingSecret for S3 credentials. If set, we ignore the following under nextcloud.objectStore.s3
# endpoint, accessKey, secretKey
existingSecret: ""
secretKeys:
# key in nextcloud.objectStore.s3.existingSecret to use for s3 endpoint
host: ""
# key in nextcloud.objectStore.s3.existingSecret to use for s3 accessKeyID
accessKey: ""
# key in nextcloud.objectStore.s3.existingSecret to use for s3 secretAccessKey
secretKey: ""
# key in nextcloud.objectStore.s3.existingSecret to use for the s3 bucket
bucket: ""
# key in nextcloud.objectStore.s3.existingSecret to use for the s3 sse_c_key
sse_c_key: ""
## options related to using Swift as a primary object storage
# https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html#openstack-swift
swift:
enabled: false
# swift user info
user:
domain: "Default"
name: ""
password: ""
# swift project info
project:
name: ""
domain: "Default"
# The Identity / Keystone endpoint
url: ""
region: ""
# optional on some swift implementations
service: "swift"
# the container to store the data in
container: ""
# autocreate container
autoCreate: false
## PHP Configuration files
# Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
phpConfigs: {}
## Default config files that utilize environment variables:
# see: https://github.com/nextcloud/docker/tree/master#auto-configuration-via-environment-variables
# IMPORTANT: These are used only if you supply extra configs; otherwise the defaults come from Nextcloud itself
# Default configurations can be found here: https://github.com/nextcloud/docker/tree/master/.config
defaultConfigs:
# To protect /var/www/html/config
.htaccess: true
# Apache configuration for rewrite urls
apache-pretty-urls.config.php: true
# Define APCu as local cache
apcu.config.php: true
# Apps directory configs
apps.config.php: true
# Used to auto-configure the database
autoconfig.php: true
# Redis default configuration
redis.config.php: true
# Reverse proxy default configuration
reverse-proxy.config.php: true
# S3 Object Storage as primary storage
s3.config.php: true
# SMTP default configuration via environment variables
smtp.config.php: true
# Swift Object Storage as primary storage
swift.config.php: true
# disables the web based updater as the default nextcloud docker image does not support it
upgrade-disable-web.config.php: true
# -- imaginary support config
imaginary.config.php: false
# Extra config files created in /var/www/html/config/
# ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
configs: {}
# For example, to enable image and text file previews:
# previews.config.php: |-
# <?php
# $CONFIG = array (
# 'enable_previews' => true,
# 'enabledPreviewProviders' => array (
# 'OC\Preview\Movie',
# 'OC\Preview\PNG',
# 'OC\Preview\JPEG',
# 'OC\Preview\GIF',
# 'OC\Preview\BMP',
# 'OC\Preview\XBitmap',
# 'OC\Preview\MP3',
# 'OC\Preview\MP4',
# 'OC\Preview\TXT',
# 'OC\Preview\MarkDown',
# 'OC\Preview\PDF'
# ),
# );
# Hooks for auto configuration
# Here you could write small scripts which are placed in `/docker-entrypoint-hooks.d/<hook-name>/helm.sh`
# ref: https://github.com/nextcloud/docker?tab=readme-ov-file#auto-configuration-via-hook-folders
hooks:
pre-installation:
post-installation:
pre-upgrade:
post-upgrade:
before-starting:
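# For example, a minimal post-installation hook (a sketch; the script body is
# an assumption and ends up in /docker-entrypoint-hooks.d/post-installation/helm.sh):
# post-installation: |
#   #!/bin/sh
#   echo "post-installation hook ran"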
## Strategy used to replace old pods
## IMPORTANT: use with care; it is suggested to leave this as-is for upgrade purposes
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
strategy:
type: Recreate
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
##
## Extra environment variables
extraEnv:
# - name: SOME_SECRET_ENV
# valueFrom:
# secretKeyRef:
# name: nextcloud
# key: secret_key
# Extra init containers that run before pods start.
extraInitContainers: []
# - name: do-something
# image: busybox
# command: ['do', 'something']
# Extra sidecar containers.
extraSidecarContainers: []
# - name: nextcloud-logger
# image: busybox
# command: [/bin/sh, -c, 'while ! test -f "/run/nextcloud/data/nextcloud.log"; do sleep 1; done; tail -n+1 -f /run/nextcloud/data/nextcloud.log']
# volumeMounts:
# - name: nextcloud-data
# mountPath: /run/nextcloud/data
# Extra mounts for the pods. Example shown is for connecting a legacy NFS volume
# to NextCloud pods in Kubernetes. This can then be configured in External Storage
extraVolumes:
# - name: nfs
# nfs:
# server: "10.0.0.1"
# path: "/nextcloud_data"
# readOnly: false
extraVolumeMounts:
# - name: nfs
# mountPath: "/legacy_data"
# Set securityContext parameters for the nextcloud CONTAINER only (will not affect nginx container).
# For example, you may need to define runAsNonRoot directive
securityContext: {}
# runAsUser: 33
# runAsGroup: 33
# runAsNonRoot: true
# readOnlyRootFilesystem: false
# Set securityContext parameters for the entire pod. For example, you may need to define runAsNonRoot directive
podSecurityContext: {}
# runAsUser: 33
# runAsGroup: 33
# runAsNonRoot: true
# readOnlyRootFilesystem: false
# Settings for the MariaDB init container
mariaDbInitContainer:
resources: {}
# Set mariadb initContainer securityContext parameters. For example, you may need to define runAsNonRoot directive
securityContext: {}
# Settings for the PostgreSQL init container
postgreSqlInitContainer:
resources: {}
# Set postgresql initContainer securityContext parameters. For example, you may need to define runAsNonRoot directive
securityContext: {}
nginx:
## You need to use an fpm flavor of the Nextcloud image if you want to use nginx!
enabled: false
image:
repository: nginx
tag: alpine
pullPolicy: IfNotPresent
containerPort: 80
# This configures nginx to listen on either IPv4, IPv6 or both
ipFamilies:
- IPv4
# - IPv6
config:
# This generates the default nginx config as per the nextcloud documentation
default: true
headers:
# -- HSTS settings
# WARNING: Only add the preload option once you read about
# the consequences in https://hstspreload.org/. This option
# will add the domain to a hardcoded list that is shipped
# in all major browsers and getting removed from this list
# could take several months.
# Example:
# "Strict-Transport-Security": "max-age=15768000; includeSubDomains; preload;"
"Strict-Transport-Security": ""
"Referrer-Policy": "no-referrer"
"X-Content-Type-Options": "nosniff"
"X-Frame-Options": "SAMEORIGIN"
"X-Permitted-Cross-Domain-Policies": "none"
"X-Robots-Tag": "noindex, nofollow"
"X-XSS-Protection": "1; mode=block"
# Added in server block of default config.
serverBlockCustom: |
# set max upload size
client_max_body_size 10G;
client_body_timeout 300s;
fastcgi_buffers 64 4K;
fastcgi_read_timeout 3600s;
custom:
# custom: |-
# worker_processes 1;..
resources: {}
# Set nginx container securityContext parameters. For example, you may need to define runAsNonRoot directive
securityContext: {}
# the nginx alpine container default user is 82
# runAsUser: 82
# runAsGroup: 33
# runAsNonRoot: true
# readOnlyRootFilesystem: true
## Extra environment variables
extraEnv: []
# - name: SOME_ENV
# value: ENV_VALUE
internalDatabase:
enabled: true
name: nextcloud
##
## External database configuration
##
externalDatabase:
enabled: false
## Supported database engines: mysql or postgresql
type: mysql
## Database host. You can optionally include a colon delimited port like "myhost:1234"
host: ""
## Database user
user: nextcloud
## Database password
password: ""
## Database name
database: nextcloud
## Use an existing secret
existingSecret:
enabled: false
# secretName: nameofsecret
usernameKey: db-username
passwordKey: db-password
# hostKey: db-hostname-or-ip
# databaseKey: db-name
global:
security:
# required for bitnamilegacy repos
allowInsecureImages: true
##
## MariaDB chart configuration
## ref: https://github.com/bitnami/charts/tree/main/bitnami/mariadb
##
mariadb:
## Whether to deploy a MariaDB server from the Bitnami MariaDB Helm chart
# to satisfy the application's database requirements. If you want to deploy this Bitnami MariaDB, set this and externalDatabase to true
# To use an ALREADY DEPLOYED MariaDB database, set this to false and configure the externalDatabase parameters
enabled: false
image:
repository: bitnamilegacy/mariadb
# see: https://github.com/bitnami/charts/tree/main/bitnami/mariadb#global-parameters
global:
# overwrites the primary.persistence.storageClass value
defaultStorageClass: ""
auth:
database: nextcloud
username: nextcloud
password: changeme
# Use existing secret (auth.rootPassword, auth.password, and auth.replicationPassword will be ignored).
# secret must contain the keys mariadb-root-password, mariadb-replication-password and mariadb-password
existingSecret: ""
architecture: standalone
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
primary:
persistence:
enabled: false
# Use an existing Persistent Volume Claim (must be created ahead of time)
existingClaim: ""
storageClass: ""
accessMode: ReadWriteOnce
size: 8Gi
##
## PostgreSQL chart configuration
## for more options see https://github.com/bitnami/charts/tree/main/bitnami/postgresql
##
postgresql:
enabled: false
image:
repository: bitnamilegacy/postgresql
global:
postgresql:
# global.postgresql.auth overrides postgresql.auth
auth:
username: nextcloud
password: changeme
database: nextcloud
# Name of existing secret to use for PostgreSQL credentials.
# auth.postgresPassword, auth.password, and auth.replicationPassword will be ignored and picked up from this secret.
# secret might also contain the key ldap-password if LDAP is enabled.
# ldap.bind_password will be ignored and picked from this secret in this case.
existingSecret: ""
# Names of keys in existing secret to use for PostgreSQL credentials
secretKeys:
adminPasswordKey: ""
userPasswordKey: ""
replicationPasswordKey: ""
primary:
persistence:
enabled: false
# Use an existing Persistent Volume Claim (must be created ahead of time)
# existingClaim: ""
# storageClass: ""
##
## External Redis configuration
##
externalRedis:
enabled: false
## Redis host
host: ""
## Redis port
port: "6379"
## Redis password
password: ""
## Use an existing secret
existingSecret:
enabled: false
# secretName: nameofsecret
passwordKey: redis-password
##
## Redis chart configuration
## for more options see https://github.com/bitnami/charts/tree/main/bitnami/redis
##
redis:
enabled: false
image:
repository: bitnamilegacy/redis
auth:
enabled: true
password: "changeme"
# name of an existing secret with Redis® credentials (instead of auth.password), must be created ahead of time
existingSecret: ""
# Password key to be retrieved from existing secret
existingSecretPasswordKey: ""
# Since Redis is used for caching only, you might want to use a storageClass with a different reclaim policy and backup settings
global:
storageClass: ""
master:
persistence:
enabled: true
replica:
persistence:
enabled: true
##
## Collabora chart configuration
## for more options see https://github.com/CollaboraOnline/online/tree/master/kubernetes/helm/collabora-online
##
collabora:
enabled: false
autoscaling:
# enable autoscaling, please check collabora README.md first
enabled: false
collabora:
## HTTPS nextcloud domain, if needed
aliasgroups: []
# - host: "https://nextcloud.domain:443"
# set extra parameters for collabora
# you may need to add --o:ssl.termination=true
extra_params: --o:ssl.enable=false
## Specify server_name when the hostname is not reachable directly, for
# example behind a reverse proxy. Example: collabora.domain
server_name: null
existingSecret:
# set to true to get collabora admin credentials from an existing secret
# if set, ignores collabora.collabora.username and password
enabled: false
# name of an existing Kubernetes Secret with collabora admin credentials
secretName: ""
usernameKey: "username"
passwordKey: "password"
# setup admin login credentials, these are ignored if
# collabora.collabora.existingSecret.enabled=true
password: examplepass
username: admin
# setup ingress
ingress:
# enable ingress for collabora online
enabled: false
className: ""
# please check collabora values.yaml for nginx/haproxy annotations examples
annotations: {}
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: collabora-ingress-tls
# hosts:
# - collabora.domain
# see collabora helm README.md for recommended values
resources: {}
## Cronjob to execute Nextcloud background tasks
## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#cron
##
cronjob:
enabled: false
# Either 'sidecar' or 'cronjob'
type: sidecar
# Runs crond as a sidecar container in the Nextcloud pod
# Note: crond requires root
sidecar:
## Cronjob sidecar resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# Allow configuration of lifecycle hooks
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
lifecycle: {}
# lifecycle:
# postStartCommand: []
# preStopCommand: []
# Set securityContext parameters. For example, you may need to define runAsNonRoot directive
securityContext: {}
# runAsUser: 33
# runAsGroup: 33
# runAsNonRoot: true
# readOnlyRootFilesystem: true
# The command the cronjob container executes.
command:
- /cron.sh
# Uses a Kubernetes CronJob to execute the Nextcloud cron tasks
# Note: can run as a non-root user. Should run as the same user as the Nextcloud pod.
cronjob:
# Use a CronJob instead of crond sidecar container
# crond does not work when not running as the root user
# Note: requires `persistence.enabled=true`
schedule: "*/5 * * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 5
# -- Additional labels for cronjob
labels: {}
# -- Additional labels for cronjob pod
podLabels: {}
annotations: {}
backoffLimit: 1
affinity: {}
# Often RWO volumes are used, but the cronjob pod needs access to the same volume as the nextcloud pod.
# Depending on your provider, two pods on the same node can still access the same volume.
# The following config ensures that the cronjob pod is scheduled on the same node as the nextcloud pod.
# affinity:
# podAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - nextcloud
# - key: app.kubernetes.io/component
# operator: In
# values:
# - app
# topologyKey: kubernetes.io/hostname
## Resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# Allow configuration of lifecycle hooks
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
# Set securityContext parameters. For example, you may need to define runAsNonRoot directive
securityContext: {}
# runAsUser: 33
# runAsGroup: 33
# runAsNonRoot: true
# readOnlyRootFilesystem: true
# The command to run in the cronjob container
# Example to increase memory limit: php -d memory_limit=2G ...
command:
- php
- -f
- /var/www/html/cron.php
- --
- --verbose
service:
type: ClusterIP
port: 8080
loadBalancerIP: ""
nodePort:
# -- use additional annotation on service for nextcloud
annotations: {}
# -- Set this to "ClientIP" to make sure that connections from the same client
# are passed to the same Nextcloud pod each time.
sessionAffinity: ""
sessionAffinityConfig: {}
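# For example, to pin each client to one pod for three hours (a sketch using
# the standard Kubernetes Service fields):
# sessionAffinity: ClientIP
# sessionAffinityConfig:
#   clientIP:
#     timeoutSeconds: 10800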
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
# Nextcloud Data (/var/www/html)
enabled: true
annotations: {}
labels: {}
## nextcloud data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: "local-path"
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
size: 50Gi
## Use an additional pvc for the data directory rather than a subpath of the default PVC
## Useful to store data on a different storageClass (e.g. on slower disks)
nextcloudData:
enabled: false
subPath:
labels: {}
annotations: {}
# storageClass: "-"
# existingClaim:
accessMode: ReadWriteOnce
size: 8Gi
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# resources:
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: false
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 30
successThreshold: 1
## Enable pod autoscaling using HorizontalPodAutoscaler
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
##
hpa:
enabled: false
cputhreshold: 60
minPods: 1
maxPods: 10
nodeSelector: {}
tolerations: []
# -- Nextcloud pod topologySpreadConstraints
topologySpreadConstraints: []
affinity: {}
dnsConfig: {}
# Custom dns config for Nextcloud containers.
# You can for example configure ndots. This may be needed in some clusters with alpine images.
# options:
# - name: ndots
# value: "1"
imaginary:
# -- Start Imaginary
enabled: false
# -- Number of imaginary pod replicas to deploy
replicaCount: 1
image:
# -- Imaginary image registry
registry: docker.io
# -- Imaginary image name
repository: h2non/imaginary
# -- Imaginary image tag
tag: 1.2.4
# -- Imaginary image pull policy
pullPolicy: IfNotPresent
# -- Imaginary image pull secrets
pullSecrets: []
# -- Additional annotations for imaginary
podAnnotations: {}
# -- Additional labels for imaginary
podLabels: {}
# -- Imaginary pod nodeSelector
nodeSelector: {}
# -- Imaginary pod tolerations
tolerations: []
# -- Imaginary pod topologySpreadConstraints
topologySpreadConstraints: []
# -- imaginary resources
resources: {}
# -- Optional security context for the Imaginary container
securityContext:
runAsUser: 1000
runAsNonRoot: true
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# -- Optional security context for the Imaginary pod (applies to all containers in the pod)
podSecurityContext: {}
# runAsNonRoot: true
# seccompProfile:
# type: RuntimeDefault
readinessProbe:
enabled: true
failureThreshold: 3
successThreshold: 1
periodSeconds: 10
timeoutSeconds: 1
livenessProbe:
enabled: true
failureThreshold: 3
successThreshold: 1
periodSeconds: 10
timeoutSeconds: 1
service:
# -- Imaginary: Kubernetes Service type
type: ClusterIP
# -- Imaginary: LoadBalancerIp for service type LoadBalancer
loadBalancerIP:
# -- Imaginary: NodePort for service type NodePort
nodePort:
# -- Additional annotations for service imaginary
annotations: {}
# -- Additional labels for service imaginary
labels: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
replicaCount: 1
# Optional: becomes NEXTCLOUD_SERVER env var in the nextcloud-exporter container.
# Without it, we will use the full name of the nextcloud service
server: ""
# The metrics exporter needs to know how you serve Nextcloud, either http or https
https: false
# Use API token if set, otherwise fall back to password authentication
# https://github.com/xperimental/nextcloud-exporter#token-authentication
# Currently you still need to set the token manually in your nextcloud install
token: ""
timeout: 5s
# if set to true, the exporter skips certificate verification of the Nextcloud server.
tlsSkipVerify: false
info:
# Optional: becomes NEXTCLOUD_INFO_APPS env var in the nextcloud-exporter container.
# Enables gathering of apps-related metrics. Defaults to false
apps: false
update: false
image:
repository: xperimental/nextcloud-exporter
tag: 0.8.0
pullPolicy: IfNotPresent
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# -- Metrics exporter pod Annotation
podAnnotations: {}
# -- Metrics exporter pod Labels
podLabels: {}
# -- Metrics exporter pod nodeSelector
nodeSelector: {}
# -- Metrics exporter pod tolerations
tolerations: []
# -- Metrics exporter pod affinity
affinity: {}
service:
type: ClusterIP
# Use serviceLoadBalancerIP to request a specific static IP,
# otherwise leave blank
loadBalancerIP:
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9205"
labels: {}
# -- security context for the metrics CONTAINER in the pod
securityContext:
runAsUser: 1000
runAsNonRoot: true
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# -- security context for the metrics POD
podSecurityContext: {}
# runAsNonRoot: true
# seccompProfile:
# type: RuntimeDefault
## Prometheus Operator ServiceMonitor configuration
##
serviceMonitor:
## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
##
enabled: false
## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
##
namespace: ""
## @param metrics.serviceMonitor.namespaceSelector The selector of the namespace where the target service is located (defaults to the release namespace)
namespaceSelector:
## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
##
jobLabel: ""
## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: 30s
## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
scrapeTimeout: ""
## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor
##
labels: {}
rules:
# -- Deploy Prometheus Rules (Alerts) for the exporter
# @section -- Metrics
enabled: false
# -- Label on Prometheus Rules CRD Manifest
# @section -- Metrics
labels: {}
defaults:
# -- Add Default Rules
# @section -- Metrics
enabled: true
# -- Label on the rules (the severity is already set)
# @section -- Metrics
labels: {}
# -- Filter on metrics on alerts (default just for this helm-chart)
# @section -- Metrics
filter: ""
# -- Add own Rules to Prometheus Rules
# @section -- Metrics
additionalRules: []
rbac:
enabled: false
serviceaccount:
create: true
name: nextcloud-serviceaccount
annotations: {}
## @param securityContext for nextcloud pod @deprecated Use `nextcloud.podSecurityContext` instead
securityContext: {}

View File

@@ -0,0 +1,16 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
name: penpot
resources:
- secret.yaml
helmCharts:
- name: penpot
repo: http://helm.penpot.app
version: 0.32.0
releaseName: penpot
namespace: penpot
valuesFile: values.yaml

View File

@@ -0,0 +1,21 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: oidc-secret
spec:
refreshInterval: 1h
secretStoreRef:
name: platform
kind: ClusterSecretStore
target:
name: oidc-secret
data:
- secretKey: CLIENT_ID
remoteRef:
key: penpot/oidc
property: CLIENT_ID
- secretKey: CLIENT_SECRET
remoteRef:
key: penpot/oidc
property: CLIENT_SECRET

View File

@@ -0,0 +1,786 @@
# yaml-language-server: $schema=values.schema.json
## Default values for Penpot
global:
# -- Whether to deploy the Bitnami PostgreSQL chart as subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/postgresql) for configuration.
# @section -- Global parameters
postgresqlEnabled: true
# -- Whether to deploy the Bitnami Valkey chart as subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/valkey) for configuration.
# @section -- Global parameters
valkeyEnabled: true
# -- Whether to deploy the Bitnami Redis chart as subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/redis) for configuration.
# *DEPRECATION WARNING: Since Penpot 2.8, Penpot has migrated from Redis to Valkey. Although migration is recommended, Penpot will work seamlessly with compatible Redis versions.
# @section -- Global parameters
redisEnabled: false
# -- Global Docker registry secret names.
# E.g.
# imagePullSecrets:
# - myRegistryKeySecretName
# @section -- Global parameters
imagePullSecrets: []
# -- To partially override common.names.fullname
# @section -- Common parameters
nameOverride: ""
# -- To fully override common.names.fullname
# @section -- Common parameters
fullnameOverride: ""
serviceAccount:
# -- Specifies whether a ServiceAccount should be created.
# @section -- Common parameters
enabled: true
# -- Annotations for service account. Evaluated as a template.
# @section -- Common parameters
annotations: {}
# -- The name of the ServiceAccount to use. If not set and enabled is true, a name is generated using the fullname template.
# @section -- Common parameters
name: "penpot"
config:
# -- The public domain to serve Penpot on.
# **IMPORTANT:** Set `disable-secure-session-cookies` in the flags if you plan on serving it on a non-HTTPS domain.
# @section -- Configuration parameters
publicUri: "http://penpot.dgse.cloud"
# -- The feature flags to enable. Check [the official docs](https://help.penpot.app/technical-guide/configuration/) for more info.
# @section -- Configuration parameters
flags: "disable-email-verification enable-smtp enable-login-with-oidc disable-registration"
# -- A random secret key needed for persistent user sessions. Generate with `python3 -c "import secrets; print(secrets.token_urlsafe(64))"` for example.
# @section -- Configuration parameters
apiSecretKey: ""
# -- The name of an existing secret.
# @section -- Configuration parameters
existingSecret: "penpot-api"
secretKeys:
# -- The api secret key to use from an existing secret.
# @section -- Configuration parameters
apiSecretKey: "token"
# -- Comma separated list of allowed domains to register. Empty to allow all domains.
# @section -- Configuration parameters
registrationDomainWhitelist: ""
# -- Whether to enable sending of anonymous telemetry data.
# @section -- Configuration parameters
telemetryEnabled: true
# -- Add a custom resolver for the frontend, e.g. 192.168.1.1
# @section -- Configuration parameters
internalResolver: ""
# -- URL address to the Terms of Service (empty to hide the link)
# @section -- Configuration parameters
termsOfServicesUri: ""
# -- URL address to the Privacy Policy (empty to hide the link)
# @section -- Configuration parameters
privacyPolicyUri: ""
postgresql:
# -- The PostgreSQL host to connect to. Empty to use dependencies.
# @section -- Configuration parameters
host: "" # Ex.: "postgresql.penpot.svc.cluster.local"
# -- The PostgreSQL host port to use.
# @section -- Configuration parameters
port: 5432
# -- The database username to use.
# @section -- Configuration parameters
username: "penpot"
# -- The database password to use.
# @section -- Configuration parameters
password: "penpot"
# -- The PostgreSQL database to use.
# @section -- Configuration parameters
database: "penpot"
# -- The name of an existing secret.
# @section -- Configuration parameters
existingSecret: ""
secretKeys:
# -- The postgresql uri key to use from an existing secret. (postgresql://host:port/database).
# @section -- Configuration parameters
postgresqlUriKey: ""
# -- The username key to use from an existing secret.
# @section -- Configuration parameters
usernameKey: ""
# -- The password key to use from an existing secret.
# @section -- Configuration parameters
passwordKey: ""
redis:
# -- The Valkey host to connect to. Empty to use dependencies
# @section -- Configuration parameters
host: "" # Ex.: "redis-headless.penpot.svc.cluster.local"
# -- The Valkey host port to use.
# @section -- Configuration parameters
port: 6379
# -- The Valkey database to connect to.
# @section -- Configuration parameters
database: "0"
# -- The name of an existing secret.
# @section -- Configuration parameters
existingSecret: ""
secretKeys:
# -- The redis uri key to use from an existing secret. (redis://:password@host:port/database).
# @section -- Configuration parameters
redisUriKey: ""
assets:
# -- The storage backend for assets to use. Use `assets-fs` for filesystem, and `assets-s3` for S3.
# @section -- Configuration parameters
storageBackend: "assets-fs"
filesystem:
# -- The storage directory to use if you chose the filesystem storage backend.
# @section -- Configuration parameters
directory: "/opt/data/assets"
s3:
# -- The S3 access key ID to use if you chose the S3 storage backend.
# @section -- Configuration parameters
accessKeyID: ""
# -- The S3 secret access key to use if you chose the S3 storage backend.
# @section -- Configuration parameters
secretAccessKey: ""
# -- The S3 region to use if you chose the S3 storage backend.
# @section -- Configuration parameters
region: ""
# -- The name of the S3 bucket to use if you chose the S3 storage backend.
# @section -- Configuration parameters
bucket: ""
# -- The S3 endpoint URI to use if you chose the S3 storage backend.
# @section -- Configuration parameters
endpointURI: ""
# -- The name of an existing secret.
# @section -- Configuration parameters
existingSecret: ""
secretKeys:
# -- The S3 access key ID to use from an existing secret.
# @section -- Configuration parameters
accessKeyIDKey: ""
# -- The S3 secret access key to use from an existing secret.
# @section -- Configuration parameters
secretAccessKey: ""
# -- The S3 endpoint URI to use from an existing secret.
# @section -- Configuration parameters
endpointURIKey: ""
smtp:
# -- Whether to enable SMTP configuration. You also need to add the 'enable-smtp' flag to the PENPOT_FLAGS variable.
# @section -- Configuration parameters
enabled: false
# -- The SMTP default email to send from.
# @section -- Configuration parameters
defaultFrom: ""
# -- The SMTP default email to reply to.
# @section -- Configuration parameters
defaultReplyTo: ""
# -- The SMTP host to use.
# @section -- Configuration parameters
host: ""
# -- The SMTP host port to use.
# @section -- Configuration parameters
port: ""
# -- The SMTP username to use.
# @section -- Configuration parameters
username: ""
# -- The SMTP password to use.
# @section -- Configuration parameters
password: ""
# -- Whether to use TLS for the SMTP connection.
# @section -- Configuration parameters
tls: true
# -- Whether to use SSL for the SMTP connection.
# @section -- Configuration parameters
ssl: false
# -- The name of an existing secret.
# @section -- Configuration parameters
existingSecret: ""
secretKeys:
# -- The SMTP username to use from an existing secret.
# @section -- Configuration parameters
usernameKey: ""
# -- The SMTP password to use from an existing secret.
# @section -- Configuration parameters
passwordKey: ""
providers:
google:
# -- Whether to enable Google configuration. To enable Google auth, add `enable-login-with-google` to the flags.
# @section -- Configuration parameters
enabled: false
# -- The Google client ID to use. To enable Google auth, add `enable-login-with-google` to the flags.
# @section -- Configuration parameters
clientID: ""
# -- The Google client secret to use. To enable Google auth, add `enable-login-with-google` to the flags.
# @section -- Configuration parameters
clientSecret: ""
github:
# -- Whether to enable GitHub configuration. To enable GitHub auth, also add `enable-login-with-github` to the flags.
# @section -- Configuration parameters
enabled: false
# -- The GitHub client ID to use.
# @section -- Configuration parameters
clientID: ""
# -- The GitHub client secret to use.
# @section -- Configuration parameters
clientSecret: ""
gitlab:
# -- Whether to enable GitLab configuration. To enable GitLab auth, also add `enable-login-with-gitlab` to the flags.
# @section -- Configuration parameters
enabled: false
# -- The GitLab base URI to use.
# @section -- Configuration parameters
baseURI: "https://gitlab.com"
# -- The GitLab client ID to use.
# @section -- Configuration parameters
clientID: ""
# -- The GitLab client secret to use.
# @section -- Configuration parameters
clientSecret: ""
oidc:
# -- Whether to enable OIDC configuration. To enable OpenID Connect auth, also add `enable-login-with-oidc` to the flags.
# @section -- Configuration parameters
enabled: true
# -- The OpenID Connect base URI to use.
# @section -- Configuration parameters
baseURI: "https://auth.dgse.cloud"
# -- The OpenID Connect client ID to use.
# @section -- Configuration parameters
clientID: ""
# -- The OpenID Connect client secret to use.
# @section -- Configuration parameters
clientSecret: ""
# -- Optional OpenID Connect auth URI to use. Auto discovered if not provided.
# @section -- Configuration parameters
authURI: ""
# -- Optional OpenID Connect token URI to use. Auto discovered if not provided.
# @section -- Configuration parameters
tokenURI: ""
# -- Optional OpenID Connect user URI to use. Auto discovered if not provided.
# @section -- Configuration parameters
userURI: ""
# -- Optional OpenID Connect roles to use. If no role is provided, role checking is disabled (default role values are set below; to disable role verification, send an empty string).
# @section -- Configuration parameters
roles: ""
# -- Optional OpenID Connect roles attribute to use. If not provided, the role checking will be disabled.
# @section -- Configuration parameters
rolesAttribute: ""
# -- Optional OpenID Connect scopes to use. These settings allow overwriting the required scopes, use with caution because penpot requires at least `name` and `email` attrs found on the user info. Optional, defaults to `openid profile`.
# @section -- Configuration parameters
scopes: ""
# -- Optional OpenID Connect name attribute to use. If not provided, the `name` prop will be used.
# @section -- Configuration parameters
nameAttribute: ""
# -- Optional OpenID Connect email attribute to use. If not provided, the `email` prop will be used.
# @section -- Configuration parameters
emailAttribute: ""
ldap:
# -- Whether to enable LDAP configuration. To enable LDAP, also add `enable-login-with-ldap` to the flags.
# @section -- Configuration parameters
enabled: false
# -- The LDAP host to use.
# @section -- Configuration parameters
host: "ldap"
# -- The LDAP port to use.
# @section -- Configuration parameters
port: 10389
# -- Whether to use SSL for the LDAP connection.
# @section -- Configuration parameters
ssl: false
# -- Whether to utilize StartTLS for the LDAP connection.
# @section -- Configuration parameters
startTLS: false
# -- The LDAP base DN to use.
# @section -- Configuration parameters
baseDN: "ou=people,dc=planetexpress,dc=com"
# -- The LDAP bind DN to use.
# @section -- Configuration parameters
bindDN: "uid=admin,ou=people,dc=planetexpress,dc=com"
# -- The LDAP bind password to use.
# @section -- Configuration parameters
bindPassword: "GoodNewsEveryone"
# -- The LDAP user query to use.
# @section -- Configuration parameters
userQuery: "(&(|(uid=:username)(mail=:username))(memberOf=cn=penpot,ou=groups,dc=my-domain,dc=com))"
# -- The LDAP attributes username to use.
# @section -- Configuration parameters
attributesUsername: "uid"
# -- The LDAP attributes email to use.
# @section -- Configuration parameters
attributesEmail: "mail"
# -- The LDAP attributes fullname to use.
# @section -- Configuration parameters
attributesFullname: "cn"
# -- The LDAP attributes photo format to use.
# @section -- Configuration parameters
attributesPhoto: "jpegPhoto"
# -- The name of an existing secret to use.
# @section -- Configuration parameters
existingSecret: "oidc-secret"
secretKeys:
# -- The Google client ID key to use from an existing secret.
# @section -- Configuration parameters
googleClientIDKey: ""
# -- The Google client secret key to use from an existing secret.
# @section -- Configuration parameters
googleClientSecretKey: ""
# -- The GitHub client ID key to use from an existing secret.
# @section -- Configuration parameters
githubClientIDKey: ""
# -- The GitHub client secret key to use from an existing secret.
# @section -- Configuration parameters
githubClientSecretKey: ""
# -- The GitLab client ID key to use from an existing secret.
# @section -- Configuration parameters
gitlabClientIDKey: ""
# -- The GitLab client secret key to use from an existing secret.
# @section -- Configuration parameters
gitlabClientSecretKey: ""
# -- The OpenID Connect client ID key to use from an existing secret.
# @section -- Configuration parameters
oidcClientIDKey: "CLIENT_ID"
# -- The OpenID Connect client secret key to use from an existing secret.
# @section -- Configuration parameters
oidcClientSecretKey: "CLIENT_SECRET"
# -- The LDAP admin bind password to use from an existing secret
# @section -- Configuration parameters
ldapBindPasswordKey: ""
autoFileSnapshot:
# -- How many changes before generating a new snapshot. You also need to add the 'auto-file-snapshot' flag to the PENPOT_FLAGS variable.
# @section -- Configuration parameters
every: 5 # Every 5 changes
# -- If there isn't a snapshot during this time, the system will generate one automatically. You also need to add the 'auto-file-snapshot' flag to the PENPOT_FLAGS variable.
# @section -- Configuration parameters
timeout: "3h"
# -- Specify any additional environment values you want to provide to all the containers (frontend, backend and exporter) in the deployment according to the [specification](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables)
# @section -- Configuration parameters
extraEnvs: []
backend:
image:
# -- The Docker repository to pull the image from.
# @section -- Backend parameters
repository: penpotapp/backend
# -- The image tag to use.
# @section -- Backend parameters
tag: 2.10.1
# -- The image pull policy to use.
# @section -- Backend parameters
pullPolicy: IfNotPresent
# -- The number of replicas to deploy.
# @section -- Backend parameters
replicaCount: 1
service:
# -- The http service type to create.
# @section -- Backend parameters
type: ClusterIP
# -- The http service port to use.
# @section -- Backend parameters
port: 6060
# -- Mapped annotations for the backend service
# @section -- Backend parameters
annotations: {}
# -- An optional map of annotations to be applied to the controller Deployment
# @section -- Backend parameters
deploymentAnnotations: {}
# -- An optional map of labels to be applied to the controller Pods
# @section -- Backend parameters
podLabels: {}
# -- An optional map of annotations to be applied to the controller Pods
# @section -- Backend parameters
podAnnotations: {}
# -- Configure Pods Security Context. Check [the official doc](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod)
# @section -- Backend parameters
podSecurityContext:
fsGroup: 1001
# -- Configure Container Security Context. Check [the official doc](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod)
# @section -- Backend parameters
containerSecurityContext:
runAsUser: 1001
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: false
runAsNonRoot: true
# -- Affinity for Penpot pods assignment. Check [the official doc](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity)
# @section -- Backend parameters
affinity: {}
# -- Node labels for Penpot pods assignment. Check [the official doc](https://kubernetes.io/docs/user-guide/node-selection/)
# @section -- Backend parameters
nodeSelector: {}
# -- Tolerations for Penpot pods assignment. Check [the official doc](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
# @section -- Backend parameters
tolerations: []
# -- Penpot backend resource requests and limits. Check [the official doc](https://kubernetes.io/docs/user-guide/compute-resources/)
# @section -- Backend parameters
resources:
# -- The resources limits for the Penpot backend containers
# @section -- Backend parameters
limits: {}
# -- The requested resources for the Penpot backend containers
# @section -- Backend parameters
requests: {}
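# E.g. a conservative starting point (illustrative values only; tune to
# your workload):
# resources:
#   requests:
#     cpu: 250m
#     memory: 1Gi
#   limits:
#     memory: 2Gi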
# -- Startup probe for the Penpot backend containers. Tolerates up to 30 * 10 = 300 seconds = 5 minutes. Check [the official doc](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes)
# @section -- Backend parameters
startupProbe:
httpGet:
path: /readyz
port: http
failureThreshold: 30
periodSeconds: 10
# -- Configure Pod Disruption Budget for the backend pods. Check [the official doc](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
# @section -- Backend parameters
pdb:
# -- Enable Pod Disruption Budget for the backend pods.
# @section -- Backend parameters
enabled: false
# -- (int,string) The number or percentage of pods from that set that must still be available after the eviction (e.g.: 3, "10%").
# @section -- Backend parameters
minAvailable:
# -- (int,string) The number or percentage of pods from that set that can be unavailable after the eviction (e.g.: 3, "10%").
# @section -- Backend parameters
maxUnavailable:
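# E.g. keeping at least one backend pod available during voluntary
# disruptions (illustrative; set only one of minAvailable/maxUnavailable):
# pdb:
#   enabled: true
#   minAvailable: 1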
# -- Specify any additional environment values you want to provide to the backend container in the deployment according to the [specification](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables)
# @section -- Backend parameters
extraEnvs: []
# -- Extra volumes to be made available. Check [the official doc](https://kubernetes.io/docs/concepts/storage/volumes/)
# @section -- Backend parameters
volumes: []
# -- Extra volumes to be mounted in the container. Check [the official doc](https://kubernetes.io/docs/concepts/storage/volumes/)
# @section -- Backend parameters
volumeMounts: []
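# E.g. mounting an extra CA bundle from a ConfigMap (hypothetical names):
# volumes:
#   - name: ca-bundle
#     configMap:
#       name: custom-ca-bundle
# volumeMounts:
#   - name: ca-bundle
#     mountPath: /etc/ssl/custom
#     readOnly: true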
frontend:
image:
# -- The Docker repository to pull the image from.
# @section -- Frontend parameters
repository: penpotapp/frontend
# -- The image tag to use.
# @section -- Frontend parameters
tag: 2.10.1
# -- The image pull policy to use.
# @section -- Frontend parameters
pullPolicy: IfNotPresent
# -- The number of replicas to deploy.
# @section -- Frontend parameters
replicaCount: 1
service:
# -- The service type to create.
# @section -- Frontend parameters
type: ClusterIP
# -- The service port to use.
# @section -- Frontend parameters
port: 8080
# -- Mapped annotations for the frontend service
# @section -- Frontend parameters
annotations: {}
# -- An optional map of annotations to be applied to the controller Deployment
# @section -- Frontend parameters
deploymentAnnotations: {}
# -- An optional map of labels to be applied to the controller Pods
# @section -- Frontend parameters
podLabels: {}
# -- An optional map of annotations to be applied to the controller Pods
# @section -- Frontend parameters
podAnnotations: {}
# -- Configure Pods Security Context. Check [the official doc](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod)
# @section -- Frontend parameters
podSecurityContext:
fsGroup: 1001
# -- Configure Container Security Context. Check [the official doc](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod)
# @section -- Frontend parameters
containerSecurityContext:
runAsUser: 1001
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: false
runAsNonRoot: true
# -- Affinity for Penpot pods assignment. Check [the official doc](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity)
# @section -- Frontend parameters
affinity: {}
# -- Node labels for Penpot pods assignment. Check [the official doc](https://kubernetes.io/docs/user-guide/node-selection/)
# @section -- Frontend parameters
nodeSelector: {}
# -- Tolerations for Penpot pods assignment. Check [the official doc](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
# @section -- Frontend parameters
tolerations: []
# -- Penpot frontend resource requests and limits. Check [the official doc](https://kubernetes.io/docs/user-guide/compute-resources/)
# @section -- Frontend parameters
resources:
# -- The resources limits for the Penpot frontend containers
# @section -- Frontend parameters
limits: {}
# -- The requested resources for the Penpot frontend containers
# @section -- Frontend parameters
requests: {}
# -- Configure Pod Disruption Budget for the frontend pods. Check [the official doc](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
# @section -- Frontend parameters
pdb:
# -- Enable Pod Disruption Budget for the frontend pods.
# @section -- Frontend parameters
enabled: false
# -- (int,string) The number or percentage of pods from that set that must still be available after the eviction (e.g.: 3, "10%").
# @section -- Frontend parameters
minAvailable:
# -- (int,string) The number or percentage of pods from that set that can be unavailable after the eviction (e.g.: 3, "10%").
# @section -- Frontend parameters
maxUnavailable:
# -- Specify any additional environment values you want to provide to the frontend container in the deployment according to the [specification](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables)
# @section -- Frontend parameters
extraEnvs: []
# -- Extra volumes to be made available. Check [the official doc](https://kubernetes.io/docs/concepts/storage/volumes/)
# @section -- Frontend parameters
volumes: []
# -- Extra volumes to be mounted in the container. Check [the official doc](https://kubernetes.io/docs/concepts/storage/volumes/)
# @section -- Frontend parameters
volumeMounts: []
exporter:
image:
# -- The Docker repository to pull the image from.
# @section -- Exporter parameters
repository: penpotapp/exporter
# -- The image tag to use.
# @section -- Exporter parameters
tag: 2.10.1
# -- The image pull policy to use.
# @section -- Exporter parameters
imagePullPolicy: IfNotPresent
# -- The number of replicas to deploy. Enable persistence.exporter if you use more than one replica.
# @section -- Exporter parameters
replicaCount: 1
service:
# -- The service type to create.
# @section -- Exporter parameters
type: ClusterIP
# -- The service port to use.
# @section -- Exporter parameters
port: 6061
# -- Mapped annotations for the exporter service
# @section -- Exporter parameters
annotations: {}
# -- An optional map of annotations to be applied to the controller Deployment
# @section -- Exporter parameters
deploymentAnnotations: {}
# -- An optional map of labels to be applied to the controller Pods
# @section -- Exporter parameters
podLabels: {}
# -- An optional map of annotations to be applied to the controller Pods
# @section -- Exporter parameters
podAnnotations: {}
# -- Configure Pods Security Context. Check [the official doc](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod)
# @section -- Exporter parameters
podSecurityContext:
fsGroup: 1001
# -- Configure Container Security Context. Check [the official doc](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod)
# @section -- Exporter parameters
containerSecurityContext:
runAsUser: 1001
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: false
runAsNonRoot: true
# -- Affinity for Penpot pods assignment. Check [the official doc](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity)
# @section -- Exporter parameters
affinity: {}
# -- Node labels for Penpot pods assignment. Check [the official doc](https://kubernetes.io/docs/user-guide/node-selection/)
# @section -- Exporter parameters
nodeSelector: {}
# -- Tolerations for Penpot pods assignment. Check [the official doc](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
# @section -- Exporter parameters
tolerations: []
# -- Penpot exporter resource requests and limits. Check [the official doc](https://kubernetes.io/docs/user-guide/compute-resources/)
# @section -- Exporter parameters
resources:
# -- The resources limits for the Penpot exporter containers
# @section -- Exporter parameters
limits: {}
# -- The requested resources for the Penpot exporter containers
# @section -- Exporter parameters
requests: {}
# -- Configure Pod Disruption Budget for the exporter pods. Check [the official doc](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
# @section -- Exporter parameters
pdb:
# -- Enable Pod Disruption Budget for the exporter pods.
# @section -- Exporter parameters
enabled: false
# -- (int,string) The number or percentage of pods from that set that must still be available after the eviction (e.g.: 3, "10%").
# @section -- Exporter parameters
minAvailable:
# -- (int,string) The number or percentage of pods from that set that can be unavailable after the eviction (e.g.: 3, "10%").
# @section -- Exporter parameters
maxUnavailable:
# -- Specify any additional environment values you want to provide to the exporter container in the deployment according to the [specification](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables)
# @section -- Exporter parameters
extraEnvs: []
# -- Extra volumes to be made available. Check [the official doc](https://kubernetes.io/docs/concepts/storage/volumes/)
# @section -- Exporter parameters
volumes: []
# -- Extra volumes to be mounted in the container. Check [the official doc](https://kubernetes.io/docs/concepts/storage/volumes/)
# @section -- Exporter parameters
volumeMounts: []
persistence:
assets:
# -- Enable assets persistence using Persistent Volume Claims.
# @section -- Persistence parameters
enabled: true
# -- Assets persistent Volume storage class.
# If defined, storageClassName: <storageClass>.
# If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner.
# @section -- Persistence parameters
storageClass: ""
# -- Assets persistent Volume size.
# @section -- Persistence parameters
size: 20Gi
# -- The name of an existing PVC to use for assets persistence.
# @section -- Persistence parameters
existingClaim: ""
# -- Assets persistent Volume access modes.
# @section -- Persistence parameters
accessModes:
- ReadWriteOnce
# -- Assets persistent Volume Claim annotations.
# @section -- Persistence parameters
annotations: {}
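# E.g. reusing a pre-provisioned claim instead of letting the chart create
# one (hypothetical claim name):
# assets:
#   enabled: true
#   existingClaim: "penpot-assets-pvc"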
exporter:
# -- Enable exporter persistence using Persistent Volume Claims. You have to enable it if exporter.replicaCount is greater than 1.
# @section -- Persistence parameters
enabled: false
# -- Exporter persistent Volume storage class. If empty, the provider's default provisioner is used.
# @section -- Persistence parameters
storageClass: ""
# -- Exporter persistent Volume size.
# @section -- Persistence parameters
size: 10Gi
# -- The name of an existing PVC to use for persistence.
# @section -- Persistence parameters
existingClaim: ""
# -- Exporter persistent Volume access modes.
# @section -- Persistence parameters
accessModes:
- ReadWriteOnce
# -- Exporter persistent Volume Claim annotations.
# @section -- Persistence parameters
annotations: {}
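# E.g. a sketch for running more than one exporter replica, which requires
# this shared volume (illustrative; a multi-replica setup presumably also
# needs a storage class capable of ReadWriteMany):
# exporter:
#   enabled: true
#   size: 10Gi
#   accessModes:
#     - ReadWriteMany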
ingress:
# -- Enable (frontend) Ingress Controller.
# @section -- Ingress parameters
enabled: true
# -- The Ingress className.
# @section -- Ingress parameters
className: "traefik"
# -- Mapped annotations for the ingress controller.
# E.g.
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# @section -- Ingress parameters
annotations:
cert-manager.io/cluster-issuer: letsencrypt
# -- Root path for every hosts.
# @section -- Ingress parameters
path: "/"
# -- Array style hosts for the (frontend) ingress controller.
# @section -- Ingress parameters
hosts:
# -- The default external hostname to access the Penpot app.
# @section -- Ingress parameters
- "penpot.dgse.cloud"
# -- Array style TLS secrets for the (frontend) ingress controller.
# E.g.
# tls:
# - secretName: penpot.example.com-tls
# hosts:
# - penpot.example.com
# @section -- Ingress parameters
tls:
- hosts:
- penpot.dgse.cloud
secretName: penpot-tls
route:
# -- Enable Openshift/OKD Route. Check [the official doc](https://docs.openshift.com/container-platform/4.16/networking/routes/route-configuration.html). When enabled, all fsGroup and runAsUser values must be set to null.
# @section -- Route parameters
enabled: false
# -- An optional map of annotations to be applied to the route.
# @section -- Route parameters
annotations: {}
# -- The default external hostname to access the Penpot app.
# @section -- Route parameters
host: penpot.example.com
# -- Define a path to use Path-based routes.
# @section -- Route parameters
path: null
# -- A Map with TLS configuration for the route.
# E.g.
# tls:
# terminationType: edge
# terminationPolicy: Redirect
# @section -- Route parameters
tls: {}
# -- Define the wildcard policy (None, Subdomain, ...)
# @section -- Route parameters
wildcardPolicy: None
# PostgreSQL configuration (Check for [more parameters here](https://artifacthub.io/packages/helm/bitnami/postgresql))
postgresql:
image:
repository: bitnamilegacy/postgresql
tag: "16.4.0-debian-12-r14"
global:
compatibility:
openshift:
# -- Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
# @section -- PostgreSQL Dependency parameters
adaptSecurityContext: "auto"
auth:
# -- Name for a custom user to create.
# @section -- PostgreSQL Dependency parameters
username: "penpot"
# -- Password for the custom user to create.
# @section -- PostgreSQL Dependency parameters
password: "penpot"
# -- Name for a custom database to create.
# @section -- PostgreSQL Dependency parameters
database: "penpot"
# Valkey configuration (Check for [more parameters here](https://artifacthub.io/packages/helm/bitnami/valkey))
valkey:
image:
repository: bitnamilegacy/valkey
tag: "8.1.3-debian-12-r3"
global:
compatibility:
openshift:
# -- Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
# @section -- Valkey Dependencie parameters
adaptSecurityContext: "auto"
auth:
# -- Whether to enable password authentication.
# @section -- Valkey Dependency parameters
enabled: false
# -- Valkey architecture. Allowed values: `standalone` or `replication`. Penpot only needs a standalone Valkey StatefulSet. Check for [more info here](https://artifacthub.io/packages/helm/bitnami/valkey#cluster-topologies)
# @section -- Valkey Dependency parameters
architecture: standalone
# Redis configuration (Check for [more parameters here](https://artifacthub.io/packages/helm/bitnami/redis))
# DEPRECATION WARNING: Since Penpot 2.8, Penpot has migrated from Redis to Valkey. Although migration is recommended, Penpot will continue to work seamlessly with compatible Redis versions.
redis:
image:
repository: bitnamilegacy/redis
tag: "7.2.5-debian-12-r4"
global:
compatibility:
openshift:
# -- Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
# @section -- Redis Dependency parameters
adaptSecurityContext: "auto"
auth:
# -- Whether to enable password authentication.
# @section -- Redis Dependency parameters
enabled: false
# -- Redis® architecture. Allowed values: `standalone` or `replication`. Penpot only needs a standalone Redis® StatefulSet. Check for [more info here](https://artifacthub.io/packages/helm/bitnami/redis#cluster-topologies)
# @section -- Redis Dependency parameters
architecture: standalone


@@ -34,4 +34,4 @@ spec:
       volumes:
         - name: pocket-id-data
           persistentVolumeClaim:
-            claimName: pocket-id-data
+            claimName: pocket-id-data


@@ -6,17 +6,17 @@ metadata:
   name: pocket-id-ingress
 spec:
   rules:
-    - host: auth.dgse.cloud
-      http:
-        paths:
-          - backend:
-              service:
-                name: pocket-id
-                port:
-                  number: 1411
-            path: /
-            pathType: Prefix
+    - host: auth.dgse.cloud
+      http:
+        paths:
+          - backend:
+              service:
+                name: pocket-id
+                port:
+                  number: 1411
+            path: /
+            pathType: Prefix
   tls:
-    - hosts:
-        - auth.dgse.cloud
-      secretName: letsencrypt
+    - hosts:
+        - auth.dgse.cloud
+      secretName: letsencrypt


@@ -10,4 +10,4 @@ spec:
     - ReadWriteOnce
   resources:
     requests:
-      storage: 1Gi
+      storage: 1Gi


@@ -12,4 +12,4 @@ spec:
   ports:
     - name: websecure
       port: 1411
-      targetPort: 1411
+      targetPort: 1411


@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
         - name: uptime-kuma
-          image: louislam/uptime-kuma:1.23.16
+          image: louislam/uptime-kuma:2.0.2
           imagePullPolicy: IfNotPresent
           ports:
             - containerPort: 3001


@@ -10,7 +10,7 @@ resources:
 helmCharts:
   - name: vault
     repo: https://helm.releases.hashicorp.com/
-    version: 0.30.0
+    version: 0.31.0
     releaseName: vault
     namespace: vault
     valuesFile: values.yaml


@@ -7,7 +7,7 @@ metadata:
 helmCharts:
   - name: vaultwarden
     repo: https://guerzon.github.io/vaultwarden/
-    version: 0.31.8
+    version: 0.34.4
     releaseName: vaultwarden
     namespace: vaultwarden
     valuesFile: values.yaml

mkdocs.yaml (new file)

@@ -0,0 +1,7 @@
+---
+site_name: "Nextcloud"
+site_description: "Self-hosted file hosting service"
+nav:
+  - Introduction: index.md
+plugins:
+  - techdocs-core

renovate.json (new file)

@@ -0,0 +1,28 @@
+{
+  "extends": [
+    "config:base"
+  ],
+  "labels": ["Kind/Security"],
+  "major": {
+    "addLabels": ["Priority/High"]
+  },
+  "minor": {
+    "addLabels": ["Priority/Medium"]
+  },
+  "patch": {
+    "addLabels": ["Priority/Low"]
+  },
+  "digest": {
+    "addLabels": ["Priority/Low"]
+  },
+  "packageRules": [
+    {
+      "updateTypes": ["minor", "patch", "pin", "digest"],
+      "automerge": false
+    },
+    {
+      "matchFiles": ["**/values.yaml", "**/values/*.yaml"],
+      "enabled": false
+    }
+  ]
+}