154 Commits

Author SHA1 Message Date
Renovate Bot
ca5e42aacc chore(deps): update dependency argoproj/argo-cd to v3.3.0 2026-02-02 09:02:19 +00:00
bbfc8bbb27 Merge pull request 'chore(deps): update dependency argoproj/argo-cd to v3.2.6' (#35) from renovate/argoproj-argo-cd-3.x into main
Reviewed-on: #35
2026-01-24 16:00:25 +00:00
a5f0a6b081 Merge pull request 'chore(deps): update helm release penpot to v0.32.0' (#37) from renovate/penpot-0.x into main
Reviewed-on: #37
2026-01-24 15:58:56 +00:00
1bf0be751b Merge pull request 'chore(deps): update helm release gitea to v12.5.0' (#39) from renovate/gitea-12.x into main
Reviewed-on: #39
2026-01-24 15:55:26 +00:00
91ecd3b4c1 Update clusters/artemis/apps/kustomization.yaml 2026-01-24 15:46:50 +00:00
f5b3b5efe5 Update manifests/artemis/immich/volumeClaims.yaml 2026-01-24 15:42:58 +00:00
657c26e122 Update manifests/artemis/immich/values.yaml 2026-01-24 15:41:53 +00:00
49aa5f32f0 Update clusters/artemis/apps/external-secrets/application.yaml 2026-01-24 15:36:11 +00:00
a186c62acd Update manifests/artemis/mailu/kustomization.yaml 2026-01-24 15:14:42 +00:00
3818186562 Update manifests/artemis/mailu/kustomization.yaml 2026-01-24 15:12:53 +00:00
e2517be2b6 Update manifests/artemis/gitea/values.yaml 2026-01-24 15:07:09 +00:00
Renovate Bot
db123ab04f chore(deps): update helm release gitea to v12.5.0 2026-01-24 15:06:33 +00:00
Renovate Bot
2a206af9f6 chore(deps): update dependency argoproj/argo-cd to v3.2.6 2026-01-23 00:02:35 +00:00
Renovate Bot
1f12d004d7 chore(deps): update helm release penpot to v0.32.0 2026-01-07 21:39:10 +00:00
a9dc9a5cb7 Merge pull request 'chore(deps): update helm release external-secrets to v1' (#28) from renovate/external-secrets-1.x into main
Reviewed-on: #28
2025-12-22 09:36:51 +00:00
bbe1014a37 Merge pull request 'chore(deps): update actions/checkout action to v6' (#31) from renovate/actions-checkout-6.x into main
Reviewed-on: #31
2025-12-22 09:35:16 +00:00
8ffb57e1b7 Merge pull request 'chore(deps): update helm release penpot to v0.30.0' (#30) from renovate/penpot-0.x into main
Reviewed-on: #30
2025-12-22 09:32:41 +00:00
09961b68f9 Merge pull request 'chore(deps): update helm release immich to v0.10.3' (#29) from renovate/immich-0.x into main
Reviewed-on: #29
2025-12-22 09:03:04 +00:00
1352394ca5 Merge pull request 'chore(deps): update helm release mailu to v2.6.3' (#25) from renovate/mailu-2.x into main
Reviewed-on: #25
2025-12-22 08:53:36 +00:00
4fd96cf953 Merge pull request 'chore(deps): update dependency argoproj/argo-cd to v3.2.2' (#33) from renovate/argoproj-argo-cd-3.x into main
Reviewed-on: #33
2025-12-22 08:51:20 +00:00
Renovate Bot
680e0822a6 chore(deps): update helm release external-secrets to v1 2025-12-22 08:07:23 +00:00
Renovate Bot
3c0f9a713d chore(deps): update helm release mailu to v2.6.3 2025-12-22 08:07:11 +00:00
Renovate Bot
f9e156e522 chore(deps): update dependency argoproj/argo-cd to v3.2.2 2025-12-22 08:07:01 +00:00
Renovate Bot
5861f677a4 chore(deps): update actions/checkout action to v6 2025-11-21 00:01:59 +00:00
Renovate Bot
aa92ad14ef chore(deps): update helm release penpot to v0.30.0 2025-11-15 00:02:32 +00:00
Renovate Bot
fd2df98297 chore(deps): update helm release immich to v0.10.3 2025-11-15 00:02:27 +00:00
Daniël Groothuis
4a17940c9f chore(osx): Added OSX container 2025-11-11 17:37:01 +01:00
Daniël Groothuis
01c5b31cbb chore(osx): Added OSX container 2025-11-11 17:32:41 +01:00
Daniël Groothuis
83a0e6b8ee chore(mailu): Reverted rate limit 2025-11-10 15:10:25 +01:00
Daniël Groothuis
0e40fc2ca4 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 22:49:02 +01:00
Daniël Groothuis
36548f1ec5 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 22:47:10 +01:00
Daniël Groothuis
c8b5e32163 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 22:41:27 +01:00
Daniël Groothuis
14b0561828 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 22:40:44 +01:00
Daniël Groothuis
de8b6e0001 Merge remote-tracking branch 'origin/main' 2025-11-06 22:32:25 +01:00
Daniël Groothuis
e99d6cd772 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 22:32:12 +01:00
9201b1ddc1 Merge pull request 'chore(deps): update helm release nextcloud to v8.5.2' (#27) from renovate/nextcloud-8.x into main
Reviewed-on: #27
2025-11-06 21:08:08 +00:00
Daniël Groothuis
2b31072b1d chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 21:54:13 +01:00
Daniël Groothuis
3c9c55b4d3 chore(backstage): Added SA for backstage to ArgoCD 2025-11-06 21:14:36 +01:00
Renovate Bot
afae9ae15b chore(deps): update helm release nextcloud to v8.5.2 2025-11-06 00:02:42 +00:00
Daniël Groothuis
0eda7b4ad2 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:56:30 +01:00
Daniël Groothuis
e3587553d7 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:50:48 +01:00
Daniël Groothuis
4c5dd7ae3d chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:43:31 +01:00
Daniël Groothuis
ee3048f478 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:37:49 +01:00
Daniël Groothuis
179eb7a6dc chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:37:01 +01:00
Daniël Groothuis
b1103e3136 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:36:25 +01:00
Daniël Groothuis
2cedf7f2b9 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:34:21 +01:00
Daniël Groothuis
c1a59cb710 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:30:04 +01:00
Daniël Groothuis
845ba4ead0 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:28:46 +01:00
Daniël Groothuis
001138f965 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:28:03 +01:00
Daniël Groothuis
5391a06e24 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:24:09 +01:00
Daniël Groothuis
e7b62d426c chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:17:08 +01:00
Daniël Groothuis
a64bdf2ed0 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:14:17 +01:00
Daniël Groothuis
4f5acfc9a6 chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:13:31 +01:00
Daniël Groothuis
c4754ea41a chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 21:00:46 +01:00
Daniël Groothuis
134581bcce chore(backstage): Added SA for backstage to ArgoCD 2025-11-05 20:54:00 +01:00
Daniël Groothuis
3f3d99e8d0 chore(backstage): Updated catalogs 2025-11-05 20:35:40 +01:00
Daniël Groothuis
5e57066ccb chore(backstage): Updated catalogs 2025-11-05 20:34:39 +01:00
Daniël Groothuis
3e13ddb1cb feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 16:09:37 +01:00
Daniël Groothuis
16fcec670e feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:55:08 +01:00
Daniël Groothuis
c16d485a54 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:52:37 +01:00
Daniël Groothuis
cf69895b68 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:44:31 +01:00
Daniël Groothuis
e85a70957f feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:34:47 +01:00
Daniël Groothuis
82e626be39 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:29:50 +01:00
Daniël Groothuis
aa4425cf19 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:26:49 +01:00
Daniël Groothuis
06b192f780 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-05 15:19:14 +01:00
7c2a40f2ac Merge pull request 'chore(deps): update dependency argoproj/argo-cd to v3.2.0' (#26) from renovate/argoproj-argo-cd-3.x into main
Reviewed-on: #26
2025-11-05 08:08:38 +00:00
Renovate Bot
c3068f9693 chore(deps): update dependency argoproj/argo-cd to v3.2.0 2025-11-05 00:02:07 +00:00
f76ec5a53b Update renovate.json 2025-11-03 14:27:12 +00:00
d28610a28a revert 9e2961f09c
revert Merge pull request 'chore(deps): update helm release mailu to v2.5.1' (#14) from renovate/mailu-2.x into main

Reviewed-on: #14
2025-11-03 14:23:02 +00:00
9e2961f09c Merge pull request 'chore(deps): update helm release mailu to v2.5.1' (#14) from renovate/mailu-2.x into main
Reviewed-on: #14
2025-11-03 14:21:31 +00:00
a1fba4a308 Merge pull request 'chore(deps): update helm release vaultwarden to v0.34.4' (#16) from renovate/vaultwarden-0.x into main
Reviewed-on: #16
2025-11-03 10:40:14 +00:00
0fbfd6f5f0 Merge pull request 'chore(deps): update helm release vault to v0.31.0' (#15) from renovate/vault-0.x into main
Reviewed-on: #15
2025-11-03 10:17:25 +00:00
8e550f98c5 Merge pull request 'chore(deps): update dependency argoproj/argo-cd to v3' (#24) from renovate/argoproj-argo-cd-3.x into main
Reviewed-on: #24
2025-11-03 10:14:32 +00:00
Renovate Bot
75ab95d9b1 chore(deps): update dependency argoproj/argo-cd to v3 2025-11-03 10:13:36 +00:00
7159dc0b20 Merge pull request 'chore(deps): update dependency argoproj/argo-cd to v2.14.20' (#21) from renovate/argoproj-argo-cd-2.x into main
Reviewed-on: #21
2025-11-03 10:04:50 +00:00
5e7c1acbef Update manifests/artemis/nextcloud/values.yaml 2025-11-02 18:32:39 +00:00
f735ec9b22 Update manifests/artemis/uptime-kuma/deployment.yaml 2025-11-02 17:50:40 +00:00
cea23020dc Merge pull request 'chore(deps): update actions/checkout action to v5' (#22) from renovate/actions-checkout-5.x into main
Reviewed-on: #22
2025-11-02 17:49:29 +00:00
Renovate Bot
9b1fc474ad chore(deps): update actions/checkout action to v5 2025-11-02 16:30:41 +00:00
Renovate Bot
8cd8dbc54d chore(deps): update dependency argoproj/argo-cd to v2.14.20 2025-11-02 16:30:31 +00:00
7b141bb89b Update renovate.json 2025-11-02 16:17:18 +00:00
4d523486b5 Update manifests/artemis/gitea/values.yaml 2025-11-02 16:11:57 +00:00
21bb310576 Update manifests/artemis/gitea/values.yaml 2025-11-02 16:09:45 +00:00
820c6703cc Update manifests/artemis/gitea/values.yaml 2025-11-02 16:03:30 +00:00
Daniël Groothuis
a217a2e5fc feat(nextcloud): Added first draft for nextcloud deployment 2025-11-01 23:27:59 +01:00
Daniël Groothuis
357d494073 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-01 23:17:23 +01:00
Daniël Groothuis
d15ff6c2c0 feat(nextcloud): Added first draft for nextcloud deployment 2025-11-01 23:16:54 +01:00
Daniël Groothuis
a5a8c0912a feat(nextcloud): Added first draft for nextcloud deployment 2025-11-01 23:15:54 +01:00
Daniël Groothuis
18e368be40 feat(owncloud): Added first draft for owncloud deployment 2025-11-01 22:57:21 +01:00
Daniël Groothuis
eec40a680c feat(owncloud): Added first draft for owncloud deployment 2025-11-01 22:52:41 +01:00
Daniël Groothuis
c20e5f2d34 feat(owncloud): Added first draft for owncloud deployment 2025-11-01 20:25:59 +01:00
Daniël Groothuis
9cac63a132 feat(owncloud): Added first draft for owncloud deployment 2025-11-01 20:23:42 +01:00
Daniël Groothuis
512186fa1c feat(owncloud): Added first draft for owncloud deployment 2025-11-01 20:19:26 +01:00
Daniël Groothuis
d0574f0a9f feat(owncloud): Added first draft for owncloud deployment 2025-11-01 20:17:32 +01:00
4e56529d45 Update manifests/artemis/gitea/values.yaml
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-11-01 15:52:44 +00:00
4744de9f44 Update manifests/artemis/gitea/values.yaml
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m0s
2025-11-01 15:34:17 +00:00
Renovate Bot
28b69c85eb chore(deps): update helm release vaultwarden to v0.34.4
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m19s
2025-11-01 15:24:24 +00:00
Renovate Bot
87d63496a7 chore(deps): update helm release vault to v0.31.0
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 58s
2025-11-01 15:17:31 +00:00
Renovate Bot
acafcd1841 chore(deps): update helm release mailu to v2.5.1 2025-11-01 15:17:27 +00:00
6c952fc9c0 Update renovate.json
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 56s
2025-11-01 15:16:01 +00:00
a5a80e8949 Update renovate.json
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 57s
2025-11-01 15:07:15 +00:00
e2eed7bdaa Merge pull request 'chore(deps): update helm release external-secrets to v0.20.4' (#12) from renovate/external-secrets-0.x into main
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m13s
Reviewed-on: #12
2025-11-01 15:02:44 +00:00
Renovate Bot
f4c7340216 chore(deps): update helm release external-secrets to v0.20.4
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m11s
2025-11-01 00:01:42 +00:00
600999a08f Merge pull request 'chore(deps): update helm release cloudnative-pg to v0.26.1' (#7) from renovate/cloudnative-pg-0.x into main
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 58s
Reviewed-on: #7
2025-10-31 17:07:58 +00:00
13407630d5 Update renovate.json
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m3s
2025-10-31 17:01:23 +00:00
3026b1ef33 Update renovate.json
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 56s
2025-10-31 16:56:29 +00:00
Renovate Bot
526b8073ba chore(deps): update helm release cloudnative-pg to v0.26.1
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 59s
2025-10-31 16:48:50 +00:00
8044148153 Add renovate.json
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-31 16:43:06 +00:00
Daniël Groothuis
fbb9dc6803 feat(digital-garden): Added digital garden deployment
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 1m37s
2025-10-31 17:15:28 +01:00
Daniël Groothuis
2b52a58b7a feat(digital-garden): Added digital garden deployment
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-31 17:15:00 +01:00
Daniël Groothuis
146f0aba8b feat(digital-garden): Added digital garden deployment
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-31 17:12:27 +01:00
Daniël Groothuis
70c6c62d90 feat(digital-garden): Added digital garden deployment 2025-10-31 17:03:21 +01:00
Daniël Groothuis
630efbeaaf feat(digital-garden): Added digital garden deployment 2025-10-31 17:01:55 +01:00
Daniël Groothuis
b3753b3400 chore(kener): Removed Kener as it doesn't bring any value 2025-10-30 11:29:34 +01:00
Daniël Groothuis
f0c1a554c8 chore(kener): Added kustomize file 2025-10-30 10:54:00 +01:00
Daniël Groothuis
5a28daec87 chore(kener): Added kustomize file 2025-10-30 10:48:39 +01:00
Daniël Groothuis
c47e7ed3d0 chore(kener): First implementation of Kener 2025-10-30 10:47:16 +01:00
Daniël Groothuis
b8e858f21f chore(backstage): Removed backstage 2025-10-30 10:08:48 +01:00
Daniël Groothuis
1db12d6e31 chore(backstage): Added ingress for backstage 2025-10-29 09:33:12 +01:00
Daniël Groothuis
2db587a457 chore(backstage): Added database and secrets for backstage 2025-10-29 08:59:57 +01:00
Daniël Groothuis
ad65e98c58 chore(backstage): Added database and secrets for backstage 2025-10-29 08:39:07 +01:00
Daniël Groothuis
3562ec6e05 chore(backstage): Added database and secrets for backstage 2025-10-29 08:27:26 +01:00
Daniël Groothuis
a22925d95e chore(backstage): Added database and secrets for backstage 2025-10-28 21:55:29 +01:00
Daniël Groothuis
718c581ccd chore(backstage): Added database and secrets for backstage 2025-10-28 21:55:13 +01:00
Daniël Groothuis
9f2393a478 chore(backstage): Added database and secrets for backstage 2025-10-28 21:52:46 +01:00
Daniël Groothuis
d16efcde3c chore(backstage): Added database and secrets for backstage 2025-10-28 21:45:36 +01:00
Daniël Groothuis
8603742901 chore(backstage): Added database and secrets for backstage 2025-10-28 21:34:13 +01:00
Daniël Groothuis
f6e4f44984 chore(gitea-runners): Updated values for Dind support 2025-10-28 20:45:16 +01:00
Daniël Groothuis
c8bb379ffe chore(gitea-runners): Updated values for Dind support 2025-10-28 20:06:48 +01:00
Daniël Groothuis
4ddfe4e8b7 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:34:50 +01:00
Daniël Groothuis
a4996f29b2 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 16s
2025-10-28 19:29:03 +01:00
Daniël Groothuis
cbe66f6fd1 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 1m18s
2025-10-28 19:20:05 +01:00
Daniël Groothuis
4ddb948f6b chore(gitea-runners): Updated values for Dind support
Some checks are pending
Validate K8s manifests / validate-manifests (push) Waiting to run
2025-10-28 19:19:21 +01:00
Daniël Groothuis
8d4331a0c6 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:06:24 +01:00
Daniël Groothuis
0f0171ad32 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:05:19 +01:00
Daniël Groothuis
984141b037 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:03:47 +01:00
Daniël Groothuis
9380bc3b04 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:02:09 +01:00
Daniël Groothuis
b4b9d0427b chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 19:00:34 +01:00
Daniël Groothuis
239ed7b214 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:55:35 +01:00
Daniël Groothuis
6b5efb494b chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:53:42 +01:00
Daniël Groothuis
bbe5488871 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:51:23 +01:00
Daniël Groothuis
13364cd31e chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:48:30 +01:00
Daniël Groothuis
24fef18693 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:47:30 +01:00
Daniël Groothuis
ee65613844 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:41:42 +01:00
Daniël Groothuis
a6fee6c9c9 chore(gitea-runners): Updated values for Dind support
Some checks are pending
Validate K8s manifests / validate-manifests (push) Waiting to run
2025-10-28 18:39:01 +01:00
Daniël Groothuis
8a7008aca6 chore(gitea-runners): Updated values for Dind support
All checks were successful
Validate K8s manifests / validate-manifests (push) Successful in 2m54s
2025-10-28 18:16:31 +01:00
Daniël Groothuis
8fa081ad5a chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:12:40 +01:00
Daniël Groothuis
724cd8c964 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:09:49 +01:00
Daniël Groothuis
a4e9e566bf chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:06:59 +01:00
Daniël Groothuis
765afc3bfb chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 18:03:36 +01:00
Daniël Groothuis
258ba64bcc chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 1m36s
2025-10-28 17:58:33 +01:00
Daniël Groothuis
5e64d08f93 chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 1m17s
2025-10-28 16:35:06 +01:00
Daniël Groothuis
bdefefd39d chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Has been cancelled
2025-10-28 16:31:06 +01:00
Daniël Groothuis
69d83f786f chore(gitea-runners): Updated values for Dind support
Some checks failed
Validate K8s manifests / validate-manifests (push) Failing after 37s
2025-10-28 16:25:15 +01:00
67 changed files with 2166 additions and 84 deletions

View File

@@ -7,7 +7,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 📥Checkout
uses: actions/checkout@v3
uses: actions/checkout@v6
- name: 🚀Validating the manifests
uses: frenck/action-yamllint@v1.5.0
with:

26
catalog-info.yaml Normal file
View File

@@ -0,0 +1,26 @@
---
apiVersion: backstage.io/v1alpha1
kind: Domain
metadata:
name: dgse-cloud
description: "Infrastructure for DGSE Cloud services."
spec:
owner: dgse-cloud
---
apiVersion: backstage.io/v1alpha1
kind: Location
metadata:
name: artemis-cluster
description: A collection of all entities running on the Artemis cluster
spec:
targets:
- ./clusters/artemis/catalog.yaml
- ./clusters/artemis/apps/argocd/catalog.yaml
- ./clusters/artemis/apps/cnpg/catalog.yaml
- ./clusters/artemis/apps/digital-garden/catalog.yaml
- ./clusters/artemis/apps/external-secrets/catalog.yaml
- ./clusters/artemis/apps/gitea/catalog.yaml
- ./clusters/artemis/apps/gitea-runners/catalog.yaml
- ./clusters/artemis/apps/immich/catalog.yaml
- ./clusters/artemis/apps/mailu/catalog.yaml
- ./clusters/artemis/apps/nextcloud/catalog.yaml

View File

@@ -0,0 +1,18 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: argocd
description: "ArgoCD is a declarative, GitOps continuous delivery tool for Kubernetes."
links:
- url: https://cd.dgse.cloud
title: Dashboard
icon: dashboard
annotations:
argocd/app-name: argocd
argocd/app-namespace: argocd
spec:
type: service
lifecycle: production
owner: owners
system: artemis-cluster

View File

@@ -0,0 +1,14 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: cnpg
description: "CloudNativePG is a Kubernetes operator that manages PostgreSQL databases in a cloud-native way."
annotations:
argocd/app-name: cnpg
argocd/app-namespace: cnpg-system
spec:
type: service
lifecycle: production
owner: owners
system: artemis-cluster

View File

@@ -0,0 +1,17 @@
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: digital-garden
spec:
description: My digital garden
sourceRepos:
- '*'
sourceNamespaces:
- '*'
destinations:
- namespace: 'digital-garden'
server: '*'
clusterResourceWhitelist:
- group: '*'
kind: '*'

View File

@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: digital-garden
namespace: digital-garden
labels:
platform.dgse.cloud/cluster: artemis
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: digital-garden
source:
repoURL: 'https://git.dgse.cloud/DGSE/kubernetes.git'
path: manifests/artemis/digital-garden
targetRevision: main
destination:
namespace: digital-garden
name: in-cluster
syncPolicy:
syncOptions:
- CreateNamespace=true
automated:
prune: true
selfHeal: true

View File

@@ -0,0 +1,18 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: digital-garden
description: "A collection of notes, essays, and other writing that is published on the web."
links:
- url: https://groothuis.io
title: Public Website
icon: web
annotations:
argocd/app-name: digital-garden
argocd/app-namespace: digital-garden
spec:
type: website
lifecycle: production
owner: owners
system: artemis-cluster

View File

@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- application.yaml

View File

@@ -18,6 +18,7 @@ spec:
name: in-cluster
syncPolicy:
syncOptions:
- ServerSideApply=true
- CreateNamespace=true
automated:
prune: true

View File

@@ -0,0 +1,14 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: external-secrets
description: "Vault Secrets Operator to sync secrets from Vault to Kubernetes"
annotations:
argocd/app-name: external-secrets
argocd/app-namespace: external-secrets
spec:
type: service
lifecycle: production
owner: owners
system: artemis-cluster

View File

@@ -0,0 +1,16 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: gitea-runners
description: "Gitea Action Runners"
annotations:
argocd/app-name: gitea-runners
argocd/app-namespace: gitea-runners
spec:
type: service
lifecycle: production
owner: owners
system: artemis-cluster
dependencyOf:
- component:gitea

View File

@@ -0,0 +1,20 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: gitea
description: "Self-hosted Git Server"
links:
- url: https://git.dgse.cloud
title: Git Server
icon: web
annotations:
argocd/app-name: gitea
argocd/app-namespace: gitea
spec:
type: service
lifecycle: production
owner: owners
system: artemis-cluster
dependsOn:
- Component:gitea-runners

View File

@@ -0,0 +1,18 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: immich
description: "Self-hosted photo and video backup solution directly from your mobile phone."
links:
- url: https://photos.dgse.cloud
title: Git Server
icon: web
annotations:
argocd/app-name: immich
argocd/app-namespace: immich
spec:
type: service
lifecycle: production
owner: owners
system: artemis-cluster

View File

@@ -13,6 +13,9 @@ resources:
- pocket-id
- vaultwarden
- mailu
- ntfy
# - ntfy
- penpot
- immich
- digital-garden
# - nextcloud
# - osx

View File

@@ -0,0 +1,18 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: mailu
description: "Self-hosted mail server"
links:
- url: https://mail.dgse.cloud
title: Mail Server
icon: web
annotations:
argocd/app-name: mailu
argocd/app-namespace: mailu
spec:
type: service
lifecycle: production
owner: owners
system: artemis-cluster

View File

@@ -0,0 +1,17 @@
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: nextcloud
spec:
description: Self Hosted Cloud
sourceRepos:
- '*'
sourceNamespaces:
- '*'
destinations:
- namespace: 'nextcloud'
server: '*'
clusterResourceWhitelist:
- group: '*'
kind: '*'

View File

@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: nextcloud
namespace: nextcloud
labels:
platform.dgse.cloud/cluster: artemis
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: nextcloud
source:
repoURL: 'https://git.dgse.cloud/DGSE/kubernetes.git'
path: manifests/artemis/nextcloud
targetRevision: main
destination:
namespace: nextcloud
name: in-cluster
syncPolicy:
syncOptions:
- CreateNamespace=true
automated:
prune: true
selfHeal: true

View File

@@ -0,0 +1,19 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: nextcloud
description: "Self-hosted photo and video backup solution directly from your mobile phone."
links:
- url: https://nextcloud.dgse.cloud
title: Git Server
icon: web
annotations:
argocd/app-name: nextcloud
argocd/app-namespace: nextcloud
backstage.io/techdocs-ref: dir:.
spec:
type: service
lifecycle: production
owner: owners
system: artemis-cluster

View File

@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- application.yaml

View File

@@ -0,0 +1,17 @@
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: osx
spec:
description: osx container to proxy shortcuts
sourceRepos:
- '*'
sourceNamespaces:
- '*'
destinations:
- namespace: 'osx'
server: '*'
clusterResourceWhitelist:
- group: '*'
kind: '*'

View File

@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: osx
namespace: osx
labels:
platform.dgse.cloud/cluster: artemis
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: osx
source:
repoURL: 'https://git.dgse.cloud/DGSE/kubernetes.git'
path: manifests/artemis/osx
targetRevision: main
destination:
namespace: osx
name: in-cluster
syncPolicy:
syncOptions:
- CreateNamespace=true
automated:
prune: true
selfHeal: true

View File

@@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- application.yaml

View File

@@ -0,0 +1,9 @@
---
apiVersion: backstage.io/v1alpha1
kind: System
metadata:
name: artemis-cluster
description: "The Artemis cluster is a Kubernetes cluster hosting all infra for DGSE Cloud."
spec:
owner: owners
domain: dgse-cloud

1
docs/index.md Normal file
View File

@@ -0,0 +1 @@
# NextCloud

View File

@@ -0,0 +1,13 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: backstage-argocd-server-access
subjects:
- kind: ServiceAccount
name: backstage-argocd
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argocd-server

View File

@@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: backstage-argocd
namespace: argocd

View File

@@ -5,9 +5,11 @@ metadata:
name: argocd
resources:
- github.com/argoproj/argo-cd/manifests/cluster-install?ref=v2.14.15
- github.com/argoproj/argo-cd/manifests/cluster-install?ref=v3.3.0
- ingressRoute.yaml
- certificate.yaml
- backstage-sa.yaml
- backstage-rbac.yaml
patches:
- target:
@@ -45,7 +47,9 @@ patches:
metadata:
name: argocd-cm
data:
admin.enabled: "false"
accounts.admin: "apiKey, login"
accounts.backstage.enabled: "true"
admin.enabled: "true"
kustomize.buildOptions: --enable-helm
url: https://cd.dgse.cloud
oidc.config: |
@@ -118,3 +122,23 @@ patches:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argocd-server
# Map Backstage SA to Argo CD role:admin (full Argo CD permissions)
- target:
kind: ConfigMap
name: argocd-rbac-cm
patch: |-
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-rbac-cm
namespace: argocd
data:
policy.csv: |
g, argocd_admins, role:admin
p, argocd_users, applications, list, *, allow
p, argocd_users, applications, sync, *, allow
p, argocd_users, applications, refresh, *, allow
p, argocd_users, applications, get, *, allow
g, system:serviceaccount:argocd:backstage-argocd, role:admin
p, system:serviceaccount:argocd:backstage-argocd, applications, *, */*, allow

View File

@@ -7,6 +7,6 @@ metadata:
helmCharts:
- name: cloudnative-pg
repo: https://cloudnative-pg.github.io/charts
version: 0.26.0
version: 0.26.1
releaseName: cnpg
namespace: cnpg-system

View File

@@ -0,0 +1,22 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: digital-garden
spec:
replicas: 1
selector:
matchLabels:
app: digital-garden
template:
metadata:
labels:
app: digital-garden
spec:
containers:
- name: digital-garden
image: 'git.dgse.cloud/dgroothuis/garden:latest'
ports:
- containerPort: 8080
imagePullSecrets:
- name: regcred

View File

@@ -0,0 +1,24 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: digital-garden-ingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt
spec:
rules:
- host: www.groothuis.io
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: digital-garden-svc
port:
number: 8080
tls:
- hosts:
- groothuis.io
- www.groothuis.io
secretName: letsencrypt

View File

@@ -0,0 +1,9 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- service.yaml
- ingress.yaml
- www-redirect.yaml
- deployment.yaml

View File

@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: Service
metadata:
name: digital-garden-svc
spec:
selector:
app: digital-garden
ports:
- protocol: TCP
port: 8080
targetPort: 8080

View File

@@ -0,0 +1,43 @@
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: digital-garden-www-redirect
spec:
entryPoints:
- websecure
routes:
- kind: Rule
match: Host(`groothuis.io`)
middlewares:
- name: redirect-to-www
services:
- kind: TraefikService
name: noop@internal
- kind: Rule
match: Host(`danielgroothuis.com`) || Host(`www.danielgroothuis.com`)
middlewares:
- name: redirect-to-groothuis-io
services:
- kind: TraefikService
name: noop@internal
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: redirect-to-www
spec:
redirectRegex:
permanent: true
regex: "^https?://(?:www\\.)?(.+)"
replacement: "https://www.${1}"
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: redirect-to-groothuis-io
spec:
redirectRegex:
permanent: false
regex: "^https?://(?:www\\.)?(.+)"
replacement: "https://www.groothuis.io"

View File

@@ -10,6 +10,6 @@ resources:
helmCharts:
- name: external-secrets
repo: https://charts.external-secrets.io/
version: 0.18.1
version: 1.2.0
releaseName: external-secrets
namespace: external-secrets

View File

@@ -4,10 +4,7 @@ kind: Kustomization
metadata:
name: gitea-runners
helmCharts:
- name: actions
repo: https://dl.gitea.com/charts/
version: 0.0.1
releaseName: gitea-actions
namespace: gitea-runners
valuesFile: values.yaml
resources:
- runner-artemis-1.yaml
- runner-artemis-2.yaml
- runner-artemis-3.yaml

View File

@@ -0,0 +1,53 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitea-runner-data-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: local-path
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitea-act-runner-dind
spec:
replicas: 1
selector:
matchLabels:
app: gitea-act-runner-dind
template:
metadata:
labels:
app: gitea-act-runner-dind
spec:
containers:
- name: runner
image: vegardit/gitea-act-runner:dind-latest
securityContext:
privileged: true
env:
- name: GITEA_RUNNER_NAME
value: "artemis-1"
- name: GITEA_INSTANCE_URL
value: "https://git.dgse.cloud"
- name: GITEA_RUNNER_REGISTRATION_TOKEN
valueFrom:
secretKeyRef:
name: gitea-runner-token
key: token
volumeMounts:
- name: data
mountPath: /data
- name: docker-storage
mountPath: /var/lib/docker
volumes:
- name: data
persistentVolumeClaim:
claimName: gitea-runner-data-pvc
- name: docker-storage
emptyDir: {}

View File

@@ -0,0 +1,53 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitea-runner-data-pvc-2
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: local-path
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitea-act-runner-dind-2
spec:
replicas: 1
selector:
matchLabels:
app: gitea-act-runner-dind-2
template:
metadata:
labels:
app: gitea-act-runner-dind-2
spec:
containers:
- name: runner
image: vegardit/gitea-act-runner:dind-latest
securityContext:
privileged: true
env:
- name: GITEA_RUNNER_NAME
value: "artemis-2"
- name: GITEA_INSTANCE_URL
value: "https://git.dgse.cloud"
- name: GITEA_RUNNER_REGISTRATION_TOKEN
valueFrom:
secretKeyRef:
name: gitea-runner-token
key: token
volumeMounts:
- name: data
mountPath: /data
- name: docker-storage
mountPath: /var/lib/docker
volumes:
- name: data
persistentVolumeClaim:
claimName: gitea-runner-data-pvc-2
- name: docker-storage
emptyDir: {}

View File

@@ -0,0 +1,53 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitea-runner-data-pvc-3
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: local-path
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitea-act-runner-dind-3
spec:
replicas: 1
selector:
matchLabels:
app: gitea-act-runner-dind-3
template:
metadata:
labels:
app: gitea-act-runner-dind-3
spec:
containers:
- name: runner
image: vegardit/gitea-act-runner:dind-latest
securityContext:
privileged: true
env:
- name: GITEA_RUNNER_NAME
value: "artemis-3"
- name: GITEA_INSTANCE_URL
value: "https://git.dgse.cloud"
- name: GITEA_RUNNER_REGISTRATION_TOKEN
valueFrom:
secretKeyRef:
name: gitea-runner-token
key: token
volumeMounts:
- name: data
mountPath: /data
- name: docker-storage
mountPath: /var/lib/docker
volumes:
- name: data
persistentVolumeClaim:
claimName: gitea-runner-data-pvc-3
- name: docker-storage
emptyDir: {}

View File

@@ -1,96 +1,101 @@
# Configure Gitea Actions
## @section Gitea Actions
#
## @param enabled Create an act runner StatefulSet.
## @param init.image.repository The image used for the init containers
## @param init.image.tag The image tag used for the init containers
## @param statefulset.replicas the amount of (replica) runner pods deployed
## @param statefulset.timezone is the timezone that will be set in the act_runner image
## @param statefulset.annotations Act runner annotations
## @param statefulset.labels Act runner labels
## @param statefulset.resources Act runner resources
## @param statefulset.nodeSelector NodeSelector for the statefulset
## @param statefulset.tolerations Tolerations for the statefulset
## @param statefulset.affinity Affinity for the statefulset
## @param statefulset.extraVolumes Extra volumes for the statefulset
## @param statefulset.actRunner.repository The Gitea act runner image
## @param statefulset.actRunner.tag The Gitea act runner tag
## @param statefulset.actRunner.pullPolicy The Gitea act runner pullPolicy
## @param statefulset.actRunner.extraVolumeMounts Allows mounting extra volumes in the act runner container
## @param statefulset.actRunner.config [default: Too complex. See values.yaml] Act runner custom configuration. See [Act Runner documentation](https://docs.gitea.com/usage/actions/act-runner#configuration) for details.
## @param statefulset.dind.repository The Docker-in-Docker image
## @param statefulset.dind.tag The Docker-in-Docker image tag
## @param statefulset.dind.pullPolicy The Docker-in-Docker pullPolicy
## @param statefulset.dind.extraVolumeMounts Allows mounting extra volumes in the Docker-in-Docker container
## @param statefulset.dind.extraEnvs Allows adding custom environment variables, such as `DOCKER_IPTABLES_LEGACY`
## @param statefulset.persistence.size Size for persistence to store act runner data
## @param existingSecret Secret that contains the token
## @param existingSecretKey Secret key
## @param giteaRootURL URL the act_runner registers and connect with
# values.yaml — Gitea Actions runner with DinD over shared Unix socket (no TCP, no TLS)
enabled: true
statefulset:
replicas: 3
timezone: Etc/UTC
annotations: {}
labels: {}
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
extraVolumes: []
# Share only the docker.sock file between dind and act-runner
extraVolumes:
- name: docker-socket
emptyDir: {}
actRunner:
repository: gitea/act_runner
tag: 0.2.13
pullPolicy: IfNotPresent
extraVolumeMounts: []
# See full example here: https://gitea.com/gitea/act_runner/src/branch/main/internal/pkg/config/config.example.yaml
# Runner talks to DinD via unix socket; ensure no TLS is used
extraEnvs:
- name: DOCKER_HOST
value: unix:///var/run/docker.sock
- name: DOCKER_BUILDKIT
value: "1"
- name: DOCKER_TLS_CERTDIR
value: ""
- name: DOCKER_TLS_VERIFY
value: ""
# Mount only the socket path, not the entire /var/run (prevents "is a directory" errors)
extraVolumeMounts:
- name: docker-socket
mountPath: /var/run/docker.sock
subPath: docker.sock
# Act runner config
config: |
log:
level: info
cache:
enabled: false
runner:
labels:
- "artemis"
- "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://docker.gitea.com/runner-images:ubuntu-20.04"
container:
privileged: true
require_docker: true
privileged: true # required for DinD
require_docker: true # fail if docker is not reachable
docker_timeout: 300s
dind:
repository: docker
tag: 28.3.3-dind
pullPolicy: IfNotPresent
extraVolumeMounts: []
# If the container keeps crashing in your environment, you might have to add the `DOCKER_IPTABLES_LEGACY` environment variable.
# See https://github.com/docker-library/docker/issues/463#issuecomment-1881909456
# Disable TLS and DO NOT advertise TCP; use only unix socket
extraEnvs:
[]
# - name: "DOCKER_IPTABLES_LEGACY"
# value: "1"
- name: DOCKER_TLS_CERTDIR
value: ""
# If your nodes require legacy iptables:
# - name: DOCKER_IPTABLES_LEGACY
# value: "1"
# Share only the docker.sock file
extraVolumeMounts:
- name: docker-socket
mountPath: /var/run/docker.sock
subPath: docker.sock
# Explicitly bind dockerd to the unix socket; do not bind tcp://0.0.0.0:2375
command: ["dockerd"]
args:
- "--host=unix:///var/run/docker.sock"
- "--storage-driver=overlay2"
# Optional: persistence for DinD image layers (default will mount /var/lib/docker inside dind)
persistence:
size: 1Gi
init:
image:
repository: busybox
# Overrides the image tag whose default is the chart appVersion.
tag: "1.37.0"
## Specify an existing token secret
##
# Runner registration token
existingSecret: "gitea-runner-token"
existingSecretKey: "token"
## Specify the root URL of the Gitea instance
# Root URL of your Gitea
giteaRootURL: "https://git.dgse.cloud"
## @section Global
#
## @param global.imageRegistry global image registry override
## @param global.storageClass global storage class override
global:
imageRegistry: ""
storageClass: ""

View File

@@ -7,7 +7,7 @@ metadata:
helmCharts:
- name: gitea
repo: https://dl.gitea.com/charts/
version: 12.4.0
version: 12.5.0
releaseName: gitea
namespace: gitea
valuesFile: values.yaml

View File

@@ -643,6 +643,13 @@ postgresql-ha:
repmgrPassword: changeme2
postgresPassword: changeme1
password: changeme4
resources:
limits:
cpu: 2000m
memory: 2Gi
requests:
cpu: 1500m
memory: 2Gi
## @param postgresql-ha.pgpool.adminPassword pgpool adminPassword
## @param postgresql-ha.pgpool.image.repository Image repository, eg. `bitnamilegacy/pgpool`.
@@ -652,6 +659,13 @@ postgresql-ha:
image:
repository: bitnamilegacy/pgpool
srCheckPassword: changeme4
resources:
limits:
cpu: 1000m
memory: 2Gi
requests:
cpu: 250m
memory: 1Gi
## @param postgresql-ha.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
service:

View File

@@ -12,7 +12,7 @@ resources:
helmCharts:
- name: immich
repo: https://immich-app.github.io/immich-charts
version: 0.10.1
version: 0.10.3
releaseName: immich
namespace: immich
valuesFile: values.yaml

View File

@@ -94,7 +94,7 @@ server:
secretName: immich-tls
machine-learning:
enabled: true
enabled: false
controllers:
main:
containers:

View File

@@ -1,15 +1,15 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-ml-pvc
spec:
storageClassName: local-path
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
# ---
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
# name: immich-ml-pvc
# spec:
# storageClassName: local-path
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim

View File

@@ -0,0 +1,21 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: kener-postgres
spec:
instances: 1
managed:
roles:
- name: kener
superuser: true
login: true
bootstrap:
initdb:
database: kener
owner: kener
secret:
name: kener-postgres-user
storage:
size: 4Gi
storageClass: local-path

View File

@@ -0,0 +1,79 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kener
spec:
selector:
matchLabels:
app: kener
template:
metadata:
labels:
app: kener
spec:
containers:
- name: kener
image: rajnandan1/kener:latest
ports:
- containerPort: 3000
name: http
volumeMounts:
- name: kener-uploads
mountPath: "/app/uploads"
env:
- name: ORIGIN
valueFrom:
secretKeyRef:
name: kener-secret
key: ORIGIN
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: kener-secret
key: DATABASE_URL
- name: KENER_SECRET_KEY
valueFrom:
secretKeyRef:
name: kener-secret
key: KENER_SECRET_KEY
- name: SMTP_HOST
valueFrom:
secretKeyRef:
name: kener-secret
key: SMTP_HOST
- name: SMTP_PORT
valueFrom:
secretKeyRef:
name: kener-secret
key: SMTP_PORT
- name: SMTP_USER
valueFrom:
secretKeyRef:
name: kener-secret
key: SMTP_USER
- name: SMTP_PASS
valueFrom:
secretKeyRef:
name: kener-secret
key: SMTP_PASS
- name: SMTP_SECURE
valueFrom:
secretKeyRef:
name: kener-secret
key: SMTP_SECURE
- name: SMTP_FROM_EMAIL
valueFrom:
secretKeyRef:
name: kener-secret
key: SMTP_FROM_EMAIL
- name: TZ
valueFrom:
secretKeyRef:
name: kener-secret
key: TZ
volumes:
- name: kener-uploads
persistentVolumeClaim:
claimName: kener-pvc

View File

@@ -0,0 +1,22 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: letsencrypt
name: kener-ingress
spec:
rules:
- host: monitor.dgse.cloud
http:
paths:
- backend:
service:
name: kener
port:
number: 3000
path: /
pathType: Prefix
tls:
- hosts:
- monitor.dgse.cloud
secretName: letsencrypt

View File

@@ -0,0 +1,13 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
name: kener
resources:
- secret.yaml
- db-cluster.yaml
- service.yaml
- pvc.yaml
- deployment.yaml
- ingress.yaml

View File

@@ -0,0 +1,11 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kener-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi

View File

@@ -0,0 +1,74 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: kener-secret
spec:
refreshInterval: 1h
secretStoreRef:
name: platform
kind: ClusterSecretStore
target:
name: kener-secret
data:
- secretKey: ORIGIN
remoteRef:
key: kener
property: ORIGIN
- secretKey: DATABASE_URL
remoteRef:
key: kener
property: DATABASE_URL
- secretKey: KENER_SECRET_KEY
remoteRef:
key: kener
property: KENER_SECRET_KEY
- secretKey: SMTP_HOST
remoteRef:
key: kener
property: SMTP_HOST
- secretKey: SMTP_PORT
remoteRef:
key: kener
property: SMTP_PORT
- secretKey: SMTP_USER
remoteRef:
key: kener
property: SMTP_USER
- secretKey: SMTP_PASS
remoteRef:
key: kener
property: SMTP_PASS
- secretKey: SMTP_SECURE
remoteRef:
key: kener
property: SMTP_SECURE
- secretKey: SMTP_FROM_EMAIL
remoteRef:
key: kener
property: SMTP_FROM_EMAIL
- secretKey: TZ
remoteRef:
key: kener
property: TZ
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: kener-postgres-user
spec:
refreshInterval: 1h
secretStoreRef:
name: platform
kind: ClusterSecretStore
target:
name: kener-postgres-user
data:
- secretKey: username
remoteRef:
key: kener
property: postgres_username
- secretKey: password
remoteRef:
key: kener
property: postgres_password

View File

@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: Service
metadata:
name: kener
spec:
selector:
app: kener
ports:
- protocol: TCP
port: 3000
targetPort: 3000

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: mailu
repo: https://mailu.github.io/helm-charts/
version: 2.1.2
version: 2.6.3
releaseName: mailu
namespace: mailu
valuesFile: values.yaml

View File

@@ -166,7 +166,7 @@ limits:
ipv6Mask: 56
user: 100/day
exemptionLength: 86400
exemption: "10.42.0.0/16"
exemption: "10.42.4.105"
# Configuration to reduce outgoing spam in case of a compromised account. See the documentation for further information: https://mailu.io/1.9/configuration.html?highlight=MESSAGE_RATELIMIT
## @param limits.messageRatelimit.value Sets the `MESSAGE_RATELIMIT` environment variable in the `admin` pod

View File

@@ -0,0 +1,16 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
name: nextcloud
resources:
- secret.yaml
helmCharts:
- name: nextcloud
repo: https://nextcloud.github.io/helm/
version: 8.5.2
releaseName: nextcloud
namespace: nextcloud
valuesFile: values.yaml

View File

@@ -0,0 +1,33 @@
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: nextcloud-secret
spec:
refreshInterval: 1h
secretStoreRef:
name: platform
kind: ClusterSecretStore
target:
name: nextcloud-secret
data:
- secretKey: nextcloud-username
remoteRef:
key: nextcloud
property: nextcloud-username
- secretKey: nextcloud-password
remoteRef:
key: nextcloud
property: nextcloud-password
- secretKey: smtp-password
remoteRef:
key: nextcloud
property: smtp-password
- secretKey: smtp-username
remoteRef:
key: nextcloud
property: smtp-username
- secretKey: smtp-host
remoteRef:
key: nextcloud
property: smtp-host

View File

@@ -0,0 +1,981 @@
## ref: https://hub.docker.com/r/library/nextcloud/tags/
##
image:
repository: nextcloud
flavor: apache
# default is generated by flavor and appVersion
tag:
pullPolicy: IfNotPresent
# pullSecrets:
# - myRegistrKeySecretName
nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
podLabels: {}
deploymentAnnotations: {}
deploymentLabels: {}
# Number of replicas to be deployed
replicaCount: 1
## Allowing use of ingress controllers
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
enabled: true
className: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
tls:
- secretName: nextcloud-tls
hosts:
- nextcloud.dgse.cloud
labels: {}
path: /
pathType: Prefix
# Allow configuration of lifecycle hooks
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
lifecycle: {}
# lifecycle:
# postStartCommand: []
# preStopCommand: []
phpClientHttpsFix:
enabled: false
protocol: https
nextcloud:
host: nextcloud.dgse.cloud
# username: admin
# password: changeme
## Use an existing secret
existingSecret:
enabled: true
secretName: nextcloud-secret
usernameKey: nextcloud-username
passwordKey: nextcloud-password
tokenKey: ""
smtpUsernameKey: smtp-username
smtpPasswordKey: smtp-password
smtpHostKey: smtp-host
update: 0
# If web server is not binding default port, you can define it
containerPort: 80
datadir: /var/www/html/data
persistence:
subPath:
# if set, we'll template this list to the NEXTCLOUD_TRUSTED_DOMAINS env var
trustedDomains: []
## SMTP configuration
mail:
enabled: false
# the user we send email as
fromAddress: user
# the domain we send email from
domain: domain.com
smtp:
host: domain.com
secure: ssl
port: 465
authtype: LOGIN
name: user
password: pass
## Primary ObjectStore options
# see: https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html#configuring-object-storage-as-primary-storage
objectStore:
# https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
s3:
enabled: false
# ignored if nextcloud.objectstore.s3.existingSecret is not empty string
accessKey: ""
# ignored if nextcloud.objectstore.s3.existingSecret is not empty string
secretKey: ""
# use legacy auth method
legacyAuth: false
# s3 endpoint to use; only required if you're not using AWS
host: ""
# use TLS/SSL for S3 connections
ssl: true
# default port that can be changed based on your object store, e.g. for minio, you can use 9000
port: "443"
# this is the default in the nextcloud docs
region: "eu-west-1"
# required if using s3, the name of the bucket you'd like to use
bucket: ""
# object prefix in bucket
prefix: ""
# set to true if you are not using DNS for your buckets.
usePathStyle: false
# autocreate the bucket
autoCreate: false
# optonal parameter: you probably want to keep this as default
storageClass: "STANDARD"
# server side encryption key. learn more: https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html#s3-sse-c-encryption-support
sse_c_key: ""
# use an existingSecret for S3 credentials. If set, we ignore the following under nextcloud.objectStore.s3
# endpoint, accessKey, secretKey
existingSecret: ""
secretKeys:
# key in nextcloud.objectStore.s3.existingSecret to use for s3 endpoint
host: ""
# key in nextcloud.objectStore.s3.existingSecret to use for s3 accessKeyID
accessKey: ""
# key in nextcloud.objectStore.s3.existingSecret to use for s3 secretAccessKey
secretKey: ""
# key in nextcloud.objectStore.s3.existingSecret to use for the s3 bucket
bucket: ""
# key in nextcloud.objectStore.s3.existingSecret to use for the s3 sse_c_key
sse_c_key: ""
## options related to using Swift as a primary object storage
# https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html#openstack-swift
swift:
enabled: false
# swift user info
user:
domain: "Default"
name: ""
password: ""
# swift project info
project:
name: ""
domain: "Default"
# The Identity / Keystone endpoint
url: ""
region: ""
# optional on some swift implementations
service: "swift"
# the container to store the data in
container: ""
# autocreate container
autoCreate: false
## PHP Configuration files
# Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
phpConfigs: {}
## Default config files that utilize environment variables:
# see: https://github.com/nextcloud/docker/tree/master#auto-configuration-via-environment-variables
# IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself
# Default confgurations can be found here: https://github.com/nextcloud/docker/tree/master/.config
defaultConfigs:
# To protect /var/www/html/config
.htaccess: true
# Apache configuration for rewrite urls
apache-pretty-urls.config.php: true
# Define APCu as local cache
apcu.config.php: true
# Apps directory configs
apps.config.php: true
# Used for auto configure database
autoconfig.php: true
# Redis default configuration
redis.config.php: true
# Reverse proxy default configuration
reverse-proxy.config.php: true
# S3 Object Storage as primary storage
s3.config.php: true
# SMTP default configuration via environment variables
smtp.config.php: true
# Swift Object Storage as primary storage
swift.config.php: true
# disables the web based updater as the default nextcloud docker image does not support it
upgrade-disable-web.config.php: true
# -- imaginary support config
imaginary.config.php: false
# Extra config files created in /var/www/html/config/
# ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
configs: {}
# For example, to enable image and text file previews:
# previews.config.php: |-
# <?php
# $CONFIG = array (
# 'enable_previews' => true,
# 'enabledPreviewProviders' => array (
# 'OC\Preview\Movie',
# 'OC\Preview\PNG',
# 'OC\Preview\JPEG',
# 'OC\Preview\GIF',
# 'OC\Preview\BMP',
# 'OC\Preview\XBitmap',
# 'OC\Preview\MP3',
# 'OC\Preview\MP4',
# 'OC\Preview\TXT',
# 'OC\Preview\MarkDown',
# 'OC\Preview\PDF'
# ),
# );
# Hooks for auto configuration
# Here you could write small scripts which are placed in `/docker-entrypoint-hooks.d/<hook-name>/helm.sh`
# ref: https://github.com/nextcloud/docker?tab=readme-ov-file#auto-configuration-via-hook-folders
hooks:
pre-installation:
post-installation:
pre-upgrade:
post-upgrade:
before-starting:
## Strategy used to replace old pods
## IMPORTANT: use with care, it is suggested to leave as that for upgrade purposes
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
strategy:
type: Recreate
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
##
## Extra environment variables
extraEnv:
# - name: SOME_SECRET_ENV
# valueFrom:
# secretKeyRef:
# name: nextcloud
# key: secret_key
# Extra init containers that runs before pods start.
extraInitContainers: []
# - name: do-something
# image: busybox
# command: ['do', 'something']
# Extra sidecar containers.
extraSidecarContainers: []
# - name: nextcloud-logger
# image: busybox
# command: [/bin/sh, -c, 'while ! test -f "/run/nextcloud/data/nextcloud.log"; do sleep 1; done; tail -n+1 -f /run/nextcloud/data/nextcloud.log']
# volumeMounts:
# - name: nextcloud-data
# mountPath: /run/nextcloud/data
# Extra mounts for the pods. Example shown is for connecting a legacy NFS volume
# to NextCloud pods in Kubernetes. This can then be configured in External Storage
extraVolumes:
# - name: nfs
# nfs:
# server: "10.0.0.1"
# path: "/nextcloud_data"
# readOnly: false
extraVolumeMounts:
# - name: nfs
# mountPath: "/legacy_data"
# Set securityContext parameters for the nextcloud CONTAINER only (will not affect nginx container).
# For example, you may need to define runAsNonRoot directive
securityContext: {}
# runAsUser: 33
# runAsGroup: 33
# runAsNonRoot: true
# readOnlyRootFilesystem: false
# Set securityContext parameters for the entire pod. For example, you may need to define runAsNonRoot directive
podSecurityContext: {}
# runAsUser: 33
# runAsGroup: 33
# runAsNonRoot: true
# readOnlyRootFilesystem: false
# Settings for the MariaDB init container
mariaDbInitContainer:
resources: {}
# Set mariadb initContainer securityContext parameters. For example, you may need to define runAsNonRoot directive
securityContext: {}
# Settings for the PostgreSQL init container
postgreSqlInitContainer:
resources: {}
# Set postgresql initContainer securityContext parameters. For example, you may need to define runAsNonRoot directive
securityContext: {}
nginx:
## You need to set an fpm version of the image for nextcloud if you want to use nginx!
enabled: false
image:
repository: nginx
tag: alpine
pullPolicy: IfNotPresent
containerPort: 80
# This configures nginx to listen on either IPv4, IPv6 or both
ipFamilies:
- IPv4
# - IPv6
config:
# This generates the default nginx config as per the nextcloud documentation
default: true
headers:
# -- HSTS settings
# WARNING: Only add the preload option once you read about
# the consequences in https://hstspreload.org/. This option
# will add the domain to a hardcoded list that is shipped
# in all major browsers and getting removed from this list
# could take several months.
# Example:
# "Strict-Transport-Security": "max-age=15768000; includeSubDomains; preload;"
"Strict-Transport-Security": ""
"Referrer-Policy": "no-referrer"
"X-Content-Type-Options": "nosniff"
"X-Frame-Options": "SAMEORIGIN"
"X-Permitted-Cross-Domain-Policies": "none"
"X-Robots-Tag": "noindex, nofollow"
"X-XSS-Protection": "1; mode=block"
# Added in server block of default config.
serverBlockCustom: |
# set max upload size
client_max_body_size 10G;
client_body_timeout 300s;
fastcgi_buffers 64 4K;
fastcgi_read_timeout 3600s;
custom:
# custom: |-
# worker_processes 1;..
resources: {}
# Set nginx container securityContext parameters. For example, you may need to define runAsNonRoot directive
securityContext: {}
# the nginx alpine container default user is 82
# runAsUser: 82
# runAsGroup: 33
# runAsNonRoot: true
# readOnlyRootFilesystem: true
## Extra environment variables
extraEnv: []
# - name: SOME_ENV
# value: ENV_VALUE
internalDatabase:
enabled: true
name: nextcloud
##
## External database configuration
##
externalDatabase:
enabled: false
## Supported database engines: mysql or postgresql
type: mysql
## Database host. You can optionally include a colon delimited port like "myhost:1234"
host: ""
## Database user
user: nextcloud
## Database password
password: ""
## Database name
database: nextcloud
## Use a existing secret
existingSecret:
enabled: false
# secretName: nameofsecret
usernameKey: db-username
passwordKey: db-password
# hostKey: db-hostname-or-ip
# databaseKey: db-name
global:
security:
# required for bitnamilegacy repos
allowInsecureImages: true
##
## MariaDB chart configuration
## ref: https://github.com/bitnami/charts/tree/main/bitnami/mariadb
##
mariadb:
## Whether to deploy a mariadb server from the bitnami mariab db helm chart
# to satisfy the applications database requirements. if you want to deploy this bitnami mariadb, set this and externalDatabase to true
# To use an ALREADY DEPLOYED mariadb database, set this to false and configure the externalDatabase parameters
enabled: false
image:
repository: bitnamilegacy/mariadb
# see: https://github.com/bitnami/charts/tree/main/bitnami/mariadb#global-parameters
global:
# overwrites the primary.persistence.storageClass value
defaultStorageClass: ""
auth:
database: nextcloud
username: nextcloud
password: changeme
# Use existing secret (auth.rootPassword, auth.password, and auth.replicationPassword will be ignored).
# secret must contain the keys mariadb-root-password, mariadb-replication-password and mariadb-password
existingSecret: ""
architecture: standalone
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
primary:
persistence:
enabled: false
# Use an existing Persistent Volume Claim (must be created ahead of time)
existingClaim: ""
storageClass: ""
accessMode: ReadWriteOnce
size: 8Gi
##
## PostgreSQL chart configuration
## for more options see https://github.com/bitnami/charts/tree/main/bitnami/postgresql
##
postgresql:
enabled: false
image:
repository: bitnamilegacy/postgresql
global:
postgresql:
# global.postgresql.auth overrides postgresql.auth
auth:
username: nextcloud
password: changeme
database: nextcloud
# Name of existing secret to use for PostgreSQL credentials.
# auth.postgresPassword, auth.password, and auth.replicationPassword will be ignored and picked up from this secret.
# secret might also contains the key ldap-password if LDAP is enabled.
# ldap.bind_password will be ignored and picked from this secret in this case.
existingSecret: ""
# Names of keys in existing secret to use for PostgreSQL credentials
secretKeys:
adminPasswordKey: ""
userPasswordKey: ""
replicationPasswordKey: ""
primary:
persistence:
enabled: false
# Use an existing Persistent Volume Claim (must be created ahead of time)
# existingClaim: ""
# storageClass: ""
##
## External Redis configuration
##
externalRedis:
enabled: false
## Redis host
host: ""
## Redis port
port: "6379"
## Redis password
password: ""
## Use a existing secret
existingSecret:
enabled: false
# secretName: nameofsecret
passwordKey: redis-password
##
## Redis chart configuration
## for more options see https://github.com/bitnami/charts/tree/main/bitnami/redis
##
redis:
enabled: false
image:
repository: bitnamilegacy/redis
auth:
enabled: true
password: "changeme"
# name of an existing secret with Redis® credentials (instead of auth.password), must be created ahead of time
existingSecret: ""
# Password key to be retrieved from existing secret
existingSecretPasswordKey: ""
# Since Redis is used for caching only, you might want to use a storageClass with different reclaim policy and backup settings
global:
storageClass: ""
master:
persistence:
enabled: true
replica:
persistence:
enabled: true
##
## Collabora chart configuration
## for more options see https://github.com/CollaboraOnline/online/tree/master/kubernetes/helm/collabora-online
##
collabora:
enabled: false
autoscaling:
# enable autocaling, please check collabora README.md first
enabled: false
collabora:
## HTTPS nextcloud domain, if needed
aliasgroups: []
# - host: "https://nextcloud.domain:443"
# set extra parameters for collabora
# you may need to add --o:ssl.termination=true
extra_params: --o:ssl.enable=false
## Specify server_name when the hostname is not reachable directly for
# example behind reverse-proxy. example: collabora.domain
server_name: null
existingSecret:
# set to true to to get collabora admin credentials from an existin secret
# if set, ignores collabora.collabora.username and password
enabled: false
# name of existing Kubernetes Secret with collboara admin credentials
secretName: ""
usernameKey: "username"
passwordKey: "password"
# setup admin login credentials, these are ignored if
# collabora.collabora.existingSecret.enabled=true
password: examplepass
username: admin
# setup ingress
ingress:
# enable ingress for collabora online
enabled: false
className: ""
# please check collabora values.yaml for nginx/haproxy annotations examples
annotations: {}
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: collabora-ingress-tls
# hosts:
# - collabora.domain
# see collabora helm README.md for recommended values
resources: {}
## Cronjob to execute Nextcloud background tasks
## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#cron
##
cronjob:
enabled: false
# Either 'sidecar' or 'cronjob'
type: sidecar
# Runs crond as a sidecar container in the Nextcloud pod
# Note: crond requires root
sidecar:
## Cronjob sidecar resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# Allow configuration of lifecycle hooks
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
lifecycle: {}
# lifecycle:
# postStartCommand: []
# preStopCommand: []
# Set securityContext parameters. For example, you may need to define runAsNonRoot directive
securityContext: {}
# runAsUser: 33
# runAsGroup: 33
# runAsNonRoot: true
# readOnlyRootFilesystem: true
# The command the cronjob container executes.
command:
- /cron.sh
# Uses a Kubernetes CronJob to execute the Nextcloud cron tasks
# Note: can run as non-root user. Should run as same user as the Nextcloud pod.
cronjob:
# Use a CronJob instead of crond sidecar container
# crond does not work when not running as root user
# Note: requires `persistence.enabled=true`
schedule: "*/5 * * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 5
# -- Additional labels for cronjob
labels: {}
# -- Additional labels for cronjob pod
podLabels: {}
annotations: {}
backoffLimit: 1
affinity: {}
# Often RWO volumes are used. But the cronjob pod needs access to the same volume as the nextcloud pod.
# Depending on your provider two pods on the same node can still access the same volume.
# Following config ensures that the cronjob pod is scheduled on the same node as the nextcloud pod.
# affinity:
# podAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - nextcloud
# - key: app.kubernetes.io/component
# operator: In
# values:
# - app
# topologyKey: kubernetes.io/hostname
## Resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# Allow configuration of lifecycle hooks
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
# Set securityContext parameters. For example, you may need to define runAsNonRoot directive
securityContext: {}
# runAsUser: 33
# runAsGroup: 33
# runAsNonRoot: true
# readOnlyRootFilesystem: true
# The command to run in the cronjob container
# Example to increase memory limit: php -d memory_limit=2G ...
command:
- php
- -f
- /var/www/html/cron.php
- --
- --verbose
service:
type: ClusterIP
port: 8080
loadBalancerIP: ""
nodePort:
# -- use additional annotation on service for nextcloud
annotations: {}
# -- Set this to "ClientIP" to make sure that connections from the same client
# are passed to the same Nextcloud pod each time.
sessionAffinity: ""
sessionAffinityConfig: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
# Nextcloud Data (/var/www/html)
enabled: true
annotations: {}
labels: {}
## nextcloud data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: "local-path"
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
size: 50Gi
## Use an additional pvc for the data directory rather than a subpath of the default PVC
## Useful to store data on a different storageClass (e.g. on slower disks)
nextcloudData:
enabled: false
subPath:
labels: {}
annotations: {}
# storageClass: "-"
# existingClaim:
accessMode: ReadWriteOnce
size: 8Gi
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# resources:
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: false
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 30
successThreshold: 1
## Enable pod autoscaling using HorizontalPodAutoscaler
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
##
hpa:
enabled: false
cputhreshold: 60
minPods: 1
maxPods: 10
nodeSelector: {}
tolerations: []
# -- Nextcloud pod topologySpreadConstraints
topologySpreadConstraints: []
affinity: {}
dnsConfig: {}
# Custom dns config for Nextcloud containers.
# You can for example configure ndots. This may be needed in some clusters with alpine images.
# options:
# - name: ndots
# value: "1"
imaginary:
# -- Start Imaginary
enabled: false
# -- Number of imaginary pod replicas to deploy
replicaCount: 1
image:
# -- Imaginary image registry
registry: docker.io
# -- Imaginary image name
repository: h2non/imaginary
# -- Imaginary image tag
tag: 1.2.4
# -- Imaginary image pull policy
pullPolicy: IfNotPresent
# -- Imaginary image pull secrets
pullSecrets: []
# -- Additional annotations for imaginary
podAnnotations: {}
# -- Additional labels for imaginary
podLabels: {}
# -- Imaginary pod nodeSelector
nodeSelector: {}
# -- Imaginary pod tolerations
tolerations: []
# -- Imaginary pod topologySpreadConstraints
topologySpreadConstraints: []
# -- imaginary resources
resources: {}
# -- Optional security context for the Imaginary container
securityContext:
runAsUser: 1000
runAsNonRoot: true
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# -- Optional security context for the Imaginary pod (applies to all containers in the pod)
podSecurityContext: {}
# runAsNonRoot: true
# seccompProfile:
# type: RuntimeDefault
readinessProbe:
enabled: true
failureThreshold: 3
successThreshold: 1
periodSeconds: 10
timeoutSeconds: 1
livenessProbe:
enabled: true
failureThreshold: 3
successThreshold: 1
periodSeconds: 10
timeoutSeconds: 1
service:
# -- Imaginary: Kubernetes Service type
type: ClusterIP
# -- Imaginary: LoadBalancerIp for service type LoadBalancer
loadBalancerIP:
# -- Imaginary: NodePort for service type NodePort
nodePort:
# -- Additional annotations for service imaginary
annotations: {}
# -- Additional labels for service imaginary
labels: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
replicaCount: 1
# Optional: becomes NEXTCLOUD_SERVER env var in the nextcloud-exporter container.
# Without it, we will use the full name of the nextcloud service
server: ""
# The metrics exporter needs to know how you serve Nextcloud either http or https
https: false
# Use API token if set, otherwise fall back to password authentication
# https://github.com/xperimental/nextcloud-exporter#token-authentication
# Currently you still need to set the token manually in your nextcloud install
token: ""
timeout: 5s
# if set to true, exporter skips certificate verification of Nextcloud server.
tlsSkipVerify: false
info:
# Optional: becomes NEXTCLOUD_INFO_APPS env var in the nextcloud-exporter container.
# Enables gathering of apps-related metrics. Defaults to false
apps: false
update: false
image:
repository: xperimental/nextcloud-exporter
tag: 0.8.0
pullPolicy: IfNotPresent
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# -- Metrics exporter pod Annotation
podAnnotations: {}
# -- Metrics exporter pod Labels
podLabels: {}
# -- Metrics exporter pod nodeSelector
nodeSelector: {}
# -- Metrics exporter pod tolerations
tolerations: []
# -- Metrics exporter pod affinity
affinity: {}
service:
type: ClusterIP
# Use serviceLoadBalancerIP to request a specific static IP,
# otherwise leave blank
loadBalancerIP:
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9205"
labels: {}
# -- security context for the metrics CONTAINER in the pod
securityContext:
runAsUser: 1000
runAsNonRoot: true
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# -- security context for the metrics POD
podSecurityContext: {}
# runAsNonRoot: true
# seccompProfile:
# type: RuntimeDefault
## Prometheus Operator ServiceMonitor configuration
##
serviceMonitor:
## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
##
enabled: false
## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
##
namespace: ""
## @param metrics.serviceMonitor.namespaceSelector The selector of the namespace where the target service is located (defaults to the release namespace)
namespaceSelector:
## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
##
jobLabel: ""
## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: 30s
## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
scrapeTimeout: ""
## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor
##
labels: {}
rules:
# -- Deploy Prometheus Rules (Alerts) for the exporter
# @section -- Metrics
enabled: false
# -- Label on Prometheus Rules CRD Manifest
# @section -- Metrics
labels: {}
defaults:
# -- Add Default Rules
# @section -- Metrics
enabled: true
# -- Label on the rules (the severity is already set)
# @section -- Metrics
labels: {}
# -- Filter on metrics on alerts (default just for this helm-chart)
# @section -- Metrics
filter: ""
# -- Add own Rules to Prometheus Rules
# @section -- Metrics
additionalRules: []
rbac:
enabled: false
serviceaccount:
create: true
name: nextcloud-serviceaccount
annotations: {}
## @param securityContext for nextcloud pod @deprecated Use `nextcloud.podSecurityContext` instead
securityContext: {}

View File

@@ -0,0 +1,58 @@
---
# Runs a macOS 14 (Sonoma) virtual machine via dockurr/macos.
# The container requires privileged access plus the host's /dev/kvm and
# /dev/net/tun devices for virtualization and virtual networking.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: macos
  labels:
    name: macos
spec:
  replicas: 1
  # Recreate avoids a rollout deadlock: the backing PVC is ReadWriteOnce,
  # so a replacement pod scheduled on another node cannot attach the volume
  # while the old pod still holds it during a rolling update.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: macos
  template:
    metadata:
      labels:
        app: macos
    spec:
      containers:
        - name: macos
          image: dockurr/macos
          env:
            # macOS major version to install (14 = Sonoma)
            - name: VERSION
              value: "14"
            # Size of the virtual disk backing the VM (matches the 64Gi PVC)
            - name: DISK_SIZE
              value: "64G"
            # KVM acceleration disabled — NOTE(review): /dev/kvm is still
            # mounted below; set to "Y" if the node supports virtualization.
            - name: KVM
              value: "N"
          ports:
            # Web-based viewer
            - containerPort: 8006
              name: http
              protocol: TCP
            # VNC access to the VM console
            - containerPort: 5900
              name: vnc
              protocol: TCP
          securityContext:
            capabilities:
              add:
                # NET_ADMIN is needed for the VM network bridge
                # (technically implied by privileged: true; kept explicit)
                - NET_ADMIN
            privileged: true
          volumeMounts:
            - mountPath: /storage
              name: storage
            - mountPath: /dev/kvm
              name: dev-kvm
            - mountPath: /dev/net/tun
              name: dev-tun
      # Give the VM time to shut down cleanly before the pod is killed
      terminationGracePeriodSeconds: 120
      volumes:
        - name: storage
          persistentVolumeClaim:
            claimName: macos-pvc
        - name: dev-kvm
          hostPath:
            path: /dev/kvm
            # CharDevice validates the host path is a character device
            # (consistent with dev-tun below)
            type: CharDevice
        - name: dev-tun
          hostPath:
            path: /dev/net/tun
            type: CharDevice

View File

@@ -0,0 +1,22 @@
# Ingress exposing the macOS VM web viewer (service "macos", port 8006)
# at https://osx.dgse.cloud with a cert-manager-issued certificate.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    # TLS certificate issued by the "letsencrypt" ClusterIssuer
    cert-manager.io/cluster-issuer: letsencrypt
  name: osx-ingress
spec:
  # NOTE(review): no ingressClassName is set — this relies on a default
  # IngressClass being configured in the cluster; confirm one exists.
  rules:
    - host: osx.dgse.cloud
      http:
        paths:
          - backend:
              service:
                name: macos
                port:
                  number: 8006
            path: /
            pathType: Prefix
  tls:
    - hosts:
        - osx.dgse.cloud
      # NOTE(review): the secret name "letsencrypt" is very generic —
      # cert-manager overwrites this Secret with the issued certificate;
      # verify it is not shared with other Ingresses in the namespace.
      secretName: letsencrypt

View File

@@ -0,0 +1,8 @@
---
# Kustomization assembling the macOS VM workload.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - service.yaml
  - deployment.yaml
  - pvc.yaml
  # NOTE(review): the osx-ingress manifest is not listed here — confirm it
  # is applied by a parent kustomization or another mechanism.

View File

@@ -0,0 +1,11 @@
---
# Backing storage for the macOS VM disk image (mounted at /storage).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: macos-pvc
spec:
  accessModes:
    # ReadWriteOnce: only a single node can mount this volume at a time
    - ReadWriteOnce
  resources:
    requests:
      # Sized to match the VM's DISK_SIZE (64G) set on the Deployment
      storage: 64Gi
  # No storageClassName set — the cluster's default StorageClass is used

View File

@@ -0,0 +1,19 @@
---
# ClusterIP Service exposing the macOS VM's web viewer and VNC ports.
apiVersion: v1
kind: Service
metadata:
  name: macos
spec:
  # "Cluster" is the default internal traffic policy; stated explicitly
  internalTrafficPolicy: Cluster
  ports:
    # Web-based viewer (routed externally via osx-ingress)
    - name: http
      port: 8006
      protocol: TCP
      targetPort: 8006
    # VNC console (not routed by the visible Ingress; cluster-internal)
    - name: vnc
      port: 5900
      protocol: TCP
      targetPort: 5900
  selector:
    app: macos
  type: ClusterIP

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: penpot
repo: http://helm.penpot.app
version: 0.28.0
version: 0.32.0
releaseName: penpot
namespace: penpot
valuesFile: values.yaml

View File

@@ -21,7 +21,7 @@ spec:
spec:
containers:
- name: uptime-kuma
image: louislam/uptime-kuma:1.23.16
image: louislam/uptime-kuma:2.0.2
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3001

View File

@@ -10,7 +10,7 @@ resources:
helmCharts:
- name: vault
repo: https://helm.releases.hashicorp.com/
version: 0.30.0
version: 0.31.0
releaseName: vault
namespace: vault
valuesFile: values.yaml

View File

@@ -7,7 +7,7 @@ metadata:
helmCharts:
- name: vaultwarden
repo: https://guerzon.github.io/vaultwarden/
version: 0.31.8
version: 0.34.4
releaseName: vaultwarden
namespace: vaultwarden
valuesFile: values.yaml

7
mkdocs.yaml Normal file
View File

@@ -0,0 +1,7 @@
---
# MkDocs site configuration; the techdocs-core plugin renders this
# documentation for Backstage TechDocs.
site_name: "Nextcloud"
site_description: "Self-hosted file hosting service"
nav:
  - Introduction: index.md
plugins:
  - techdocs-core

28
renovate.json Normal file
View File

@@ -0,0 +1,28 @@
{
"extends": [
"config:recommended"
],
"labels": ["Kind/Security"],
"major": {
"addLabels": ["Priority/High"]
},
"minor": {
"addLabels": ["Priority/Medium"]
},
"patch": {
"addLabels": ["Priority/Low"]
},
"digest": {
"addLabels": ["Priority/Low"]
},
"packageRules": [
{
"matchUpdateTypes": ["minor", "patch", "pin", "digest"],
"automerge": false
},
{
"matchFileNames": ["**/values.yaml", "**/values/*.yaml"],
"enabled": false
}
]
}