1 Commit

Author: Danyliuk
SHA1: cbb1eb83ee
Message: .gitignore: Add IntelliJ IDEA part
Date: 2022-10-08 19:43:47 +03:00

35 changed files with 388 additions and 12922 deletions

View File

@@ -23,7 +23,6 @@ Most endpoints are protected by OIDC authentication or Authelia SSO middleware.
General discussion is happening in the `#kube` Slack channel.
<details><summary>Bootstrapping access</summary>
For bootstrap access, obtain `/etc/kubernetes/admin.conf` from one of the master
nodes and place it under `~/.kube/config` on your machine.
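A minimal sketch of that copy step, assuming SSH access to a control-plane node (the hostname here is hypothetical):

```bash
# master1.kube.k-space.ee is an assumed hostname; any master node works
scp root@master1.kube.k-space.ee:/etc/kubernetes/admin.conf ~/.kube/config
chmod 600 ~/.kube/config
```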
@@ -47,9 +46,9 @@ EOF
sudo systemctl daemon-reload
systemctl restart kubelet
```
</details>
The following can be used to talk to the Kubernetes cluster using OIDC credentials:
Afterwards, the following can be used to talk to the Kubernetes cluster using
OIDC credentials:
```bash
kubectl krew install oidc-login
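# The diff truncates here; as a hedged sketch, a typical next step with
# the oidc-login plugin looks like this (issuer URL and client ID are
# assumptions, not confirmed by this repo):
kubectl oidc-login setup \
  --oidc-issuer-url=https://auth.k-space.ee \
  --oidc-client-id=kubelogin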
@@ -90,16 +89,6 @@ EOF
For access control mapping, see [cluster-role-bindings.yml](cluster-role-bindings.yml)
### systemd-resolved issues on access
```sh
Unable to connect to the server: dial tcp: lookup master.kube.k-space.ee on 127.0.0.53:53: no such host
```
```
Network → VPN → `IPv4` → Other nameservers (Muud nimeserverid): `172.21.0.1`
Network → VPN → `IPv6` → Other nameservers (Muud nimeserverid): `2001:bb8:4008:21::1`
Network → VPN → `IPv4` → Search domains (Otsingudomeenid): `kube.k-space.ee`
Network → VPN → `IPv6` → Search domains (Otsingudomeenid): `kube.k-space.ee`
```
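The same effect can be had from the shell; a minimal sketch using systemd-resolved's CLI (the VPN interface name is an assumption):

```sh
# "tun0" stands in for your actual VPN interface name
resolvectl dns tun0 172.21.0.1 2001:bb8:4008:21::1
resolvectl domain tun0 kube.k-space.ee
```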
# Technology mapping

View File

@@ -1,17 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: grafana
namespace: argocd
spec:
project: default
source:
repoURL: 'git@git.k-space.ee:k-space/kube.git'
path: grafana
targetRevision: HEAD
destination:
server: 'https://kubernetes.default.svc'
namespace: grafana
syncPolicy:
syncOptions:
- CreateNamespace=true

View File

@@ -16,6 +16,7 @@ server:
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
@@ -23,7 +24,8 @@ server:
- argocd.k-space.ee
tls:
- hosts:
- "*.k-space.ee"
- argocd.k-space.ee
secretName: argocd-server-tls
configEnabled: true
config:
admin.enabled: "false"

View File

@@ -162,8 +162,8 @@ kubectl -n argocd create secret generic argocd-secret \
kubectl get secret -n authelia oidc-secrets -o json \
| jq '.data."oidc-secrets.yml"' -r | base64 -d | yq -o json \
| jq '.identity_providers.oidc.clients[] | select(.id == "argocd") | .secret' -r)
kubectl -n grafana delete secret oidc-secret
kubectl -n grafana create secret generic oidc-secret \
kubectl -n monitoring delete secret oidc-secret
kubectl -n monitoring create secret generic oidc-secret \
--from-literal=GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET=$( \
kubectl get secret -n authelia oidc-secrets -o json \
| jq '.data."oidc-secrets.yml"' -r | base64 -d | yq -o json \

View File

@@ -295,6 +295,7 @@ metadata:
labels:
app.kubernetes.io/name: authelia
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
kubernetes.io/tls-acme: "true"
traefik.ingress.kubernetes.io/router.entryPoints: websecure
@@ -314,7 +315,8 @@ spec:
number: 80
tls:
- hosts:
- "*.k-space.ee"
- auth.k-space.ee
secretName: authelia-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware

View File

@@ -182,6 +182,12 @@ metadata:
annotations:
kubernetes.io/ingress.class: traefik
# Following specifies the certificate issuer defined in
# ../cert-manager/issuer.yml
# This is where the HTTPS certificates for the
# `tls:` section below are obtained from
cert-manager.io/cluster-issuer: default
# This tells Traefik this Ingress object is associated with the
# https:// entrypoint
# Global http:// to https:// redirect is enabled in
@@ -228,7 +234,8 @@ spec:
number: 3003
tls:
- hosts:
- "*.k-space.ee"
- cams.k-space.ee
secretName: camtiler-tls
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
@@ -364,6 +371,7 @@ metadata:
name: minio
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
@@ -381,7 +389,8 @@ spec:
number: 80
tls:
- hosts:
- "*.k-space.ee"
- cams-s3.k-space.ee
secretName: cams-s3-tls
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition

View File

@@ -77,11 +77,14 @@ steps:
- echo "ENV GIT_COMMIT_TIMESTAMP=$(git log -1 --format=%cd --date=iso-strict)" >> Dockerfile
- cat Dockerfile
- name: docker
image: harbor.k-space.ee/k-space/drone-kaniko
image: plugins/docker
settings:
repo: ${DRONE_REPO}
repo: harbor.k-space.ee/${DRONE_REPO}
tags: latest-arm64
registry: harbor.k-space.ee
squash: true
experimental: true
mtu: 1300
username:
from_secret: docker_username
password:
@@ -106,11 +109,14 @@ steps:
- echo "ENV GIT_COMMIT_TIMESTAMP=$(git log -1 --format=%cd --date=iso-strict)" >> Dockerfile
- cat Dockerfile
- name: docker
image: harbor.k-space.ee/k-space/drone-kaniko
image: plugins/docker
settings:
repo: ${DRONE_REPO}
repo: harbor.k-space.ee/${DRONE_REPO}
tags: latest-amd64
registry: harbor.k-space.ee
squash: true
experimental: true
mtu: 1300
storage_driver: vfs
username:
from_secret: docker_username
@@ -124,8 +130,8 @@ steps:
- name: manifest
image: plugins/manifest
settings:
target: ${DRONE_REPO}:latest
template: ${DRONE_REPO}:latest-ARCH
target: harbor.k-space.ee/${DRONE_REPO}:latest
template: harbor.k-space.ee/${DRONE_REPO}:latest-ARCH
platforms:
- linux/amd64
- linux/arm64

View File

@@ -83,6 +83,7 @@ kind: Ingress
metadata:
name: drone
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
@@ -90,7 +91,8 @@ metadata:
spec:
tls:
- hosts:
- "*.k-space.ee"
- "drone.k-space.ee"
secretName: drone-tls
rules:
- host: "drone.k-space.ee"
http:

View File

@@ -5,9 +5,11 @@ metadata:
name: filebeat
spec:
type: filebeat
version: 8.4.3
version: 8.4.1
elasticsearchRef:
name: elasticsearch
kibanaRef:
name: kibana
config:
logging:
level: warning
@@ -27,9 +29,6 @@ spec:
- /var/log/containers/*${data.kubernetes.container.id}.log
daemonSet:
podTemplate:
metadata:
annotations:
co.elastic.logs/enabled: 'false'
spec:
serviceAccountName: filebeat
automountServiceAccountToken: true
@@ -86,9 +85,11 @@ metadata:
name: filebeat-syslog
spec:
type: filebeat
version: 8.4.3
version: 8.4.1
elasticsearchRef:
name: elasticsearch
kibanaRef:
name: kibana
config:
logging:
level: warning
@@ -108,9 +109,6 @@ spec:
deployment:
replicas: 2
podTemplate:
metadata:
annotations:
co.elastic.logs/enabled: 'false'
spec:
terminationGracePeriodSeconds: 30
containers:
@@ -218,7 +216,7 @@ kind: Elasticsearch
metadata:
name: elasticsearch
spec:
version: 8.4.3
version: 8.4.1
nodeSets:
- name: default
count: 1
@@ -242,7 +240,7 @@ kind: Kibana
metadata:
name: kibana
spec:
version: 8.4.3
version: 8.4.1
count: 1
elasticsearchRef:
name: elasticsearch
@@ -265,9 +263,6 @@ spec:
- key: elastic
path: xpack.security.authc.providers.anonymous.anonymous1.credentials.password
podTemplate:
metadata:
annotations:
co.elastic.logs/enabled: 'false'
spec:
containers:
- name: kibana
@@ -288,6 +283,7 @@ metadata:
name: kibana
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
@@ -306,7 +302,8 @@ spec:
number: 5601
tls:
- hosts:
- "*.k-space.ee"
- kibana.k-space.ee
secretName: kibana-tls
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor

View File

@@ -79,6 +79,7 @@ metadata:
namespace: etherpad
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
@@ -96,7 +97,8 @@ spec:
number: 9001
tls:
- hosts:
- "*.k-space.ee"
- pad.k-space.ee
secretName: pad-tls
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy

View File

@@ -1,19 +0,0 @@
# Grafana
```
kubectl create namespace grafana
kubectl apply -n grafana -f application.yml
```
## OIDC secret
See the Authelia README on provisioning and updating OIDC secrets for Grafana
## Grafana post-deployment steps
* Configure Prometheus datasource with URL set to
`http://prometheus-operated.prometheus-operator.svc.cluster.local:9090`
* Configure Elasticsearch datasource with URL set to
`http://elasticsearch.elastic-system.svc.cluster.local`,
Time field name set to `timestamp` and
ElasticSearch version set to `7.10+` (a declarative provisioning sketch follows below)
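The same datasources can also be provisioned declaratively instead of clicking through the UI; a hedged sketch of a Grafana provisioning file (the mount path and exact `esVersion` syntax are assumptions for the `grafana/grafana:8.5.0` image used here):

```yaml
# Would live under /etc/grafana/provisioning/datasources/ (assumed path)
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus-operated.prometheus-operator.svc.cluster.local:9090
  - name: Elasticsearch
    type: elasticsearch
    access: proxy
    url: http://elasticsearch.elastic-system.svc.cluster.local
    jsonData:
      timeField: timestamp
      esVersion: "7.10.0"
```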

View File

@@ -1,135 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-config
data:
grafana.ini: |
[log]
level = warn
[server]
domain = grafana.k-space.ee
root_url = https://%(domain)s/
[auth.generic_oauth]
name = OAuth
icon = signin
enabled = true
client_id = grafana
scopes = openid profile email groups
empty_scopes = false
auth_url = https://auth.k-space.ee/api/oidc/authorize
token_url = https://auth.k-space.ee/api/oidc/token
api_url = https://auth.k-space.ee/api/oidc/userinfo
allow_sign_up = true
role_attribute_path = contains(groups[*], 'Grafana Admins') && 'Admin' || 'Viewer'
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: grafana
name: grafana
spec:
revisionHistoryLimit: 0
serviceName: grafana
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
spec:
securityContext:
fsGroup: 472
containers:
- name: grafana
image: grafana/grafana:8.5.0
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 472
envFrom:
- secretRef:
name: oidc-secret
ports:
- containerPort: 3000
name: http-grafana
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /robots.txt
port: 3000
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: 3000
timeoutSeconds: 1
resources:
requests:
cpu: 250m
memory: 750Mi
volumeMounts:
- mountPath: /var/lib/grafana
name: grafana-data
- mountPath: /etc/grafana
name: grafana-config
volumes:
- name: grafana-config
configMap:
name: grafana-config
volumeClaimTemplates:
- metadata:
name: grafana-data
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
name: grafana
spec:
ports:
- port: 80
protocol: TCP
targetPort: http-grafana
selector:
app: grafana
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
spec:
rules:
- host: grafana.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: grafana
port:
number: 80
tls:
- hosts:
- "*.k-space.ee"

View File

@@ -397,6 +397,7 @@ spec:
containers:
- name: core
image: goharbor/harbor-core:v2.4.2
imagePullPolicy: IfNotPresent
startupProbe:
httpGet:
path: /api/v2.0/ping
@@ -405,9 +406,16 @@ spec:
failureThreshold: 360
initialDelaySeconds: 10
periodSeconds: 10
livenessProbe:
httpGet:
path: /api/v2.0/ping
scheme: HTTP
port: 8080
failureThreshold: 2
periodSeconds: 10
readinessProbe:
httpGet:
path: /api/v2.0/projects
path: /api/v2.0/ping
scheme: HTTP
port: 8080
failureThreshold: 2
@@ -464,13 +472,6 @@ spec:
secret:
- name: psc
emptyDir: {}
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
---
# Source: harbor/templates/jobservice/jobservice-dpl.yaml
apiVersion: apps/v1
@@ -501,6 +502,14 @@ spec:
containers:
- name: jobservice
image: goharbor/harbor-jobservice:v2.4.2
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /api/v1/stats
scheme: HTTP
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /api/v1/stats
@@ -535,13 +544,6 @@ spec:
- name: job-logs
persistentVolumeClaim:
claimName: harbor-jobservice
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
---
# Source: harbor/templates/portal/deployment.yaml
apiVersion: apps/v1
@@ -572,6 +574,14 @@ spec:
containers:
- name: portal
image: goharbor/harbor-portal:v2.4.2
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
scheme: HTTP
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /
@@ -589,13 +599,6 @@ spec:
- name: portal-config
configMap:
name: "harbor-portal"
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
---
# Source: harbor/templates/registry/registry-dpl.yaml
apiVersion: apps/v1
@@ -626,6 +629,14 @@ spec:
containers:
- name: registry
image: goharbor/registry-photon:v2.4.2
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
scheme: HTTP
port: 5000
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /
@@ -653,6 +664,14 @@ spec:
subPath: config.yml
- name: registryctl
image: goharbor/harbor-registryctl:v2.4.2
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /api/health
scheme: HTTP
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /api/health
@@ -703,13 +722,6 @@ spec:
- name: registry-data
persistentVolumeClaim:
claimName: harbor-registry
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
---
# Source: harbor/templates/database/database-ss.yaml
apiVersion: apps/v1
@@ -744,6 +756,7 @@ spec:
# we may remove it after several releases
- name: "data-migrator"
image: goharbor/harbor-db:v2.4.2
imagePullPolicy: IfNotPresent
command: ["/bin/sh"]
args: ["-c", "[ -e /var/lib/postgresql/data/postgresql.conf ] && [ ! -d /var/lib/postgresql/data/pgdata ] && mkdir -m 0700 /var/lib/postgresql/data/pgdata && mv /var/lib/postgresql/data/* /var/lib/postgresql/data/pgdata/ || true"]
volumeMounts:
@@ -756,6 +769,7 @@ spec:
# as "fsGroup" applied before the init container running, the container has enough permission to execute the command
- name: "data-permissions-ensurer"
image: goharbor/harbor-db:v2.4.2
imagePullPolicy: IfNotPresent
command: ["/bin/sh"]
args: ["-c", "chmod -R 700 /var/lib/postgresql/data/pgdata || true"]
volumeMounts:
@@ -765,6 +779,13 @@ spec:
containers:
- name: database
image: goharbor/harbor-db:v2.4.2
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- /docker-healthcheck.sh
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
exec:
command:
@@ -790,13 +811,6 @@ spec:
emptyDir:
medium: Memory
sizeLimit: 512Mi
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
volumeClaimTemplates:
- metadata:
name: "database-data"
@@ -839,6 +853,12 @@ spec:
containers:
- name: redis
image: goharbor/redis-photon:v2.4.2
imagePullPolicy: IfNotPresent
livenessProbe:
tcpSocket:
port: 6379
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
tcpSocket:
port: 6379
@@ -848,13 +868,6 @@ spec:
- name: data
mountPath: /var/lib/redis
subPath:
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
volumeClaimTemplates:
- metadata:
name: data
@@ -957,6 +970,15 @@ spec:
mountPath: /home/scanner/.cache
subPath:
readOnly: false
livenessProbe:
httpGet:
scheme: HTTP
path: /probe/healthy
port: api-server
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
scheme: HTTP
@@ -973,13 +995,6 @@ spec:
requests:
cpu: 200m
memory: 512Mi
nodeSelector:
dedicated: storage
tolerations:
- key: dedicated
operator: Equal
value: storage
effect: NoSchedule
volumeClaimTemplates:
- metadata:
name: data
@@ -1001,6 +1016,7 @@ metadata:
labels:
app: harbor
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
ingress.kubernetes.io/proxy-body-size: "0"
ingress.kubernetes.io/ssl-redirect: "true"
@@ -1011,8 +1027,9 @@ metadata:
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- "*.k-space.ee"
- secretName: harbor-tls
hosts:
- harbor.k-space.ee
rules:
- http:
paths:

View File

@@ -1,197 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- nodes/metrics
verbs:
- get
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --kubelet-insecure-tls
- --metric-resolution=15s
image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100

View File

@@ -269,6 +269,7 @@ metadata:
certManager: "true"
rewriteTarget: "true"
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
@@ -288,4 +289,5 @@ spec:
number: 80
tls:
- hosts:
- "*.k-space.ee"
- dashboard.k-space.ee
secretName: dashboard-tls

View File

@@ -5,6 +5,7 @@ metadata:
namespace: longhorn-system
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
@@ -23,7 +24,9 @@ spec:
number: 80
tls:
- hosts:
- "*.k-space.ee"
- longhorn.k-space.ee
secretName: longhorn-tls
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor

View File

@@ -1056,6 +1056,9 @@ spec:
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9500"
labels:
app: longhorn-manager
name: longhorn-backend

View File

@@ -40,6 +40,7 @@ metadata:
name: phpmyadmin
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
@@ -58,7 +59,8 @@ spec:
number: 80
tls:
- hosts:
- "*.k-space.ee"
- phpmyadmin.k-space.ee
secretName: phpmyadmin-tls
---
apiVersion: v1
kind: Service

View File

@@ -1,10 +0,0 @@
# Playground
The playground namespace is accessible to the `Developers` AD group.
A novel log aggregator is being developed in this namespace:
```
kubectl create secret generic -n playground mongodb-application-readwrite-password --from-literal="password=$(cat /dev/urandom | base64 | head -c 30)"
kubectl create secret generic -n playground mongodb-application-readonly-password --from-literal="password=$(cat /dev/urandom | base64 | head -c 30)"
kubectl apply -n playground -f logging.yml -f mongodb-support.yml -f mongoexpress.yml -f networkpolicy-base.yml
```
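A quick sanity check afterwards (a sketch; assumes the MongoDB community operator from `mongodb-operator/` is already running):

```
kubectl -n playground get mongodbcommunity
kubectl -n playground get pods
```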

View File

@@ -1,263 +0,0 @@
---
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: mongodb
spec:
additionalMongodConfig:
systemLog:
quiet: true
members: 3
type: ReplicaSet
version: "5.0.13"
security:
authentication:
modes: ["SCRAM"]
users:
- name: readwrite
db: application
passwordSecretRef:
name: mongodb-application-readwrite-password
roles:
- name: readWrite
db: application
scramCredentialsSecretName: mongodb-application-readwrite
- name: readonly
db: application
passwordSecretRef:
name: mongodb-application-readonly-password
roles:
- name: readOnly
db: application
scramCredentialsSecretName: mongodb-application-readonly
statefulSet:
spec:
logLevel: WARN
template:
spec:
containers:
- name: mongod
resources:
requests:
cpu: 100m
memory: 2Gi
limits:
cpu: 2000m
memory: 2Gi
- name: mongodb-agent
resources:
requests:
cpu: 1m
memory: 100Mi
limits: {}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- mongodb-svc
topologyKey: kubernetes.io/hostname
nodeSelector:
dedicated: monitoring
tolerations:
- key: dedicated
operator: Equal
value: monitoring
effect: NoSchedule
volumeClaimTemplates:
- metadata:
name: logs-volume
spec:
storageClassName: local-path
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 512Mi
- metadata:
name: data-volume
spec:
storageClassName: local-path
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: log-shipper
spec:
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 50%
selector:
matchLabels:
app: log-shipper
template:
metadata:
labels:
app: log-shipper
spec:
serviceAccountName: log-shipper
containers:
- name: log-shipper
image: harbor.k-space.ee/k-space/log-shipper
securityContext:
runAsUser: 0
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MONGODB_HOST
valueFrom:
secretKeyRef:
name: mongodb-application-readwrite
key: connectionString.standard
ports:
- containerPort: 8000
name: metrics
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: etcmachineid
mountPath: /etc/machine-id
readOnly: true
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: varlog
mountPath: /var/log
readOnly: true
volumes:
- name: etcmachineid
hostPath:
path: /etc/machine-id
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: varlog
hostPath:
path: /var/log
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: logging-log-shipper
subjects:
- kind: ServiceAccount
name: log-shipper
namespace: playground
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: log-shipper
labels:
app: log-shipper
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: log-shipper
spec:
podSelector:
matchLabels:
app: log-shipper
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: prometheus-operator
podSelector:
matchLabels:
app.kubernetes.io/name: prometheus
egress:
- to:
- podSelector:
matchLabels:
app: mongodb-svc
ports:
- port: 27017
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: log-viewer-backend
spec:
podSelector:
matchLabels:
app: log-viewer-backend
policyTypes:
- Ingress
- Egress
egress:
- to:
- podSelector:
matchLabels:
app: mongodb-svc
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: log-viewer-frontend
spec:
podSelector:
matchLabels:
app: log-viewer-frontend
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: traefik
podSelector:
matchLabels:
app.kubernetes.io/name: traefik
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: log-shipper
spec:
selector:
matchLabels:
app: log-shipper
podMetricsEndpoints:
- port: metrics

View File

@@ -1 +0,0 @@
../mongodb-operator/mongodb-support.yml

View File

@@ -1 +0,0 @@
../shared/mongoexpress.yml

View File

@@ -1 +0,0 @@
../shared/networkpolicy-base.yml

View File

@@ -399,6 +399,7 @@ kind: Ingress
metadata:
name: prometheus
annotations:
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
@@ -417,13 +418,15 @@ spec:
number: 9090
tls:
- hosts:
- "*.k-space.ee"
- prom.k-space.ee
secretName: prom-tls
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: alertmanager
annotations:
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
@@ -442,7 +445,8 @@ spec:
number: 9093
tls:
- hosts:
- "*.k-space.ee"
- am.k-space.ee
secretName: alertmanager-tls
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor

View File

@@ -156,7 +156,7 @@ metadata:
name: blackbox-exporter
spec:
revisionHistoryLimit: 0
replicas: 3
replicas: 2
selector:
matchLabels:
app: blackbox-exporter

View File

@@ -5,6 +5,5 @@ Calico implements the inter-pod overlay network
```
curl https://projectcalico.docs.tigera.io/manifests/tigera-operator.yaml -O
curl https://projectcalico.docs.tigera.io/manifests/custom-resources.yaml -O
kubectl apply -f custom-resources.yaml
kubectl replace -f tigera-operator.yaml
kubectl apply -f tigera-operator.yaml -f custom-resources.yaml
```
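Progress of the operator-driven install can be watched afterwards; a brief sketch (assumes the Tigera operator CRDs are in place):

```
kubectl get tigerastatus
kubectl -n calico-system get pods
```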

View File

@@ -0,0 +1,64 @@
#!/bin/bash
NAMESPACE=${NAMESPACE:-longhorn-system}
remove_and_wait() {
local crd=$1
out=`kubectl -n ${NAMESPACE} delete $crd --all 2>&1`
if [ $? -ne 0 ]; then
echo $out
return
fi
while true; do
out=`kubectl -n ${NAMESPACE} get $crd -o yaml | grep 'items: \[\]'`
if [ $? -eq 0 ]; then
break
fi
sleep 1
done
echo all $crd instances deleted
}
remove_crd_instances() {
remove_and_wait volumes.longhorn.rancher.io
# TODO: remove engines and replicas once we fix https://github.com/rancher/longhorn/issues/273
remove_and_wait engines.longhorn.rancher.io
remove_and_wait replicas.longhorn.rancher.io
remove_and_wait engineimages.longhorn.rancher.io
remove_and_wait settings.longhorn.rancher.io
# do this one last; manager crashes
remove_and_wait nodes.longhorn.rancher.io
}
# Delete driver related workloads in specific order
remove_driver() {
kubectl -n ${NAMESPACE} delete deployment.apps/longhorn-driver-deployer
kubectl -n ${NAMESPACE} delete daemonset.apps/longhorn-csi-plugin
kubectl -n ${NAMESPACE} delete statefulset.apps/csi-attacher
kubectl -n ${NAMESPACE} delete service/csi-attacher
kubectl -n ${NAMESPACE} delete statefulset.apps/csi-provisioner
kubectl -n ${NAMESPACE} delete service/csi-provisioner
kubectl -n ${NAMESPACE} delete daemonset.apps/longhorn-flexvolume-driver
}
# Delete all workloads in the namespace
remove_workloads() {
kubectl -n ${NAMESPACE} get daemonset.apps -o yaml | kubectl delete -f -
kubectl -n ${NAMESPACE} get deployment.apps -o yaml | kubectl delete -f -
kubectl -n ${NAMESPACE} get replicaset.apps -o yaml | kubectl delete -f -
kubectl -n ${NAMESPACE} get statefulset.apps -o yaml | kubectl delete -f -
kubectl -n ${NAMESPACE} get pods -o yaml | kubectl delete -f -
kubectl -n ${NAMESPACE} get service -o yaml | kubectl delete -f -
}
# Delete CRD definitions with longhorn.rancher.io in the name
remove_crds() {
for crd in $(kubectl get crd -o jsonpath={.items[*].metadata.name} | tr ' ' '\n' | grep longhorn.rancher.io); do
kubectl delete crd/$crd
done
}
remove_crd_instances
remove_driver
remove_workloads
remove_crds

View File

@@ -1,5 +1,5 @@
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
# For more information, see: https://projectcalico.docs.tigera.io/v3.23/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
@@ -10,7 +10,7 @@ spec:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: 26
cidr: 10.244.0.0/16
cidr: 192.168.0.0/16
encapsulation: VXLANCrossSubnet
natOutgoing: Enabled
nodeSelector: all()
@@ -18,7 +18,7 @@ spec:
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
# For more information, see: https://projectcalico.docs.tigera.io/v3.23/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:

File diff suppressed because it is too large

View File

@@ -64,16 +64,8 @@ spec:
number: 9000
tls:
- hosts:
- "*.k-space.ee"
secretName: wildcard-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: TLSStore
metadata:
name: default
spec:
defaultCertificate:
secretName: wildcard-tls
- traefik.k-space.ee
secretName: traefik-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware

View File

@@ -1,34 +1,3 @@
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: proxmox
spec:
entryPoints:
- https
routes:
- kind: Rule
match: Host(`pve.k-space.ee`)
priority: 10
middlewares:
- name: proxmox-redirect
- name: traefik-sso@kubernetescrd
- name: traefik-proxmox-redirect@kubernetescrd
services:
- kind: Service
name: pve1
passHostHeader: true
port: 8006
responseForwarding:
flushInterval: 1ms
scheme: https
serversTransport: proxmox-servers-transport
tls:
secretName: pve
domains:
- main: pve.k-space.ee
sans:
- "*.k-space.ee"
apiVersion: traefik.containo.us/v1alpha1
kind: ServersTransport
metadata:
@@ -87,6 +56,101 @@ data:
RWRmRHIzNTBpZkRCQkVuL3RvL3JUczFOVjhyOGpjcG14a2MzNjlSQXp3TmJiRVkKMVE9PQotLS0t
LUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
---
apiVersion: v1
kind: Service
metadata:
name: pve1
annotations:
traefik.ingress.kubernetes.io/service.serverstransport: traefik-proxmox-servers-transport@kubernetescrd
spec:
type: ExternalName
externalName: pve1.proxmox.infra.k-space.ee
ports:
- name: https
port: 8006
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: pve8
annotations:
traefik.ingress.kubernetes.io/service.serverstransport: traefik-proxmox-servers-transport@kubernetescrd
spec:
type: ExternalName
externalName: pve8.proxmox.infra.k-space.ee
ports:
- name: https
port: 8006
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: pve9
annotations:
traefik.ingress.kubernetes.io/service.serverstransport: traefik-proxmox-servers-transport@kubernetescrd
spec:
type: ExternalName
externalName: pve9.proxmox.infra.k-space.ee
ports:
- name: https
port: 8006
protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: pve
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd,traefik-proxmox-redirect@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
rules:
- host: proxmox.k-space.ee
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: whoami
port:
number: 80
- host: pve.k-space.ee
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: pve1
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve8
port:
number: 8006
- pathType: Prefix
path: "/"
backend:
service:
name: pve9
port:
number: 8006
tls:
- hosts:
- pve.k-space.ee
- proxmox.k-space.ee
secretName: pve-tls
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:

View File

@@ -1,36 +1,13 @@
image:
tag: "2.9"
tag: "2.8"
websecure:
tls:
enabled: true
providers:
kubernetesCRD:
enabled: true
namespaces:
- traefik
- authelia
kubernetesIngress:
allowEmptyServices: true
allowExternalNameServices: true
namespaces:
- argocd
- authelia
- camtiler
- drone
- elastic-system
- etherpad
- freescout
- grafana
- harbor
- kubernetes-dashboard
- logging
- longhorn-system
- phpmyadmin
- prometheus-operator
- wildduck
deployment:
replicas: 2

View File

@@ -17,6 +17,7 @@ metadata:
name: voron
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
@@ -35,4 +36,5 @@ spec:
name: http
tls:
- hosts:
- "*.k-space.ee"
- voron.k-space.ee
secretName: voron-tls

View File

@@ -41,6 +41,7 @@ kind: Ingress
metadata:
name: whoami
annotations:
cert-manager.io/cluster-issuer: default
external-dns.alpha.kubernetes.io/target: traefik.k-space.ee
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
@@ -49,7 +50,8 @@ metadata:
spec:
tls:
- hosts:
- "*.k-space.ee"
- "whoami.k-space.ee"
secretName: whoami-tls
rules:
- host: "whoami.k-space.ee"
http:

View File

@@ -104,6 +104,7 @@ metadata:
namespace: wildduck
annotations:
kubernetes.io/ingress.class: traefik
cert-manager.io/cluster-issuer: default
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: traefik-sso@kubernetescrd
traefik.ingress.kubernetes.io/router.tls: "true"
@@ -122,7 +123,8 @@ spec:
number: 80
tls:
- hosts:
- "*.k-space.ee"
- webmail.k-space.ee
secretName: webmail-tls
---
apiVersion: codemowers.io/v1alpha1
kind: KeyDBCluster