diff --git a/harbor/README.md b/harbor/README.md index 829f9bc..4125b0d 100644 --- a/harbor/README.md +++ b/harbor/README.md @@ -1,8 +1,12 @@ Deploy with: ``` -kubectl create namespace harbor -kubectl apply -n harbor -f application.yml -f application-secrets.yml +kubectl create namespace harbor-operator +kubectl -n harbor-operator create secret generic harbor-minio-credentials --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=... --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=... +kubectl -n harbor-operator create secret generic harbor-postgres-password --from-literal=password=... + +helm template -n harbor-operator --release-name harbor harbor/harbor --include-crds -f harbor/values.yaml > harbor/application.yml +kubectl apply -n harbor-operator -f harbor/application.yml -f harbor/application-extras.yml ``` After deployment login with Harbor admin credentials and configure OIDC: diff --git a/harbor/application-extras.yml b/harbor/application-extras.yml index 22a24ce..a96159c 100644 --- a/harbor/application-extras.yml +++ b/harbor/application-extras.yml @@ -20,4 +20,38 @@ spec: - openid - profile pkce: false - \ No newline at end of file +--- +apiVersion: codemowers.cloud/v1beta1 +kind: MinioBucketClaim +metadata: + name: harbor + namespace: harbor-operator +spec: + capacity: 1Ti + class: external +--- +apiVersion: codemowers.cloud/v1beta1 +kind: SecretClaim +metadata: + name: dragonfly-auth +spec: + size: 32 + mapping: + - key: REDIS_PASSWORD + value: "%(plaintext)s" + - key: REDIS_URI + value: "redis://:%(plaintext)s@dragonfly" +--- +apiVersion: dragonflydb.io/v1alpha1 +kind: Dragonfly +metadata: + name: dragonfly +spec: + authentication: + passwordFromSecret: + key: REDIS_PASSWORD + name: dragonfly-auth + replicas: 3 + resources: + limits: + memory: 5Gi \ No newline at end of file diff --git a/harbor/application.yml b/harbor/application.yml index 7aa3e0e..394c72a 100644 --- a/harbor/application.yml +++ b/harbor/application.yml @@ -5,7 +5,15 @@ kind: ConfigMap metadata: name: harbor-core labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" data: app.conf: |+ appname = Harbor @@ -16,10 +24,10 @@ data: httpport = 8080 PORT: "8080" DATABASE_TYPE: "postgresql" - POSTGRESQL_HOST: harbor-database + POSTGRESQL_HOST: "172.20.43.1" POSTGRESQL_PORT: "5432" - POSTGRESQL_USERNAME: "postgres" - POSTGRESQL_DATABASE: "registry" + POSTGRESQL_USERNAME: "kspace_harbor" + POSTGRESQL_DATABASE: "kspace_harbor" POSTGRESQL_SSLMODE: "disable" POSTGRESQL_MAX_IDLE_CONNS: "100" POSTGRESQL_MAX_OPEN_CONNS: "900" @@ -28,30 +36,69 @@ data: JOBSERVICE_URL: "http://harbor-jobservice" REGISTRY_URL: "http://harbor-registry:5000" TOKEN_SERVICE_URL: "http://harbor-core:80/service/token" - WITH_NOTARY: "false" - NOTARY_URL: "http://harbor-notary-server:4443" CORE_LOCAL_URL: "http://127.0.0.1:8080" - WITH_TRIVY: "true" + WITH_TRIVY: "false" TRIVY_ADAPTER_URL: "http://harbor-trivy:8080" - REGISTRY_STORAGE_PROVIDER_NAME: "filesystem" - WITH_CHARTMUSEUM: "false" - LOG_LEVEL: "warning" + REGISTRY_STORAGE_PROVIDER_NAME: "s3" + LOG_LEVEL: "debug" CONFIG_PATH: "/etc/core/app.conf" CHART_CACHE_DRIVER: "redis" - _REDIS_URL_CORE: "redis://harbor-redis:6379/0?idle_timeout_seconds=30" - _REDIS_URL_REG: "redis://harbor-redis:6379/2?idle_timeout_seconds=30" + _REDIS_URL_CORE: 
"redis://:MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge@dragonfly:6379/0?idle_timeout_seconds=30" + _REDIS_URL_REG: "redis://:MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge@dragonfly:6379/2?idle_timeout_seconds=30" PORTAL_URL: "http://harbor-portal" REGISTRY_CONTROLLER_URL: "http://harbor-registry:8080" REGISTRY_CREDENTIAL_USERNAME: "harbor_registry_user" HTTP_PROXY: "" HTTPS_PROXY: "" NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal" - PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry" + PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory" METRIC_ENABLE: "true" METRIC_PATH: "/metrics" METRIC_PORT: "8001" METRIC_NAMESPACE: harbor METRIC_SUBSYSTEM: core + QUOTA_UPDATE_PROVIDER: "db" +--- +# Source: harbor/templates/exporter/exporter-cm-env.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: "harbor-exporter-env" + labels: + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" +data: + HTTP_PROXY: "" + HTTPS_PROXY: "" + NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal" + LOG_LEVEL: "debug" + HARBOR_EXPORTER_PORT: "8001" + HARBOR_EXPORTER_METRICS_PATH: "/metrics" + HARBOR_EXPORTER_METRICS_ENABLED: "true" + HARBOR_EXPORTER_CACHE_TIME: "23" + HARBOR_EXPORTER_CACHE_CLEAN_INTERVAL: "14400" + HARBOR_METRIC_NAMESPACE: harbor + HARBOR_METRIC_SUBSYSTEM: exporter + HARBOR_REDIS_URL: "redis://:MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge@dragonfly:6379/1" + HARBOR_REDIS_NAMESPACE: harbor_job_service_namespace + HARBOR_REDIS_TIMEOUT: "3600" + HARBOR_SERVICE_SCHEME: "http" + HARBOR_SERVICE_HOST: "harbor-core" + HARBOR_SERVICE_PORT: "80" + HARBOR_DATABASE_HOST: "172.20.43.1" + HARBOR_DATABASE_PORT: "5432" + HARBOR_DATABASE_USERNAME: "kspace_harbor" + HARBOR_DATABASE_DBNAME: "kspace_harbor" + HARBOR_DATABASE_SSLMODE: "disable" + HARBOR_DATABASE_MAX_IDLE_CONNS: "100" + HARBOR_DATABASE_MAX_OPEN_CONNS: "900" --- # Source: harbor/templates/jobservice/jobservice-cm-env.yaml apiVersion: v1 @@ -59,16 +106,29 @@ kind: ConfigMap metadata: name: "harbor-jobservice-env" labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" data: CORE_URL: "http://harbor-core:80" TOKEN_SERVICE_URL: "http://harbor-core:80/service/token" REGISTRY_URL: "http://harbor-registry:5000" REGISTRY_CONTROLLER_URL: "http://harbor-registry:8080" REGISTRY_CREDENTIAL_USERNAME: "harbor_registry_user" + + JOBSERVICE_WEBHOOK_JOB_MAX_RETRY: "3" + JOBSERVICE_WEBHOOK_JOB_HTTP_CLIENT_TIMEOUT: "3" HTTP_PROXY: "" HTTPS_PROXY: "" NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal" + METRIC_NAMESPACE: harbor + METRIC_SUBSYSTEM: jobservice --- # Source: harbor/templates/jobservice/jobservice-cm.yaml apiVersion: v1 @@ -76,7 +136,15 @@ kind: ConfigMap metadata: name: "harbor-jobservice" labels: - app: harbor + heritage: Helm + release: harbor + 
chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" data: config.yml: |+ #Server listening port @@ -86,12 +154,12 @@ data: workers: 10 backend: "redis" redis_pool: - redis_url: "redis://harbor-redis:6379/1" + redis_url: "redis://:MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge@dragonfly:6379/1" namespace: "harbor_job_service_namespace" idle_timeout_second: 3600 job_loggers: - name: "FILE" - level: INFO + level: DEBUG settings: # Customized settings of logger base_dir: "/var/log/jobs" sweeper: @@ -99,13 +167,18 @@ data: settings: # Customized settings of sweeper work_dir: "/var/log/jobs" metric: - enabled: false + enabled: true path: /metrics port: 8001 #Loggers for the job service loggers: - name: "STD_OUTPUT" - level: INFO + level: DEBUG + reaper: + # the max time to wait for a task to finish, if unfinished after max_update_hours, the task will be mark as error, but the task will continue to run, default value is 24 + max_update_hours: 24 + # the max time for execution in running state without new task created + max_dangling_hours: 168 --- # Source: harbor/templates/portal/configmap.yaml apiVersion: v1 @@ -113,7 +186,15 @@ kind: ConfigMap metadata: name: "harbor-portal" labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" data: nginx.conf: |+ worker_processes auto; @@ -138,6 +219,9 @@ data: gzip_min_length 1000; gzip_proxied expired no-cache no-store private auth; gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript; + location /devcenter-api-2.0 { + try_files $uri $uri/ /swagger-ui-index.html; + } location / { try_files $uri $uri/ /index.html; } @@ -153,29 +237,43 @@ kind: ConfigMap metadata: name: "harbor-registry" labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" data: config.yml: |+ version: 0.1 log: - level: info + level: debug fields: service: registry storage: - filesystem: - rootdirectory: /storage + s3: + region: us-east-1 + bucket: harbor-operator-e60e5943-234a-496d-ae74-933f6a67c530 + regionendpoint: https://external.minio-clusters.k-space.ee cache: layerinfo: redis maintenance: uploadpurging: - enabled: false + enabled: true + age: 168h + interval: 24h + dryrun: false delete: enabled: true redirect: disable: false redis: - addr: harbor-redis:6379 + addr: dragonfly:6379 db: 2 + password: MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge readtimeout: 10s writetimeout: 10s dialtimeout: 10s @@ -189,7 +287,10 @@ data: # set via environment variable # secret: placeholder debug: - addr: localhost:5001 + addr: :8001 + prometheus: + enabled: true + path: /metrics auth: htpasswd: realm: harbor-registry-basic-realm @@ -203,7 +304,7 @@ data: --- protocol: "http" port: 8080 - log_level: info + log_level: debug registry_config: "/etc/registry/config.yml" --- # Source: harbor/templates/registry/registryctl-cm.yaml @@ -212,7 +313,15 @@ kind: ConfigMap metadata: name: "harbor-registryctl" labels: - app: harbor + heritage: Helm + release: 
harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" data: --- # Source: harbor/templates/jobservice/jobservice-pvc.yaml @@ -220,31 +329,26 @@ kind: PersistentVolumeClaim apiVersion: v1 metadata: name: harbor-jobservice + annotations: + helm.sh/resource-policy: keep labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" component: jobservice + app.kubernetes.io/component: jobservice spec: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: - storage: 1Gi - storageClassName: longhorn ---- -# Source: harbor/templates/registry/registry-pvc.yaml -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: harbor-registry - labels: - app: harbor - component: registry -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 30Gi + storage: 5Gi storageClassName: longhorn --- # Source: harbor/templates/core/core-svc.yaml @@ -253,7 +357,15 @@ kind: Service metadata: name: harbor-core labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" spec: ports: - name: http-web @@ -262,20 +374,33 @@ spec: - name: http-metrics port: 8001 selector: - app: harbor + release: harbor + app: "harbor" component: core --- -# Source: harbor/templates/database/database-svc.yaml +# Source: harbor/templates/exporter/exporter-svc.yaml apiVersion: v1 kind: Service metadata: - name: harbor-database + name: "harbor-exporter" + labels: + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" spec: ports: - - port: 5432 + - name: http-metrics + port: 8001 selector: - app: harbor - component: database + release: harbor + app: "harbor" + component: exporter --- # Source: harbor/templates/jobservice/jobservice-svc.yaml apiVersion: v1 @@ -283,14 +408,25 @@ kind: Service metadata: name: "harbor-jobservice" labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" spec: ports: - name: http-jobservice port: 80 targetPort: 8080 + - name: http-metrics + port: 8001 selector: - app: harbor + release: harbor + app: "harbor" component: jobservice --- # Source: harbor/templates/portal/service.yaml @@ -299,50 +435,39 @@ kind: Service metadata: name: "harbor-portal" labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" spec: ports: - port: 80 targetPort: 8080 selector: - app: harbor + release: harbor + app: "harbor" component: portal --- -# Source: harbor/templates/redis/service.yaml -apiVersion: v1 -kind: 
Service -metadata: - name: harbor-redis - labels: - app: harbor -spec: - ports: - - port: 6379 - selector: - app: harbor - component: redis ---- -# Source: harbor/templates/redis/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: k6-harbor-redis - labels: - app: harbor -spec: - ports: - - port: 6379 - selector: - app: harbor - component: redis ---- # Source: harbor/templates/registry/registry-svc.yaml apiVersion: v1 kind: Service metadata: name: "harbor-registry" labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" spec: ports: - name: http-registry @@ -350,44 +475,56 @@ spec: - name: http-controller port: 8080 + - name: http-metrics + port: 8001 selector: - app: harbor + release: harbor + app: "harbor" component: registry --- -# Source: harbor/templates/trivy/trivy-svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: harbor-trivy -spec: - ports: - - name: http-trivy - protocol: TCP - port: 8080 - selector: - app: harbor - component: trivy ---- # Source: harbor/templates/core/core-dpl.yaml apiVersion: apps/v1 kind: Deployment metadata: name: harbor-core labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" component: core + app.kubernetes.io/component: core spec: - replicas: 3 + replicas: 1 + revisionHistoryLimit: 10 selector: matchLabels: - app: harbor + release: harbor + app: "harbor" component: core template: metadata: labels: - release: k6 - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" component: core + app.kubernetes.io/component: core + annotations: + checksum/configmap: 9ea7f1881e4fe5b908355ee28e246b67c8c498d2f719dd74a5536a51ee2d9865 + checksum/secret: ca32f9ad83fe0ed3d8ebb51e01558e15a66ea80eb95dae096d00a19e25f2f239 + checksum/secret-jobservice: e54d9ef76e5cfb44adfcf233be3c39cc3f6d15ed61a36c4370fa5948c3192f38 spec: securityContext: runAsUser: 10000 @@ -396,7 +533,8 @@ spec: terminationGracePeriodSeconds: 120 containers: - name: core - image: mirror.gcr.io/goharbor/harbor-core:v2.4.2 + image: goharbor/harbor-core:v2.11.0 + imagePullPolicy: IfNotPresent startupProbe: httpGet: path: /api/v2.0/ping @@ -405,9 +543,16 @@ spec: failureThreshold: 360 initialDelaySeconds: 10 periodSeconds: 10 + livenessProbe: + httpGet: + path: /api/v2.0/ping + scheme: HTTP + port: 8080 + failureThreshold: 2 + periodSeconds: 10 readinessProbe: httpGet: - path: /api/v2.0/projects + path: /api/v2.0/ping scheme: HTTP port: 8080 failureThreshold: 2 @@ -426,8 +571,22 @@ spec: - name: JOBSERVICE_SECRET valueFrom: secretKeyRef: - name: "harbor-jobservice" + name: harbor-jobservice key: JOBSERVICE_SECRET + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: harbor-postgres-password + key: password + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault ports: - containerPort: 8080 volumeMounts: @@ -453,7 +612,7 @@ spec: path: app.conf - 
name: secret-key secret: - secretName: harbor-core + secretName: harbor-core-oidc-secret-encryption-key items: - key: secretKey path: key @@ -462,36 +621,148 @@ spec: secretName: harbor-core - name: ca-download secret: + secretName: "harbor-ingress" - name: psc emptyDir: {} - nodeSelector: - dedicated: storage - tolerations: - - key: dedicated - operator: Equal - value: storage - effect: NoSchedule +--- +# Source: harbor/templates/exporter/exporter-dpl.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: harbor-exporter + labels: + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" + component: exporter + app.kubernetes.io/component: exporter +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + release: harbor + app: "harbor" + component: exporter + template: + metadata: + labels: + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" + component: exporter + app.kubernetes.io/component: exporter + annotations: + checksum/configmap: 79f5dcd087513f8f1d03fca430907faeb9bd7df805dbb251b750fb49ccb0f0b5 + checksum/secret: 55bad27b07dca97c644c9977eb8c3da9c08c8b8bbda2854878d2936a8da28508 + spec: + securityContext: + runAsUser: 10000 + fsGroup: 10000 + automountServiceAccountToken: false + containers: + - name: exporter + image: goharbor/harbor-exporter:v2.11.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: / + port: 8001 + initialDelaySeconds: 300 + periodSeconds: 10 + readinessProbe: + httpGet: + path: / + port: 8001 + initialDelaySeconds: 30 + periodSeconds: 10 + args: ["-log-level", "debug"] + envFrom: + - configMapRef: + name: "harbor-exporter-env" + - secretRef: + name: "harbor-exporter" + env: + - name: HARBOR_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: harbor-postgres-password + key: password + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + ports: + - containerPort: 8001 + volumeMounts: + volumes: + - name: config + secret: + secretName: "harbor-exporter" --- # Source: harbor/templates/jobservice/jobservice-dpl.yaml apiVersion: apps/v1 -kind: StatefulSet +kind: Deployment metadata: name: "harbor-jobservice" labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" component: jobservice + app.kubernetes.io/component: jobservice spec: replicas: 1 - serviceName: jobservice + revisionHistoryLimit: 10 + strategy: + type: RollingUpdate selector: matchLabels: - app: harbor + release: harbor + app: "harbor" component: jobservice template: metadata: labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" component: jobservice + app.kubernetes.io/component: jobservice + annotations: + checksum/configmap: 
3a35bef831e58536bf86670117b43e2913a4c1a60d0e74d948559d7a7d564684 + checksum/configmap-env: 80e8b81abf755707210d6112ad65167a7d53088b209f63c603d308ef68c4cfad + checksum/secret: 8f842279ee68a874f9f099e41130fc9792a74bb594ac52eb5615587636988526 + checksum/secret-core: d210f333cfb703a98116fd88d154fb61ed81a81b4276f042496d53e622702c5c spec: securityContext: runAsUser: 10000 @@ -500,7 +771,15 @@ spec: terminationGracePeriodSeconds: 120 containers: - name: jobservice - image: mirror.gcr.io/goharbor/harbor-jobservice:v2.4.2 + image: goharbor/harbor-jobservice:v2.11.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/v1/stats + scheme: HTTP + port: 8080 + initialDelaySeconds: 300 + periodSeconds: 10 readinessProbe: httpGet: path: /api/v1/stats @@ -514,6 +793,15 @@ spec: secretKeyRef: name: harbor-core key: secret + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault envFrom: - configMapRef: name: "harbor-jobservice-env" @@ -535,13 +823,6 @@ spec: - name: job-logs persistentVolumeClaim: claimName: harbor-jobservice - nodeSelector: - dedicated: storage - tolerations: - - key: dedicated - operator: Equal - value: storage - effect: NoSchedule --- # Source: harbor/templates/portal/deployment.yaml apiVersion: apps/v1 @@ -549,21 +830,41 @@ kind: Deployment metadata: name: "harbor-portal" labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" component: portal + app.kubernetes.io/component: portal spec: replicas: 1 + revisionHistoryLimit: 10 selector: matchLabels: - app: harbor + release: harbor + app: "harbor" component: portal template: metadata: labels: - release: k6 - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" component: portal + app.kubernetes.io/component: portal annotations: + checksum/configmap: d1b4818dc76aa5b382b435491e437f3c5f9795bf1fb019c82b003f75e7bc3d8f spec: securityContext: runAsUser: 10000 @@ -571,7 +872,24 @@ spec: automountServiceAccountToken: false containers: - name: portal - image: mirror.gcr.io/goharbor/harbor-portal:v2.4.2 + image: goharbor/harbor-portal:v2.11.0 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + livenessProbe: + httpGet: + path: / + scheme: HTTP + port: 8080 + initialDelaySeconds: 300 + periodSeconds: 10 readinessProbe: httpGet: path: / @@ -589,43 +907,71 @@ spec: - name: portal-config configMap: name: "harbor-portal" - nodeSelector: - dedicated: storage - tolerations: - - key: dedicated - operator: Equal - value: storage - effect: NoSchedule --- # Source: harbor/templates/registry/registry-dpl.yaml apiVersion: apps/v1 -kind: StatefulSet +kind: Deployment metadata: name: "harbor-registry" labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" component: 
registry + app.kubernetes.io/component: registry spec: replicas: 1 - serviceName: registry + revisionHistoryLimit: 10 + strategy: + type: RollingUpdate selector: matchLabels: - app: harbor + release: harbor + app: "harbor" component: registry template: metadata: labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" component: registry + app.kubernetes.io/component: registry + annotations: + checksum/configmap: b11f146e734a9ac7c3df9f83562e7ac5fea9e2b10b89118f19207c9b95104496 + checksum/secret: a441697613dc9da44a7147c0212aafcfb5e12cc4dfb7130b55851b6a502ceac6 + checksum/secret-jobservice: b316e2054db578411b0078450fe05fc52b227cead30b5981ed20c2c97f8c7d8b + checksum/secret-core: 2f3ad0d88e741a710ff8068d2f687b1873667bebb472ddb85726b87375a9e1c6 spec: securityContext: runAsUser: 10000 fsGroup: 10000 + fsGroupChangePolicy: OnRootMismatch automountServiceAccountToken: false terminationGracePeriodSeconds: 120 containers: - name: registry - image: mirror.gcr.io/goharbor/registry-photon:v2.4.2 + image: goharbor/registry-photon:v2.11.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: / + scheme: HTTP + port: 5000 + initialDelaySeconds: 300 + periodSeconds: 10 readinessProbe: httpGet: path: / @@ -633,14 +979,25 @@ spec: port: 5000 initialDelaySeconds: 1 periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault args: ["serve", "/etc/registry/config.yml"] envFrom: - secretRef: name: "harbor-registry" + - secretRef: + name: harbor-minio-credentials env: ports: - containerPort: 5000 - - containerPort: 5001 + - containerPort: 8001 volumeMounts: - name: registry-data mountPath: /storage @@ -652,7 +1009,15 @@ spec: mountPath: /etc/registry/config.yml subPath: config.yml - name: registryctl - image: mirror.gcr.io/goharbor/harbor-registryctl:v2.4.2 + image: goharbor/harbor-registryctl:v2.11.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /api/health + scheme: HTTP + port: 8080 + initialDelaySeconds: 300 + periodSeconds: 10 readinessProbe: httpGet: path: /api/health @@ -660,6 +1025,15 @@ spec: port: 8080 initialDelaySeconds: 1 periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault envFrom: - configMapRef: name: "harbor-registryctl" @@ -667,6 +1041,8 @@ spec: name: "harbor-registry" - secretRef: name: "harbor-registryctl" + - secretRef: + name: harbor-minio-credentials env: - name: CORE_SECRET valueFrom: @@ -694,6 +1070,7 @@ spec: - name: registry-htpasswd secret: secretName: harbor-registry-htpasswd + items: - key: REGISTRY_HTPASSWD path: passwd @@ -701,297 +1078,7 @@ spec: configMap: name: "harbor-registry" - name: registry-data - persistentVolumeClaim: - claimName: harbor-registry - nodeSelector: - dedicated: storage - tolerations: - - key: dedicated - operator: Equal - value: storage - effect: NoSchedule ---- -# Source: harbor/templates/database/database-ss.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: harbor-database - labels: - app: harbor - component: database -spec: - replicas: 1 - serviceName: harbor-database - selector: - matchLabels: - app: harbor - component: database - template: - 
metadata: - labels: - app: harbor - component: database - spec: - securityContext: - runAsUser: 999 - fsGroup: 999 - automountServiceAccountToken: false - terminationGracePeriodSeconds: 120 - initContainers: - # as we change the data directory to a sub folder to support psp, the init container here - # is used to migrate the existing data. See https://github.com/goharbor/harbor-helm/issues/756 - # for more detail. - # we may remove it after several releases - - name: "data-migrator" - image: mirror.gcr.io/goharbor/harbor-db:v2.4.2 - command: ["/bin/sh"] - args: ["-c", "[ -e /var/lib/postgresql/data/postgresql.conf ] && [ ! -d /var/lib/postgresql/data/pgdata ] && mkdir -m 0700 /var/lib/postgresql/data/pgdata && mv /var/lib/postgresql/data/* /var/lib/postgresql/data/pgdata/ || true"] - volumeMounts: - - name: database-data - mountPath: /var/lib/postgresql/data - subPath: - # with "fsGroup" set, each time a volume is mounted, Kubernetes must recursively chown() and chmod() all the files and directories inside the volume - # this causes the postgresql reports the "data directory /var/lib/postgresql/data/pgdata has group or world access" issue when using some CSIs e.g. Ceph - # use this init container to correct the permission - # as "fsGroup" applied before the init container running, the container has enough permission to execute the command - - name: "data-permissions-ensurer" - image: mirror.gcr.io/goharbor/harbor-db:v2.4.2 - command: ["/bin/sh"] - args: ["-c", "chmod -R 700 /var/lib/postgresql/data/pgdata || true"] - volumeMounts: - - name: database-data - mountPath: /var/lib/postgresql/data - subPath: - containers: - - name: database - image: mirror.gcr.io/goharbor/harbor-db:v2.4.2 - readinessProbe: - exec: - command: - - /docker-healthcheck.sh - initialDelaySeconds: 1 - periodSeconds: 10 - envFrom: - - secretRef: - name: harbor-database - env: - # put the data into a sub directory to avoid the permission issue in k8s with restricted psp enabled - # more detail refer to https://github.com/goharbor/harbor-helm/issues/756 - - name: PGDATA - value: "/var/lib/postgresql/data/pgdata" - volumeMounts: - - name: database-data - mountPath: /var/lib/postgresql/data - subPath: - - name: shm-volume - mountPath: /dev/shm - volumes: - - name: shm-volume - emptyDir: - medium: Memory - sizeLimit: 512Mi - nodeSelector: - dedicated: storage - tolerations: - - key: dedicated - operator: Equal - value: storage - effect: NoSchedule - volumeClaimTemplates: - - metadata: - name: "database-data" - labels: - app: harbor - spec: - accessModes: - - ReadWriteOnce - storageClassName: longhorn - resources: - requests: - storage: "1Gi" ---- -# Source: harbor/templates/redis/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: harbor-redis - labels: - app: harbor - component: redis -spec: - replicas: 1 - serviceName: harbor-redis - selector: - matchLabels: - app: harbor - component: redis - template: - metadata: - labels: - app: harbor - component: redis - spec: - securityContext: - runAsUser: 999 - fsGroup: 999 - automountServiceAccountToken: false - terminationGracePeriodSeconds: 120 - containers: - - name: redis - image: mirror.gcr.io/goharbor/redis-photon:v2.4.2 - readinessProbe: - tcpSocket: - port: 6379 - initialDelaySeconds: 1 - periodSeconds: 10 - volumeMounts: - - name: data - mountPath: /var/lib/redis - subPath: - nodeSelector: - dedicated: storage - tolerations: - - key: dedicated - operator: Equal - value: storage - effect: NoSchedule - volumeClaimTemplates: - - metadata: - name: data 
- labels: - app: harbor - spec: - accessModes: - - ReadWriteOnce - storageClassName: longhorn - resources: - requests: - storage: "1Gi" ---- -# Source: harbor/templates/trivy/trivy-sts.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: harbor-trivy - labels: - app: harbor - component: trivy -spec: - replicas: 1 - serviceName: harbor-trivy - selector: - matchLabels: - app: harbor - component: trivy - template: - metadata: - labels: - app: harbor - component: trivy - spec: - securityContext: - runAsUser: 10000 - fsGroup: 10000 - automountServiceAccountToken: false - containers: - - name: trivy - image: mirror.gcr.io/goharbor/trivy-adapter-photon:v2.4.2 - imagePullPolicy: IfNotPresent - securityContext: - privileged: false - allowPrivilegeEscalation: false - env: - - name: HTTP_PROXY - value: "" - - name: HTTPS_PROXY - value: "" - - name: NO_PROXY - value: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal" - - name: "SCANNER_LOG_LEVEL" - value: "info" - - name: "SCANNER_TRIVY_CACHE_DIR" - value: "/home/scanner/.cache/trivy" - - name: "SCANNER_TRIVY_REPORTS_DIR" - value: "/home/scanner/.cache/reports" - - name: "SCANNER_TRIVY_DEBUG_MODE" - value: "false" - - name: "SCANNER_TRIVY_VULN_TYPE" - value: "os,library" - - name: "SCANNER_TRIVY_TIMEOUT" - value: "5m0s" - - name: "SCANNER_TRIVY_GITHUB_TOKEN" - valueFrom: - secretKeyRef: - name: harbor-trivy - key: gitHubToken - - name: "SCANNER_TRIVY_SEVERITY" - value: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL" - - name: "SCANNER_TRIVY_IGNORE_UNFIXED" - value: "false" - - name: "SCANNER_TRIVY_SKIP_UPDATE" - value: "false" - - name: "SCANNER_TRIVY_INSECURE" - value: "false" - - name: SCANNER_API_SERVER_ADDR - value: ":8080" - - name: "SCANNER_REDIS_URL" - valueFrom: - secretKeyRef: - name: harbor-trivy - key: redisURL - - name: "SCANNER_STORE_REDIS_URL" - valueFrom: - secretKeyRef: - name: harbor-trivy - key: redisURL - - name: "SCANNER_JOB_QUEUE_REDIS_URL" - valueFrom: - secretKeyRef: - name: harbor-trivy - key: redisURL - ports: - - name: api-server - containerPort: 8080 - volumeMounts: - - name: data - mountPath: /home/scanner/.cache - subPath: - readOnly: false - readinessProbe: - httpGet: - scheme: HTTP - path: /probe/ready - port: api-server - initialDelaySeconds: 5 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - resources: - limits: - cpu: 1 - memory: 1Gi - requests: - cpu: 200m - memory: 512Mi - nodeSelector: - dedicated: storage - tolerations: - - key: dedicated - operator: Equal - value: storage - effect: NoSchedule - volumeClaimTemplates: - - metadata: - name: data - labels: - app: harbor - spec: - accessModes: - - ReadWriteOnce - storageClassName: longhorn - resources: - requests: - storage: "10Gi" + emptyDir: {} --- # Source: harbor/templates/ingress/ingress.yaml apiVersion: networking.k8s.io/v1 @@ -999,8 +1086,17 @@ kind: Ingress metadata: name: "harbor-ingress" labels: - app: harbor + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" annotations: + cert-manager.io/cluster-issuer: default external-dns.alpha.kubernetes.io/target: traefik.k-space.ee ingress.kubernetes.io/proxy-body-size: "0" ingress.kubernetes.io/ssl-redirect: "true" @@ -1011,18 +1107,12 @@ metadata: traefik.ingress.kubernetes.io/router.tls: "true" spec: tls: - - hosts: 
- - "*.k-space.ee" + - secretName: harbor-ingress + hosts: + - harbor.k-space.ee rules: - http: paths: - - path: / - pathType: Prefix - backend: - service: - name: harbor-portal - port: - number: 80 - path: /api/ pathType: Prefix backend: @@ -1037,7 +1127,7 @@ spec: name: harbor-core port: number: 80 - - path: /v2 + - path: /v2/ pathType: Prefix backend: service: @@ -1058,4 +1148,36 @@ spec: name: harbor-core port: number: 80 + - path: / + pathType: Prefix + backend: + service: + name: harbor-portal + port: + number: 80 host: harbor.k-space.ee +--- +# Source: harbor/templates/metrics/metrics-svcmon.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: harbor + labels: + heritage: Helm + release: harbor + chart: harbor + app: "harbor" + app.kubernetes.io/instance: harbor + app.kubernetes.io/name: harbor + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: harbor + app.kubernetes.io/version: "2.11.0" +spec: + jobLabel: app.kubernetes.io/name + endpoints: + - port: http-metrics + honorLabels: true + selector: + matchLabels: + release: harbor + app: "harbor" diff --git a/harbor/values.yaml b/harbor/values.yaml new file mode 100644 index 0000000..1bea01d --- /dev/null +++ b/harbor/values.yaml @@ -0,0 +1,144 @@ +expose: + type: ingress + tls: + enabled: true + ingress: + hosts: + core: harbor.k-space.ee + annotations: + cert-manager.io/cluster-issuer: default + kubernetes.io/ingress.class: traefik + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" + external-dns.alpha.kubernetes.io/target: traefik.k-space.ee + labels: {} + +externalURL: https://harbor.k-space.ee + +# The persistence is enabled by default and a default StorageClass +# is needed in the k8s cluster to provision volumes dynamically. +# Specify another StorageClass in the "storageClass" or set "existingClaim" +# if you already have existing persistent volumes to use +# +# For storing images and charts, you can also use "azure", "gcs", "s3", +# "swift" or "oss". Set it in the "imageChartStorage" section +persistence: + enabled: true + # Define which storage backend is used for registry to store + # images and charts. Refer to + # https://github.com/distribution/distribution/blob/main/docs/content/about/configuration.md#storage + # for the detail. + persistentVolumeClaim: + jobservice: + jobLog: + existingClaim: "" + storageClass: "longhorn" + subPath: "" + accessMode: ReadWriteMany + size: 5Gi + annotations: {} + imageChartStorage: + # Specify whether to disable `redirect` for images and chart storage, for + # backends which not supported it (such as using minio for `s3` storage type), please disable + # it. To disable redirects, simply set `disableredirect` to `true` instead. + # Refer to + # https://github.com/distribution/distribution/blob/main/docs/configuration.md#redirect + # for the detail. 
+ disableredirect: false + type: s3 + s3: + # Set an existing secret for S3 accesskey and secretkey + # keys in the secret should be REGISTRY_STORAGE_S3_ACCESSKEY and REGISTRY_STORAGE_S3_SECRETKEY for registry + existingSecret: "harbor-minio-credentials" + region: us-east-1 + bucket: harbor-operator-e60e5943-234a-496d-ae74-933f6a67c530 + #accesskey: awsaccesskey + #secretkey: awssecretkey + regionendpoint: https://external.minio-clusters.k-space.ee + #encrypt: false + #keyid: mykeyid + #secure: true + #skipverify: false + #v4auth: true + #chunksize: "5242880" + #rootdirectory: /s3/object/name/prefix + #storageclass: STANDARD + #multipartcopychunksize: "33554432" + #multipartcopymaxconcurrency: 100 + #multipartcopythresholdsize: "33554432" + + +# The initial password of Harbor admin. Change it from portal after launching Harbor +# or give an existing secret for it +# key in secret is given via (default to HARBOR_ADMIN_PASSWORD) +# existingSecretAdminPassword: +existingSecretAdminPasswordKey: HARBOR_ADMIN_PASSWORD + +# debug, info, warning, error or fatal +logLevel: debug + +# If using existingSecretSecretKey, the key must be secretKey +existingSecretSecretKey: "harbor-core-oidc-secret-encryption-key" + +# Run the migration job via helm hook +enableMigrateHelmHook: false + +metrics: + enabled: true + core: + path: /metrics + port: 8001 + registry: + path: /metrics + port: 8001 + jobservice: + path: /metrics + port: 8001 + exporter: + path: /metrics + port: 8001 + serviceMonitor: + enabled: true + additionalLabels: {} + # Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: "" + # Metric relabel configs to apply to samples before ingestion. + metricRelabelings: + [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + # Relabel configs to apply to samples before ingestion. + relabelings: + [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + +trivy: + enabled: false + +database: + type: "external" + + external: + host: "172.20.43.1" + port: "5432" + username: "kspace_harbor" + coreDatabase: "kspace_harbor" + existingSecret: "harbor-postgres-password" + sslmode: "disable" + +redis: + type: external + external: + # support redis, redis+sentinel + # addr for redis: <host_redis>:<port_redis> + # addr for redis+sentinel: <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3> + addr: "dragonfly:6379" + username: "" + password: "MvYcuU0RaIu1SX7fY1m1JrgLUSaZJjge"
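
Note on the inline Redis password: values.yaml and the rendered application.yml embed the Dragonfly password verbatim, while application-extras.yml already provisions a `dragonfly-auth` SecretClaim whose mapping exposes a `REDIS_PASSWORD` key. A less leak-prone variant would point the chart at that secret instead of an inline password. A minimal sketch, assuming the harbor-helm chart version in use supports `redis.external.existingSecret` (which expects the key `REDIS_PASSWORD`):

```yaml
# Hypothetical alternative for harbor/values.yaml: reuse the dragonfly-auth
# secret created by the SecretClaim instead of hard-coding the password.
redis:
  type: external
  external:
    addr: "dragonfly:6379"
    username: ""
    # Assumption: the chart reads REDIS_PASSWORD from this secret, which the
    # SecretClaim mapping in application-extras.yml already provides.
    existingSecret: "dragonfly-auth"
```

After changing values.yaml, re-run the `helm template` command from the README and re-apply the rendered manifests so the configmaps pick up the new connection settings.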