Initial commit

This commit is contained in:
Lauri Võsandi 2022-11-14 21:08:45 +02:00
commit 34f3a878d9
24 changed files with 1978 additions and 0 deletions

2
.drone.yml Normal file
View File

@ -0,0 +1,2 @@
kind: template
load: docker-multiarch.yaml

1
.gitignore vendored Normal file
View File

@ -0,0 +1 @@
__pycache__

6
Chart.yaml Normal file
View File

@ -0,0 +1,6 @@
# Helm chart metadata for the harbor-operator deployment
apiVersion: v2
name: harbor-operator
description: Helm chart for harbor-operator
type: application
# Chart version — bump on any chart change
version: 0.1.0
# Version of the packaged operator application
appVersion: 0.0.1

4
Dockerfile Normal file
View File

@ -0,0 +1,4 @@
FROM harbor.k-space.ee/k-space/microservice-base
RUN pip3 install kopf
# COPY is preferred over ADD for plain files/directories (no archive/URL magic)
COPY /app /app
# Exec form so SIGTERM reaches the Python process directly (no shell wrapper)
ENTRYPOINT ["/app/harbor-operator.py"]

113
README.md Normal file
View File

@ -0,0 +1,113 @@
# harbor-operator
## Background
This operator is a highly opinionated way to deploy Harbor in a Kubernetes cluster:
* Only one Harbor instance per Kubernetes cluster
* Nearly all components deployed in HA fashion
* Automates Harbor project creation via `ClusterHarborProject` CRD
* Create per user projects with quotas and password protection
* Create proxy cache projects with quotas and password protection
* Designed to work in conjunction with
[sandbox-dashboard](https://git.k-space.ee/k-space/sandbox-dashboard):
* Sandbox template repository contains `HarborCredential` definitions
* Sandbox dashboard adds `ClusterUser` resources when user logs in
* Automate push/pull credential provisioning using HarborCredential CRD-s,
to simplify working with Skaffold
* [WIP] Pod admission mutation webhook to rewrite Pod images to use
proxy caches defined via `ClusterHarborProject` definitions with `cache: true`.
## Instantiating Harbor projects
To instantiate proxy cache project:
```
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterHarborRegistry
metadata:
name: quay.io
spec:
type: quay
endpoint: https://quay.io
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterHarborRegistry
metadata:
name: docker.io
spec:
type: docker-hub
endpoint: https://docker.io
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterHarborProject
metadata:
name: docker.io
spec:
cache: true
public: true
quota: 10737418240
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterHarborProject
metadata:
name: quay.io
spec:
cache: true
public: true
quota: 10737418240
```
## Deploying push/pull secrets into namespaces
Once everything is running you can easily provision Harbor project
push and pull secrets into namespaces:
```
---
apiVersion: codemowers.io/v1alpha1
kind: HarborCredential
metadata:
name: kaniko
spec:
project: foobar
key: config.json
permissions:
- resource: repository
action: pull
- resource: tag
action: create
- resource: repository
action: push
---
apiVersion: codemowers.io/v1alpha1
kind: HarborCredential
metadata:
name: regcred
spec:
project: foobar
type: kubernetes.io/dockerconfigjson
key: .dockerconfigjson
permissions:
- resource: repository
action: pull
```
## Uninstalling
The finalizers will likely block deletion of resources:
```
for j in $(
kubectl get harborcredentials -o name;
kubectl get clusterharborprojectmembers -o name;
kubectl get clusterharborprojects -o name;
kubectl get clusterharborregistries -o name ); do
echo "Removing $j"
kubectl patch $j --type json --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]'
kubectl delete $j
done
```

221
app/harbor-operator.py Executable file
View File

@ -0,0 +1,221 @@
#!/usr/bin/env python3
import os
import kopf
from base64 import b64encode
from json import dumps
from kubernetes_asyncio.client.exceptions import ApiException
from kubernetes_asyncio import client, config
from sanic import Sanic
from sanic.response import json
from image_mutation import mutate_image
from harbor_wrapper import Harbor
# Harbor API client; HARBOR_URI embeds admin credentials (https://user:pass@host)
harbor = Harbor(os.environ["HARBOR_URI"])
# Names of ClusterHarborProject resources that act as proxy caches
cached_registries = set()
# Sanic app serving the mutating admission webhook
app = Sanic("admission_control")
@app.post("/")
async def admission_control_handler(request):
    """Mutating admission webhook handler.

    Rewrites every container image in the incoming Pod to pull via the
    Harbor proxy cache (see ``mutate_image``) and returns an
    AdmissionReview with a base64-encoded JSONPatch.
    NOTE(review): only ``spec.containers`` is patched — initContainers and
    ephemeralContainers are left untouched; confirm whether that is intended.
    """
    patches = []
    for index, container in enumerate(request.json["request"]["object"]["spec"]["containers"]):
        patches.append({
            "op": "replace",
            "path": "/spec/containers/%d/image" % index,
            "value": mutate_image(container["image"], harbor.hostname, cached_registries),
        })
    response = {
        "apiVersion": "admission.k8s.io/v1",
        "kind": "AdmissionReview",
        "response": {
            "uid": request.json["request"]["uid"],
            "allowed": True,
            "patchType": "JSONPatch",
            # Kubernetes requires the patch payload base64-encoded
            "patch": b64encode(dumps(patches).encode("ascii")).decode("ascii")
        }
    }
    return json(response)
@kopf.on.resume("harborcredentials")
@kopf.on.create("harborcredentials")
async def credentialCreation(name, namespace, body, **kwargs):
    """Provision a Harbor robot account for a HarborCredential resource.

    Creates the robot account in the referenced project and materializes the
    resulting dockerconfig as a Secret in the same namespace (adopted by the
    HarborCredential so it is garbage-collected with it).
    Returns the status payload stored under ``.status.credentialCreation``.
    """
    v1 = client.CoreV1Api()
    project_name = body["spec"]["project"]
    username = "harbor-operator_%s_%s" % (namespace, name)
    try:
        dockerconfig, username, password, robot_id = await harbor.create_robot_account(
            project_name,
            username,
            body["spec"]["permissions"])
    except Harbor.NoSuchProject:
        # Project may appear later; retry slowly
        raise kopf.TemporaryError("PROJECT_MISSING", delay=300)
    except Harbor.RobotAccountAlreadyExists:
        # We can't read the password to retry, so just let's fail gracefully
        raise kopf.TemporaryError("ROBOT_ACCOUNT_ALREADY_EXISTS")
    else:
        data = {
            body["spec"]["key"]: b64encode(dockerconfig.encode("ascii")).decode("ascii")
        }
        # Renamed from `kwargs` — the original shadowed the handler's **kwargs
        secret_body = {
            "api_version": "v1",
            "data": data,
            "kind": "Secret",
            "metadata": {
                "name": body["metadata"]["name"]
            }
        }
        if body["spec"].get("type"):
            # e.g. kubernetes.io/dockerconfigjson
            secret_body["type"] = body["spec"]["type"]
        kopf.adopt(secret_body)
        await v1.create_namespaced_secret(body["metadata"]["namespace"],
                                          client.V1Secret(**secret_body))
        return {"state": "READY", "id": robot_id, "project": project_name}
@kopf.on.delete("harborcredentials")
async def credential_deletion(name, namespace, body, **kwargs):
    """Delete the robot account recorded in ``.status.credentialCreation``.

    If the status was never written (creation never succeeded) there is
    nothing to clean up, so the KeyError is deliberately ignored.
    """
    try:
        project_name = body["status"]["credentialCreation"]["project"]
        robot_id = body["status"]["credentialCreation"]["id"]
    except KeyError:
        pass
    else:
        await harbor.delete_robot_account(project_name, robot_id)
@kopf.on.resume("clusterharborprojects")
@kopf.on.create("clusterharborprojects")
async def projectCreation(name, namespace, body, **kwargs):
    """Create the Harbor project backing a ClusterHarborProject resource.

    For cache projects the same-named ClusterHarborRegistry must already be
    READY so its registry id can be attached to the project.
    """
    # Renamed from `kwargs` — the original shadowed the handler's **kwargs
    project_kwargs = {
        "project_name": name,
        "public": body["spec"]["public"],
        "quota": body["spec"]["quota"],
    }
    if body["spec"]["cache"]:
        api_instance = client.CustomObjectsApi()
        try:
            registry_spec = await api_instance.get_cluster_custom_object(
                "codemowers.io", "v1alpha1", "clusterharborregistries", name)
        except ApiException as e:
            if e.status == 404:
                raise kopf.TemporaryError("NO_REGISTRY")
            # BUGFIX: other API errors were silently swallowed, leaving
            # registry_spec undefined and crashing below with NameError
            raise
        try:
            registry_id = registry_spec["status"]["registryCreation"]["id"]
        except KeyError:
            raise kopf.TemporaryError("REGISTRY_NOT_READY")
        project_kwargs["registry_id"] = registry_id
    project = await harbor.create_project(**project_kwargs)
    if body["spec"]["cache"]:
        # Make the admission webhook rewrite images for this registry
        cached_registries.add(name)
    return {"state": "READY", "id": project["project_id"]}
@kopf.on.delete("clusterharborprojects")
async def project_deletion(name, body, **kwargs):
    """Delete the Harbor project recorded in ``.status.projectCreation``."""
    # Stop rewriting images towards a project that is going away
    cached_registries.discard(name)
    try:
        project_id = body["status"]["projectCreation"]["id"]
    except KeyError:
        # Creation never succeeded — nothing to delete
        pass
    else:
        await harbor.delete_project_by_id(project_id)
# Numeric role ids as used by the Harbor project member API
HARBOR_ROLES = {
    "PROJECT_ADMIN": 1,
    "DEVELOPER": 2,
    "GUEST": 3,
    "MAINTAINER": 4,
}
@kopf.on.resume("clusterharborprojectmembers")
@kopf.on.create("clusterharborprojectmembers")
async def memberCreation(name, namespace, body, **kwargs):
    """Add a user to a Harbor project per a ClusterHarborProjectMember resource.

    The referenced ClusterHarborProject must exist and be READY; the user
    must already have a Harbor account (provisioned on first OIDC login).
    """
    api_instance = client.CustomObjectsApi()
    try:
        project_spec = await api_instance.get_cluster_custom_object(
            "codemowers.io", "v1alpha1", "clusterharborprojects",
            body["spec"]["project"])
    except ApiException as e:
        if e.status == 404:
            raise kopf.TemporaryError("NO_PROJECT")
        # BUGFIX: other API errors were silently swallowed, leaving
        # project_spec undefined and crashing below with NameError
        raise
    try:
        project_id = project_spec["status"]["projectCreation"]["id"]
    except KeyError:
        raise kopf.TemporaryError("PROJECT_NOT_READY")
    try:
        membership_id = await harbor.add_project_member(
            project_id, body["spec"]["username"],
            HARBOR_ROLES[body["spec"]["role"]])
    except Harbor.UserNotProvisioned:
        # User has not logged in yet with OIDC and we don't have mechanism
        # to provision OIDC user accounts either
        raise kopf.TemporaryError("USER_NOT_PROVISIONED", delay=300)
    return {"state": "READY", "id": membership_id, "project_id": project_id}
@kopf.on.delete("clusterharborprojectmembers")
async def member_deletion(name, body, **kwargs):
    """Remove the project membership recorded in ``.status.memberCreation``.

    A membership id of 0 means "already a member / nothing created", so
    deletion is skipped in that case as well as when status is missing.
    """
    try:
        membership_id = body["status"]["memberCreation"]["id"]
        project_id = body["status"]["memberCreation"]["project_id"]
    except KeyError:
        membership_id = 0
    if membership_id:
        await harbor.delete_project_member(project_id, membership_id)
@kopf.on.resume("clusterharborregistries")
@kopf.on.create("clusterharborregistries")
async def registryCreation(name, body, **kwargs):
    """Create the Harbor registry endpoint for a ClusterHarborRegistry."""
    registry_id = await harbor.create_registry_endpoint(
        name, body["spec"]["type"], body["spec"]["endpoint"])
    return {"state": "READY", "id": registry_id}
@kopf.on.delete("clusterharborregistries")
async def registry_deletion(name, body, **kwargs):
    """Delete the registry endpoint recorded in ``.status.registryCreation``.

    Guarded against missing status (creation never succeeded), matching the
    behavior of the other delete handlers which previously this one lacked.
    """
    try:
        registry_id = body["status"]["registryCreation"]["id"]
    except KeyError:
        pass
    else:
        await harbor.delete_registry_endpoint(registry_id)
@kopf.on.startup()
def configure(settings: kopf.OperatorSettings, **_):
    """Tune kopf: no CRD scanning, no k8s Event posting, custom finalizer."""
    settings.scanning.disabled = True
    settings.posting.enabled = False
    # Finalizer name written onto handled objects; blocks deletion until
    # the corresponding delete handler has run
    settings.persistence.finalizer = "harbor-operator"
    print("Kopf operator starting up")
@app.listener("before_server_start")
async def setup_db(name, loop):
    """Initialize the Kubernetes client and pre-populate the cache set.

    Loads kube config (local KUBECONFIG or in-cluster), then seeds the
    module-level ``cached_registries`` set from already-READY cache-enabled
    ClusterHarborProject resources so the admission webhook works
    immediately after restart.
    """
    if os.getenv("KUBECONFIG"):
        await config.load_kube_config()
    else:
        config.load_incluster_config()
    # NOTE(review): the original also created an unused empty
    # app.ctx.cached_registries set; the module-level set is the one
    # actually consulted by the webhook, so the dead attribute was dropped.
    api_instance = client.CustomObjectsApi()
    resp = await api_instance.list_cluster_custom_object(
        "codemowers.io", "v1alpha1", "clusterharborprojects")
    for body in resp["items"]:
        if not body["spec"]["cache"]:
            continue
        try:
            project_id = body["status"]["projectCreation"]["id"]
        except KeyError:
            project_id = 0
        if project_id:
            cached_registries.add(body["metadata"]["name"])
    print("Caching registries:", cached_registries)
# Run the kopf operator loop as a background task of the Sanic server
app.add_task(kopf.operator(
    clusterwide=True))
kwargs = {}
if os.path.exists("/tls"):
    # TLS material mounted by the admission-control Certificate secret
    kwargs["ssl"] = {"key": "/tls/tls.key", "cert": "/tls/tls.crt"}
app.run(host="0.0.0.0", port=3001, single_process=True,
        motd=False, **kwargs)

178
app/harbor_wrapper.py Normal file
View File

@ -0,0 +1,178 @@
import aiohttp
import re
from base64 import b64encode
from json import dumps
from urllib.parse import urlsplit
class Harbor(object):
    """Minimal asynchronous client for the Harbor v2.0 REST API.

    ``base_url`` is expected to embed credentials in the URL, e.g.
    ``https://admin:password@harbor.example.com``; requests go straight to
    that URL so HTTP basic auth is carried in the userinfo part.
    """

    class Error(Exception):
        # Base class for all Harbor wrapper errors
        pass

    class NoSuchProject(Error):
        # Operation referred to a project Harbor does not know about
        pass

    class RobotAccountAlreadyExists(Error):
        # Robot account name conflict (HTTP 409)
        pass

    class UserAlreadyMember(Error):
        # User already member of project (declared; not raised in this file)
        pass

    class UserNotProvisioned(Error):
        # Referenced user has no Harbor account yet (HTTP 404)
        pass

    def __init__(self, base_url):
        self.base_url = base_url
        # Bare hostname used as the registry key inside dockerconfig blobs
        self.hostname = urlsplit(base_url).hostname

    async def delete_registry_endpoint(self, registry_id):
        """Delete a registry endpoint by numeric id; status not checked."""
        async with aiohttp.ClientSession() as session:
            await session.request(
                "DELETE", "%s/api/v2.0/registries/%d" % (self.base_url, registry_id))

    async def create_registry_endpoint(self, reg_name, reg_type, reg_url):
        """Create a registry endpoint (idempotent) and return its numeric id.

        A 409 means it already exists; either way the id is resolved by
        listing all registries and matching by name.
        """
        body = {
            "credential": {
                "access_key": "",
                "access_secret": "",
                "type": "basic"
            },
            "description": "",
            "name": reg_name,
            "type": reg_type,
        }
        if reg_url:
            # NOTE(review): original indentation lost in transit; both url
            # and insecure appear to be set only when reg_url is given
            body["url"] = reg_url
            body["insecure"] = False
        async with aiohttp.ClientSession() as session:
            resp = await session.request(
                "POST", "%s/api/v2.0/registries" % self.base_url, json=body)
            if resp.status not in (201, 409):
                raise self.Error("Unexpected status code %d for "
                                 "registry endpoint creation" % resp.status)
        async with aiohttp.ClientSession() as session:
            resp = await session.request(
                "GET", "%s/api/v2.0/registries" % self.base_url)
            if resp.status not in (200, 409):
                raise self.Error("Unexpected status code %d for "
                                 "registry endpoint lookup" % resp.status)
            registries = await resp.json()
        for registry in registries:
            if registry["name"] == reg_name:
                return registry["id"]
        raise self.Error("Failed to lookup registry endpoint %s" %
                         repr(reg_name))

    async def get_project(self, project_name):
        """Return project JSON, or None when absent (404, or 403)."""
        async with aiohttp.ClientSession() as session:
            resp = await session.request(
                "GET", "%s/api/v2.0/projects/%s" % (self.base_url, project_name))
            if resp.status == 200:
                return await resp.json()
            elif resp.status == 404:
                return None
            elif resp.status == 403:  # TODO: ??
                return None
            else:
                raise self.Error("Unexpected status code %d for "
                                 "project lookup" % resp.status)

    async def delete_project_by_id(self, project_id):
        """Delete project by numeric id; status not checked."""
        async with aiohttp.ClientSession() as session:
            await session.request(
                "DELETE", "%s/api/v2.0/projects/%d" % (self.base_url, project_id))

    async def delete_project_by_name(self, project_name):
        """Delete project by name."""
        # TODO: Check status code
        async with aiohttp.ClientSession() as session:
            await session.request(
                "DELETE", "%s/api/v2.0/projects/%s" % (self.base_url, project_name))

    async def delete_project_member(self, project_id, membership_id):
        """Remove a project membership."""
        # TODO: Check status code
        async with aiohttp.ClientSession() as session:
            await session.request(
                "DELETE", "%s/api/v2.0/projects/%d/members/%d" % (
                    self.base_url, project_id, membership_id))

    async def delete_robot_account(self, project_name, membership_id):
        """Delete a project robot account (membership_id is the robot id)."""
        # TODO: Check status code
        async with aiohttp.ClientSession() as session:
            await session.request(
                "DELETE", "%s/api/v2.0/projects/%s/robots/%d" % (
                    self.base_url, project_name, membership_id))

    async def create_project(self, project_name, public, quota, registry_id=None):
        """Create a project (409 = already exists) and return its JSON."""
        async with aiohttp.ClientSession() as session:
            resp = await session.request(
                "POST", "%s/api/v2.0/projects" % self.base_url, json={
                    "metadata": {
                        # Harbor expects the string "true"/"false" here
                        "public": str(public).lower()
                    },
                    "project_name": project_name,
                    "storage_limit": quota,
                    "registry_id": registry_id
                })
            if resp.status not in (201, 409):
                raise self.Error("Unexpected status code %d for project "
                                 "creation" % resp.status)
        return await self.get_project(project_name)

    async def add_project_member(self, project_id, username, role_id):
        """Add ``username`` to project; return membership id, 0 if already a member."""
        async with aiohttp.ClientSession() as session:
            response = await session.post(
                "%s/api/v2.0/projects/%d/members" % (self.base_url, project_id),
                json={
                    "role_id": role_id,
                    "member_user": {
                        "username": username
                    }
                }
            )
            if response.status == 201:
                # Membership id is only returned via the Location header
                m = re.search("/members/([0-9]+)$", response.headers["Location"])
                return int(m.groups()[0])
            elif response.status == 409:
                return 0
            elif response.status == 404:
                raise self.UserNotProvisioned(username)
            raise self.Error("Got unexpected response from Harbor: %s" % response.status)

    async def create_robot_account(self, project_name, account_name, permissions):
        """Create a project-level robot account.

        Returns ``(dockerconfig_json, robot_name, secret, robot_id)``.
        Raises RobotAccountAlreadyExists on 409 and NoSuchProject on 403
        (the code treats 403 as "target project missing").
        """
        async with aiohttp.ClientSession() as session:
            response = await session.post(
                "%s/api/v2.0/robots" % self.base_url,
                json={
                    "name": account_name,
                    "duration": -1,  # never expire
                    "description": "Robot account created by harbor-operator",
                    "disable": False,
                    "level": "project",
                    "permissions": [{
                        "namespace": project_name,
                        "kind": "project",
                        "access": permissions
                    }]
                }
            )
            if response.status == 201:
                response_json = await response.json()
                auth = response_json["name"].encode("ascii"), \
                    response_json["secret"].encode("ascii")
                auths = {}
                auths[self.hostname] = {
                    "auth": b64encode(b"%s:%s" % auth).decode("ascii")
                }
                dockerconfig = dumps({
                    "auths": auths
                })
                # Robot id is only returned via the Location header
                m = re.search("/robots/([0-9]+)$", response.headers["Location"])
                robot_id = int(m.groups()[0])
                return dockerconfig, response_json["name"], response_json["secret"], robot_id
            elif response.status == 409:
                raise self.RobotAccountAlreadyExists()
            elif response.status == 403:
                raise self.NoSuchProject(project_name)
            raise self.Error("Got unexpected response from Harbor: %s" % response.status)

45
app/image_mutation.py Normal file
View File

@ -0,0 +1,45 @@
import re

# Docker image reference grammar: optional registry (hostname[:port]),
# repository path, optional :tag, optional @algo:hexdigest.
# BUGFIX: Python's re has no POSIX [[:xdigit:]] class — the original digest
# branch could never match a real sha256 digest; use [0-9a-fA-F] instead.
RE_IMAGE = re.compile("^((?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])"
                      "(?:(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?"
                      "(?::[0-9]+)?/)?[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?"
                      "(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)"
                      "(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*"
                      "(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][0-9a-fA-F]{32,}))?$")


def parse_image(foo):
    """Split a Docker image reference into (registry, org, image, tag, digest).

    Missing components default to registry "docker.io" and org "library";
    tag and digest are None when absent.  Raises ValueError on references
    that don't match the grammar or whose registry part contains a slash.
    """
    m = RE_IMAGE.match(foo)
    if not m:
        raise ValueError("%s does not match Docker image regex" % repr(foo))
    image, tag, digest = m.groups()
    # BUGFIX: split the captured repository path, not the full reference —
    # splitting `foo` leaked ":tag" into the image name for "org/image:tag"
    try:
        org, image = image.rsplit("/", 1)
    except ValueError:
        org = "library"
    try:
        registry, org = org.rsplit("/", 1)
    except ValueError:
        registry = "docker.io"
    if "/" in registry:
        raise ValueError("Won't allow caching Docker registry in image name")
    return registry, org, image, tag, digest


def mutate_image(foo, hostname, cached_registries):
    """Canonicalize an image reference; prefix with the Harbor proxy cache.

    The reference is expanded to registry/org/image[:tag][@digest]; when its
    registry is in ``cached_registries`` the Harbor ``hostname`` is prepended
    so the pull goes through the proxy cache project of the same name.
    """
    registry, org, image, tag, digest = parse_image(foo)
    j = "%s/%s/%s" % (registry, org, image)
    if tag:
        j = "%s:%s" % (j, tag)
    if digest:
        j = "%s@%s" % (j, digest)
    if registry in cached_registries:
        j = "%s/%s" % (hostname, j)
    return j


# Self-tests; use real sets — the original passed ("docker.io") which is a
# plain string and only worked via accidental substring matching
assert mutate_image("mongo:latest", "harbor.k-space.ee", {"docker.io"}) == "harbor.k-space.ee/docker.io/library/mongo:latest"
assert mutate_image("mongo", "harbor.k-space.ee", {"docker.io"}) == "harbor.k-space.ee/docker.io/library/mongo"
assert mutate_image("library/mongo", "harbor.k-space.ee", {"docker.io"}) == "harbor.k-space.ee/docker.io/library/mongo"
assert mutate_image("docker.io/library/mongo", "harbor.k-space.ee", {"docker.io"}) == "harbor.k-space.ee/docker.io/library/mongo"

View File

@ -0,0 +1,76 @@
---
# CRD: ClusterHarborProjectMember — grants a Harbor user a role in a project
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: clusterharborprojectmembers.codemowers.io
spec:
  group: codemowers.io
  names:
    plural: clusterharborprojectmembers
    singular: clusterharborprojectmember
    kind: ClusterHarborProjectMember
    shortNames:
      - clusterharborprojectmember
  scope: Cluster
  versions:
    - name: v1alpha1
      served: true
      storage: true
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: State
          jsonPath: .status.memberCreation.state
          type: string
        - name: Errors
          jsonPath: .status.kopf.progress.memberCreation.message
          type: string
        - name: Membership ID
          jsonPath: .status.memberCreation.id
          type: integer
        - name: Project
          jsonPath: .spec.project
          type: string
        - name: Username
          jsonPath: .spec.username
          type: string
        - name: Role
          jsonPath: .spec.role
          type: string
      schema:
        openAPIV3Schema:
          type: object
          required:
            - spec
          properties:
            status:
              type: object
              x-kubernetes-preserve-unknown-fields: true
              properties:
                memberCreation:
                  type: object
                  properties:
                    id:
                      type: integer
                    project_id:
                      type: integer
                    state:
                      type: string
            spec:
              type: object
              required:
                - project
                - username
                - role
              properties:
                project:
                  type: string
                username:
                  type: string
                role:
                  type: string
                  # Must match HARBOR_ROLES in app/harbor-operator.py
                  enum:
                    - PROJECT_ADMIN
                    - DEVELOPER
                    - GUEST
                    - MAINTAINER
View File

@ -0,0 +1,71 @@
---
# CRD: ClusterHarborProject — a Harbor project, optionally a proxy cache
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: clusterharborprojects.codemowers.io
spec:
  group: codemowers.io
  names:
    plural: clusterharborprojects
    singular: clusterharborproject
    kind: ClusterHarborProject
    shortNames:
      - clusterharborproject
  scope: Cluster
  versions:
    - name: v1alpha1
      served: true
      storage: true
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: Status
          jsonPath: .status.projectCreation.state
          type: string
        - name: Errors
          jsonPath: .status.kopf.progress.projectCreation.message
          type: string
        - name: Project ID
          jsonPath: .status.projectCreation.id
          type: integer
        - name: Quota
          jsonPath: .spec.quota
          type: integer
        - name: Public
          jsonPath: .spec.public
          type: boolean
        - name: Cache
          jsonPath: .spec.cache
          type: boolean
      schema:
        openAPIV3Schema:
          type: object
          required:
            - spec
          properties:
            status:
              type: object
              x-kubernetes-preserve-unknown-fields: true
              properties:
                projectCreation:
                  type: object
                  properties:
                    id:
                      type: integer
                    state:
                      type: string
            spec:
              type: object
              properties:
                public:
                  type: boolean
                  default: false
                  description: Whether this project is publicly readable
                cache:
                  type: boolean
                  default: false
                  description: >-
                    Whether this project enables caching from upstream
                    Docker registry
                quota:
                  type: integer
                  # 2 GiB default storage quota
                  default: 2147483648

View File

@ -0,0 +1,72 @@
---
# CRD: ClusterHarborRegistry — upstream registry endpoint used by cache projects
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: clusterharborregistries.codemowers.io
spec:
  group: codemowers.io
  names:
    plural: clusterharborregistries
    singular: clusterharborregistry
    kind: ClusterHarborRegistry
    shortNames:
      - clusterharborregistry
  scope: Cluster
  versions:
    - name: v1alpha1
      served: true
      storage: true
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: Status
          jsonPath: .status.registryCreation.state
          type: string
        - name: Errors
          jsonPath: .status.kopf.progress.registryCreation.message
          type: string
        - name: Registry ID
          jsonPath: .status.registryCreation.id
          # integer to match the .status schema below (was declared string)
          type: integer
        - name: Type
          jsonPath: .spec.type
          type: string
        - name: Endpoint
          jsonPath: .spec.endpoint
          type: string
      schema:
        openAPIV3Schema:
          type: object
          required:
            - spec
          properties:
            status:
              type: object
              x-kubernetes-preserve-unknown-fields: true
              properties:
                registryCreation:
                  type: object
                  properties:
                    id:
                      type: integer
                    state:
                      type: string
            spec:
              type: object
              required:
                - type
              properties:
                type:
                  type: string
                  enum:
                    - azure-acr
                    - aws-ecr
                    - docker-hub
                    - docker-registry
                    - google-gcr
                    - harbor
                    - quay
                  description: Registry provider
                endpoint:
                  type: string
                  description: Registry endpoint

View File

@ -0,0 +1,77 @@
---
# CRD: HarborCredential — namespaced robot-account push/pull credential
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: harborcredentials.codemowers.io
spec:
  group: codemowers.io
  names:
    plural: harborcredentials
    singular: harborcredential
    kind: HarborCredential
    shortNames:
      - harborcredential
  scope: Namespaced
  versions:
    - name: v1alpha1
      served: true
      storage: true
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: State
          jsonPath: .status.credentialCreation.state
          type: string
        - name: Errors
          jsonPath: .status.kopf.progress.credentialCreation.message
          type: string
        - name: Robot ID
          jsonPath: .status.credentialCreation.id
          type: integer
        - name: Project
          jsonPath: .spec.project
          type: string
      schema:
        openAPIV3Schema:
          type: object
          required:
            - spec
          properties:
            status:
              type: object
              x-kubernetes-preserve-unknown-fields: true
              properties:
                credentialCreation:
                  type: object
                  properties:
                    id:
                      type: integer
                    project:
                      type: string
                    state:
                      type: string
            spec:
              type: object
              required:
                - key
                - project
                - permissions
              properties:
                # Optional Secret type, e.g. kubernetes.io/dockerconfigjson
                type:
                  type: string
                # Key under which the dockerconfig is stored in the Secret
                key:
                  type: string
                project:
                  type: string
                permissions:
                  type: array
                  items:
                    type: object
                    required:
                      - resource
                      - action
                    properties:
                      resource:
                        type: string
                      action:
                        type: string

View File

@ -0,0 +1,55 @@
{{ if .Values.admissionController }}
---
# Self-signed certificate for the webhook endpoint
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: admission-control
spec:
  secretName: admission-control
  dnsNames:
    - admission-control.harbor-operator.svc
  issuerRef:
    name: harbor-operator
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
  name: harbor-operator-admission-control
  annotations:
    # cert-manager injects the CA bundle from the Certificate above
    cert-manager.io/inject-ca-from: harbor-operator/admission-control
webhooks:
  - name: harbor-operator-admission-control.codemowers.io
    rules:
      - apiGroups:
          - ""
        apiVersions:
          - v1
        operations:
          - CREATE
        resources:
          - pods
        scope: Namespaced
    clientConfig:
      service:
        namespace: harbor-operator
        name: admission-control
    admissionReviewVersions:
      - v1
    sideEffects: None
    timeoutSeconds: 30
    # Ignore failures so a broken webhook can't block all Pod creation
    failurePolicy: Ignore
---
apiVersion: v1
kind: Service
metadata:
  name: admission-control
  labels:
    app.kubernetes.io/name: harbor-operator
spec:
  selector:
    app.kubernetes.io/name: harbor-operator
  ports:
    - name: http
      targetPort: 3001
      port: 443
{{ end }}

264
templates/harbor-core.yaml Normal file
View File

@ -0,0 +1,264 @@
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: harbor-operator
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: harbor-core
spec:
secretName: harbor-core-key
dnsNames:
- harbor-core.harbor-operator.svc
issuerRef:
name: harbor-operator
---
apiVersion: codemowers.io/v1alpha1
kind: GeneratedSecret
metadata:
name: harbor-admin-secrets
spec:
mapping:
- key: HARBOR_ADMIN_PASSWORD
value: "%(password)s"
- key: HARBOR_URI
value: "https://admin:%(password)s@{{ .Values.ingress.host }}"
---
apiVersion: codemowers.io/v1alpha1
kind: GeneratedSecret
metadata:
name: harbor-core-secret
spec:
mapping:
- key: CORE_SECRET
value: "%(password)s"
---
apiVersion: codemowers.io/v1alpha1
kind: GeneratedSecret
metadata:
name: harbor-core-oidc-secret-encryption-key
spec:
size: 32
mapping:
- key: secretKey
value: "%(password)s"
---
apiVersion: codemowers.io/v1alpha1
kind: GeneratedSecret
metadata:
name: harbor-core-csrf-key
spec:
size: 32
mapping:
- key: CSRF_KEY
value: "%(password)s"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-core
labels:
app: harbor
data:
app.conf: |+
appname = Harbor
runmode = prod
enablegzip = true
[prod]
httpport = 8080
PORT: "8080"
POSTGRESQL_MAX_IDLE_CONNS: "100"
POSTGRESQL_MAX_OPEN_CONNS: "900"
EXT_ENDPOINT: "https://{{ .Values.ingress.host }}"
CORE_URL: "http://harbor-core:80"
JOBSERVICE_URL: "http://harbor-jobservice"
REGISTRY_URL: "http://harbor-registry:5000"
TOKEN_SERVICE_URL: "http://harbor-core:80/service/token"
CORE_LOCAL_URL: "http://127.0.0.1:8080"
REGISTRY_STORAGE_PROVIDER_NAME: "filesystem"
LOG_LEVEL: "info"
CONFIG_PATH: "/etc/core/app.conf"
CHART_CACHE_DRIVER: "redis"
PORTAL_URL: "http://harbor-portal"
REGISTRY_CONTROLLER_URL: "http://harbor-registry:8080"
PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry"
METRIC_ENABLE: "true"
METRIC_PATH: "/metrics"
METRIC_PORT: "8001"
METRIC_NAMESPACE: harbor
METRIC_SUBSYSTEM: core
---
apiVersion: v1
kind: Service
metadata:
name: harbor-core
labels:
app: harbor
spec:
ports:
- name: http
port: 80
targetPort: 8080
selector:
app: harbor
component: core
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: harbor-core
labels:
app: harbor
component: core
spec:
replicas: 2
revisionHistoryLimit: 0
selector:
matchLabels: &selectorLabels
app: harbor
component: core
template:
metadata:
labels: *selectorLabels
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values:
- amd64
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
terminationGracePeriodSeconds: 120
containers:
- name: core
image: "{{ .Values.image.repository }}/harbor-core:{{ .Values.image.tag }}"
startupProbe:
httpGet:
path: /api/v2.0/ping
scheme: HTTP
port: 8080
failureThreshold: 360
initialDelaySeconds: 10
periodSeconds: 10
readinessProbe:
httpGet:
path: /api/v2.0/ping
scheme: HTTP
port: 8080
failureThreshold: 2
periodSeconds: 10
envFrom:
- configMapRef:
name: harbor-core
env:
- name: REGISTRY_CREDENTIAL_USERNAME
value: harbor_registry_user
- name: REGISTRY_CREDENTIAL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-registry-credentials
key: REGISTRY_CREDENTIAL_PASSWORD
- name: DATABASE_TYPE
value: postgresql
- name: POSTGRESQL_SSLMODE
value: require
- name: CSRF_KEY
valueFrom:
secretKeyRef:
name: harbor-core-csrf-key
key: CSRF_KEY
- name: HARBOR_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-admin-secrets
key: HARBOR_ADMIN_PASSWORD
- name: POSTGRESQL_HOST
valueFrom:
secretKeyRef:
name: harbor-pguser-harbor
key: host
- name: POSTGRESQL_PORT
valueFrom:
secretKeyRef:
name: harbor-pguser-harbor
key: port
- name: POSTGRESQL_DATABASE
valueFrom:
secretKeyRef:
name: harbor-pguser-harbor
key: dbname
- name: POSTGRESQL_USERNAME
valueFrom:
secretKeyRef:
name: harbor-pguser-harbor
key: user
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-pguser-harbor
key: password
- name: _REDIS_URL_CORE
valueFrom:
secretKeyRef:
name: harbor-core-redis-secrets
key: REDIS_URI
- name: _REDIS_URL_REG
valueFrom:
secretKeyRef:
name: harbor-registry-redis-secrets
key: REDIS_URI
- name: CORE_SECRET
valueFrom:
secretKeyRef:
name: harbor-core-secret
key: CORE_SECRET
- name: JOBSERVICE_SECRET
valueFrom:
secretKeyRef:
name: harbor-jobservice
key: JOBSERVICE_SECRET
ports:
- containerPort: 8080
name: http
- containerPort: 8001
name: metrics
volumeMounts:
- name: config
mountPath: /etc/core/app.conf
subPath: app.conf
- name: secret-key
mountPath: /etc/core/key
subPath: key
- name: token-service-private-key
mountPath: /etc/core/private_key.pem
subPath: tls.key
- name: psc
mountPath: /etc/core/token
volumes:
- name: config
configMap:
name: harbor-core
items:
- key: app.conf
path: app.conf
- name: secret-key
secret:
secretName: harbor-core-oidc-secret-encryption-key
items:
- key: secretKey
path: key
- name: token-service-private-key
secret:
secretName: harbor-core-key
- name: psc
emptyDir: {}

View File

@ -0,0 +1,155 @@
---
apiVersion: codemowers.io/v1alpha1
kind: GeneratedSecret
metadata:
name: harbor-jobservice
spec:
mapping:
- key: JOBSERVICE_SECRET
value: "%(password)s"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-jobservice-env
labels:
app: harbor
data:
CORE_URL: "http://harbor-core:80"
TOKEN_SERVICE_URL: "http://harbor-core:80/service/token"
REGISTRY_URL: "http://harbor-registry:5000"
REGISTRY_CONTROLLER_URL: "http://harbor-registry:8080"
METRIC_NAMESPACE: harbor
METRIC_SUBSYSTEM: jobservice
---
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-jobservice
labels:
app: harbor
data:
config.yml: |+
protocol: "http"
port: 8080
worker_pool:
workers: 1
backend: "redis"
redis_pool:
redis_url: "redis://harbor-jobservice-redis:6379/0"
namespace: "harbor_job_service_namespace"
idle_timeout_second: 3600
job_loggers:
- name: "STD_OUTPUT"
level: INFO
metric:
enabled: true
path: /metrics
port: 8001
loggers:
- name: "STD_OUTPUT"
level: INFO
---
apiVersion: v1
kind: Service
metadata:
name: harbor-jobservice
spec:
ports:
- name: http
port: 80
targetPort: 8080
selector:
app: harbor
component: jobservice
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: harbor-jobservice
labels:
app: harbor
component: jobservice
spec:
replicas: 3
revisionHistoryLimit: 0
selector:
matchLabels: &selectorLabels
app: harbor
component: jobservice
template:
metadata:
labels: *selectorLabels
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values:
- amd64
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
terminationGracePeriodSeconds: 120
containers:
- name: jobservice
image: "{{ .Values.image.repository }}/harbor-jobservice:{{ .Values.image.tag }}"
readinessProbe:
httpGet:
path: /api/v1/stats
scheme: HTTP
port: 8080
initialDelaySeconds: 20
periodSeconds: 10
env:
- name: REGISTRY_CREDENTIAL_USERNAME
value: harbor_registry_user
- name: REGISTRY_CREDENTIAL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-registry-credentials
key: REGISTRY_CREDENTIAL_PASSWORD
- name: JOBSERVICE_SECRET
valueFrom:
secretKeyRef:
name: harbor-jobservice
key: JOBSERVICE_SECRET
- name: JOB_SERVICE_POOL_REDIS_URL
valueFrom:
secretKeyRef:
name: harbor-jobservice-redis-secrets
key: REDIS_URI
- name: CORE_SECRET
valueFrom:
secretKeyRef:
name: harbor-core-secret
key: CORE_SECRET
- name: _REDIS_URL_CORE
valueFrom:
secretKeyRef:
name: harbor-core-redis-secrets
key: REDIS_URI
envFrom:
- configMapRef:
name: harbor-jobservice-env
ports:
- containerPort: 8080
name: http
- containerPort: 8001
name: metrics
volumeMounts:
- name: jobservice-config
mountPath: /etc/jobservice/config.yml
subPath: config.yml
- name: job-scandata-exports
mountPath: /var/scandata_exports
volumes:
- name: jobservice-config
configMap:
name: harbor-jobservice
- name: job-scandata-exports
emptyDir: {}

View File

@ -0,0 +1,129 @@
---
# The harbor-operator itself: a kopf-based controller reconciling the
# ClusterHarbor* and HarborCredential custom resources.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: harbor-operator
  labels:
    app.kubernetes.io/name: harbor-operator
spec:
  replicas: 1
  selector:
    # Anchored so the pod template labels stay in sync with the selector
    matchLabels: &selectorLabels
      app.kubernetes.io/name: harbor-operator
  template:
    metadata:
      labels: *selectorLabels
    spec:
      serviceAccountName: harbor-operator
      affinity:
        # If replicas is ever raised, never co-locate two operator pods in
        # the same failure domain (.Values.topologyKey)
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: {{ .Values.topologyKey }}
            labelSelector:
              matchLabels: *selectorLabels
      containers:
      - name: harbor-operator
        # NOTE(review): floating :latest tag — consider pinning a digest or
        # version for reproducible deploys
        image: codemowers/harbor-operator:latest
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        env:
        - name: MY_POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        # Harbor API endpoint (with admin credentials) the operator drives
        - name: HARBOR_URI
          valueFrom:
            secretKeyRef:
              name: harbor-admin-secrets
              key: HARBOR_URI
        volumeMounts:
        # TLS material, presumably for the WIP admission webhook —
        # TODO confirm once the webhook lands
        - name: tls-config
          mountPath: /tls
          readOnly: true
      volumes:
      - name: tls-config
        secret:
          secretName: admission-control
---
# RBAC for the kopf-based operator. Kopf persists handler progress and its
# diffbase in annotations on the watched objects themselves, so the operator
# needs `patch` on the custom resources (not only on their status
# subresource); without it kopf appears to run but cannot record state.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: harbor-operator
rules:
# Kopf cross-operator coordination (peering)
- apiGroups:
  - zalando.org
  resources:
  - clusterkopfpeerings
  verbs:
  - get
  - list
  - watch
  - patch
# Kopf posts events about handled objects
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
# Kopf scans CRDs at startup to (re)establish watches
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - list
  - watch
# Credential secrets provisioned for HarborCredential objects; update/patch
# allow reconciling an already-existing secret
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - create
  - get
  - update
  - patch
# Watched custom resources; `patch` is required for kopf state annotations
- apiGroups:
  - codemowers.io
  resources:
  - clusterharborregistries
  - clusterharborprojectmembers
  - clusterharborprojects
  - harborcredentials
  verbs:
  - get
  - list
  - watch
  - patch
- apiGroups:
  - codemowers.io
  resources:
  - clusterharborregistries/status
  - clusterharborprojectmembers/status
  - clusterharborprojects/status
  - harborcredentials/status
  verbs:
  - patch
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: harbor-operator
---
# Bind the operator to its own least-privilege ClusterRole instead of
# cluster-admin (the previous workaround for the missing patch verbs above)
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: harbor-operator
subjects:
- kind: ServiceAccount
  name: harbor-operator
  namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: harbor-operator
  apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,99 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: harbor-portal
data:
  # nginx configuration serving the Harbor web UI static files.
  # pid file and all temp paths are relocated to /tmp so nginx can run as a
  # non-root user (the stock defaults under /var and /run are not writable).
  nginx.conf: |+
    worker_processes auto;
    pid /tmp/nginx.pid;
    events {
        worker_connections 1024;
    }
    http {
        client_body_temp_path /tmp/client_body_temp;
        proxy_temp_path /tmp/proxy_temp;
        fastcgi_temp_path /tmp/fastcgi_temp;
        uwsgi_temp_path /tmp/uwsgi_temp;
        scgi_temp_path /tmp/scgi_temp;
        server {
            listen 8080;
            listen [::]:8080;
            server_name localhost;
            root /usr/share/nginx/html;
            index index.html index.htm;
            include /etc/nginx/mime.types;
            gzip on;
            gzip_min_length 1000;
            gzip_proxied expired no-cache no-store private auth;
            gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
            location / {
                try_files $uri $uri/ /index.html;
            }
            location = /index.html {
                add_header Cache-Control "no-store, no-cache, must-revalidate";
            }
        }
    }
---
apiVersion: v1
kind: Service
metadata:
  name: harbor-portal
spec:
  ports:
  # Exposed on 80; nginx in the portal pods listens on 8080 (non-root)
  - port: 80
    targetPort: 8080
  selector:
    app: harbor
    component: portal
---
# Harbor portal: stateless nginx serving the web UI, 2 replicas
apiVersion: apps/v1
kind: Deployment
metadata:
  name: harbor-portal
spec:
  replicas: 2
  revisionHistoryLimit: 0
  selector:
    # Anchored so the pod template labels stay in sync with the selector
    matchLabels: &selectorLabels
      app: harbor
      component: portal
  template:
    metadata:
      labels: *selectorLabels
    spec:
      affinity:
        nodeAffinity:
          # Pin to amd64 nodes; presumably the upstream Harbor images are
          # amd64-only — TODO confirm
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/arch
                operator: In
                values:
                - amd64
      securityContext:
        runAsUser: 10000
        fsGroup: 10000
      automountServiceAccountToken: false
      containers:
      - name: portal
        # Quoted like the other component images so the templated value
        # cannot be misparsed by YAML
        image: "{{ .Values.image.repository }}/harbor-portal:{{ .Values.image.tag }}"
        readinessProbe:
          httpGet:
            path: /
            scheme: HTTP
            port: 8080
          initialDelaySeconds: 1
          periodSeconds: 10
        ports:
        - containerPort: 8080
          name: http
        volumeMounts:
        # Override the stock nginx.conf with the non-root-friendly one from
        # the harbor-portal ConfigMap
        - name: portal-config
          mountPath: /etc/nginx/nginx.conf
          subPath: nginx.conf
      volumes:
      - name: portal-config
        configMap:
          name: harbor-portal

View File

@ -0,0 +1,10 @@
{{- range .Values.projects }}
# One public, non-cache ClusterHarborProject (10 GiB quota) per entry in
# .Values.projects. The `---` separator is required inside the range:
# without it, rendering more than one project concatenates the documents
# into a single invalid manifest with duplicate keys.
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterHarborProject
metadata:
  name: {{ . | quote }}
spec:
  cache: false
  public: true
  quota: 10737418240
{{- end }}

View File

@ -0,0 +1,38 @@
{{ if .Values.caches.quay }}
---
# Upstream registry endpoint used by the proxy cache project below
apiVersion: codemowers.io/v1alpha1
kind: ClusterHarborRegistry
metadata:
  name: quay.io
spec:
  type: quay
  endpoint: https://quay.io
---
# Public pull-through cache project, 10 GiB quota
apiVersion: codemowers.io/v1alpha1
kind: ClusterHarborProject
metadata:
  name: quay.io
spec:
  cache: true
  public: true
  quota: 10737418240
{{ end }}
{{ if .Values.caches.docker }}
---
apiVersion: codemowers.io/v1alpha1
kind: ClusterHarborRegistry
metadata:
  name: docker.io
spec:
  type: docker-hub
  # NOTE(review): Harbor's docker-hub adapter normally expects
  # https://hub.docker.com as the endpoint — verify this value works
  endpoint: https://docker.io
---
# Public pull-through cache project, 10 GiB quota
apiVersion: codemowers.io/v1alpha1
kind: ClusterHarborProject
metadata:
  name: docker.io
spec:
  cache: true
  public: true
  quota: 10737418240
{{ end }}

View File

@ -0,0 +1,227 @@
---
# Random password for the registry basic-auth user, rendered twice: as
# plaintext (consumed via REGISTRY_CREDENTIAL_PASSWORD) and as a bcrypt
# htpasswd line mounted into the registry container
apiVersion: codemowers.io/v1alpha1
kind: GeneratedSecret
metadata:
  name: harbor-registry-credentials
spec:
  mapping:
  - key: REGISTRY_CREDENTIAL_PASSWORD
    value: "%(password)s"
  - key: REGISTRY_HTPASSWD
    value: "harbor_registry_user:%(bcrypt)s"
---
# Random value for the registry's HTTP secret (shared state signing key)
apiVersion: codemowers.io/v1alpha1
kind: GeneratedSecret
metadata:
  name: harbor-registry
spec:
  mapping:
  - key: REGISTRY_HTTP_SECRET
    value: "%(password)s"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: harbor-registry
  labels:
    app: harbor
data:
  # Docker distribution config: filesystem storage on the /storage PVC,
  # layer metadata cached in harbor-registry-redis, htpasswd auth against
  # the generated credentials, metrics on the :5001 debug listener.
  config.yml: |+
    version: 0.1
    log:
      level: info
      fields:
        service: registry
    storage:
      filesystem:
        rootdirectory: /storage
      cache:
        layerinfo: redis
      maintenance:
        uploadpurging:
          enabled: true
          age: 168h
          interval: 24h
          dryrun: false
        delete:
          enabled: true
      redirect:
        disable: false
    redis:
      addr: harbor-registry-redis:6379
      db: 0
      readtimeout: 10s
      writetimeout: 10s
      dialtimeout: 10s
      pool:
        maxidle: 100
        maxactive: 500
        idletimeout: 60s
    http:
      addr: :5000
      relativeurls: false
      debug:
        addr: :5001
        prometheus:
          enabled: true
          path: /metrics
    auth:
      htpasswd:
        realm: harbor-registry-basic-realm
        path: /etc/registry/passwd
    validation:
      disabled: true
    compatibility:
      schema1:
        enabled: true
  # Configuration for the harbor-registryctl sidecar
  ctl-config.yml: |+
    ---
    protocol: "http"
    port: 8080
    log_level: info
    registry_config: "/etc/registry/config.yml"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: harbor-registry
labels:
app: harbor
component: registry
spec:
storageClassName: {{ .Values.storage.registry.storageClass }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.storage.registry.storage }}
---
apiVersion: v1
kind: Service
metadata:
  name: harbor-registry
spec:
  ports:
  # Docker distribution API
  - name: http-registry
    port: 5000
  # registryctl sidecar API
  - name: http-controller
    port: 8080
  selector:
    app: harbor
    component: registry
---
# Registry pod: docker distribution plus the harbor-registryctl sidecar,
# both sharing the blob PVC and the rendered config files.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: harbor-registry
spec:
  # Blob storage is a ReadWriteOnce PVC, so only one replica can run
  replicas: 1
  revisionHistoryLimit: 0
  selector:
    # Anchored so the pod template labels stay in sync with the selector
    matchLabels: &selectorLabels
      app: harbor
      component: registry
  template:
    metadata:
      labels: *selectorLabels
    spec:
      affinity:
        nodeAffinity:
          # Pin to amd64 nodes; presumably the upstream Harbor images are
          # amd64-only — TODO confirm
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/arch
                operator: In
                values:
                - amd64
      securityContext:
        runAsUser: 10000
        fsGroup: 10000
        # Skip the recursive chown when the volume is already owned correctly
        fsGroupChangePolicy: OnRootMismatch
      automountServiceAccountToken: false
      terminationGracePeriodSeconds: 120
      containers:
      - name: registry
        image: "{{ .Values.image.repository }}/registry-photon:{{ .Values.image.tag }}"
        readinessProbe:
          httpGet:
            path: /
            scheme: HTTP
            port: 5000
          initialDelaySeconds: 1
          periodSeconds: 10
        args:
        - serve
        - /etc/registry/config.yml
        env:
        - name: REGISTRY_REDIS_PASSWORD
          valueFrom:
            secretKeyRef:
              name: harbor-registry-redis-secrets
              key: REDIS_PASSWORD
        ports:
        - containerPort: 5000
          name: http
        - containerPort: 5001
          name: metrics
        volumeMounts:
        # Whole PVC mounted at /storage; the dangling empty `subPath:` keys
        # from the original were dropped (a null subPath is the same as none)
        - name: registry-data
          mountPath: /storage
        - name: registry-htpasswd
          mountPath: /etc/registry/passwd
          subPath: passwd
        - name: registry-config
          mountPath: /etc/registry/config.yml
          subPath: config.yml
      - name: registryctl
        image: "{{ .Values.image.repository }}/harbor-registryctl:{{ .Values.image.tag }}"
        readinessProbe:
          httpGet:
            path: /api/health
            scheme: HTTP
            port: 8080
          initialDelaySeconds: 1
          periodSeconds: 10
        env:
        - name: CORE_SECRET
          valueFrom:
            secretKeyRef:
              name: harbor-core-secret
              key: CORE_SECRET
        - name: JOBSERVICE_SECRET
          valueFrom:
            secretKeyRef:
              name: harbor-jobservice
              key: JOBSERVICE_SECRET
        - name: REGISTRY_HTTP_SECRET
          valueFrom:
            secretKeyRef:
              name: harbor-registry
              key: REGISTRY_HTTP_SECRET
        ports:
        - containerPort: 8080
          name: http
        volumeMounts:
        - name: registry-data
          mountPath: /storage
        - name: registry-config
          mountPath: /etc/registry/config.yml
          subPath: config.yml
        - name: registry-config
          mountPath: /etc/registryctl/config.yml
          subPath: ctl-config.yml
      volumes:
      # Only the htpasswd line is projected out of the credentials secret
      - name: registry-htpasswd
        secret:
          secretName: harbor-registry-credentials
          items:
          - key: REGISTRY_HTPASSWD
            path: passwd
      - name: registry-config
        configMap:
          name: harbor-registry
      - name: registry-data
        persistentVolumeClaim:
          claimName: harbor-registry

64
templates/ingress.yaml Normal file
View File

@ -0,0 +1,64 @@
{{ if .Values.ingress.enabled }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: harbor
  annotations:
    # Unlimited request body size so large image layers can be pushed
    ingress.kubernetes.io/proxy-body-size: "0"
    ingress.kubernetes.io/ssl-redirect: "true"
    external-dns.alpha.kubernetes.io/target: "{{ .Values.ingress.target }}"
    kubernetes.io/ingress.class: "{{ .Values.ingress.class }}"
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  tls:
  # No secretName: presumably relies on the ingress controller's default
  # (wildcard) certificate — TODO confirm
  - hosts:
    - "{{ .Values.ingress.tls.host }}"
  rules:
  - host: "{{ .Values.ingress.host }}"
    http:
      # API, token service, registry protocol and chart repo paths go to
      # harbor-core; the catch-all / serves the web UI from harbor-portal
      paths:
      - path: /api/
        pathType: Prefix
        backend:
          service:
            name: harbor-core
            port:
              name: http
      - path: /service/
        pathType: Prefix
        backend:
          service:
            name: harbor-core
            port:
              number: 80
      - path: /v2/
        pathType: Prefix
        backend:
          service:
            name: harbor-core
            port:
              number: 80
      - path: /chartrepo/
        pathType: Prefix
        backend:
          service:
            name: harbor-core
            port:
              number: 80
      - path: /c/
        pathType: Prefix
        backend:
          service:
            name: harbor-core
            port:
              number: 80
      - path: /
        pathType: Prefix
        backend:
          service:
            name: harbor-portal
            port:
              number: 80
{{ end }}

26
templates/postgres.yaml Normal file
View File

@ -0,0 +1,26 @@
# Crunchy Data PGO cluster: 3-instance Postgres 14 backing Harbor,
# spread across failure domains via required pod anti-affinity.
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: harbor
spec:
  postgresVersion: 14
  instances:
  - name: postgres
    replicas: 3
    dataVolumeClaimSpec:
      storageClassName: {{ .Values.storage.postgres.storageClass }}
      accessModes:
      - "ReadWriteOnce"
      resources:
        requests:
          storage: {{ .Values.storage.postgres.storage }}
    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
        - topologyKey: {{ .Values.topologyKey }}
          labelSelector:
            matchLabels:
              postgres-operator.crunchydata.com/cluster: harbor
  backups:
    pgbackrest:
      # NOTE(review): no backup repos configured — pgBackRest normally
      # requires at least one repo; confirm the operator accepts an empty
      # list, and that running without backups is intentional
      repos: []

21
templates/redis.yaml Normal file
View File

@ -0,0 +1,21 @@
---
# Three Redis-compatible KeyDB clusters, one per Harbor component that
# needs a cache/queue backend (core, jobservice, registry).
apiVersion: codemowers.io/v1alpha1
kind: KeyDBCluster
metadata:
  name: harbor-core-redis
spec:
  replicas: 3
---
apiVersion: codemowers.io/v1alpha1
kind: KeyDBCluster
metadata:
  name: harbor-jobservice-redis
spec:
  replicas: 3
---
apiVersion: codemowers.io/v1alpha1
kind: KeyDBCluster
metadata:
  name: harbor-registry-redis
spec:
  replicas: 3

24
values.yaml Normal file
View File

@ -0,0 +1,24 @@
# Default chart values for harbor-operator.
# Failure domain key for pod anti-affinity rules
topologyKey: kubernetes.io/hostname
ingress:
  enabled: true
  host: harbor.k-space.ee
  tls:
    host: "*.k-space.ee"
  target: traefik.k-space.ee
  class: traefik
# Which upstream registries get a pull-through cache project
caches:
  docker: true
  quay: true
image:
  repository: goharbor
  tag: v2.6.2
# Renamed from `persistence` to `storage`: the templates read
# .Values.storage.{postgres,registry}, so values under `persistence`
# were silently ignored and rendered empty storageClassName/storage
storage:
  postgres:
    storageClass: postgres
    storage: 5Gi
  registry:
    storageClass: longhorn
    storage: 30Gi
# Harbor projects to initialize
projects: []