Initial PoC of the csi driver

Mehran Kholdi 2020-04-23 18:22:01 +04:30
parent 98139d428e
commit 6f23dbe067
10 changed files with 368 additions and 1 deletion

consts.py Normal file

@@ -0,0 +1 @@
DATA_DIR = "/data"

deploy/00-csi.yaml Normal file

@@ -0,0 +1,9 @@
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: rawfile.hamravesh.com
spec:
attachRequired: false
podInfoOnMount: true
volumeLifecycleModes:
- Persistent
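
This commit does not ship a StorageClass; the following is a minimal sketch of one that would select the driver, not part of the commit. Only the provisioner value is taken from the manifest above; the class name, reclaim policy, and binding mode are assumptions. WaitForFirstConsumer delays provisioning until a pod is scheduled, which is what lets the external-provisioner hand the chosen node's topology to CreateVolume.

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rawfile  # hypothetical name, for illustration only
provisioner: rawfile.hamravesh.com  # must match the CSIDriver name above
reclaimPolicy: Delete  # assumption
volumeBindingMode: WaitForFirstConsumer  # assumption; fits the topology-pinned provisioning below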

deploy/00-rbac.yaml Normal file

@@ -0,0 +1,51 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: rawfile-csi-controller
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rawfile-csi-provisioner-role
subjects:
- kind: ServiceAccount
name: rawfile-csi-controller
namespace: kube-system
roleRef:
kind: ClusterRole
name: rawfile-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,49 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: rawfile-csi-controller
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: rawfile-csi-controller
template:
metadata:
labels:
app: rawfile-csi-controller
spec:
serviceAccount: rawfile-csi-controller
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
operator: Equal
value: "true"
effect: NoSchedule
volumes:
- name: socket-dir
emptyDir: {}
containers:
- name: csi-driver
image: registry.hamdocker.ir/hamravesh/rawfile-csi:master
imagePullPolicy: Always
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:v1.6.0
imagePullPolicy: IfNotPresent
args:
- "--csi-address=$(ADDRESS)"
- "--feature-gates=Topology=true"
- "--strict-topology"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi


@@ -0,0 +1,86 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: rawfile-csi-node
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: "100%"
selector:
matchLabels:
app: rawfile-csi-node
template:
metadata:
labels:
app: rawfile-csi-node
spec:
serviceAccount: rawfile-csi-controller
hostNetwork: true
hostIPC: true
priorityClassName: system-node-critical
tolerations:
- operator: "Exists"
volumes:
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/rawfile-csi
type: DirectoryOrCreate
- name: mountpoint-dir
hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
- name: data-dir
hostPath:
path: /var/csi/rawfile
type: DirectoryOrCreate
containers:
- name: csi-driver
image: registry.hamdocker.ir/hamravesh/rawfile-csi:master
imagePullPolicy: Always
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: mountpoint-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- name: data-dir
mountPath: /data
- name: node-driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- |
rm -rf /registration/rawfile-csi /csi/csi.sock
args:
- --v=5
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/rawfile-csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
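
With the controller Deployment and this node DaemonSet in place, a PersistentVolumeClaim is what kicks off provisioning: the csi-provisioner sidecar watches claims and calls CreateVolume on the driver. A hedged sketch of such a claim, not part of this commit and reusing the hypothetical rawfile StorageClass sketched earlier:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rawfile-example  # hypothetical name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: rawfile  # the hypothetical class sketched above
  resources:
    requests:
      storage: 1Gi  # note: NodePublishVolume below currently hardcodes a 1G image regardless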

rawfile.py Executable file

@@ -0,0 +1,38 @@
#!/usr/bin/env python3
import logging
from concurrent import futures

import click
import grpc

import rawfile_servicer
from csi import csi_pb2_grpc


@click.group()
def cli():
    pass


@cli.command()
@click.option("--endpoint", envvar="CSI_ENDPOINT", default="0.0.0.0:5000")
@click.option("--nodeid", envvar="NODE_ID")
def csi_driver(endpoint, nodeid):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    csi_pb2_grpc.add_IdentityServicer_to_server(
        rawfile_servicer.RawFileIdentityServicer(), server
    )
    csi_pb2_grpc.add_NodeServicer_to_server(
        rawfile_servicer.RawFileNodeServicer(node_name=nodeid), server
    )
    csi_pb2_grpc.add_ControllerServicer_to_server(
        rawfile_servicer.RawFileControllerServicer(), server
    )
    server.add_insecure_port(endpoint)
    server.start()
    server.wait_for_termination()


if __name__ == "__main__":
    logging.basicConfig()
    cli()

rawfile_servicer.py Normal file

@@ -0,0 +1,104 @@
from pathlib import Path

from google.protobuf.wrappers_pb2 import BoolValue

from consts import DATA_DIR
from csi import csi_pb2, csi_pb2_grpc
from util import log_grpc_request, run

NODE_NAME_TOPOLOGY_KEY = "hostname"


class RawFileIdentityServicer(csi_pb2_grpc.IdentityServicer):
    @log_grpc_request
    def GetPluginInfo(self, request, context):
        return csi_pb2.GetPluginInfoResponse(
            name="rawfile.hamravesh.com", vendor_version="0.0.1"
        )

    @log_grpc_request
    def GetPluginCapabilities(self, request, context):
        Cap = csi_pb2.PluginCapability
        return csi_pb2.GetPluginCapabilitiesResponse(
            capabilities=[
                Cap(service=Cap.Service(type=Cap.Service.CONTROLLER_SERVICE)),
                Cap(
                    service=Cap.Service(
                        type=Cap.Service.VOLUME_ACCESSIBILITY_CONSTRAINTS
                    )
                ),
            ]
        )

    @log_grpc_request
    def Probe(self, request, context):
        return csi_pb2.ProbeResponse(ready=BoolValue(value=True))


class RawFileNodeServicer(csi_pb2_grpc.NodeServicer):
    def __init__(self, node_name):
        self.node_name = node_name

    @log_grpc_request
    def NodeGetCapabilities(self, request, context):
        return csi_pb2.NodeGetCapabilitiesResponse(capabilities=[])

    @log_grpc_request
    def NodePublishVolume(self, request, context):
        mount_path = request.target_path
        img_dir = Path(f"{DATA_DIR}/{request.volume_id}")
        img_file = Path(f"{img_dir}/raw.img")
        img_dir.mkdir(parents=False, exist_ok=True)
        if not img_file.exists():
            run(f"truncate -s1G {img_file}")
            run(f"mkfs.ext4 {img_file}")
        run(f"mount {img_file} {mount_path}")
        return csi_pb2.NodePublishVolumeResponse()

    @log_grpc_request
    def NodeUnpublishVolume(self, request, context):
        mount_path = request.target_path
        run(f"umount {mount_path}")
        return csi_pb2.NodeUnpublishVolumeResponse()

    @log_grpc_request
    def NodeGetInfo(self, request, context):
        return csi_pb2.NodeGetInfoResponse(
            node_id=self.node_name,
            accessible_topology=csi_pb2.Topology(
                segments={NODE_NAME_TOPOLOGY_KEY: self.node_name}
            ),
        )


class RawFileControllerServicer(csi_pb2_grpc.ControllerServicer):
    @log_grpc_request
    def ControllerGetCapabilities(self, request, context):
        Cap = csi_pb2.ControllerServiceCapability
        return csi_pb2.ControllerGetCapabilitiesResponse(
            capabilities=[Cap(rpc=Cap.RPC(type=Cap.RPC.CREATE_DELETE_VOLUME))]
        )

    @log_grpc_request
    def CreateVolume(self, request, context):
        # TODO: capacity_range
        # TODO: volume_capabilities
        node_name = request.accessibility_requirements.preferred[0].segments[
            NODE_NAME_TOPOLOGY_KEY
        ]
        return csi_pb2.CreateVolumeResponse(
            volume=csi_pb2.Volume(
                volume_id=request.name,
                capacity_bytes=0,
                accessible_topology=[
                    csi_pb2.Topology(segments={NODE_NAME_TOPOLOGY_KEY: node_name})
                ],
            )
        )

    @log_grpc_request
    def DeleteVolume(self, request, context):
        pv_name = request.volume_id
        # TODO: Run a pod on that node to scrub the data
        return csi_pb2.DeleteVolumeResponse()
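
A Pod that mounts the claim is what drives the node-side path: kubelet calls NodePublishVolume above, which lazily creates /data/<volume_id>/raw.img (1G, ext4) and mounts it at the pod's target path. A hedged sketch of such a consumer, not part of this commit and reusing the hypothetical names from earlier:

apiVersion: v1
kind: Pod
metadata:
  name: rawfile-example-pod  # hypothetical name
spec:
  containers:
    - name: app
      image: busybox
      command: ["sh", "-c", "echo hello > /mnt/data/hello && sleep 3600"]
      volumeMounts:
        - name: data
          mountPath: /mnt/data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: rawfile-example  # the hypothetical PVC sketched earlier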

requirements.in

@@ -1 +1,3 @@
 grpcio-tools
+grpcio
+click

requirements.txt

@@ -4,8 +4,9 @@
 #
 # pip-compile
 #
+click==7.1.1 # via -r requirements.in
 grpcio-tools==1.28.1 # via -r requirements.in
-grpcio==1.28.1 # via grpcio-tools
+grpcio==1.28.1 # via -r requirements.in, grpcio-tools
 protobuf==3.11.3 # via grpcio-tools
 six==1.14.0 # via grpcio, protobuf

util.py Normal file

@@ -0,0 +1,26 @@
import functools
import subprocess


def indent(obj, lvl):
    return "\n".join([(lvl * " ") + line for line in str(obj).splitlines()])


def log_grpc_request(func):
    @functools.wraps(func)
    def wrap(self, request, context):
        res = func(self, request, context)
        print(
            f"""{func.__name__}({{
{indent(request, 2)}
}}) = {{
{indent(res, 2)}
}}"""
        )
        return res

    return wrap


def run(cmd):
    return subprocess.run(cmd, shell=True, check=True)