forked from k-space/kube
		
	Initial commit
tigera-operator/README.md (new file, 9 lines)
@@ -0,0 +1,9 @@
# Calico CNI

Calico implements the inter-pod overlay network.

```
curl https://projectcalico.docs.tigera.io/manifests/tigera-operator.yaml -O
curl https://projectcalico.docs.tigera.io/manifests/custom-resources.yaml -O
kubectl apply -f tigera-operator.yaml -f custom-resources.yaml
```
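A rough sketch of verifying the rollout afterwards; the `tigerastatus` resource and the `calico-system` namespace are assumptions based on what a default operator install creates, so adjust if your cluster differs:

```
# Wait until the operator reports the Calico components as Available
kubectl get tigerastatus

# The Calico data-plane pods land in the calico-system namespace
kubectl get pods -n calico-system
```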
							
								
								
									
tigera-operator/cleanup.sh (new file, 64 lines)
@@ -0,0 +1,64 @@
#!/bin/bash

NAMESPACE=${NAMESPACE:-longhorn-system}

# Delete all instances of a CRD and block until the API reports none left
remove_and_wait() {
  local crd=$1
  out=$(kubectl -n "${NAMESPACE}" delete "$crd" --all 2>&1)
  if [ $? -ne 0 ]; then
    echo "$out"
    return
  fi
  while true; do
    kubectl -n "${NAMESPACE}" get "$crd" -o yaml | grep -q 'items: \[\]'
    if [ $? -eq 0 ]; then
      break
    fi
    sleep 1
  done
  echo "all $crd instances deleted"
}

remove_crd_instances() {
  remove_and_wait volumes.longhorn.rancher.io
  # TODO: remove engines and replicas once https://github.com/rancher/longhorn/issues/273 is fixed
  remove_and_wait engines.longhorn.rancher.io
  remove_and_wait replicas.longhorn.rancher.io
  remove_and_wait engineimages.longhorn.rancher.io
  remove_and_wait settings.longhorn.rancher.io
  # Delete nodes last; the manager crashes otherwise
  remove_and_wait nodes.longhorn.rancher.io
}

# Delete driver-related workloads in a specific order
remove_driver() {
  kubectl -n "${NAMESPACE}" delete deployment.apps/longhorn-driver-deployer
  kubectl -n "${NAMESPACE}" delete daemonset.apps/longhorn-csi-plugin
  kubectl -n "${NAMESPACE}" delete statefulset.apps/csi-attacher
  kubectl -n "${NAMESPACE}" delete service/csi-attacher
  kubectl -n "${NAMESPACE}" delete statefulset.apps/csi-provisioner
  kubectl -n "${NAMESPACE}" delete service/csi-provisioner
  kubectl -n "${NAMESPACE}" delete daemonset.apps/longhorn-flexvolume-driver
}

# Delete all remaining workloads in the namespace
remove_workloads() {
  kubectl -n "${NAMESPACE}" get daemonset.apps -o yaml | kubectl delete -f -
  kubectl -n "${NAMESPACE}" get deployment.apps -o yaml | kubectl delete -f -
  kubectl -n "${NAMESPACE}" get replicaset.apps -o yaml | kubectl delete -f -
  kubectl -n "${NAMESPACE}" get statefulset.apps -o yaml | kubectl delete -f -
  kubectl -n "${NAMESPACE}" get pods -o yaml | kubectl delete -f -
  kubectl -n "${NAMESPACE}" get service -o yaml | kubectl delete -f -
}

# Delete the CRD definitions themselves (anything with longhorn.rancher.io in the name)
remove_crds() {
  for crd in $(kubectl get crd -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | grep 'longhorn\.rancher\.io'); do
    kubectl delete "crd/$crd"
  done
}

remove_crd_instances
remove_driver
remove_workloads
remove_crds
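The script is destructive and assumes cluster-admin credentials; a minimal way to invoke it, with the namespace override being the only knob it exposes (`my-longhorn` below is just a placeholder namespace):

```
chmod +x cleanup.sh
./cleanup.sh                        # cleans up the default longhorn-system namespace
NAMESPACE=my-longhorn ./cleanup.sh  # or point it at another namespace
```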
							
								
								
									
tigera-operator/custom-resources.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/v3.23/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
    - blockSize: 26
      cidr: 192.168.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()

---

# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/v3.23/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
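Once both manifests are applied, a hedged way to read back what the operator accepted; the Installation name (`default`) comes from this file, while the `ippools.projectcalico.org` API is only served after the Calico API server above becomes Available:

```
# Read back the IP pool configuration recorded in the Installation resource
kubectl get installation default -o jsonpath='{.spec.calicoNetwork.ipPools}'

# Once the Calico API server is up, the generated IPPool objects can be listed too
kubectl get ippools.projectcalico.org
```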
							
								
								
									
										6097
									
								
								tigera-operator/tigera-operator.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										6097
									
								
								tigera-operator/tigera-operator.yaml
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							