Skip to content
This repository was archived by the owner on Apr 25, 2022. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ jobs:
- name: run golangci-lint
uses: golangci/golangci-lint-action@v2
with:
version: v1.35
version: v1.35.2
static-test:
runs-on: ubuntu-20.04
steps:
Expand All @@ -45,7 +45,7 @@ jobs:
e2e:
needs: [image-build]
runs-on: self-hosted
timeout-minutes: 40
timeout-minutes: 70
strategy:
matrix:
config:
Expand Down
2 changes: 2 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,8 @@ e2e-deploy: registry docker-build docker-push deploy
hack/e2e.sh bootstrap
hack/e2e.sh update_cm_after_delete
hack/e2e.sh update_secret_after_delete
hack/e2e.sh add_host
hack/e2e.sh add_disk
hack/e2e.sh delete_cluster

e2e: e2e-deploy clean
Expand Down
23 changes: 17 additions & 6 deletions api/v1alpha1/cephcluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,19 +53,30 @@ type CephClusterSpec struct {
Config map[string]string `json:"config,omitempty"`
}

// CephClusterState is the current state of CephCluster
type CephClusterState string
// CephClusterCondition is the current condition of CephCluster
type CephClusterCondition string

const (
// ConditionReadyToUse indicates CephCluster is ready to use
ConditionReadyToUse = "ReadyToUse"
ConditionReadyToUse CephClusterCondition = "ReadyToUse"
// ConditionBootstrapped indicates CephCluster is bootstrapped
ConditionBootstrapped CephClusterCondition = "Bootstrapped"
// ConditionOsdDeployed indicates CephCluster is deployed with osds
ConditionOsdDeployed CephClusterCondition = "OsdDeployed"
)

// CephClusterState is the current state of CephCluster
type CephClusterState string

const (
// CephClusterStatePending indicates CephClusterState is creating or updating
CephClusterStatePending CephClusterState = "Pending"
// CephClusterStateCreating indicates CephClusterState is creating
CephClusterStateCreating CephClusterState = "Creating"
// CephClusterStateCompleted indicates CephClusterState is completed
CephClusterStateCompleted CephClusterState = "Completed"
// CephClusterStateUpdating indicates CephClusterState is updating
CephClusterStateUpdating CephClusterState = "Updating"
// CephClusterStateRunning indicates CephClusterState is available
CephClusterStateRunning CephClusterState = "Running"
// CephClusterStateError indicates CephClusterState is error
CephClusterStateError CephClusterState = "Error"
)
Expand All @@ -74,7 +85,7 @@ const (
type CephClusterStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file

DeployNode Node `json:"deployNode,omitempty"`
State CephClusterState `json:"state"`
Conditions []metav1.Condition `json:"conditions,omitempty"`
}
Expand Down
1 change: 1 addition & 0 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

19 changes: 19 additions & 0 deletions config/crd/bases/hypersds.tmax.io_cephclusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,25 @@ spec:
- type
type: object
type: array
deployNode:
description: 'INSERT ADDITIONAL STATUS FIELD - define observed state
of cluster Important: Run "make" to regenerate code after modifying
this file'
properties:
hostName:
type: string
ip:
type: string
password:
type: string
userId:
type: string
required:
- hostName
- ip
- password
- userId
type: object
state:
description: CephClusterState is the current state of CephCluster
type: string
Expand Down
10 changes: 8 additions & 2 deletions config/manager/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,13 @@
resources:
- manager.yaml
patches:
- target:
- path: manager_resource_patch.yaml
target:
kind: Deployment
name: controller-manager
path: manager_resource_patch.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: 192.168.9.21:5000/hypersds-operator
newTag: latest
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
apiVersion: hypersds.tmax.io/v1alpha1
kind: CephCluster
metadata:
name: cephcluster-sample
spec:
mon:
count: 1
osd:
- hostName: centos-node5
devices:
- /dev/sdb
- /dev/sdc
- hostName: centos-node4
devices:
- /dev/sdb
nodes:
- ip: 192.168.33.21
userId: root
password: "ck@3434"
hostName: centos-node4
- ip: 192.168.33.22
userId: root
password: "ck@3434"
hostName: centos-node5
23 changes: 23 additions & 0 deletions config/samples/hypersds_v1alpha1_cephcluster_hostadd.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
apiVersion: hypersds.tmax.io/v1alpha1
kind: CephCluster
metadata:
name: cephcluster-sample
spec:
mon:
count: 1
osd:
- hostName: centos-node4
devices:
- /dev/sdb
- hostName: centos-node5
devices:
- /dev/sdb
nodes:
- ip: 192.168.33.21
userId: root
password: "ck@3434"
hostName: centos-node4
- ip: 192.168.33.22
userId: root
password: "ck@3434"
hostName: centos-node5
25 changes: 15 additions & 10 deletions controllers/cephcluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,10 +72,17 @@ func (r *CephClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error)
return nil
}
if err := syncAll(); err != nil {
if err2 := r.updateStateWithReadyToUse(hypersdsv1alpha1.CephClusterStateError, metav1.ConditionFalse, "SeeMessages", err.Error()); err2 != nil {
if err2 := r.updateState(hypersdsv1alpha1.CephClusterStateError); err2 != nil {
return ctrl.Result{}, err2
}
if err2 := r.updateCondition(&metav1.Condition{
Type: string(hypersdsv1alpha1.ConditionReadyToUse),
Status: metav1.ConditionFalse,
Reason: "SeeMessages",
Message: err.Error(),
}); err2 != nil {
return ctrl.Result{}, err2
}
return ctrl.Result{}, err
}

return ctrl.Result{}, nil
Expand All @@ -92,14 +99,12 @@ func (r *CephClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
Complete(r)
}

func (r *CephClusterReconciler) updateStateWithReadyToUse(state hypersdsv1alpha1.CephClusterState, readyToUseStatus metav1.ConditionStatus,
reason, message string) error {
meta.SetStatusCondition(&r.Cluster.Status.Conditions, metav1.Condition{
Type: hypersdsv1alpha1.ConditionReadyToUse,
Status: readyToUseStatus,
Reason: reason,
Message: message,
})
// updateState sets the CephCluster's Status.State to the given state and
// immediately persists it through the status subresource client.
func (r *CephClusterReconciler) updateState(state hypersdsv1alpha1.CephClusterState) error {
	r.Cluster.Status.State = state
	return r.Client.Status().Update(context.TODO(), r.Cluster)
}

// updateCondition merges the given condition into the CephCluster's status
// conditions (meta.SetStatusCondition replaces an existing condition of the
// same Type or appends a new one) and persists the result via the status
// subresource client.
func (r *CephClusterReconciler) updateCondition(condition *metav1.Condition) error {
	meta.SetStatusCondition(&r.Cluster.Status.Conditions, *condition)
	return r.Client.Status().Update(context.TODO(), r.Cluster)
}
16 changes: 15 additions & 1 deletion controllers/configmap.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,21 @@ func (r *CephClusterReconciler) syncConfigMap() error {
}

klog.Infof("syncConfigMap: creating config map %s", r.Cluster.Name)
if err := r.updateStateWithReadyToUse(v1alpha1.CephClusterStateCreating, metav1.ConditionFalse, "CephClusterIsCreating", "CephCluster is creating"); err != nil {
if err := r.updateState(v1alpha1.CephClusterStateCreating); err != nil {
return err
}
if err := r.updateCondition(&metav1.Condition{
Type: string(v1alpha1.ConditionBootstrapped),
Status: metav1.ConditionFalse,
Reason: "BootstrappingIsNotFinished",
}); err != nil {
return err
}
if err := r.updateCondition(&metav1.Condition{
Type: string(v1alpha1.ConditionReadyToUse),
Status: metav1.ConditionFalse,
Reason: "CephClusterIsCreating",
}); err != nil {
return err
}

Expand Down
2 changes: 1 addition & 1 deletion controllers/configmap_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ var _ = Describe("syncConfigMap", func() {
cc := &hypersdsv1alpha1.CephCluster{}
err := r.Client.Get(context.TODO(), types.NamespacedName{Namespace: r.Cluster.Namespace, Name: r.Cluster.Name}, cc)
Expect(err).Should(BeNil())
cond := meta.FindStatusCondition(cc.Status.Conditions, hypersdsv1alpha1.ConditionReadyToUse)
cond := meta.FindStatusCondition(cc.Status.Conditions, string(hypersdsv1alpha1.ConditionReadyToUse))
Expect(cond).ShouldNot(BeNil())
Expect(cond.Status).Should(Equal(metav1.ConditionFalse))
})
Expand Down
39 changes: 38 additions & 1 deletion controllers/provisioner.go
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
package controllers

import (
"context"
"github.com/tmax-cloud/hypersds-operator/api/v1alpha1"
"github.com/tmax-cloud/hypersds-operator/pkg/provisioner/provisioner"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
)
Expand All @@ -23,6 +25,10 @@ func (r *CephClusterReconciler) syncProvisioner() error {
return err
}

if err := r.updateDeployNode(); err != nil {
return err
}

klog.Infof("syncProvisioner: bootstrapping ceph cluster %s", r.Cluster.Name)
provisionerInstance, err := provisioner.NewProvisioner(r.Cluster.Spec, r.Client, r.Cluster.Namespace, r.Cluster.Name)
if err != nil {
Expand All @@ -31,9 +37,40 @@ func (r *CephClusterReconciler) syncProvisioner() error {
if err := provisionerInstance.Run(); err != nil {
return err
}
if err := r.updateStateWithReadyToUse(v1alpha1.CephClusterStateCompleted, v1.ConditionTrue, "CephClusterIsReady", "Ceph cluster is ready to use"); err != nil {
if err := r.updateState(v1alpha1.CephClusterStateRunning); err != nil {
return err
}
if err := r.updateCondition(&v1.Condition{
Type: string(v1alpha1.ConditionReadyToUse),
Status: v1.ConditionTrue,
Reason: "CephClusterIsReady",
}); err != nil {
return err
}
if meta.IsStatusConditionFalse(r.Cluster.Status.Conditions, string(v1alpha1.ConditionBootstrapped)) {
if err := r.updateCondition(&v1.Condition{
Type: string(v1alpha1.ConditionBootstrapped),
Status: v1.ConditionTrue,
Reason: "CephClusterIsBootstrapped",
}); err != nil {
return err
}
}

return nil
}

// updateDeployNode records the node used for ceph deployment in the
// CephCluster status. It writes only when Status.DeployNode is still the
// zero value, so the deploy node chosen on the first reconcile is sticky
// across subsequent reconciles.
func (r *CephClusterReconciler) updateDeployNode() error {
	if r.Cluster.Status.DeployNode == (v1alpha1.Node{}) {
		// No deploy node recorded yet: pick the default and persist it.
		r.Cluster.Status.DeployNode = r.getDefaultDeployNode()
		if err := r.Client.Status().Update(context.TODO(), r.Cluster); err != nil {
			return err
		}
	}
	return nil
}

// getDefaultDeployNode decides which node ceph is deployed from.
// Currently the first node listed in Spec.Nodes is always chosen.
// NOTE(review): this indexes Spec.Nodes[0] unconditionally and panics if the
// node list is empty — confirm CRD validation guarantees at least one node.
func (r *CephClusterReconciler) getDefaultDeployNode() v1alpha1.Node {
	return r.Cluster.Spec.Nodes[0]
}
12 changes: 11 additions & 1 deletion hack/e2e.sh
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ case "${1:-}" in
bootstrap)
echo "deploying ceph cluster cr ..."
kubectl apply -f config/samples/hypersds_v1alpha1_cephcluster.yaml
wait_condition "kubectl get cephclusters.hypersds.tmax.io | grep Completed" 2400 60
wait_condition "kubectl get cephclusters.hypersds.tmax.io | grep Running" 2400 60
;;
update_cm_after_delete)
echo "deleting configmap ..."
Expand All @@ -43,6 +43,16 @@ case "${1:-}" in
sleep 3
wait_condition "kubectl describe secret cephcluster-sample-keyring | grep keyring:" 300 60
;;
add_host)
echo "adding host ..."
kubectl apply -f config/samples/hypersds_v1alpha1_cephcluster_hostadd.yaml
sleep 600
;;
add_disk)
echo "adding disk ..."
kubectl apply -f config/samples/hypersds_v1alpha1_cephcluster_diskadd_unordered.yaml
sleep 600
;;
delete_cluster)
echo "deleting ceph cluster cr ..."
kubectl delete -f config/samples/hypersds_v1alpha1_cephcluster.yaml
Expand Down
Loading