diff --git a/internal/controller/aggregates_controller_test.go b/internal/controller/aggregates_controller_test.go index fe98165..7f714a3 100644 --- a/internal/controller/aggregates_controller_test.go +++ b/internal/controller/aggregates_controller_test.go @@ -101,6 +101,7 @@ var _ = Describe("AggregatesController", func() { aggregatesController *AggregatesController fakeServer testhelper.FakeServer hypervisorName = types.NamespacedName{Name: "hv-test"} + reconcileRequest = ctrl.Request{NamespacedName: hypervisorName} ) BeforeEach(func(ctx SpecContext) { @@ -113,6 +114,7 @@ var _ = Describe("AggregatesController", func() { Fail("Unhandled request to fake server: " + r.Method + " " + r.URL.Path) }) + By("Creating hypervisor resource with lifecycle enabled") hypervisor := &kvmv1.Hypervisor{ ObjectMeta: metav1.ObjectMeta{ Name: hypervisorName.Name, @@ -126,6 +128,7 @@ var _ = Describe("AggregatesController", func() { Expect(k8sClient.Delete(ctx, hypervisor)).To(Succeed()) }) + By("Setting onboarding condition to false") Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ Type: kvmv1.ConditionTypeOnboarding, @@ -143,143 +146,293 @@ var _ = Describe("AggregatesController", func() { } }) - JustBeforeEach(func(ctx SpecContext) { - _, err := aggregatesController.Reconcile(ctx, ctrl.Request{NamespacedName: hypervisorName}) - Expect(err).NotTo(HaveOccurred()) - }) + Context("Happy Path", func() { + JustBeforeEach(func(ctx SpecContext) { + result, err := aggregatesController.Reconcile(ctx, reconcileRequest) + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + }) - // Tests - Context("Adding new Aggregate", func() { - BeforeEach(func(ctx SpecContext) { - By("Setting a missing aggregate") - hypervisor := &kvmv1.Hypervisor{} - Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) - hypervisor.Spec.Aggregates = []string{"test-aggregate1"} - 
Expect(k8sClient.Update(ctx, hypervisor)).To(Succeed()) - - // Mock resourceproviders.GetAggregates - fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - _, err := fmt.Fprint(w, AggregateListBodyEmpty) - Expect(err).NotTo(HaveOccurred()) + Context("Adding new Aggregate", func() { + BeforeEach(func(ctx SpecContext) { + By("Setting a missing aggregate") + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + hypervisor.Spec.Aggregates = []string{"test-aggregate1"} + Expect(k8sClient.Update(ctx, hypervisor)).To(Succeed()) + + By("Mocking GetAggregates to return empty list") + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, AggregateListBodyEmpty) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Mocking CreateAggregate") + fakeServer.Mux.HandleFunc("POST /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, AggregatesPostBody) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Mocking AddHost") + fakeServer.Mux.HandleFunc("POST /os-aggregates/42/action", func(w http.ResponseWriter, r *http.Request) { + Expect(r.Header.Get("Content-Type")).To(Equal("application/json")) + expectedBody := `{"add_host":{"host":"hv-test"}}` + body := make([]byte, r.ContentLength) + _, err := r.Body.Read(body) + Expect(err == nil || err.Error() == EOF).To(BeTrue()) + Expect(string(body)).To(MatchJSON(expectedBody)) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err = fmt.Fprint(w, AggregateAddHostBody) + Expect(err).NotTo(HaveOccurred()) + }) }) - fakeServer.Mux.HandleFunc("POST /os-aggregates", 
func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - _, err := fmt.Fprint(w, AggregatesPostBody) - Expect(err).NotTo(HaveOccurred()) + It("should update Aggregates and set status condition as Aggregates differ", func(ctx SpecContext) { + updated := &kvmv1.Hypervisor{} + Expect(aggregatesController.Client.Get(ctx, hypervisorName, updated)).To(Succeed()) + Expect(updated.Status.Aggregates).To(ContainElements("test-aggregate1")) + Expect(meta.IsStatusConditionTrue(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated)).To(BeTrue()) }) + }) - // Mock resourceproviders.UpdateAggregates - fakeServer.Mux.HandleFunc("POST /os-aggregates/42/action", func(w http.ResponseWriter, r *http.Request) { - // parse request - Expect(r.Header.Get("Content-Type")).To(Equal("application/json")) - expectedBody := `{"add_host":{"host":"hv-test"}}` - body := make([]byte, r.ContentLength) - _, err := r.Body.Read(body) - Expect(err == nil || err.Error() == EOF).To(BeTrue()) - Expect(string(body)).To(MatchJSON(expectedBody)) - - // send response - w.Header().Add("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - _, err = fmt.Fprint(w, AggregateAddHostBody) - Expect(err).NotTo(HaveOccurred()) + Context("Removing Aggregate", func() { + BeforeEach(func(ctx SpecContext) { + By("Setting existing aggregates in status") + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + hypervisor.Status.Aggregates = []string{"test-aggregate2", "test-aggregate3"} + Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + + By("Mocking GetAggregates to return full list") + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, AggregateListBodyFull) + Expect(err).NotTo(HaveOccurred()) + }) + + 
By("Mocking RemoveHost for both aggregates") + expectRemoveHostFromAggregate := func(w http.ResponseWriter, r *http.Request) { + Expect(r.Header.Get("Content-Type")).To(Equal("application/json")) + expectedBody := `{"remove_host":{"host":"hv-test"}}` + body := make([]byte, r.ContentLength) + _, err := r.Body.Read(body) + Expect(err == nil || err.Error() == EOF).To(BeTrue()) + Expect(string(body)).To(MatchJSON(expectedBody)) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err = fmt.Fprint(w, AggregateRemoveHostBody) + Expect(err).NotTo(HaveOccurred()) + } + fakeServer.Mux.HandleFunc("POST /os-aggregates/100001/action", expectRemoveHostFromAggregate) + fakeServer.Mux.HandleFunc("POST /os-aggregates/99/action", expectRemoveHostFromAggregate) }) - }) - It("should update Aggregates and set status condition as Aggregates differ", func(ctx SpecContext) { - updated := &kvmv1.Hypervisor{} - Expect(aggregatesController.Client.Get(ctx, hypervisorName, updated)).To(Succeed()) - Expect(updated.Status.Aggregates).To(ContainElements("test-aggregate1")) - Expect(meta.IsStatusConditionTrue(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated)).To(BeTrue()) + It("should update Aggregates and set status condition when Aggregates differ", func(ctx SpecContext) { + updated := &kvmv1.Hypervisor{} + Expect(aggregatesController.Client.Get(ctx, hypervisorName, updated)).To(Succeed()) + Expect(updated.Status.Aggregates).To(BeEmpty()) + Expect(meta.IsStatusConditionTrue(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated)).To(BeTrue()) + }) }) }) - Context("Removing Aggregate", func() { - BeforeEach(func(ctx SpecContext) { - hypervisor := &kvmv1.Hypervisor{} - Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) - // update status to have existing aggregate - hypervisor.Status.Aggregates = []string{"test-aggregate2", "test-aggregate3"} - Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) - - // 
Mock resourceproviders.GetAggregates - fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - _, err := fmt.Fprint(w, AggregateListBodyFull) - Expect(err).NotTo(HaveOccurred()) - }) - expectRemoveHostFromAggregate := func(w http.ResponseWriter, r *http.Request) { - // parse request - Expect(r.Header.Get("Content-Type")).To(Equal("application/json")) - expectedBody := `{"remove_host":{"host":"hv-test"}}` - body := make([]byte, r.ContentLength) - _, err := r.Body.Read(body) - Expect(err == nil || err.Error() == EOF).To(BeTrue()) - Expect(string(body)).To(MatchJSON(expectedBody)) - - // send response - w.Header().Add("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - _, err = fmt.Fprint(w, AggregateRemoveHostBody) - Expect(err).NotTo(HaveOccurred()) - } - fakeServer.Mux.HandleFunc("POST /os-aggregates/100001/action", expectRemoveHostFromAggregate) - fakeServer.Mux.HandleFunc("POST /os-aggregates/99/action", expectRemoveHostFromAggregate) + Context("Guard Conditions", func() { + JustBeforeEach(func(ctx SpecContext) { + result, err := aggregatesController.Reconcile(ctx, reconcileRequest) + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) }) - It("should update Aggregates and set status condition when Aggregates differ", func(ctx SpecContext) { - updated := &kvmv1.Hypervisor{} - Expect(aggregatesController.Client.Get(ctx, hypervisorName, updated)).To(Succeed()) - Expect(updated.Status.Aggregates).To(BeEmpty()) - Expect(meta.IsStatusConditionTrue(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated)).To(BeTrue()) + Context("before onboarding", func() { + BeforeEach(func(ctx SpecContext) { + By("Removing the onboarding condition") + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + hypervisor.Status.Conditions = []metav1.Condition{} + 
Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + }) + + It("should neither update Aggregates and nor set status condition", func(ctx SpecContext) { + updated := &kvmv1.Hypervisor{} + Expect(aggregatesController.Client.Get(ctx, hypervisorName, updated)).To(Succeed()) + Expect(updated.Status.Aggregates).To(BeEmpty()) + Expect(meta.IsStatusConditionTrue(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated)).To(BeFalse()) + }) }) - }) - Context("before onboarding", func() { - BeforeEach(func(ctx SpecContext) { - hypervisor := &kvmv1.Hypervisor{} - Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) - // Remove the onboarding condition - hypervisor.Status.Conditions = []metav1.Condition{} - Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + Context("when terminating", func() { + BeforeEach(func(ctx SpecContext) { + By("Setting terminating condition") + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ + Type: kvmv1.ConditionTypeTerminating, + Status: metav1.ConditionTrue, + Reason: "dontcare", + Message: "dontcare", + }) + Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + }) + + It("should neither update Aggregates and nor set status condition", func(ctx SpecContext) { + updated := &kvmv1.Hypervisor{} + Expect(aggregatesController.Client.Get(ctx, hypervisorName, updated)).To(Succeed()) + Expect(updated.Status.Aggregates).To(BeEmpty()) + Expect(meta.IsStatusConditionTrue(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated)).To(BeFalse()) + }) }) + }) - It("should neither update Aggregates and nor set status condition", func(ctx SpecContext) { + Context("Failure Modes", func() { + var sharedErrorConditionChecks = func(ctx SpecContext, expectedMessage string) { updated := &kvmv1.Hypervisor{} Expect(aggregatesController.Client.Get(ctx, hypervisorName, 
updated)).To(Succeed()) - Expect(updated.Status.Aggregates).To(BeEmpty()) - Expect(meta.IsStatusConditionTrue(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated)).To(BeFalse()) + Expect(meta.IsStatusConditionFalse(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated)).To(BeTrue()) + cond := meta.FindStatusCondition(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated) + Expect(cond).NotTo(BeNil()) + Expect(cond.Reason).To(Equal(kvmv1.ConditionReasonFailed)) + Expect(cond.Message).To(ContainSubstring(expectedMessage)) + } + + Context("when GetAggregates fails", func() { + BeforeEach(func(ctx SpecContext) { + By("Setting a missing aggregate") + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + hypervisor.Spec.Aggregates = []string{"test-aggregate1"} + Expect(k8sClient.Update(ctx, hypervisor)).To(Succeed()) + + By("Mocking GetAggregates to fail") + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + _, err := fmt.Fprint(w, `{"error": "Internal Server Error"}`) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + It("should set error condition", func(ctx SpecContext) { + _, err := aggregatesController.Reconcile(ctx, reconcileRequest) + Expect(err).To(HaveOccurred()) + sharedErrorConditionChecks(ctx, "failed listing aggregates") + }) }) - }) - Context("when terminating", func() { - BeforeEach(func(ctx SpecContext) { - hypervisor := &kvmv1.Hypervisor{} - Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) - // Remove the onboarding condition - meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ - Type: kvmv1.ConditionTypeTerminating, - Status: metav1.ConditionTrue, - Reason: "dontcare", - Message: "dontcare", + Context("when AddToAggregate fails", func() { + BeforeEach(func(ctx SpecContext) { + 
By("Setting a missing aggregate") + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + hypervisor.Spec.Aggregates = []string{"test-aggregate1"} + Expect(k8sClient.Update(ctx, hypervisor)).To(Succeed()) + + By("Mocking GetAggregates") + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, AggregateListBodyEmpty) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Mocking CreateAggregate") + fakeServer.Mux.HandleFunc("POST /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, AggregatesPostBody) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Mocking AddHost to fail") + fakeServer.Mux.HandleFunc("POST /os-aggregates/42/action", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusConflict) + _, err := fmt.Fprint(w, `{"conflictingRequest": {"message": "Cannot add host to aggregate", "code": 409}}`) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + It("should set error condition", func(ctx SpecContext) { + _, err := aggregatesController.Reconcile(ctx, reconcileRequest) + Expect(err).To(HaveOccurred()) + sharedErrorConditionChecks(ctx, "encountered errors during aggregate update") }) - Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) }) - It("should neither update Aggregates and nor set status condition", func(ctx SpecContext) { - updated := &kvmv1.Hypervisor{} - Expect(aggregatesController.Client.Get(ctx, hypervisorName, updated)).To(Succeed()) - Expect(updated.Status.Aggregates).To(BeEmpty()) - Expect(meta.IsStatusConditionTrue(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated)).To(BeFalse()) + Context("when RemoveFromAggregate fails", func() { + 
BeforeEach(func(ctx SpecContext) { + By("Setting existing aggregate in status") + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + hypervisor.Status.Aggregates = []string{"test-aggregate2"} + Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + + By("Mocking GetAggregates") + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, AggregateListBodyFull) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Mocking RemoveHost to fail") + fakeServer.Mux.HandleFunc("POST /os-aggregates/100001/action", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusConflict) + _, err := fmt.Fprint(w, `{"conflictingRequest": {"message": "Cannot remove host from aggregate", "code": 409}}`) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + It("should set error condition", func(ctx SpecContext) { + _, err := aggregatesController.Reconcile(ctx, reconcileRequest) + Expect(err).To(HaveOccurred()) + sharedErrorConditionChecks(ctx, "encountered errors during aggregate update") + }) + }) + + Context("when setErrorCondition does not change status", func() { + BeforeEach(func(ctx SpecContext) { + By("Setting a missing aggregate and pre-existing error condition") + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + hypervisor.Spec.Aggregates = []string{"test-aggregate1"} + Expect(k8sClient.Update(ctx, hypervisor)).To(Succeed()) + + By("Pre-setting the exact same error condition that would be set") + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ + Type: kvmv1.ConditionTypeAggregatesUpdated, + Status: metav1.ConditionFalse, + Reason: 
kvmv1.ConditionReasonFailed, + Message: "failed listing aggregates: test error", + }) + Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + + By("Mocking GetAggregates to fail") + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + _, err := fmt.Fprint(w, `{"error": "test error"}`) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + It("should not update status when condition is already set", func(ctx SpecContext) { + _, err := aggregatesController.Reconcile(ctx, reconcileRequest) + Expect(err).To(HaveOccurred()) + + updated := &kvmv1.Hypervisor{} + Expect(aggregatesController.Client.Get(ctx, hypervisorName, updated)).To(Succeed()) + Expect(meta.IsStatusConditionFalse(updated.Status.Conditions, kvmv1.ConditionTypeAggregatesUpdated)).To(BeTrue()) + }) }) }) }) diff --git a/internal/controller/decomission_controller_test.go b/internal/controller/decomission_controller_test.go index d12df47..06956be 100644 --- a/internal/controller/decomission_controller_test.go +++ b/internal/controller/decomission_controller_test.go @@ -50,7 +50,7 @@ var _ = Describe("Decommission Controller", func() { "availability_zone": "", "deleted": false, "id": 100001, - "hosts": ["note-test"] + "hosts": ["node-test"] } ] } @@ -69,13 +69,12 @@ var _ = Describe("Decommission Controller", func() { var ( decommissionReconciler *NodeDecommissionReconciler resourceName = types.NamespacedName{Name: hypervisorName} - reconcileReq = ctrl.Request{ - NamespacedName: resourceName, - } - fakeServer testhelper.FakeServer + reconcileReq = ctrl.Request{NamespacedName: resourceName} + fakeServer testhelper.FakeServer ) BeforeEach(func(ctx SpecContext) { + By("Setting up the OpenStack http mock server") fakeServer = testhelper.SetupHTTP() DeferCleanup(fakeServer.Teardown) @@ -84,11 +83,13 @@ var _ = Describe("Decommission Controller", func() { 
Fail("Unhandled request to fake server: " + r.Method + " " + r.URL.Path) }) + By("Setting KVM_HA_SERVICE_URL environment variable") os.Setenv("KVM_HA_SERVICE_URL", fakeServer.Endpoint()+"instance-ha") DeferCleanup(func() { os.Unsetenv("KVM_HA_SERVICE_URL") }) + By("Creating the NodeDecommissionReconciler") decommissionReconciler = &NodeDecommissionReconciler{ Client: k8sClient, Scheme: k8sClient.Scheme(), @@ -96,14 +97,14 @@ var _ = Describe("Decommission Controller", func() { placementClient: client.ServiceClient(fakeServer), } - By("creating the namespace for the reconciler") + By("Creating the namespace for the reconciler") ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespaceName}} Expect(k8sclient.IgnoreAlreadyExists(k8sClient.Create(ctx, ns))).To(Succeed()) DeferCleanup(func(ctx SpecContext) { Expect(k8sClient.Delete(ctx, ns)).To(Succeed()) }) - By("Create the hypervisor resource with lifecycle enabled") + By("Creating the hypervisor resource with lifecycle enabled") hypervisor := &kvmv1.Hypervisor{ ObjectMeta: metav1.ObjectMeta{ Name: resourceName.Name, @@ -118,139 +119,497 @@ var _ = Describe("Decommission Controller", func() { }) }) - Context("When marking the hypervisor terminating", func() { + Context("Happy Path", func() { + Context("When marking the hypervisor terminating", func() { + JustBeforeEach(func(ctx SpecContext) { + By("Reconciling first to add the finalizer") + _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + + By("Marking the hypervisor for termination") + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) + hypervisor.Spec.Maintenance = kvmv1.MaintenanceTermination + Expect(k8sClient.Update(ctx, hypervisor)).To(Succeed()) + + By("Setting terminating condition") + meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ + Type: kvmv1.ConditionTypeTerminating, + Status: metav1.ConditionTrue, + Reason: 
"dontcare", + Message: "dontcare", + }) + Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + }) + + Context("when the hypervisor was set to ready and has been evicted", func() { + var getHypervisorsCalled int + + BeforeEach(func(ctx SpecContext) { + getHypervisorsCalled = 0 + + By("Setting hypervisor to ready and evicted") + hv := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, resourceName, hv)).To(Succeed()) + meta.SetStatusCondition(&hv.Status.Conditions, metav1.Condition{ + Type: kvmv1.ConditionTypeReady, + Status: metav1.ConditionTrue, + Reason: "dontcare", + Message: "dontcare", + }) + meta.SetStatusCondition(&hv.Status.Conditions, metav1.Condition{ + Type: kvmv1.ConditionTypeEvicting, + Status: metav1.ConditionFalse, + Reason: "dontcare", + Message: "dontcare", + }) + Expect(k8sClient.Status().Update(ctx, hv)).To(Succeed()) + + By("Mocking OpenStack API endpoints") + fakeServer.Mux.HandleFunc("GET /os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + getHypervisorsCalled++ + Expect(fmt.Fprintf(w, HypervisorWithServers, serviceId, "some reason", hypervisorName)).ToNot(BeNil()) + }) + + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, AggregateListWithHv) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("POST /os-aggregates/100001/action", func(w http.ResponseWriter, r *http.Request) { + Expect(r.Header.Get("Content-Type")).To(Equal("application/json")) + expectedBody := `{"remove_host":{"host":"node-test"}}` + body := make([]byte, r.ContentLength) + _, err := r.Body.Read(body) + Expect(err == nil || err.Error() == EOF).To(BeTrue()) + Expect(string(body)).To(MatchJSON(expectedBody)) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) 
+ _, err = fmt.Fprint(w, AggregateRemoveHostBody) + Expect(err).NotTo(HaveOccurred()) + }) + + // c48f6247-abe4-4a24-824e-ea39e108874f comes from the HypervisorWithServers const + fakeServer.Mux.HandleFunc("GET /resource_providers/c48f6247-abe4-4a24-824e-ea39e108874f", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, `{"uuid": "rp-uuid", "name": "hv-test"}`) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("GET /resource_providers/rp-uuid/allocations", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := fmt.Fprint(w, `{"allocations": {}}`) + Expect(err).NotTo(HaveOccurred()) + }) + + fakeServer.Mux.HandleFunc("DELETE /resource_providers/rp-uuid", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusAccepted) + }) + + fakeServer.Mux.HandleFunc("DELETE /os-services/service-1234", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + }) + }) + + It("should set the hypervisor ready condition", func(ctx SpecContext) { + _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) + Expect(hypervisor.Status.Conditions).To(ContainElement( + SatisfyAll( + HaveField("Type", kvmv1.ConditionTypeReady), + HaveField("Status", metav1.ConditionFalse), + HaveField("Reason", "Decommissioning"), + ), + )) + }) + + It("should set the hypervisor offboarded condition", func(ctx SpecContext) { + for range 3 { + _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + } + Expect(getHypervisorsCalled).To(BeNumerically(">", 0)) + + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) + 
Expect(hypervisor.Status.Conditions).To(ContainElement( + SatisfyAll( + HaveField("Type", kvmv1.ConditionTypeOffboarded), + HaveField("Status", metav1.ConditionTrue), + ), + )) + }) + }) + }) + }) + + Context("Guard Conditions", func() { JustBeforeEach(func(ctx SpecContext) { - By("reconciling first to add the finalizer") - _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) + result, err := decommissionReconciler.Reconcile(ctx, reconcileReq) Expect(err).NotTo(HaveOccurred()) + Expect(result).To(Equal(ctrl.Result{})) + }) + + Context("When lifecycle is not enabled, but maintenance is set to termination", func() { + BeforeEach(func(ctx SpecContext) { + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) + hypervisor.Spec.Maintenance = kvmv1.MaintenanceTermination + hypervisor.Spec.LifecycleEnabled = false + Expect(k8sClient.Update(ctx, hypervisor)).To(Succeed()) + }) + + It("should not reconcile", func(ctx SpecContext) { + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) + Expect(hypervisor.Status.Conditions).To(BeEmpty()) + }) + }) + + Context("When maintenance is not set to termination", func() { + BeforeEach(func(ctx SpecContext) { + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) + hypervisor.Spec.Maintenance = kvmv1.MaintenanceManual + Expect(k8sClient.Update(ctx, hypervisor)).To(Succeed()) + }) + + It("should not reconcile", func(ctx SpecContext) { + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) + Expect(hypervisor.Status.Conditions).To(BeEmpty()) + }) + }) + }) + Context("Failure Modes", func() { + var sharedDecommissioningErrorCheck = func(ctx SpecContext, expectedMessageSubstring string) { hypervisor := &kvmv1.Hypervisor{} Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) + 
Expect(hypervisor.Status.Conditions).To(ContainElement( + SatisfyAll( + HaveField("Type", kvmv1.ConditionTypeReady), + HaveField("Status", metav1.ConditionFalse), + HaveField("Reason", "Decommissioning"), + HaveField("Message", ContainSubstring(expectedMessageSubstring)), + ), + )) + } - By("and then marking the hypervisor terminating") + BeforeEach(func(ctx SpecContext) { + By("Setting hypervisor to maintenance termination mode") + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) hypervisor.Spec.Maintenance = kvmv1.MaintenanceTermination Expect(k8sClient.Update(ctx, hypervisor)).To(Succeed()) - meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ - Type: kvmv1.ConditionTypeTerminating, - Status: metav1.ConditionTrue, - Reason: "dontcare", - Message: "dontcare", + }) + + Context("When getting hypervisor by name fails", func() { + BeforeEach(func() { + fakeServer.Mux.HandleFunc("GET /os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, `{"error": "Internal Server Error"}`) + }) + }) + + It("should set decommissioning condition with error", func(ctx SpecContext) { + _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + sharedDecommissioningErrorCheck(ctx, "") }) - Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) }) - When("the hypervisor was set to ready and has been evicted", func() { - getHypervisorsCalled := 0 - BeforeEach(func(ctx SpecContext) { - hv := &kvmv1.Hypervisor{} - Expect(k8sClient.Get(ctx, resourceName, hv)).To(Succeed()) - meta.SetStatusCondition(&hv.Status.Conditions, - metav1.Condition{ - Type: kvmv1.ConditionTypeReady, - Status: metav1.ConditionTrue, - Reason: "dontcare", - Message: "dontcare", - }, - ) - meta.SetStatusCondition(&hv.Status.Conditions, metav1.Condition{ - Type: kvmv1.ConditionTypeEvicting, - Status: 
metav1.ConditionFalse, - Reason: "dontcare", - Message: "dontcare", + Context("When hypervisor still has running VMs", func() { + BeforeEach(func() { + hvResponse := `{ + "hypervisors": [{ + "id": "c48f6247-abe4-4a24-824e-ea39e108874f", + "hypervisor_hostname": "node-test", + "hypervisor_version": 2002000, + "state": "up", + "status": "enabled", + "running_vms": 2, + "service": { + "id": "service-1234", + "host": "node-test", + "disabled_reason": "" + } + }] + }` + + fakeServer.Mux.HandleFunc("GET /os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, hvResponse) }) - Expect(k8sClient.Status().Update(ctx, hv)).To(Succeed()) + }) + + It("should set decommissioning condition about running VMs", func(ctx SpecContext) { + _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + sharedDecommissioningErrorCheck(ctx, "still has 2 running VMs") + }) + }) + Context("When listing aggregates fails", func() { + BeforeEach(func() { fakeServer.Mux.HandleFunc("GET /os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - getHypervisorsCalled++ - Expect(fmt.Fprintf(w, HypervisorWithServers, serviceId, "some reason", hypervisorName)).ToNot(BeNil()) + fmt.Fprintf(w, `{ + "hypervisors": [{ + "id": "c48f6247-abe4-4a24-824e-ea39e108874f", + "hypervisor_hostname": "node-test", + "hypervisor_version": 2002000, + "state": "up", + "status": "enabled", + "running_vms": 0, + "service": { + "id": "service-1234", + "host": "node-test" + } + }] + }`) }) fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, `{"error": "Internal Server Error"}`) + }) + }) + + It("should set decommissioning condition with error", func(ctx SpecContext) { + _, 
err := decommissionReconciler.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + sharedDecommissioningErrorCheck(ctx, "cannot list aggregates") + }) + }) + + Context("When removing host from aggregate fails", func() { + BeforeEach(func() { + fakeServer.Mux.HandleFunc("GET /os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - - _, err := fmt.Fprint(w, AggregateListWithHv) - Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(w, `{ + "hypervisors": [{ + "id": "c48f6247-abe4-4a24-824e-ea39e108874f", + "hypervisor_hostname": "node-test", + "hypervisor_version": 2002000, + "state": "up", + "status": "enabled", + "running_vms": 0, + "service": { + "id": "service-1234", + "host": "node-test" + } + }] + }`) }) - fakeServer.Mux.HandleFunc("POST /os-aggregates/100001/action", func(w http.ResponseWriter, r *http.Request) { - // parse request - Expect(r.Header.Get("Content-Type")).To(Equal("application/json")) - expectedBody := `{"remove_host":{"host":"hv-test"}}` - body := make([]byte, r.ContentLength) - _, err := r.Body.Read(body) - Expect(err == nil || err.Error() == EOF).To(BeTrue()) - Expect(string(body)).To(MatchJSON(expectedBody)) - - // send response + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") w.WriteHeader(http.StatusOK) + fmt.Fprint(w, AggregateListWithHv) + }) - _, err = fmt.Fprint(w, AggregateRemoveHostBody) - Expect(err).NotTo(HaveOccurred()) + fakeServer.Mux.HandleFunc("POST /os-aggregates/100001/action", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, `{"error": "Internal Server Error"}`) + }) + + // Add handlers for subsequent steps even though we expect failure earlier + fakeServer.Mux.HandleFunc("DELETE /os-services/service-1234", func(w http.ResponseWriter, r *http.Request) { + 
w.WriteHeader(http.StatusNoContent) }) - // c48f6247-abe4-4a24-824e-ea39e108874f comes from the HypervisorWithServers const fakeServer.Mux.HandleFunc("GET /resource_providers/c48f6247-abe4-4a24-824e-ea39e108874f", func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - - _, err := fmt.Fprint(w, `{"uuid": "rp-uuid", "name": "hv-test"}`) - Expect(err).NotTo(HaveOccurred()) + fmt.Fprint(w, `{"uuid": "rp-uuid", "name": "hv-test"}`) }) fakeServer.Mux.HandleFunc("GET /resource_providers/rp-uuid/allocations", func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - - _, err := fmt.Fprint(w, `{"allocations": {}}}`) - Expect(err).NotTo(HaveOccurred()) - + fmt.Fprint(w, `{"allocations": {}}`) }) + fakeServer.Mux.HandleFunc("DELETE /resource_providers/rp-uuid", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusAccepted) }) - - fakeServer.Mux.HandleFunc("DELETE /os-services/service-1234", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNoContent) - }) }) - It("should set the hypervisor ready condition", func(ctx SpecContext) { - By("reconciling the created resource") + It("should set decommissioning condition with error", func(ctx SpecContext) { _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) Expect(err).NotTo(HaveOccurred()) + hypervisor := &kvmv1.Hypervisor{} Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) + Expect(hypervisor.Status.Conditions).To(ContainElement( SatisfyAll( HaveField("Type", kvmv1.ConditionTypeReady), HaveField("Status", metav1.ConditionFalse), HaveField("Reason", "Decommissioning"), + HaveField("Message", ContainSubstring("failed to remove host")), ), )) + + // Should NOT be offboarded yet + Expect(hypervisor.Status.Conditions).NotTo(ContainElement( + HaveField("Type", kvmv1.ConditionTypeOffboarded), + )) }) + }) - It("should set the 
hypervisor offboarded condition", func(ctx SpecContext) { - By("reconciling the created resource") - for range 3 { - _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) - Expect(err).NotTo(HaveOccurred()) - } - Expect(getHypervisorsCalled).To(BeNumerically(">", 0)) + Context("When deleting service fails", func() { + BeforeEach(func() { + fakeServer.Mux.HandleFunc("GET /os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "hypervisors": [{ + "id": "c48f6247-abe4-4a24-824e-ea39e108874f", + "hypervisor_hostname": "node-test", + "hypervisor_version": 2002000, + "state": "up", + "status": "enabled", + "running_vms": 0, + "service": { + "id": "service-1234", + "host": "node-test" + } + }] + }`) + }) - hypervisor := &kvmv1.Hypervisor{} - Expect(k8sClient.Get(ctx, resourceName, hypervisor)).To(Succeed()) - Expect(hypervisor.Status.Conditions).To(ContainElement( - SatisfyAll( - HaveField("Type", kvmv1.ConditionTypeOffboarded), - HaveField("Status", metav1.ConditionTrue), - ), - )) + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, `{"aggregates": []}`) + }) + + fakeServer.Mux.HandleFunc("DELETE /os-services/service-1234", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, `{"error": "Internal Server Error"}`) + }) + }) + + It("should set decommissioning condition with error", func(ctx SpecContext) { + _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + sharedDecommissioningErrorCheck(ctx, "cannot delete service") }) }) + Context("When getting resource provider fails", func() { + BeforeEach(func() { + fakeServer.Mux.HandleFunc("GET /os-hypervisors/detail", func(w http.ResponseWriter, r 
*http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "hypervisors": [{ + "id": "c48f6247-abe4-4a24-824e-ea39e108874f", + "hypervisor_hostname": "node-test", + "hypervisor_version": 2002000, + "state": "up", + "status": "enabled", + "running_vms": 0, + "service": { + "id": "service-1234", + "host": "node-test" + } + }] + }`) + }) + + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, `{"aggregates": []}`) + }) + + fakeServer.Mux.HandleFunc("DELETE /os-services/service-1234", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + }) + + fakeServer.Mux.HandleFunc("GET /resource_providers/c48f6247-abe4-4a24-824e-ea39e108874f", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, `{"error": "Internal Server Error"}`) + }) + }) + + It("should set decommissioning condition with error", func(ctx SpecContext) { + _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + sharedDecommissioningErrorCheck(ctx, "cannot get resource provider") + }) + }) + + Context("When cleaning up resource provider fails", func() { + BeforeEach(func() { + fakeServer.Mux.HandleFunc("GET /os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "hypervisors": [{ + "id": "c48f6247-abe4-4a24-824e-ea39e108874f", + "hypervisor_hostname": "node-test", + "hypervisor_version": 2002000, + "state": "up", + "status": "enabled", + "running_vms": 0, + "service": { + "id": "service-1234", + "host": "node-test" + } + }] + }`) + }) + + fakeServer.Mux.HandleFunc("GET /os-aggregates", func(w http.ResponseWriter, r *http.Request) { + 
w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, `{"aggregates": []}`) + }) + + fakeServer.Mux.HandleFunc("DELETE /os-services/service-1234", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + }) + + fakeServer.Mux.HandleFunc("GET /resource_providers/c48f6247-abe4-4a24-824e-ea39e108874f", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, `{"uuid": "rp-uuid", "name": "hv-test"}`) + }) + + fakeServer.Mux.HandleFunc("GET /resource_providers/rp-uuid/allocations", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, `{"allocations": {}}`) + }) + + fakeServer.Mux.HandleFunc("DELETE /resource_providers/rp-uuid", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, `{"error": "Internal Server Error"}`) + }) + }) + + It("should set decommissioning condition with error", func(ctx SpecContext) { + _, err := decommissionReconciler.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + sharedDecommissioningErrorCheck(ctx, "cannot clean up resource provider") + }) + }) }) }) diff --git a/internal/controller/eviction_controller_test.go b/internal/controller/eviction_controller_test.go index e762a65..d70d855 100644 --- a/internal/controller/eviction_controller_test.go +++ b/internal/controller/eviction_controller_test.go @@ -112,8 +112,8 @@ var _ = Describe("Eviction Controller", func() { }) Describe("API validation", func() { - When("creating an eviction without hypervisor", func() { - It("it should fail creating the resource", func(ctx SpecContext) { + Context("When creating an eviction without hypervisor", func() { + It("should fail creating the resource", func(ctx SpecContext) { resource := &kvmv1.Eviction{ ObjectMeta: 
evictionObjectMeta, Spec: kvmv1.EvictionSpec{ @@ -125,8 +125,8 @@ var _ = Describe("Eviction Controller", func() { }) }) - When("creating an eviction without reason", func() { - It("it should fail creating the resource", func(ctx SpecContext) { + Context("When creating an eviction without reason", func() { + It("should fail creating the resource", func(ctx SpecContext) { resource := &kvmv1.Eviction{ ObjectMeta: evictionObjectMeta, Spec: kvmv1.EvictionSpec{ @@ -138,9 +138,9 @@ var _ = Describe("Eviction Controller", func() { }) }) - When("creating an eviction with reason and hypervisor", func() { + Context("When creating an eviction with reason and hypervisor", func() { BeforeEach(func(ctx SpecContext) { - By("creating the hypervisor resource") + By("Creating the hypervisor resource") hypervisor := &kvmv1.Hypervisor{ ObjectMeta: metav1.ObjectMeta{ Name: hypervisorName, @@ -151,6 +151,7 @@ var _ = Describe("Eviction Controller", func() { Expect(k8sClient.Delete(ctx, hypervisor)).To(Succeed()) }) }) + It("should successfully create the resource", func(ctx SpecContext) { eviction := &kvmv1.Eviction{ ObjectMeta: evictionObjectMeta, @@ -166,77 +167,50 @@ var _ = Describe("Eviction Controller", func() { }) Describe("Reconciliation", func() { - Describe("an eviction for an onboarded 'test-hypervisor'", func() { - BeforeEach(func(ctx SpecContext) { - By("creating the hypervisor resource") - hypervisor := &kvmv1.Hypervisor{ - ObjectMeta: metav1.ObjectMeta{ - Name: hypervisorName, - }, - } - Expect(k8sClient.Create(ctx, hypervisor)).To(Succeed()) - DeferCleanup(func(ctx SpecContext) { - Expect(k8sClient.Delete(ctx, hypervisor)).To(Succeed()) - }) - - hypervisor.Status.HypervisorID = hypervisorId - hypervisor.Status.ServiceID = serviceId - meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ - Type: kvmv1.ConditionTypeOnboarding, - Status: metav1.ConditionTrue, - Reason: "dontcare", - Message: "dontcare", - }) - 
meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ - Type: kvmv1.ConditionTypeHypervisorDisabled, - Status: metav1.ConditionTrue, - Reason: "dontcare", - Message: "dontcare", - }) - Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) - - By("creating the eviction") - eviction := &kvmv1.Eviction{ - ObjectMeta: evictionObjectMeta, - Spec: kvmv1.EvictionSpec{ - Reason: "test-reason", - Hypervisor: hypervisorName, - }, - } - Expect(k8sClient.Create(ctx, eviction)).To(Succeed()) + BeforeEach(func(ctx SpecContext) { + By("Creating the hypervisor resource") + hypervisor := &kvmv1.Hypervisor{ + ObjectMeta: metav1.ObjectMeta{ + Name: hypervisorName, + }, + } + Expect(k8sClient.Create(ctx, hypervisor)).To(Succeed()) + DeferCleanup(func(ctx SpecContext) { + Expect(k8sClient.Delete(ctx, hypervisor)).To(Succeed()) }) - When("hypervisor is not found in openstack", func() { - BeforeEach(func() { - fakeServer.Mux.HandleFunc("GET /os-hypervisors/{hypervisor_id}", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - }) - }) - - It("should fail reconciliation", func(ctx SpecContext) { - for range 3 { - _, err := evictionReconciler.Reconcile(ctx, reconcileRequest) - Expect(err).NotTo(HaveOccurred()) - } - - resource := &kvmv1.Eviction{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) - - // expect eviction condition to be false due to missing hypervisor - Expect(resource.Status.Conditions).To(ContainElements(SatisfyAll( - HaveField("Status", metav1.ConditionFalse), - HaveField("Type", kvmv1.ConditionTypeEvicting), - HaveField("Reason", "Failed"), - HaveField("Message", ContainSubstring("got 404")), - ))) - - Expect(resource.GetFinalizers()).To(BeEmpty()) - }) - + By("Setting hypervisor status with IDs and conditions") + hypervisor.Status.HypervisorID = hypervisorId + hypervisor.Status.ServiceID = serviceId + meta.SetStatusCondition(&hypervisor.Status.Conditions, 
metav1.Condition{ + Type: kvmv1.ConditionTypeOnboarding, + Status: metav1.ConditionTrue, + Reason: "dontcare", + Message: "dontcare", + }) + meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ + Type: kvmv1.ConditionTypeHypervisorDisabled, + Status: metav1.ConditionTrue, + Reason: "dontcare", + Message: "dontcare", }) - When("enabled hypervisor has no servers", func() { + Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + + By("Creating the eviction resource") + eviction := &kvmv1.Eviction{ + ObjectMeta: evictionObjectMeta, + Spec: kvmv1.EvictionSpec{ + Reason: "test-reason", + Hypervisor: hypervisorName, + }, + } + Expect(k8sClient.Create(ctx, eviction)).To(Succeed()) + }) + + Context("Happy Path", func() { + Context("When enabled hypervisor has no servers", func() { BeforeEach(func(ctx SpecContext) { + By("Mocking hypervisor API to return enabled status") fakeServer.Mux.HandleFunc("GET /os-hypervisors/{hypervisor_id}", func(w http.ResponseWriter, r *http.Request) { rHypervisorId := r.PathValue("hypervisor_id") Expect(rHypervisorId).To(Equal(hypervisorId)) @@ -246,6 +220,7 @@ var _ = Describe("Eviction Controller", func() { Expect(err).To(Succeed()) }) + By("Mocking service update API") fakeServer.Mux.HandleFunc("PUT /os-services/{service_id}", func(w http.ResponseWriter, r *http.Request) { rServiceId := r.PathValue("service_id") Expect(rServiceId).To(Equal(serviceId)) @@ -255,7 +230,8 @@ var _ = Describe("Eviction Controller", func() { Expect(err).To(Succeed()) }) }) - It("should succeed the reconciliation", func(ctx SpecContext) { + + It("should succeed the reconciliation through all phases", func(ctx SpecContext) { runningCond := SatisfyAll( HaveField("Type", kvmv1.ConditionTypeEvicting), HaveField("Status", metav1.ConditionTrue), @@ -286,21 +262,20 @@ var _ = Describe("Eviction Controller", func() { for i, expectation := range expectations { By(fmt.Sprintf("Reconciliation step %d", i+1)) - // Reconcile the resource result, 
err := evictionReconciler.Reconcile(ctx, reconcileRequest) Expect(result).To(Equal(ctrl.Result{})) Expect(err).NotTo(HaveOccurred()) resource := &kvmv1.Eviction{} Expect(k8sClient.Get(ctx, typeNamespacedName, resource)).NotTo(HaveOccurred()) - - // Check the condition Expect(resource.Status.Conditions).To(expectation) } }) }) - When("disabled hypervisor has no servers", func() { + + Context("When disabled hypervisor has no servers", func() { BeforeEach(func(ctx SpecContext) { + By("Mocking hypervisor API to return disabled status") fakeServer.Mux.HandleFunc("GET /os-hypervisors/{hypervisor_id}", func(w http.ResponseWriter, r *http.Request) { rHypervisorId := r.PathValue("hypervisor_id") Expect(rHypervisorId).To(Equal(hypervisorId)) @@ -309,6 +284,8 @@ var _ = Describe("Eviction Controller", func() { _, err := fmt.Fprintf(w, hypervisorTpl, `"some reason"`, "disabled") Expect(err).To(Succeed()) }) + + By("Mocking service update API") fakeServer.Mux.HandleFunc("PUT /os-services/{service_id}", func(w http.ResponseWriter, r *http.Request) { rServiceId := r.PathValue("service_id") Expect(rServiceId).To(Equal(serviceId)) @@ -319,16 +296,13 @@ var _ = Describe("Eviction Controller", func() { }) It("should succeed the reconciliation", func(ctx SpecContext) { - for range 1 { - _, err := evictionReconciler.Reconcile(ctx, reconcileRequest) - Expect(err).NotTo(HaveOccurred()) - } + By("First reconciliation should set eviction to running") + _, err := evictionReconciler.Reconcile(ctx, reconcileRequest) + Expect(err).NotTo(HaveOccurred()) resource := &kvmv1.Eviction{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) + err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - - // expect eviction condition to be true Expect(resource.Status.Conditions).To(ContainElement( SatisfyAll( HaveField("Type", kvmv1.ConditionTypeEvicting), @@ -337,14 +311,14 @@ var _ = Describe("Eviction Controller", func() { ), )) + By("Additional reconciliations 
should complete the eviction") for range 3 { _, err = evictionReconciler.Reconcile(ctx, reconcileRequest) Expect(err).NotTo(HaveOccurred()) } + err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - - // expect reconciliation to be successfully finished Expect(resource.Status.Conditions).To(ContainElement( SatisfyAll( HaveField("Type", kvmv1.ConditionTypeEvicting), @@ -355,5 +329,36 @@ var _ = Describe("Eviction Controller", func() { }) }) }) + + Context("Failure Modes", func() { + Context("When hypervisor is not found in openstack", func() { + BeforeEach(func() { + By("Mocking hypervisor API to return 404") + fakeServer.Mux.HandleFunc("GET /os-hypervisors/{hypervisor_id}", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + }) + }) + + It("should fail reconciliation", func(ctx SpecContext) { + for range 3 { + _, err := evictionReconciler.Reconcile(ctx, reconcileRequest) + Expect(err).NotTo(HaveOccurred()) + } + + resource := &kvmv1.Eviction{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + Expect(resource.Status.Conditions).To(ContainElements(SatisfyAll( + HaveField("Status", metav1.ConditionFalse), + HaveField("Type", kvmv1.ConditionTypeEvicting), + HaveField("Reason", "Failed"), + HaveField("Message", ContainSubstring("got 404")), + ))) + + Expect(resource.GetFinalizers()).To(BeEmpty()) + }) + }) + }) }) }) diff --git a/internal/controller/gardener_node_lifecycle_controller_test.go b/internal/controller/gardener_node_lifecycle_controller_test.go index 8d9d4bd..205b51f 100644 --- a/internal/controller/gardener_node_lifecycle_controller_test.go +++ b/internal/controller/gardener_node_lifecycle_controller_test.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" kvmv1 
"github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" ) @@ -71,7 +72,8 @@ var _ = Describe("Gardener Maintenance Controller", func() { } Expect(k8sClient.Create(ctx, hypervisor)).To(Succeed()) DeferCleanup(func(ctx SpecContext) { - Expect(k8sClient.Delete(ctx, hypervisor)).To(Succeed()) + err := k8sClient.Delete(ctx, hypervisor) + Expect(k8sclient.IgnoreNotFound(err)).To(Succeed()) }) }) @@ -135,4 +137,74 @@ var _ = Describe("Gardener Maintenance Controller", func() { }) }) + + Context("When hypervisor does not exist", func() { + It("should succeed without error", func(ctx SpecContext) { + // Delete the hypervisor - controller should handle this gracefully with IgnoreNotFound + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed()) + Expect(k8sClient.Delete(ctx, hypervisor)).To(Succeed()) + + _, err := controller.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("When lifecycle is not enabled", func() { + BeforeEach(func(ctx SpecContext) { + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed()) + hypervisor.Spec.LifecycleEnabled = false + Expect(k8sClient.Update(ctx, hypervisor)).To(Succeed()) + }) + + It("should return early without error", func(ctx SpecContext) { + _, err := controller.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("When node is terminating and offboarded", func() { + BeforeEach(func(ctx SpecContext) { + // Set node as terminating and add required labels for disableInstanceHA + node := &corev1.Node{} + Expect(k8sClient.Get(ctx, name, node)).To(Succeed()) + node.Labels = map[string]string{ + corev1.LabelHostname: nodeName, + "topology.kubernetes.io/zone": "test-zone", + } + node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{ + Type: "Terminating", + Status: corev1.ConditionTrue, + }) + Expect(k8sClient.Update(ctx, node)).To(Succeed()) + 
Expect(k8sClient.Status().Update(ctx, node)).To(Succeed()) + + // Set hypervisor as onboarded and offboarded + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed()) + meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ + Type: kvmv1.ConditionTypeOnboarding, + Status: metav1.ConditionFalse, + Reason: "Onboarded", + Message: "Onboarding completed", + }) + meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{ + Type: kvmv1.ConditionTypeOffboarded, + Status: metav1.ConditionTrue, + Reason: "Offboarded", + Message: "Offboarding successful", + }) + Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + }) + + It("should allow pod eviction by setting the PDB to minAvailable 0", func(ctx SpecContext) { + _, err := controller.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + + pdb := &policyv1.PodDisruptionBudget{} + Expect(k8sClient.Get(ctx, maintenanceName, pdb)).To(Succeed()) + Expect(pdb.Spec.MinAvailable).To(HaveField("IntVal", BeNumerically("==", int32(0)))) + }) + }) }) diff --git a/internal/controller/hypervisor_controller_test.go b/internal/controller/hypervisor_controller_test.go index 064c940..44a0f49 100644 --- a/internal/controller/hypervisor_controller_test.go +++ b/internal/controller/hypervisor_controller_test.go @@ -406,4 +406,44 @@ var _ = Describe("Hypervisor Controller", func() { }) }) }) + + Context("When node lookup fails", func() { + It("should return error for non-NotFound errors", func(ctx SpecContext) { + // Try to reconcile a node that doesn't exist - should be gracefully ignored + _, err := hypervisorController.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "non-existent-node"}, + }) + // NotFound errors are ignored, so this should not error + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("When node has internal IP", func() { + It("should set internal IP in hypervisor status", func(ctx SpecContext) { + 
// First reconcile to create the hypervisor + _, err := hypervisorController.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{Name: resource.Name}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Now add internal IP to node status + node := &corev1.Node{} + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: resource.Name}, node)).To(Succeed()) + node.Status.Addresses = []corev1.NodeAddress{ + {Type: corev1.NodeInternalIP, Address: "192.168.1.100"}, + {Type: corev1.NodeHostName, Address: "test-host"}, + } + Expect(k8sClient.Status().Update(ctx, node)).To(Succeed()) + + // Reconcile again to update the hypervisor with the IP + _, err = hypervisorController.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{Name: resource.Name}, + }) + Expect(err).NotTo(HaveOccurred()) + + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + Expect(hypervisor.Status.InternalIP).To(Equal("192.168.1.100")) + }) + }) }) diff --git a/internal/controller/hypervisor_maintenance_controller_test.go b/internal/controller/hypervisor_maintenance_controller_test.go index 234ec06..9514264 100644 --- a/internal/controller/hypervisor_maintenance_controller_test.go +++ b/internal/controller/hypervisor_maintenance_controller_test.go @@ -402,4 +402,62 @@ var _ = Describe("HypervisorMaintenanceController", func() { } }) }) // Context Onboarded Hypervisor + + Context("Non-existent Hypervisor", func() { + It("should handle gracefully with IgnoreNotFound", func(ctx SpecContext) { + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: "non-existent-hv"}} + _, err := controller.Reconcile(ctx, req) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("Hypervisor still onboarding", func() { + BeforeEach(func(ctx SpecContext) { + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + meta.SetStatusCondition(&hypervisor.Status.Conditions, + 
metav1.Condition{ + Type: kvmv1.ConditionTypeOnboarding, + Status: metav1.ConditionTrue, + Reason: "InProgress", + Message: "Still onboarding", + }, + ) + Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + }) + + It("should return early without error", func(ctx SpecContext) { + // JustBeforeEach already called Reconcile, just verify no errors occurred + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + // Verify onboarding condition is still true (not modified) + Expect(meta.IsStatusConditionTrue(hypervisor.Status.Conditions, kvmv1.ConditionTypeOnboarding)).To(BeTrue()) + }) + }) + + Context("Hypervisor with empty ServiceID", func() { + BeforeEach(func(ctx SpecContext) { + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + hypervisor.Status.ServiceID = "" + hypervisor.Spec.Maintenance = "auto" + meta.SetStatusCondition(&hypervisor.Status.Conditions, + metav1.Condition{ + Type: kvmv1.ConditionTypeOnboarding, + Status: metav1.ConditionFalse, + Reason: metav1.StatusSuccess, + Message: "Onboarded", + }, + ) + Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed()) + }) + + It("should skip reconcileComputeService without error", func(ctx SpecContext) { + // JustBeforeEach already called Reconcile + hypervisor := &kvmv1.Hypervisor{} + Expect(k8sClient.Get(ctx, hypervisorName, hypervisor)).To(Succeed()) + // ServiceID should still be empty + Expect(hypervisor.Status.ServiceID).To(BeEmpty()) + }) + }) }) diff --git a/internal/controller/hypervisor_taint_controller_test.go b/internal/controller/hypervisor_taint_controller_test.go index 271a33b..bfac29b 100644 --- a/internal/controller/hypervisor_taint_controller_test.go +++ b/internal/controller/hypervisor_taint_controller_test.go @@ -95,4 +95,34 @@ var _ = Describe("Hypervisor Taint Controller", func() { ))) }) }) + + Context("When reconciling a non-existent hypervisor", func() { + 
It("should return without error", func(ctx SpecContext) { + nonExistentReq := ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "non-existent-hv"}, + } + _, err := controller.Reconcile(ctx, nonExistentReq) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("When no changes are needed", func() { + It("should return early without updating status", func(ctx SpecContext) { + // First reconcile to set initial condition + _, err := controller.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + + // Second reconcile without any changes - should return early without error + _, err = controller.Reconcile(ctx, reconcileReq) + Expect(err).NotTo(HaveOccurred()) + + // Verify the condition is still correct + Expect(k8sClient.Get(ctx, namespacedName, resource)).To(Succeed()) + Expect(resource.Status.Conditions).To(ContainElement( + SatisfyAll( + HaveField("Type", kvmv1.ConditionTypeTainted), + HaveField("Status", metav1.ConditionFalse), + ))) + }) + }) }) diff --git a/internal/controller/node_certificate_controller_test.go b/internal/controller/node_certificate_controller_test.go index b704006..89cc568 100644 --- a/internal/controller/node_certificate_controller_test.go +++ b/internal/controller/node_certificate_controller_test.go @@ -70,14 +70,14 @@ var _ = Describe("Node Certificate Controller", func() { }, } Expect(fakeClient.Create(ctx, resource)).To(Succeed()) - }) - AfterEach(func(ctx SpecContext) { - node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - By("Cleanup the specific node") - Expect(client.IgnoreAlreadyExists(fakeClient.Delete(ctx, node))).To(Succeed()) + DeferCleanup(func(ctx SpecContext) { + node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + By("Cleanup the specific node") + Expect(client.IgnoreAlreadyExists(fakeClient.Delete(ctx, node))).To(Succeed()) - By("Cleaning up the test environment") + By("Cleaning up the test environment") + }) }) // Tests @@ -100,4 +100,94 @@ var _ = Describe("Node 
Certificate Controller", func() { Expect(certificate.Spec.DNSNames).To(ContainElement(nodeName)) }) }) + + Context("When reconciling a node with various address types", func() { + BeforeEach(func(ctx SpecContext) { + By("Updating node with different address types") + node := &corev1.Node{} + Expect(fakeClient.Get(ctx, types.NamespacedName{Name: nodeName}, node)).To(Succeed()) + node.Status.Addresses = []corev1.NodeAddress{ + {Type: corev1.NodeHostName, Address: "hostname.example.com"}, + {Type: corev1.NodeInternalDNS, Address: "internal.example.com"}, + {Type: corev1.NodeExternalDNS, Address: "external.example.com"}, + {Type: corev1.NodeInternalIP, Address: "10.0.0.1"}, + {Type: corev1.NodeExternalIP, Address: "203.0.113.1"}, + {Type: corev1.NodeHostName, Address: ""}, // Empty address should be skipped + } + Expect(fakeClient.Status().Update(ctx, node)).To(Succeed()) + }) + + It("should create certificate with all DNS names and IP addresses", func(ctx SpecContext) { + By("Reconciling the node") + req := ctrl.Request{ + NamespacedName: types.NamespacedName{Name: nodeName}, + } + _, err := nodeCertificateController.Reconcile(ctx, req) + Expect(err).NotTo(HaveOccurred()) + + By("Checking if the certificate includes all addresses") + _, certName := getSecretAndCertName(nodeName) + certificate := &cmapi.Certificate{} + err = fakeClient.Get(ctx, types.NamespacedName{Name: certName, Namespace: namespace}, certificate) + Expect(err).NotTo(HaveOccurred()) + + // Check DNS names + Expect(certificate.Spec.DNSNames).To(ContainElements( + nodeName, + "hostname.example.com", + "internal.example.com", + "external.example.com", + )) + + // Check IP addresses + Expect(certificate.Spec.IPAddresses).To(ContainElements( + "10.0.0.1", + "203.0.113.1", + )) + }) + }) + + Context("When reconciling a node without nova virt label", func() { + BeforeEach(func(ctx SpecContext) { + By("Creating a node without the hypervisor label") + nodeWithoutLabel := &corev1.Node{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "node-without-label", + }, + } + Expect(fakeClient.Create(ctx, nodeWithoutLabel)).To(Succeed()) + DeferCleanup(func(ctx SpecContext) { + Expect(fakeClient.Delete(ctx, nodeWithoutLabel)).To(Succeed()) + }) + }) + + It("should still create certificate when Reconcile is called directly", func(ctx SpecContext) { + // Note: The label filter is enforced by the controller predicate in SetupWithManager, + // not in the Reconcile method itself. When testing Reconcile directly, it will still + // create the certificate even without the label. + By("Reconciling the node without label") + req := ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "node-without-label"}, + } + _, err := nodeCertificateController.Reconcile(ctx, req) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying certificate was created despite missing label") + _, certName := getSecretAndCertName("node-without-label") + certificate := &cmapi.Certificate{} + err = fakeClient.Get(ctx, types.NamespacedName{Name: certName, Namespace: namespace}, certificate) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("When reconciling a non-existent node", func() { + It("should return without error", func(ctx SpecContext) { + By("Reconciling a non-existent node") + req := ctrl.Request{ + NamespacedName: types.NamespacedName{Name: "non-existent-node"}, + } + _, err := nodeCertificateController.Reconcile(ctx, req) + Expect(err).NotTo(HaveOccurred()) + }) + }) })