terraform-provider-dynamix/internal/service/cloudapi/k8s/utilities/utility_resource_k8s_cp.go

package utilities

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"

	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/lb"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/k8s/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status"
)
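
// CreateRequestResourceK8CP builds a k8s.CreateRequest from the planned
// resource model. Optional attributes are copied only when they are known/set
// in the plan; WithLB defaults to true when it is not configured.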
func CreateRequestResourceK8CP(ctx context.Context, plan *models.ResourceK8SCPModel) k8s.CreateRequest {
	tflog.Info(ctx, "Start CreateRequestResourceK8CP", map[string]any{"name": plan.Name.ValueString()})

	req := k8s.CreateRequest{
		Name:    plan.Name.ValueString(),
		RGID:    uint64(plan.RGID.ValueInt64()),
		K8SCIID: uint64(plan.K8SCIID.ValueInt64()),
		// The API requires a worker group on creation; this temporary group is
		// removed right after the cluster is created (see CheckResourceK8SCPCreateAndDeleteWG).
		WorkerGroupName: "temp",
		NetworkPlugin:   plan.NetworkPlugin.ValueString(),
	}

	if !plan.Num.IsUnknown() {
		req.MasterNum = uint(plan.Num.ValueInt64())
	}
	if !plan.CPU.IsUnknown() {
		req.MasterCPU = uint(plan.CPU.ValueInt64())
	}
	if !plan.RAM.IsUnknown() {
		req.MasterRAM = uint64(plan.RAM.ValueInt64())
	}
	if !plan.Disk.IsUnknown() {
		req.MasterDisk = uint(plan.Disk.ValueInt64())
	}
	if !plan.SEPID.IsNull() {
		req.MasterSEPID = uint64(plan.SEPID.ValueInt64())
	}
	if !plan.SEPPool.IsNull() {
		req.MasterSEPPool = plan.SEPPool.ValueString()
	}
	if !plan.WithLB.IsNull() {
		req.WithLB = plan.WithLB.ValueBool()
	} else {
		req.WithLB = true
	}
	if !plan.ExtNetID.IsUnknown() {
		req.ExtNetID = uint64(plan.ExtNetID.ValueInt64())
	}
	if !plan.VinsId.IsUnknown() {
		req.VinsId = uint64(plan.VinsId.ValueInt64())
	}
	if !plan.HighlyAvailable.IsNull() {
		req.HighlyAvailable = plan.HighlyAvailable.ValueBool()
	}
	if !plan.AdditionalSANs.IsNull() {
		result := make([]string, 0, len(plan.AdditionalSANs.Elements()))
		for _, val := range plan.AdditionalSANs.Elements() {
			result = append(result, strings.Trim(val.String(), "\""))
		}
		req.AdditionalSANs = result
	}
	if !plan.ClusterConfiguration.IsNull() {
		req.ClusterConfiguration = plan.ClusterConfiguration.ValueString()
	}
	if !plan.KubeletConfiguration.IsNull() {
		req.KubeletConfiguration = plan.KubeletConfiguration.ValueString()
	}
	if !plan.KubeProxyConfiguration.IsNull() {
		req.KubeProxyConfiguration = plan.KubeProxyConfiguration.ValueString()
	}
	if !plan.InitConfiguration.IsNull() {
		req.InitConfiguration = plan.InitConfiguration.ValueString()
	}
	if !plan.OidcCertificate.IsNull() {
		req.OidcCertificate = plan.OidcCertificate.ValueString()
	}
	if !plan.Chipset.IsNull() {
		req.Chipset = plan.Chipset.ValueString()
	}
	if !plan.Description.IsNull() {
		req.Description = plan.Description.ValueString()
	}
	if !plan.ExtNetOnly.IsNull() {
		req.ExtNetOnly = plan.ExtNetOnly.ValueBool()
	}
	if !plan.LBSysctlParams.IsNull() {
		result := make([]map[string]interface{}, 0, len(plan.LBSysctlParams.Elements()))
		for _, val := range plan.LBSysctlParams.Elements() {
			objVal := val.(types.Object)
			valMap := objVal.Attributes()
			mapKey := valMap["key"].(types.String).ValueString()
			mapVal := valMap["value"].(types.String).ValueString()
			tempMap := make(map[string]interface{})
			tempMap[mapKey] = mapVal
			result = append(result, tempMap)
		}
		req.LbSysctlParams = result
	}

	tflog.Info(ctx, "End CreateRequestResourceK8CP", map[string]any{"name": plan.Name.ValueString()})
	return req
}
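
// CheckResourceK8SCPCreateAndDeleteWG waits for the cluster creation task to
// complete and then deletes the temporary worker group requested in
// CreateRequestResourceK8CP. The resp argument is expected to be the audit/task
// ID string returned by the K8S create call (presumably performed by the
// resource's Create method). If the worker group cannot be removed, the cluster
// is deleted permanently and the errors are reported through diagnostics.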
func CheckResourceK8SCPCreateAndDeleteWG(ctx context.Context, plan *models.ResourceK8SCPModel, c *client.Client, resp string) diag.Diagnostics {
	tflog.Info(ctx, "Start CheckResourceK8SCPCreateAndDeleteWG", map[string]any{"name": plan.Name.ValueString()})
	diags := diag.Diagnostics{}

	taskReq := tasks.GetRequest{
		AuditID: strings.Trim(resp, `"`),
	}

	for {
		task, err := c.CloudAPI().Tasks().Get(ctx, taskReq)
		if err != nil {
			diags.AddError("The audit cannot be found", err.Error())
			return diags
		}
		tflog.Info(ctx, fmt.Sprintf("ResourceK8sControlPlaneCreate instance creating - %s", task.Stage))

		if task.Completed {
			if task.Error != "" {
				diags.AddError("Cannot create cluster instance:", task.Error)
				return diags
			}
			result, err := task.Result.ID()
			if err != nil {
				diags.AddError("Cannot get cluster ID:", err.Error())
				return diags
			}
			plan.Id = types.StringValue(strconv.Itoa(result))
			plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
			break
		}
		time.Sleep(time.Second * 20)
	}

	k8sID, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("Cannot parse cluster ID from state", err.Error())
		return diags
	}
	cluster, err := K8SCPResourceCheckPresence(ctx, k8sID, c)
	if err != nil {
		diags.AddError("Cannot get info about cluster", err.Error())
		return diags
	}
tflog.Info(ctx, "Start delete Work Group from cluster with ID", map[string]any{"k8s_id": k8sID})
delWGReq := k8s.WorkersGroupDeleteRequest{
K8SID: cluster.ID,
WorkersGroupID: cluster.K8SGroups.Workers[0].ID,
}
_, err = c.CloudAPI().K8S().WorkersGroupDelete(ctx, delWGReq)
if err != nil {
diags.AddError(fmt.Sprintf("Cannot create cluster with ID - %d: platform error, creation cancelled, cluster will be delete permanently", cluster.ID), err.Error())
tflog.Error(ctx, "Start delete cluster with ID", map[string]any{"k8s_id": plan.Id.ValueString()})
_, err = c.CloudAPI().K8S().Delete(ctx, k8s.DeleteRequest{K8SID: cluster.ID, Permanently: true})
if err != nil {
diags.AddError(fmt.Sprintf("Cannot delete cluster with ID - %d, after error creation. Please report this issue to the provider developers.", cluster.ID), err.Error())
return diags
}
tflog.Error(ctx, "Delete cluster successfully", map[string]any{"k8s_id": plan.Id.ValueString()})
return diags
}
tflog.Info(ctx, "End CheckResourceK8CPCreateAndDeleteWG", map[string]any{"k8s_id": cluster.ID})
return diags
}
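
// K8SCPResourceCheckPresence returns the k8s cluster record for the given ID,
// or an error if the cluster cannot be fetched.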
func K8SCPResourceCheckPresence(ctx context.Context, k8SID uint64, c *client.Client) (*k8s.RecordK8S, error) {
	tflog.Info(ctx, "Get info about cluster with ID", map[string]any{"k8s_id": k8SID})

	cluster, err := c.CloudAPI().K8S().Get(ctx, k8s.GetRequest{K8SID: k8SID})
	if err != nil {
		return nil, fmt.Errorf("cannot get info about cluster with error: %w", err)
	}

	tflog.Info(ctx, "Getting info about cluster successfully", map[string]any{"k8s_id": k8SID})
	return cluster, nil
}
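
// K8sListForResourceCheckPresence looks the cluster up via the list endpoint,
// filtered by ID and excluding deleted clusters, and returns the resulting list.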
func K8sListForResourceCheckPresence(ctx context.Context, k8SID uint64, c *client.Client) (*k8s.ListK8SClusters, error) {
	tflog.Info(ctx, "Get info in List about cluster with ID", map[string]any{"k8s_id": k8SID})

	req := k8s.ListRequest{
		IncludeDeleted: false,
		ByID:           k8SID,
	}

	k8sList, err := c.CloudAPI().K8S().List(ctx, req)
	if err != nil {
		return nil, err
	}

	tflog.Info(ctx, "Getting info in List about cluster successfully", map[string]any{"k8s_id": k8SID})
	return k8sList, nil
}
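
// ComputeCheckPresence returns the compute record for the given compute ID.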
func ComputeCheckPresence(ctx context.Context, computeID uint64, c *client.Client) (*compute.RecordCompute, error) {
	tflog.Info(ctx, "Start utilityComputeCheckPresence", map[string]any{"compute_id": computeID})

	req := compute.GetRequest{
		ComputeID: computeID,
	}

	// Named computeRecord so the local variable does not shadow the imported compute package.
	computeRecord, err := c.CloudAPI().Compute().Get(ctx, req)
	if err != nil {
		return nil, err
	}

	tflog.Info(ctx, "End utilityComputeCheckPresence", map[string]any{"compute_id": computeID})
	return computeRecord, nil
}
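
// K8SCPUpdateNameOrDescription updates the cluster name and/or description when
// they differ between the plan and the current state.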
func K8SCPUpdateNameOrDescription(ctx context.Context, plan, state *models.ResourceK8SCPModel, c *client.Client) diag.Diagnostics {
	tflog.Info(ctx, "Update info about cluster with ID", map[string]any{"k8s_id": plan.Id.ValueString()})
	diags := diag.Diagnostics{}

	k8sID, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("Cannot parse cluster ID from state", err.Error())
		return diags
	}

	req := k8s.UpdateRequest{
		K8SID: k8sID,
	}
	if !plan.Name.Equal(state.Name) {
		req.Name = plan.Name.ValueString()
	}
	if !plan.Description.Equal(state.Description) {
		req.Description = plan.Description.ValueString()
	}

	_, err = c.CloudAPI().K8S().Update(ctx, req)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot update cluster with ID - %s", plan.Id.ValueString()), err.Error())
		return diags
	}

	tflog.Info(ctx, "Update info about cluster successfully", map[string]any{"k8s_id": plan.Id.ValueString()})
	return diags
}
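
// K8SCPReadStatus checks the current cluster status and reconciles it with the
// plan: a Deleted cluster is restored (when Restore allows it) and then
// enabled/started according to the plan; Modeled, Destroying, and Destroyed
// statuses are reported as errors.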
func K8SCPReadStatus(ctx context.Context, plan *models.ResourceK8SCPModel, c *client.Client) diag.Diagnostics {
	tflog.Info(ctx, "Read status cluster with ID", map[string]any{"k8s_id": plan.Id.ValueString()})
	diags := diag.Diagnostics{}

	k8sID, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("Cannot parse cluster ID from state", err.Error())
		return diags
	}
	cluster, err := K8SCPResourceCheckPresence(ctx, k8sID, c)
	if err != nil {
		diags.AddError("Cannot get info about cluster", err.Error())
		return diags
	}

	switch cluster.Status {
	case status.Modeled:
		diags.AddError("Error:", fmt.Sprintf("The k8s cluster is in status: %s, please contact support for more information", cluster.Status))
		return diags
	case status.Deleted:
		if plan.Restore.ValueBool() || plan.Restore.IsNull() {
			diags = K8SCPRestore(ctx, plan, c)
			if diags.HasError() {
				tflog.Error(ctx, "Error restore cluster", map[string]any{"k8s_id": plan.Id.ValueString()})
				return diags
			}
		} else {
			diags.AddError("Cluster in status Deleted:", "please clean state, or restore cluster")
			return diags
		}
		if plan.Enabled.ValueBool() || plan.Enabled.IsNull() {
			diags = K8SCPEnableDisable(ctx, plan, c)
			if diags.HasError() {
				tflog.Error(ctx, "Error enable/disable cluster", map[string]any{"k8s_id": plan.Id.ValueString()})
				return diags
			}
			if plan.Start.ValueBool() || plan.Start.IsNull() {
				diags = K8SCPStartStop(ctx, plan, c)
				if diags.HasError() {
					tflog.Error(ctx, "Error start/stop cluster", map[string]any{"k8s_id": plan.Id.ValueString()})
					return diags
				}
			}
		}
	case status.Destroying:
		diags.AddError("Error:", fmt.Sprintf("The k8s cluster is in progress with status: %s", cluster.Status))
		return diags
	case status.Destroyed:
		diags.AddError("Error:", "The resource cannot be updated because it has been destroyed")
		return diags
	}

	tflog.Info(ctx, "Read status cluster successfully", map[string]any{"k8s_id": plan.Id.ValueString()})
	return diags
}
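
// K8SCPRestore restores a previously deleted cluster.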
func K8SCPRestore(ctx context.Context, plan *models.ResourceK8SCPModel, c *client.Client) diag.Diagnostics {
	tflog.Info(ctx, "Restore cluster with ID", map[string]any{"k8s_id": plan.Id.ValueString()})
	diags := diag.Diagnostics{}

	k8sID, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("Cannot parse cluster ID from state", err.Error())
		return diags
	}

	_, err = c.CloudAPI().K8S().Restore(ctx, k8s.RestoreRequest{K8SID: k8sID})
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot restore cluster with ID - %s", plan.Id.ValueString()), err.Error())
		return diags
	}

	tflog.Info(ctx, "Restore cluster successfully", map[string]any{"k8s_id": plan.Id.ValueString()})
	return diags
}
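
// K8SCPEnableDisable enables the cluster when Enabled is true or unset in the
// plan, and disables it when Enabled is explicitly false.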
func K8SCPEnableDisable(ctx context.Context, plan *models.ResourceK8SCPModel, c *client.Client) diag.Diagnostics {
	tflog.Info(ctx, "Enable/Disable cluster with ID", map[string]any{"k8s_id": plan.Id.ValueString()})
	diags := diag.Diagnostics{}

	k8sID, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("Cannot parse cluster ID from state", err.Error())
		return diags
	}

	if plan.Enabled.ValueBool() || plan.Enabled.IsNull() {
		_, err := c.CloudAPI().K8S().Enable(ctx, k8s.DisableEnableRequest{K8SID: k8sID})
		if err != nil {
			diags.AddError(fmt.Sprintf("Cannot enable cluster with ID - %s", plan.Id.ValueString()), err.Error())
			return diags
		}
	}
	if !plan.Enabled.ValueBool() && !plan.Enabled.IsNull() {
		_, err := c.CloudAPI().K8S().Disable(ctx, k8s.DisableEnableRequest{K8SID: k8sID})
		if err != nil {
			diags.AddError(fmt.Sprintf("Cannot disable cluster with ID - %s", plan.Id.ValueString()), err.Error())
			return diags
		}
	}

	tflog.Info(ctx, "Enable/Disable cluster successfully", map[string]any{"k8s_id": plan.Id.ValueString()})
	return diags
}
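
// K8SCPStartStop starts the cluster when Start is true or unset in the plan and
// stops it when Start is explicitly false; both actions are attempted only for
// an enabled cluster (Enabled true or unset).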
func K8SCPStartStop(ctx context.Context, plan *models.ResourceK8SCPModel, c *client.Client) diag.Diagnostics {
	tflog.Info(ctx, "Start/Stop cluster with ID", map[string]any{"k8s_id": plan.Id.ValueString()})
	diags := diag.Diagnostics{}

	k8sID, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64)
	if err != nil {
		diags.AddError("Cannot parse cluster ID from state", err.Error())
		return diags
	}

	// Start/stop only applies to an enabled cluster.
	if plan.Enabled.ValueBool() || plan.Enabled.IsNull() {
		if plan.Start.ValueBool() || plan.Start.IsNull() {
			_, err := c.CloudAPI().K8S().Start(ctx, k8s.StartRequest{K8SID: k8sID})
			if err != nil {
				diags.AddError(fmt.Sprintf("Cannot start cluster with ID - %s", plan.Id.ValueString()), err.Error())
				return diags
			}
		} else {
			_, err := c.CloudAPI().K8S().Stop(ctx, k8s.StopRequest{K8SID: k8sID})
			if err != nil {
				diags.AddError(fmt.Sprintf("Cannot stop cluster with ID - %s", plan.Id.ValueString()), err.Error())
				return diags
			}
		}
	}

	tflog.Info(ctx, "Start/Stop cluster successfully", map[string]any{"k8s_id": plan.Id.ValueString()})
	return diags
}
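
// K8SCPDeleteMaster collects compute IDs from the first two DetailedInfo
// entries in state and issues a DeleteMasterFromGroup request for the cluster's
// master group; passing the collected IDs is still marked TODO in the request.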
func K8SCPDeleteMaster(ctx context.Context, plan *models.ResourceK8SCPModel, state *models.ResourceK8SCPModel, c *client.Client) diag.Diagnostics {
	tflog.Info(ctx, "Delete Master node from cluster with ID", map[string]any{"k8s_id": plan.Id.ValueString()})
	diags := diag.Diagnostics{}

	deleteMasterComp := make([]uint64, 0)
	for i, val := range state.DetailedInfo.Elements() {
		// Only the first two detailed info entries are considered.
		if i == 2 {
			break
		}
		obj, diagsItem := types.ObjectValueFrom(ctx, models.ItemDetailedInfo, val)
		if diagsItem.HasError() {
			tflog.Error(ctx, fmt.Sprint("Error flattenDetailedInfo struct to obj", diagsItem), map[string]any{"k8s_id": plan.Id.ValueString()})
		}
		id := obj.Attributes()["compute_id"].(types.Int64).ValueInt64()
		deleteMasterComp = append(deleteMasterComp, uint64(id))
	}

	req := k8s.DeleteMasterFromGroupRequest{
		K8SID:         uint64(state.K8SID.ValueInt64()),
		MasterGroupID: uint64(state.MasterGroupId.ValueInt64()),
		//TODO fix it
		//MasterIDs: deleteMasterComp,
	}

	_, err := c.CloudAPI().K8S().DeleteMasterFromGroup(ctx, req)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot delete master node from cluster with ID - %s", plan.Id.ValueString()), err.Error())
		return diags
	}

	tflog.Info(ctx, "Delete Master node from cluster successfully", map[string]any{"k8s_id": plan.Id.ValueString()})
	return diags
}
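
// K8CPUpdateSysctlParams converts the planned LBSysctlParams key/value pairs
// into the request format and applies them to the cluster load balancer
// (state.LBID) via the LB UpdateSysctlParams call.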
func K8CPUpdateSysctlParams(ctx context.Context, plan *models.ResourceK8SCPModel, state *models.ResourceK8SCPModel, c *client.Client) diag.Diagnostics {
	tflog.Info(ctx, "Update LB parameters from cluster with ID", map[string]any{"k8s_id": plan.Id.ValueString()})
	diags := diag.Diagnostics{}

	result := make([]map[string]interface{}, 0, len(plan.LBSysctlParams.Elements()))
	for _, val := range plan.LBSysctlParams.Elements() {
		objVal := val.(types.Object)
		valMap := objVal.Attributes()
		mapKey := valMap["key"].(types.String).ValueString()
		mapVal := valMap["value"].(types.String).ValueString()
		tempMap := make(map[string]interface{})
		tempMap[mapKey] = mapVal
		result = append(result, tempMap)
	}

	req := lb.UpdateSysctParamsRequest{
		LBID:         uint64(state.LBID.ValueInt64()),
		SysctlParams: result,
	}

	_, err := c.CloudAPI().LB().UpdateSysctlParams(ctx, req)
	if err != nil {
		diags.AddError(fmt.Sprintf("Cannot update LB parameters from cluster with ID - %s", plan.Id.ValueString()), err.Error())
		return diags
	}

	tflog.Info(ctx, "Update LB parameters from cluster with ID successfully", map[string]any{"k8s_id": plan.Id.ValueString()})
	return diags
}