|
|
package k8s
|
|
|
|
|
|
import (
|
|
|
"context"
|
|
|
"time"
|
|
|
|
|
|
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
|
|
|
"github.com/hashicorp/terraform-plugin-framework/path"
|
|
|
"github.com/hashicorp/terraform-plugin-framework/resource"
|
|
|
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
|
|
|
"github.com/hashicorp/terraform-plugin-framework/types"
|
|
|
"github.com/hashicorp/terraform-plugin-log/tflog"
|
|
|
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s"
|
|
|
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
|
|
|
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
|
|
|
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/k8s/flattens"
|
|
|
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/k8s/models"
|
|
|
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/k8s/schemas"
|
|
|
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/k8s/utilities"
|
|
|
)
|
|
|
|
|
|
// Ensure the implementation satisfies the expected interfaces.
|
|
|
var (
|
|
|
_ resource.Resource = &resourceK8SCP{}
|
|
|
_ resource.ResourceWithImportState = &resourceK8SCP{}
|
|
|
)
|
|
|
|
|
|
// NewResourceK8SCP is a helper function to simplify the provider implementation.
func NewResourceK8SCP() resource.Resource {
	return &resourceK8SCP{}
}
|
|
|
|
|
|
// resourceK8SCP is the resource implementation.
type resourceK8SCP struct {
	// client is the provider-configured API client, set in Configure.
	client *client.Client
}
|
|
|
|
|
|
// Create creates the k8s_cp cluster and sets the initial Terraform state.
func (r *resourceK8SCP) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	var plan models.ResourceK8SCPModel

	// Get plan for create cluster.
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error receiving the plan")
		return
	}

	tflog.Info(ctx, "Start create k8s_cp cluster", map[string]any{"name": plan.Name.ValueString()})

	// Set timeouts (falls back to the 30m default when none is configured).
	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout30m)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}

	ctx, cancel := context.WithTimeout(ctx, createTimeout)
	defer cancel()

	// Checking for values in the platform.
	resp.Diagnostics.Append(utilities.CheckParamsExistenceCP(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error check input values")
		return
	}

	// Make request and get response.
	task, err := r.client.CloudAPI().K8S().Create(ctx, utilities.CreateRequestResourceK8CP(ctx, &plan))
	if err != nil {
		tflog.Error(ctx, "Error response for create k8s_cp cluster")
		resp.Diagnostics.AddError("Unable to Create K8SCP", err.Error())
		return
	}

	// Check that the cluster was created and validate it using the returned task.
	resp.Diagnostics.Append(utilities.CheckResourceK8SCPCreateAndDeleteWG(ctx, &plan, r.client, task)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error create k8s_cp cluster")
		return
	}

	// Map response body to schema.
	resp.Diagnostics.Append(flattens.K8SCPResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten k8s_cp cluster")
		return
	}

	// Set state to fully populated data.
	resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set state")
		return
	}

	tflog.Info(ctx, "End create k8s_cp cluster", map[string]any{"k8s_id": plan.K8SID.ValueInt64()})
}
|
|
|
|
|
|
// Read refreshes the Terraform state with the latest data.
func (r *resourceK8SCP) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	var state models.ResourceK8SCPModel

	// Get current state.
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error get state")
		return
	}

	tflog.Info(ctx, "Start read k8s_cp cluster", map[string]any{"k8s_id": state.K8SID.ValueInt64()})

	// Set timeouts (falls back to the 600s default when none is configured).
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout600s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Read the cluster status and restore the cluster if necessary.
	resp.Diagnostics.Append(utilities.K8SCPReadStatus(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read status or restore")
		return
	}

	// Overwrite items with refreshed state.
	resp.Diagnostics.Append(flattens.K8SCPResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten k8s_cp cluster")
		return
	}

	// Set refreshed state.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set state")
		return
	}

	tflog.Info(ctx, "End read k8s_cp cluster", map[string]any{"k8s_id": state.K8SID.ValueInt64()})
}
|
|
|
|
|
|
// Update updates the resource and sets the updated Terraform state on success.
// Each mutation (rename, enable/disable, start/stop, master scale-down, LB
// params) is applied only when the corresponding plan value differs from state.
func (r *resourceK8SCP) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	var plan models.ResourceK8SCPModel
	var state models.ResourceK8SCPModel

	// Retrieve values from plan.
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error receiving the plan")
		return
	}

	// Retrieve values from state.
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error receiving the state")
		return
	}

	tflog.Info(ctx, "Start update k8s_cp cluster", map[string]any{"k8s_id": state.K8SID.ValueInt64()})

	// Set timeouts (falls back to the 600s default when none is configured).
	updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout600s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}

	ctx, cancel := context.WithTimeout(ctx, updateTimeout)
	defer cancel()

	// Checking for values in the platform.
	resp.Diagnostics.Append(utilities.CheckParamsExistenceCP(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error check input values")
		return
	}

	// Update Name or/and Description of the cluster.
	if !plan.Name.Equal(state.Name) || !plan.Description.Equal(state.Description) {
		resp.Diagnostics.Append(utilities.K8SCPUpdateNameOrDescription(ctx, &plan, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Error update info")
			return
		}
	}

	// Update Enable/Disable cluster.
	if !plan.Enabled.Equal(state.Enabled) {
		resp.Diagnostics.Append(utilities.K8SCPEnableDisable(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Error Enable/Disable")
			return
		}
	}

	// Update Start/Stop cluster.
	if !plan.Start.Equal(state.Start) {
		resp.Diagnostics.Append(utilities.K8SCPStartStop(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Error Start/Stop")
			return
		}
	}

	// Delete master node from cluster when the planned count shrinks.
	// A planned Num of 0 is explicitly skipped — presumably it means the
	// attribute is unset; confirm against the schema/utilities.
	if (plan.Num.ValueInt64() < state.Num.ValueInt64()) && (plan.Num.ValueInt64() != 0) {
		resp.Diagnostics.Append(utilities.K8SCPDeleteMaster(ctx, &plan, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Error Delete master node")
			return
		}
	}

	// Update LB sysctl params. Applied only when the cluster has an LB
	// (WithLB null or true) and the params actually changed.
	if (state.WithLB.IsNull() || state.WithLB.ValueBool()) && !plan.LBSysctlParams.Equal(state.LBSysctlParams) {
		resp.Diagnostics.Append(utilities.K8CPUpdateSysctlParams(ctx, &plan, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Error Update LB params")
			return
		}
	}

	// Map response body to schema.
	resp.Diagnostics.Append(flattens.K8SCPResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error flatten k8s_cp cluster")
		return
	}

	// Set data last update.
	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))

	// Set state to fully populated data.
	resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set state")
		return
	}

	tflog.Info(ctx, "End update k8s_cp cluster", map[string]any{"k8s_id": state.K8SID.ValueInt64()})
}
|
|
|
|
|
|
// Delete deletes the resource and removes the Terraform state on success.
|
|
|
func (r *resourceK8SCP) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
|
|
|
// Get current state
|
|
|
var state models.ResourceK8SCPModel
|
|
|
resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
|
|
|
if resp.Diagnostics.HasError() {
|
|
|
tflog.Error(ctx, "Error get state")
|
|
|
return
|
|
|
}
|
|
|
|
|
|
tflog.Info(ctx, "Start delete k8s_cp cluster", map[string]any{"k8s_id": state.K8SID.ValueInt64()})
|
|
|
|
|
|
// Set timeouts
|
|
|
deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout600s)
|
|
|
resp.Diagnostics.Append(diags...)
|
|
|
if resp.Diagnostics.HasError() {
|
|
|
tflog.Error(ctx, "Error set timeout")
|
|
|
return
|
|
|
}
|
|
|
|
|
|
ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
|
|
|
defer cancel()
|
|
|
|
|
|
var permanently bool
|
|
|
if state.Permanently.IsNull() {
|
|
|
permanently = true
|
|
|
} else {
|
|
|
permanently = state.Permanently.ValueBool()
|
|
|
}
|
|
|
|
|
|
// Delete existing k8s_cp cluster
|
|
|
_, err := r.client.CloudAPI().K8S().Delete(ctx, k8s.DeleteRequest{K8SID: uint64(state.K8SID.ValueInt64()), Permanently: permanently})
|
|
|
if err != nil {
|
|
|
resp.Diagnostics.AddError("Error deleting k8s_cp cluster with error: ", err.Error())
|
|
|
return
|
|
|
}
|
|
|
|
|
|
tflog.Info(ctx, "End delete k8s_cp cluster", map[string]any{"k8s_id": state.K8SID.ValueInt64()})
|
|
|
}
|
|
|
|
|
|
// Schema defines the schema for the resource.
|
|
|
func (r *resourceK8SCP) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
|
|
|
resp.Schema = schema.Schema{
|
|
|
Attributes: schemas.MakeSchemaResourceK8SCP(),
|
|
|
Blocks: map[string]schema.Block{
|
|
|
"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
|
|
|
},
|
|
|
Version: 1,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
// Metadata returns the resource type name.
|
|
|
func (r *resourceK8SCP) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
|
|
|
resp.TypeName = req.ProviderTypeName + "_k8s_cp"
|
|
|
}
|
|
|
|
|
|
// Configure adds the provider configured client to the resource.
func (r *resourceK8SCP) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	tflog.Info(ctx, "Get configure resource")
	// client.Resource extracts the configured API client from the request;
	// any configuration problem is reported through resp by that helper.
	r.client = client.Resource(ctx, &req, resp)
	tflog.Info(ctx, "Getting configure resource successfully")
}
|
|
|
|
|
|
// ImportState imports an existing k8s_cp cluster into Terraform state by its ID.
func (r *resourceK8SCP) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute.
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}
|