package cbDisks

import (
	"context"
	"reflect"
	"strconv"
	"time"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/path"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ resource.Resource                = &resourceDisk{}
	_ resource.ResourceWithImportState = &resourceDisk{}
)

// NewResourceDisk is a helper function to simplify the provider implementation.
func NewResourceDisk() resource.Resource {
	return &resourceDisk{}
}
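// Example configuration for this resource (an illustrative sketch only: the
// "dynamix_cb_disk" type name assumes the provider is registered as "dynamix",
// and the attribute names are inferred from this file, so verify them against
// the schemas package before use):
//
//	resource "dynamix_cb_disk" "disk" {
//	  account_id = 111
//	  gid        = 212
//	  disk_name  = "example-disk"
//	  size_max   = 10
//
//	  timeouts {
//	    create = "10m"
//	  }
//	}
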
// resourceDisk is the resource implementation.
type resourceDisk struct {
	client *client.Client
}

// Create creates the resource and sets the initial Terraform state.
func (r *resourceDisk) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create disk
	var plan models.ResourceDiskModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDisk: Error receiving the plan")
		return
	}

	contextCreateMap := map[string]any{
		"account_id": plan.AccountID.ValueInt64(),
		"disk_name":  plan.DiskName.ValueString(),
		"size_max":   plan.SizeMax.ValueInt64(),
		"gid":        plan.GID.ValueInt64(),
	}
	tflog.Info(ctx, "Create resourceDisk: start creating", contextCreateMap)

	// Set timeouts
	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Create resourceDisk: set timeouts successfully", map[string]any{
		"account_id":    plan.AccountID.ValueInt64(),
		"disk_name":     plan.DiskName.ValueString(),
		"size_max":      plan.SizeMax.ValueInt64(),
		"gid":           plan.GID.ValueInt64(),
		"createTimeout": createTimeout})

	ctx, cancel := context.WithTimeout(ctx, createTimeout)
	defer cancel()

	// Check if input values are valid in the platform
	tflog.Info(ctx, "Create resourceDisk: starting input checks", contextCreateMap)
	resp.Diagnostics.Append(resourceDiskCreateInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDisk: Error input checks")
		return
	}
	tflog.Info(ctx, "Create resourceDisk: input checks successful", contextCreateMap)

	// Make create request and get response
	createReq := utilities.CreateRequestResourceDisk(ctx, &plan)
	tflog.Info(ctx, "Create resourceDisk: before call CloudBroker().Disks().Create", map[string]any{"req": createReq})
	diskId, err := r.client.CloudBroker().Disks().Create(ctx, createReq)
	if err != nil {
		resp.Diagnostics.AddError(
			"Create resourceDisk: unable to Create Disk",
			err.Error(),
		)
		return
	}
	plan.Id = types.StringValue(strconv.Itoa(int(diskId)))
	tflog.Info(ctx, "Create resourceDisk: disk created", map[string]any{"diskId": diskId, "disk_name": plan.DiskName.ValueString()})

	// Additional settings after disk creation: if they fail, warnings are added to resp.Diagnostics
	// rather than errors, because a failure in these settings is not critical. If errors were added
	// instead, the Terraform framework would mark the resource as tainted and delete it, which would
	// be unwanted behaviour.

	if !plan.NodeIDs.IsUnknown() {
		resp.Diagnostics.Append(utilities.ResourceDiskChangeNodes(ctx, diskId, nil, &plan, true, r.client)...)
	}

	if !plan.IOTune.IsUnknown() {
		resp.Diagnostics.Append(utilities.LimitIOCreateDisk(ctx, diskId, &plan, r.client)...)
	}

	if !plan.Shareable.IsUnknown() && plan.Shareable.ValueBool() { // if shareable = true
		resp.Diagnostics.Append(utilities.ShareableCreateDisk(ctx, diskId, r.client)...)
	}

	tflog.Info(ctx, "Create resourceDisk: resource creation is completed", map[string]any{"disk_id": diskId})

	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// Set data last update
	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))

	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}

// Read refreshes the Terraform state with the latest data.
func (r *resourceDisk) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Get current state
	var state models.ResourceDiskModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error get state")
		return
	}
	tflog.Info(ctx, "Read resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read resourceDisk: set timeouts successfully", map[string]any{
		"disk_id":     state.Id.ValueString(),
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Read status
	resp.Diagnostics.Append(utilities.DiskReadStatus(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error reading disk status")
		return
	}

	// Overwrite items with refreshed state
	resp.Diagnostics.Append(flattens.DiskResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error flatten disk")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDisk: Error set state")
		return
	}
	tflog.Info(ctx, "End read resourceDisk")
}

// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceDisk) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	// Retrieve values from plan
	var plan models.ResourceDiskModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error receiving the plan")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: got plan successfully", map[string]any{"disk_id": plan.Id.ValueString()})

	// Retrieve values from state
	var state models.ResourceDiskModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error receiving the state")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})

	// Set timeouts
	updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: set timeouts successfully", map[string]any{
		"disk_id":       state.Id.ValueString(),
		"updateTimeout": updateTimeout})

	ctx, cancel := context.WithTimeout(ctx, updateTimeout)
	defer cancel()

	// Checking if inputs are valid
	tflog.Info(ctx, "Update resourceDisk: starting input checks", map[string]any{"disk_id": plan.Id.ValueString()})
	resp.Diagnostics.Append(resourceDiskUpdateInputChecks(ctx, &plan, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDisk: Error input checks")
		return
	}
	tflog.Info(ctx, "Update resourceDisk: input checks successful", map[string]any{"disk_id": state.Id.ValueString()})

	diskId, err := strconv.Atoi(state.Id.ValueString())
	if err != nil {
		resp.Diagnostics.AddError("Update resourceDisk: Cannot parse disk ID from state", err.Error())
		return
	}

	// resize disk
	if !plan.SizeMax.Equal(state.SizeMax) {
		resp.Diagnostics.Append(utilities.SizeMaxUpdateDisk(ctx, uint64(diskId), &plan, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error resizing disk")
			return
		}
	}

	// rename disk
	if !plan.DiskName.Equal(state.DiskName) {
		resp.Diagnostics.Append(utilities.NameUpdateDisk(ctx, uint64(diskId), &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error renaming disk")
			return
		}
	}

	// change io limits
	if !reflect.DeepEqual(plan.IOTune, state.IOTune) && !plan.IOTune.IsUnknown() {
		resp.Diagnostics.Append(utilities.LimitIOUpdateDisk(ctx, uint64(diskId), &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error setting new io limits to disk")
			return
		}
	}

	// share/unshare disk
	if !plan.Shareable.Equal(state.Shareable) && !plan.Shareable.IsUnknown() {
		resp.Diagnostics.Append(utilities.ShareableUpdateDisk(ctx, uint64(diskId), plan.Shareable.ValueBool(), r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDisk: Error with disk share/unshare")
			return
		}
	}

	// update nodeIDs
	if !plan.NodeIDs.Equal(state.NodeIDs) && !plan.NodeIDs.IsUnknown() {
		resp.Diagnostics.Append(utilities.ResourceDiskChangeNodes(ctx, uint64(diskId), &state, &plan, false, r.client)...)
		if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceDiskUpdate: Error with update nodeIDs")
|
|
			return
		}
	}

	tflog.Info(ctx, "Update resourceDisk: disk update is completed", map[string]any{"disk_id": plan.Id.ValueString()})

	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// Set data last update
	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))

	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}

// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceDisk) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// Get current state
	var state models.ResourceDiskModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDisk: Error get state")
		return
	}
	tflog.Info(ctx, "Delete resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()})

	// Set timeouts
	deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDisk: Error set timeout")
		return
	}
	tflog.Info(ctx, "Delete resourceDisk: set timeouts successfully", map[string]any{
		"disk_id":       state.Id.ValueString(),
		"deleteTimeout": deleteTimeout})

	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
	defer cancel()

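	// Detach and Permanently both default to true when they are not set in the
	// configuration. Judging by the decort SDK's DeleteRequest (an assumption,
	// not verified here), "detach" detaches the disk from any compute it is
	// attached to before deletion, and "permanently" destroys the disk outright
	// instead of moving it to the recycle bin.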
	detach := true
	permanently := true

	if !state.Detach.IsNull() && !state.Detach.IsUnknown() && !state.Detach.ValueBool() {
		detach = false
	}

	if !state.Permanently.IsNull() && !state.Permanently.IsUnknown() && !state.Permanently.ValueBool() {
		permanently = false
	}

	// Delete existing disk
	delReq := disks.DeleteRequest{
		DiskID:      uint64(state.DiskId.ValueInt64()),
		Detach:      detach,      // default true
		Permanently: permanently, // default true
	}

tflog.Info(ctx, "Delete resourceDisk: before call CloudBroker().Disks().Delete", map[string]any{"req": delReq})
|
|
_, err := r.client.CloudBroker().Disks().Delete(ctx, delReq)
|
|
if err != nil {
|
|
resp.Diagnostics.AddError("Delete resourceDisk: Error deleting disk with error: ", err.Error())
|
|
return
|
|
}
|
|
|
|
tflog.Info(ctx, "End delete resourceDisk", map[string]any{"disk_id": state.Id.ValueString()})
|
|
}
|
|
|
|
// Schema defines the schema for the resource.
func (r *resourceDisk) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaResourceDisk(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
		},
	}
}

// Metadata returns the resource type name.
func (r *resourceDisk) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk"
}

// Configure adds the provider configured client to the resource.
func (r *resourceDisk) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	tflog.Info(ctx, "Configure resourceDisk: started")
	r.client = client.Resource(ctx, &req, resp)
	tflog.Info(ctx, "Configure resourceDisk: finished successfully")
}

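// ImportState lets an existing disk be adopted into Terraform state by its ID.
// Assuming the provider type name is "dynamix", the import command would look
// roughly like this (illustrative, not verified):
//
//	terraform import dynamix_cb_disk.disk <disk_id>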
func (r *resourceDisk) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}