package cbDisks

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/path"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ resource.Resource                = &resourceDiskReplication{}
	_ resource.ResourceWithImportState = &resourceDiskReplication{}
)

// NewResourceDiskReplications is a helper function to simplify the provider implementation.
func NewResourceDiskReplications() resource.Resource {
	return &resourceDiskReplication{}
}

// resourceDiskReplication is the resource implementation.
type resourceDiskReplication struct {
	client *client.Client
}

// Create creates the resource and sets the initial Terraform state.
func (r *resourceDiskReplication) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create disk replication
	var plan models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskReplication: Error receiving the plan")
		return
	}

	tflog.Info(ctx, "Create resourceDiskReplication: got plan successfully")
	tflog.Info(ctx, "Create resourceDiskReplication: start creating")

	// Set timeouts
	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Create resourceDiskReplication: set timeouts successfully")

	ctx, cancel := context.WithTimeout(ctx, createTimeout)
	defer cancel()

	// Check if input values are valid in the platform
	tflog.Info(ctx, "Create resourceDiskReplication: starting input checks")
	resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceDiskReplication: Error input checks")
		return
	}
	tflog.Info(ctx, "Create resourceDiskReplication: input checks successful")
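
	// Build the replicate request from the plan; Replicate returns the ID of the newly created replica disk.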
	reqCreate := disks.ReplicateRequest{
		DiskID:   uint64(plan.DiskId.ValueInt64()),
		Name:     plan.Name.ValueString(),
		SepID:    uint64(plan.SepID.ValueInt64()),
		PoolName: plan.PoolName.ValueString(),
	}

	diskReplicaId, err := r.client.CloudBroker().Disks().Replicate(ctx, reqCreate)
	if err != nil {
		resp.Diagnostics.AddError(
			"Create resourceDiskReplication: unable to replicate disk",
			err.Error(),
		)
		return
	}

	tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: create replica complete for disk with ID: %d", plan.DiskId.ValueInt64()))
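
	// If start is explicitly set to false in the plan, replication is stopped immediately after the replica is created.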
	start := plan.Start.ValueBool()
	ok := !(plan.Start.IsNull() || plan.Start.IsUnknown())

	if ok && !start {
		tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d, try to stop", uint64(plan.DiskId.ValueInt64()), diskReplicaId))
		reqStop := disks.ReplicationStopRequest{
			DiskID: uint64(plan.DiskId.ValueInt64()),
		}
		_, err = r.client.CloudBroker().Disks().ReplicationStop(ctx, reqStop)
		if err != nil {
			resp.Diagnostics.AddError(
				fmt.Sprintf("resourceDiskReplicationCreate: cannot stop replication between disk with ID: %d and replica with ID: %d", uint64(plan.DiskId.ValueInt64()), diskReplicaId),
				err.Error(),
			)
			return
		}
	}

	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}

// Read refreshes the Terraform state with the latest data.
func (r *resourceDiskReplication) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Get current state
	var state models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskReplication: Error get state")
		return
	}
	tflog.Info(ctx, "Read resourceDiskReplication: got state successfully")

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read resourceDiskReplication: set timeouts successfully", map[string]any{
		"disk_id":     state.DiskId.ValueInt64(),
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Read the current replication status from the platform
	resp.Diagnostics.Append(utilities.ReplicationDiskReadStatus(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskReplication: Error reading disk status")
		return
	}

	// Overwrite items with refreshed state
	resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskReplication: Error flatten disk")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceDiskReplication: Error set state")
		return
	}
	tflog.Info(ctx, "End read resourceDiskReplication")
}

// Update updates the resource and sets the updated Terraform state on success.
func (r *resourceDiskReplication) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	// Retrieve values from plan
	var plan models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the plan")
		return
	}
	tflog.Info(ctx, "Update resourceDiskReplication: got plan successfully", map[string]any{"disk_id": plan.DiskId.ValueInt64()})

	// Retrieve values from state
	var state models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the state")
		return
	}
	tflog.Info(ctx, "Update resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()})

	// Set timeouts
	updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Update resourceDiskReplication: set timeouts successfully", map[string]any{
		"disk_id":       state.DiskId.ValueInt64(),
		"updateTimeout": updateTimeout})

	ctx, cancel := context.WithTimeout(ctx, updateTimeout)
	defer cancel()

	// Checking if inputs are valid
	tflog.Info(ctx, "Update resourceDiskReplication: starting input checks", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
	resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error input checks")
		return
	}
	tflog.Info(ctx, "Update resourceDiskReplication: input checks successful", map[string]any{"disk_id": state.DiskId.ValueInt64()})
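
	// Only the start, pause and reverse flags can change in place; each toggle is applied through its own utility call.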
	if !plan.Start.Equal(state.Start) {
		resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateStartStop(ctx, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateStartStop")
			return
		}
	}

	if !plan.Pause.Equal(state.Pause) {
		resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdatePause(ctx, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdatePause")
			return
		}
	}

	if !plan.Reverse.Equal(state.Reverse) {
		resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateReverse(ctx, &state, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateReverse")
			return
		}
	}

	tflog.Info(ctx, "Update resourceDiskReplication: disk update is completed", map[string]any{"disk_id": plan.DiskId.ValueInt64()})

	// Overwrite items with refreshed state
	resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error flatten disk")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceDiskReplication: Error set state")
		return
	}
	tflog.Info(ctx, "End update resourceDiskReplication")
}

// Delete deletes the resource and removes the Terraform state on success.
func (r *resourceDiskReplication) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// Get current state
	var state models.ResourceRecordDiskReplicationModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDiskReplication: Error get state")
		return
	}
	tflog.Info(ctx, "Delete resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()})

	// Set timeouts
	deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceDiskReplication: Error set timeout")
		return
	}
	tflog.Info(ctx, "Delete resourceDiskReplication: set timeouts successfully", map[string]any{
		"disk_id":       state.DiskId.ValueInt64(),
		"deleteTimeout": deleteTimeout})

	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
	defer cancel()
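
	// Detach and Permanently default to true; only an explicit false in state overrides them.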
	detach := true
	permanently := true

	if !state.Detach.IsNull() && !state.Detach.IsUnknown() && !state.Detach.ValueBool() {
		detach = false
	}

	if !state.Permanently.IsNull() && !state.Permanently.IsUnknown() && !state.Permanently.ValueBool() {
		permanently = false
	}

	// Delete existing disk replica
	delReq := disks.DeleteRequest{
		DiskID:      uint64(state.DiskId.ValueInt64()),
		Detach:      detach,      // default true
		Permanently: permanently, // default true
	}

	tflog.Info(ctx, "Delete resourceDiskReplication: before call CloudBroker().Disks().Delete", map[string]any{"req": delReq})
	_, err := r.client.CloudBroker().Disks().Delete(ctx, delReq)
	if err != nil {
		resp.Diagnostics.AddError("Delete resourceDiskReplication: error deleting disk", err.Error())
		return
	}

	tflog.Info(ctx, "End delete resourceDiskReplication", map[string]any{"disk_id": state.DiskId.ValueInt64()})
}

// Schema defines the schema for the resource.
func (r *resourceDiskReplication) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaResourceDiskReplication(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
		},
	}
}

// Metadata returns the resource type name.
func (r *resourceDiskReplication) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_cb_disk_replication"
}
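
// Illustrative usage sketch, assuming the provider is registered as "dynamix"
// and that the schema exposes snake_case attribute names matching the model
// fields (see schemas.MakeSchemaResourceDiskReplication for the authoritative
// definitions); all values are hypothetical:
//
//	resource "dynamix_cb_disk_replication" "example" {
//	  disk_id   = 100            # ID of the source disk to replicate
//	  name      = "disk-replica" # name of the replica disk
//	  sep_id    = 2              # target storage endpoint (SEP) ID
//	  pool_name = "pool-a"       # target pool on the SEP
//	  start     = true           # optional: false creates the replica with replication stopped
//	}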

// Configure adds the provider configured client to the resource.
func (r *resourceDiskReplication) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	tflog.Info(ctx, "Start configure resourceDiskReplication")
	r.client = client.Resource(ctx, &req, resp)
	tflog.Info(ctx, "Configure resourceDiskReplication completed successfully")
}

func (r *resourceDiskReplication) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}