// Package utilities provides helper functions for managing disk resources
// through the DECORT cloud API in the dynamix terraform provider.
package utilities
import (
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
	"github.com/hashicorp/terraform-plugin-log/tflog"

	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status"
)
// DiskCheckPresence checks if disk with diskId exists
3 weeks ago
func DiskCheckPresence(ctx context.Context, diskId uint64, c *client.Client) (*disks.RecordDisk, error) {
7 months ago
tflog.Info(ctx, fmt.Sprintf("Get info about disk with ID - %v", diskId))
diskRecord, err := c.CloudAPI().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
if err != nil {
return nil, fmt.Errorf("cannot get info about disk with error: %w", err)
}
tflog.Info(ctx, "DiskCheckPresence resourceDisk: response from CloudAPI().Disks().Get", map[string]any{"disk_id": diskId, "response": diskRecord})
return diskRecord, err
}
// CreateRequestResourceDisk builds a disks.CreateRequest from the Terraform plan.
// Required fields are always copied; optional fields are copied only when the
// plan provides a known value.
func CreateRequestResourceDisk(ctx context.Context, plan *models.ResourceDiskModel) disks.CreateRequest {
	tflog.Info(ctx, "Start CreateRequestResourceDisk", map[string]any{
		"account_id": plan.AccountID.ValueInt64(),
		"disk_name":  plan.DiskName.ValueString(),
		"size_max":   plan.SizeMax.ValueInt64(),
		"gid":        plan.GID.ValueInt64(),
	})

	// Required parameters of the disk create request.
	req := disks.CreateRequest{
		AccountID: uint64(plan.AccountID.ValueInt64()),
		Name:      plan.DiskName.ValueString(),
		Size:      uint64(plan.SizeMax.ValueInt64()),
		GID:       uint64(plan.GID.ValueInt64()),
	}

	// Disk type defaults to "D" when the plan leaves it unknown.
	req.Type = "D"
	if !plan.Type.IsUnknown() {
		req.Type = plan.Type.ValueString()
	}

	if !plan.SEPID.IsUnknown() {
		req.SEPID = uint64(plan.SEPID.ValueInt64())
	}
	if !plan.Pool.IsUnknown() {
		req.Pool = plan.Pool.ValueString()
	}
	if !plan.Description.IsUnknown() {
		req.Description = plan.Description.ValueString()
	}

	return req
}
// LimitIOCreateDisk sets IO limits that user specified in iotune field for created resource.
// In case of failure returns warnings.
3 weeks ago
func LimitIOCreateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
7 months ago
diags := diag.Diagnostics{}
limitIOReq := disks.LimitIORequest{
DiskID: diskId,
}
var iotunePlan models.IOTuneModel
// plan.IOTune is not null as it was checked before call
tflog.Info(ctx, "LimitIOCreateDisk: new iotune specified", map[string]any{"disk_id": diskId})
diags.Append(plan.IOTune.As(ctx, &iotunePlan, basetypes.ObjectAsOptions{})...)
if diags.HasError() {
tflog.Error(ctx, "LimitIOCreateDisk: cannot populate iotune with plan.IOTune object element")
return diags
}
limitIOReq.IOPS = uint64(iotunePlan.TotalIOPSSec.ValueInt64())
limitIOReq.ReadBytesSec = uint64(iotunePlan.ReadBytesSec.ValueInt64())
limitIOReq.ReadBytesSecMax = uint64(iotunePlan.ReadBytesSecMax.ValueInt64())
limitIOReq.ReadIOPSSec = uint64(iotunePlan.ReadIOPSSec.ValueInt64())
limitIOReq.ReadIOPSSecMax = uint64(iotunePlan.ReadIOPSSecMax.ValueInt64())
limitIOReq.SizeIOPSSec = uint64(iotunePlan.SizeIOPSSec.ValueInt64())
limitIOReq.TotalBytesSec = uint64(iotunePlan.TotalBytesSec.ValueInt64())
limitIOReq.TotalBytesSecMax = uint64(iotunePlan.TotalBytesSecMax.ValueInt64())
limitIOReq.TotalIOPSSecMax = uint64(iotunePlan.TotalIOPSSecMax.ValueInt64())
limitIOReq.TotalIOPSSec = uint64(iotunePlan.TotalIOPSSec.ValueInt64())
limitIOReq.WriteBytesSec = uint64(iotunePlan.WriteBytesSec.ValueInt64())
limitIOReq.WriteBytesSecMax = uint64(iotunePlan.WriteBytesSecMax.ValueInt64())
limitIOReq.WriteIOPSSec = uint64(iotunePlan.WriteIOPSSec.ValueInt64())
limitIOReq.WriteIOPSSecMax = uint64(iotunePlan.WriteIOPSSecMax.ValueInt64())
tflog.Info(ctx, "LimitIOCreateDisk: before calling CloudAPI().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"limitIOReq": limitIOReq})
res, err := c.CloudAPI().Disks().LimitIO(ctx, limitIOReq)
if err != nil {
diags.AddWarning("LimitIOCreateDisk: Unable to limit io for Disk",
err.Error())
}
tflog.Info(ctx, "LimitIOCreateDisk: response from CloudAPI().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"response": res})
return diags
}
// ShareableCreateDisk shares disk.
// In case of failure returns warnings.
3 weeks ago
func ShareableCreateDisk(ctx context.Context, diskId uint64, c *client.Client) diag.Diagnostics {
7 months ago
diags := diag.Diagnostics{}
tflog.Info(ctx, "ShareableCreateDisk: before calling CloudAPI().Disks().Share", map[string]any{"disk_id": diskId})
res, err := c.CloudAPI().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
if err != nil {
diags.AddWarning("ShareableCreateDisk: Unable to share Disk",
err.Error())
}
tflog.Info(ctx, "ShareableCreateDisk: response from CloudAPI().Disks().Share", map[string]any{
"disk_id": diskId,
"response": res})
return diags
}
// DiskReadStatus loads disk resource by ids id, gets it current status. Performs restore and enable if needed for
// Deleted status.
// In case of failure returns errors.
3 weeks ago
func DiskReadStatus(ctx context.Context, state *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
7 months ago
tflog.Info(ctx, "DiskReadStatus: Read status disk with ID", map[string]any{"disk_id": state.Id.ValueString()})
diags := diag.Diagnostics{}
diskId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
if err != nil {
diags.AddError("DiskReadStatus: Cannot parse disk ID from state", err.Error())
return diags
}
recordDisk, err := DiskCheckPresence(ctx, diskId, c)
if err != nil {
diags.AddError("DiskReadStatus: Unable to Read Disk before status check", err.Error())
return diags
}
// check resource status
switch recordDisk.Status {
case status.Modeled:
diags.AddError(
"Disk is in status Modeled",
"please, contact support for more information",
)
return diags
case status.Deleted:
// attempt to restore disk
tflog.Info(ctx, "DiskReadStatus: disk with status.Deleted is being read, attempt to restore it", map[string]any{
"disk_id": recordDisk.ID,
"status": recordDisk.Status})
diags.Append(RestoreDisk(ctx, diskId, c)...)
if diags.HasError() {
tflog.Error(ctx, "DiskReadStatus: cannot restore disk")
return diags
}
tflog.Info(ctx, "DiskReadStatus: disk restored successfully", map[string]any{"disk_id": diskId})
state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
case status.Destroyed, status.Purged:
diags.AddError(
"DiskReadStatus: Disk is in status Destroyed or Purged",
fmt.Sprintf("the resource with disk_id %d cannot be read because it has been destroyed or purged", diskId),
)
return diags
}
return nil
}
// RestoreDisk performs disk Restore request.
// Returns error in case of failures.
3 weeks ago
func RestoreDisk(ctx context.Context, diskId uint64, c *client.Client) diag.Diagnostics {
7 months ago
diags := diag.Diagnostics{}
restoreReq := disks.RestoreRequest{
DiskID: diskId,
}
tflog.Info(ctx, "RestoreDisk: before calling CloudAPI().Disks().Restore", map[string]any{"diskId": diskId, "req": restoreReq})
res, err := c.CloudAPI().Disks().Restore(ctx, restoreReq)
if err != nil {
diags.AddError(
"RestoreDisk: cannot restore disk",
err.Error(),
)
return diags
}
tflog.Info(ctx, "RestoreDisk: response from CloudAPI().Disks().Restore", map[string]any{"disk_id": diskId, "response": res})
return nil
}
// SizeMaxUpdateDisk resizes disk.
// Returns error in case of failures.
3 weeks ago
func SizeMaxUpdateDisk(ctx context.Context, diskId uint64, plan, state *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
7 months ago
var diags diag.Diagnostics
resizeReq := disks.ResizeRequest{
DiskID: diskId,
}
// check if resize request is valid
if plan.SizeMax.ValueInt64() < state.SizeMax.ValueInt64() {
diags.AddError(
"SizeMaxUpdateDisk: reducing disk size is not allowed",
fmt.Sprintf("disk with id %s has state size %d, plan size %d",
plan.Id.ValueString(),
state.SizeMax.ValueInt64(),
plan.SizeMax.ValueInt64()))
return diags
}
resizeReq.Size = uint64(plan.SizeMax.ValueInt64())
tflog.Info(ctx, "SizeMaxUpdateDisk: before calling CloudAPI().Disks().Resize2", map[string]any{
"disk_id": plan.Id.ValueString(),
"size_max_state": state.SizeMax.ValueInt64(),
"size_max_plan": plan.SizeMax.ValueInt64(),
"req": resizeReq,
})
res, err := c.CloudAPI().Disks().Resize2(ctx, resizeReq)
if err != nil {
diags.AddError("can not resize disk", err.Error())
return diags
}
tflog.Info(ctx, "SizeMaxUpdateDisk: response from CloudAPI().Disks().Resize2", map[string]any{
"disk_id": plan.Id.ValueString(),
"response": res})
return nil
}
// NameUpdateDisk renames disk.
// Returns error in case of failures.
3 weeks ago
func NameUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
7 months ago
var diags diag.Diagnostics
renameReq := disks.RenameRequest{
DiskID: diskId,
Name: plan.DiskName.ValueString(),
}
tflog.Info(ctx, "NameUpdateDisk: before calling CloudAPI().Disks().Rename", map[string]any{
"disk_id": plan.Id.ValueString(),
"disk_name_plan": plan.DiskName.ValueString(),
"req": renameReq,
})
res, err := c.CloudAPI().Disks().Rename(ctx, renameReq)
if err != nil {
diags.AddError("NameUpdateDisk: can not rename disk", err.Error())
return diags
}
tflog.Info(ctx, "NameUpdateDisk: response from CloudAPI().Disks().Rename", map[string]any{
"disk_id": plan.Id.ValueString(),
"response": res})
return nil
}
// LimitIOUpdateDisk changes IO limits that user specified in iotune field for updated resource.
// In case of failure returns errors.
3 weeks ago
func LimitIOUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
7 months ago
diags := diag.Diagnostics{}
limitIOReq := disks.LimitIORequest{
DiskID: diskId,
}
var iotunePlan models.IOTuneModel
// plan.IOTune is not null as it was checked before call
tflog.Info(ctx, "LimitIOUpdateDisk: new iotune specified", map[string]any{"disk_id": diskId})
diags.Append(plan.IOTune.As(ctx, &iotunePlan, basetypes.ObjectAsOptions{})...)
if diags.HasError() {
tflog.Error(ctx, "LimitIOUpdateDisk: cannot populate iotune with plan.IOTune object element")
return diags
}
limitIOReq.IOPS = uint64(iotunePlan.TotalIOPSSec.ValueInt64())
limitIOReq.ReadBytesSec = uint64(iotunePlan.ReadBytesSec.ValueInt64())
limitIOReq.ReadBytesSecMax = uint64(iotunePlan.ReadBytesSecMax.ValueInt64())
limitIOReq.ReadIOPSSec = uint64(iotunePlan.ReadIOPSSec.ValueInt64())
limitIOReq.ReadIOPSSecMax = uint64(iotunePlan.ReadIOPSSecMax.ValueInt64())
limitIOReq.SizeIOPSSec = uint64(iotunePlan.SizeIOPSSec.ValueInt64())
limitIOReq.TotalBytesSec = uint64(iotunePlan.TotalBytesSec.ValueInt64())
limitIOReq.TotalBytesSecMax = uint64(iotunePlan.TotalBytesSecMax.ValueInt64())
limitIOReq.TotalIOPSSecMax = uint64(iotunePlan.TotalIOPSSecMax.ValueInt64())
limitIOReq.TotalIOPSSec = uint64(iotunePlan.TotalIOPSSec.ValueInt64())
limitIOReq.WriteBytesSec = uint64(iotunePlan.WriteBytesSec.ValueInt64())
limitIOReq.WriteBytesSecMax = uint64(iotunePlan.WriteBytesSecMax.ValueInt64())
limitIOReq.WriteIOPSSec = uint64(iotunePlan.WriteIOPSSec.ValueInt64())
limitIOReq.WriteIOPSSecMax = uint64(iotunePlan.WriteIOPSSecMax.ValueInt64())
tflog.Info(ctx, "LimitIOUpdateDisk: before calling CloudAPI().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"limitIOReq": limitIOReq})
res, err := c.CloudAPI().Disks().LimitIO(ctx, limitIOReq)
if err != nil {
diags.AddError("LimitIOUpdateDisk: Unable to limit io for Disk",
err.Error())
return diags
}
tflog.Info(ctx, "LimitIOUpdateDisk: response from CloudAPI().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"response": res})
return nil
}
// ShareableUpdateDisk shares or unshares disk.
// In case of failure returns errors.
3 weeks ago
func ShareableUpdateDisk(ctx context.Context, diskId uint64, share bool, c *client.Client) diag.Diagnostics {
7 months ago
diags := diag.Diagnostics{}
// share
if share {
tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudAPI().Disks().Share", map[string]any{"disk_id": diskId})
res, err := c.CloudAPI().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
if err != nil {
diags.AddError("ShareableUpdateDisk: Unable to share Disk",
err.Error())
return diags
}
tflog.Info(ctx, "ShareableUpdateDisk: response from CloudAPI().Disks().Share", map[string]any{
"disk_id": diskId,
"response": res})
}
// unshare
if !share {
tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudAPI().Disks().Unshare", map[string]any{"disk_id": diskId})
res, err := c.CloudAPI().Disks().Unshare(ctx, disks.UnshareRequest{DiskID: diskId})
if err != nil {
diags.AddError("ShareableUpdateDisk: Unable to unshare Disk",
err.Error())
return diags
}
tflog.Info(ctx, "ShareableUpdateDisk: response from CloudAPI().Disks().Unshare", map[string]any{
"disk_id": diskId,
"response": res})
}
return nil
}