asteam
2025-07-01 13:44:09 +03:00
parent 5382579a5f
commit ddbb12996d
1041 changed files with 2842 additions and 96448 deletions


@@ -17,16 +17,16 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
)
-// DiskCheckPresence checks if disk with diskId exists
-func DiskCheckPresence(ctx context.Context, diskId uint64, c *client.Client) (*disks.RecordDisk, error) {
-tflog.Info(ctx, fmt.Sprintf("Get info about disk with ID - %v", diskId))
+// DiskCheckPresence checks if disk with diskID exists
+func DiskCheckPresence(ctx context.Context, diskID uint64, c *client.Client) (*disks.RecordDisk, error) {
+tflog.Info(ctx, fmt.Sprintf("Get info about disk with ID - %v", diskID))
-diskRecord, err := c.CloudAPI().Disks().Get(ctx, disks.GetRequest{DiskID: diskId})
+diskRecord, err := c.CloudAPI().Disks().Get(ctx, disks.GetRequest{DiskID: diskID})
if err != nil {
return nil, fmt.Errorf("cannot get info about disk with error: %w", err)
}
tflog.Info(ctx, "DiskCheckPresence resourceDisk: response from CloudAPI().Disks().Get", map[string]any{"disk_id": diskId, "response": diskRecord})
tflog.Info(ctx, "DiskCheckPresence resourceDisk: response from CloudAPI().Disks().Get", map[string]any{"disk_id": diskID, "response": diskRecord})
return diskRecord, err
}
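For orientation, a minimal sketch of how DiskCheckPresence is driven from the resource code: Terraform keeps the disk ID in state as a string, so callers parse it to uint64 first (the same pattern appears in DiskReadStatus below). The wrapper name readDiskExample is illustrative and not part of this change.
// Hypothetical caller; assumes the same package and imports as the file in this diff.
func readDiskExample(ctx context.Context, state *models.ResourceDiskModel, c *client.Client) (*disks.RecordDisk, error) {
    // State stores the disk ID as a string; the helper expects uint64.
    diskID, err := strconv.ParseUint(state.ID.ValueString(), 10, 64)
    if err != nil {
        return nil, fmt.Errorf("cannot parse disk ID from state: %w", err)
    }
    return DiskCheckPresence(ctx, diskID, c)
}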
@@ -37,7 +37,6 @@ func CreateRequestResourceDisk(ctx context.Context, plan *models.ResourceDiskMod
"account_id": plan.AccountID.ValueInt64(),
"disk_name": plan.DiskName.ValueString(),
"size_max": plan.SizeMax.ValueInt64(),
"gid": plan.GID.ValueInt64(),
})
// set up required parameters in disk create request
@@ -45,14 +44,8 @@ func CreateRequestResourceDisk(ctx context.Context, plan *models.ResourceDiskMod
AccountID: uint64(plan.AccountID.ValueInt64()),
Name: plan.DiskName.ValueString(),
Size: uint64(plan.SizeMax.ValueInt64()),
-GID: uint64(plan.GID.ValueInt64()),
}
-if plan.Type.IsUnknown() {
-createReq.Type = "D" // default value
-} else {
-createReq.Type = plan.Type.ValueString()
-}
if !plan.SEPID.IsUnknown() {
createReq.SEPID = uint64(plan.SEPID.ValueInt64())
}
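Read consecutively, the create request after this change is built only from the attributes still referenced in the hunk; a condensed sketch of the resulting construction (field names as they appear above; the disks.CreateRequest type name is an assumption, since the struct literal opening lies outside this hunk).
// Post-change shape of the request: GID and the Type default are no longer set here.
createReq := disks.CreateRequest{
    AccountID: uint64(plan.AccountID.ValueInt64()),
    Name:      plan.DiskName.ValueString(),
    Size:      uint64(plan.SizeMax.ValueInt64()),
}
// SEPID stays optional and is only set when the plan provides it.
if !plan.SEPID.IsUnknown() {
    createReq.SEPID = uint64(plan.SEPID.ValueInt64())
}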
@@ -68,16 +61,16 @@ func CreateRequestResourceDisk(ctx context.Context, plan *models.ResourceDiskMod
// LimitIOCreateDisk sets IO limits that user specified in iotune field for created resource.
// In case of failure returns warnings.
-func LimitIOCreateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
+func LimitIOCreateDisk(ctx context.Context, diskID uint64, plan *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
diags := diag.Diagnostics{}
limitIOReq := disks.LimitIORequest{
-DiskID: diskId,
+DiskID: diskID,
}
var iotunePlan models.IOTuneModel
// plan.IOTune is not null as it was checked before call
tflog.Info(ctx, "LimitIOCreateDisk: new iotune specified", map[string]any{"disk_id": diskId})
tflog.Info(ctx, "LimitIOCreateDisk: new iotune specified", map[string]any{"disk_id": diskID})
diags.Append(plan.IOTune.As(ctx, &iotunePlan, basetypes.ObjectAsOptions{})...)
if diags.HasError() {
tflog.Error(ctx, "LimitIOCreateDisk: cannot populate iotune with plan.IOTune object element")
@@ -103,7 +96,7 @@ func LimitIOCreateDisk(ctx context.Context, diskId uint64, plan *models.Resource
limitIOReq.WriteIOPSSecMax = uint64(iotunePlan.WriteIOPSSecMax.ValueInt64())
tflog.Info(ctx, "LimitIOCreateDisk: before calling CloudAPI().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"disk_id": diskID,
"limitIOReq": limitIOReq})
res, err := c.CloudAPI().Disks().LimitIO(ctx, limitIOReq)
if err != nil {
@@ -111,7 +104,7 @@ func LimitIOCreateDisk(ctx context.Context, diskId uint64, plan *models.Resource
err.Error())
}
tflog.Info(ctx, "LimitIOCreateDisk: response from CloudAPI().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"disk_id": diskID,
"response": res})
return diags
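Since LimitIOCreateDisk reports failures as warnings only, the create flow can append its diagnostics without aborting; a hedged sketch of a call site (the resp.Diagnostics handle and the IsNull guard are assumptions, inferred from the comment that plan.IOTune was checked before the call).
// Hypothetical call site inside the resource Create method; names are illustrative.
if !plan.IOTune.IsNull() {
    // Warnings only: the disk is considered created even if LimitIO fails.
    resp.Diagnostics.Append(LimitIOCreateDisk(ctx, diskID, &plan, c)...)
}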
@@ -119,17 +112,17 @@ func LimitIOCreateDisk(ctx context.Context, diskId uint64, plan *models.Resource
// ShareableCreateDisk shares disk.
// In case of failure returns warnings.
-func ShareableCreateDisk(ctx context.Context, diskId uint64, c *client.Client) diag.Diagnostics {
+func ShareableCreateDisk(ctx context.Context, diskID uint64, c *client.Client) diag.Diagnostics {
diags := diag.Diagnostics{}
tflog.Info(ctx, "ShareableCreateDisk: before calling CloudAPI().Disks().Share", map[string]any{"disk_id": diskId})
res, err := c.CloudAPI().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
tflog.Info(ctx, "ShareableCreateDisk: before calling CloudAPI().Disks().Share", map[string]any{"disk_id": diskID})
res, err := c.CloudAPI().Disks().Share(ctx, disks.ShareRequest{DiskID: diskID})
if err != nil {
diags.AddWarning("ShareableCreateDisk: Unable to share Disk",
err.Error())
}
tflog.Info(ctx, "ShareableCreateDisk: response from CloudAPI().Disks().Share", map[string]any{
"disk_id": diskId,
"disk_id": diskID,
"response": res})
return diags
@@ -139,17 +132,17 @@ func ShareableCreateDisk(ctx context.Context, diskId uint64, c *client.Client) d
// Deleted status.
// In case of failure returns errors.
func DiskReadStatus(ctx context.Context, state *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
tflog.Info(ctx, "DiskReadStatus: Read status disk with ID", map[string]any{"disk_id": state.Id.ValueString()})
tflog.Info(ctx, "DiskReadStatus: Read status disk with ID", map[string]any{"disk_id": state.ID.ValueString()})
diags := diag.Diagnostics{}
-diskId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64)
+diskID, err := strconv.ParseUint(state.ID.ValueString(), 10, 64)
if err != nil {
diags.AddError("DiskReadStatus: Cannot parse disk ID from state", err.Error())
return diags
}
-recordDisk, err := DiskCheckPresence(ctx, diskId, c)
+recordDisk, err := DiskCheckPresence(ctx, diskID, c)
if err != nil {
diags.AddError("DiskReadStatus: Unable to Read Disk before status check", err.Error())
return diags
@@ -168,17 +161,17 @@ func DiskReadStatus(ctx context.Context, state *models.ResourceDiskModel, c *cli
tflog.Info(ctx, "DiskReadStatus: disk with status.Deleted is being read, attempt to restore it", map[string]any{
"disk_id": recordDisk.ID,
"status": recordDisk.Status})
-diags.Append(RestoreDisk(ctx, diskId, c)...)
+diags.Append(RestoreDisk(ctx, diskID, c)...)
if diags.HasError() {
tflog.Error(ctx, "DiskReadStatus: cannot restore disk")
return diags
}
tflog.Info(ctx, "DiskReadStatus: disk restored successfully", map[string]any{"disk_id": diskId})
tflog.Info(ctx, "DiskReadStatus: disk restored successfully", map[string]any{"disk_id": diskID})
state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
case status.Destroyed, status.Purged:
diags.AddError(
"DiskReadStatus: Disk is in status Destroyed or Purged",
fmt.Sprintf("the resource with disk_id %d cannot be read because it has been destroyed or purged", diskId),
fmt.Sprintf("the resource with disk_id %d cannot be read because it has been destroyed or purged", diskID),
)
return diags
}
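Consolidated across the two hunks above, the status handling in DiskReadStatus after this change reads roughly as follows (a sketch assuming the switch is over recordDisk.Status and that the Deleted branch is a case of the same switch; branches not visible in this diff are omitted).
switch recordDisk.Status {
case status.Deleted:
    // A deleted (but not yet purged) disk is restored so the resource remains usable.
    diags.Append(RestoreDisk(ctx, diskID, c)...)
    if diags.HasError() {
        return diags
    }
    state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
case status.Destroyed, status.Purged:
    diags.AddError(
        "DiskReadStatus: Disk is in status Destroyed or Purged",
        fmt.Sprintf("the resource with disk_id %d cannot be read because it has been destroyed or purged", diskID),
    )
    return diags
}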
@@ -188,14 +181,14 @@ func DiskReadStatus(ctx context.Context, state *models.ResourceDiskModel, c *cli
// RestoreDisk performs disk Restore request.
// Returns error in case of failures.
-func RestoreDisk(ctx context.Context, diskId uint64, c *client.Client) diag.Diagnostics {
+func RestoreDisk(ctx context.Context, diskID uint64, c *client.Client) diag.Diagnostics {
diags := diag.Diagnostics{}
restoreReq := disks.RestoreRequest{
-DiskID: diskId,
+DiskID: diskID,
}
tflog.Info(ctx, "RestoreDisk: before calling CloudAPI().Disks().Restore", map[string]any{"diskId": diskId, "req": restoreReq})
tflog.Info(ctx, "RestoreDisk: before calling CloudAPI().Disks().Restore", map[string]any{"diskID": diskID, "req": restoreReq})
res, err := c.CloudAPI().Disks().Restore(ctx, restoreReq)
if err != nil {
@@ -205,18 +198,18 @@ func RestoreDisk(ctx context.Context, diskId uint64, c *client.Client) diag.Diag
)
return diags
}
tflog.Info(ctx, "RestoreDisk: response from CloudAPI().Disks().Restore", map[string]any{"disk_id": diskId, "response": res})
tflog.Info(ctx, "RestoreDisk: response from CloudAPI().Disks().Restore", map[string]any{"disk_id": diskID, "response": res})
return nil
}
// SizeMaxUpdateDisk resizes disk.
// Returns error in case of failures.
-func SizeMaxUpdateDisk(ctx context.Context, diskId uint64, plan, state *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
+func SizeMaxUpdateDisk(ctx context.Context, diskID uint64, plan, state *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
var diags diag.Diagnostics
resizeReq := disks.ResizeRequest{
-DiskID: diskId,
+DiskID: diskID,
}
// check if resize request is valid
@@ -224,7 +217,7 @@ func SizeMaxUpdateDisk(ctx context.Context, diskId uint64, plan, state *models.R
diags.AddError(
"SizeMaxUpdateDisk: reducing disk size is not allowed",
fmt.Sprintf("disk with id %s has state size %d, plan size %d",
-plan.Id.ValueString(),
+plan.ID.ValueString(),
state.SizeMax.ValueInt64(),
plan.SizeMax.ValueInt64()))
return diags
@@ -233,7 +226,7 @@ func SizeMaxUpdateDisk(ctx context.Context, diskId uint64, plan, state *models.R
resizeReq.Size = uint64(plan.SizeMax.ValueInt64())
tflog.Info(ctx, "SizeMaxUpdateDisk: before calling CloudAPI().Disks().Resize2", map[string]any{
"disk_id": plan.Id.ValueString(),
"disk_id": plan.ID.ValueString(),
"size_max_state": state.SizeMax.ValueInt64(),
"size_max_plan": plan.SizeMax.ValueInt64(),
"req": resizeReq,
@@ -247,7 +240,7 @@ func SizeMaxUpdateDisk(ctx context.Context, diskId uint64, plan, state *models.R
}
tflog.Info(ctx, "SizeMaxUpdateDisk: response from CloudAPI().Disks().Resize2", map[string]any{
"disk_id": plan.Id.ValueString(),
"disk_id": plan.ID.ValueString(),
"response": res})
return nil
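The guard referred to by the "check if resize request is valid" comment is not fully visible in this hunk; a minimal sketch of its assumed shape, with a strict less-than comparison on the int64 values.
// Assumed validation producing the error shown above; the comparison operator is an assumption.
if plan.SizeMax.ValueInt64() < state.SizeMax.ValueInt64() {
    diags.AddError(
        "SizeMaxUpdateDisk: reducing disk size is not allowed",
        fmt.Sprintf("disk with id %s has state size %d, plan size %d",
            plan.ID.ValueString(),
            state.SizeMax.ValueInt64(),
            plan.SizeMax.ValueInt64()))
    return diags
}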
@@ -255,16 +248,16 @@ func SizeMaxUpdateDisk(ctx context.Context, diskId uint64, plan, state *models.R
// NameUpdateDisk renames disk.
// Returns error in case of failures.
-func NameUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
+func NameUpdateDisk(ctx context.Context, diskID uint64, plan *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
var diags diag.Diagnostics
renameReq := disks.RenameRequest{
-DiskID: diskId,
+DiskID: diskID,
Name: plan.DiskName.ValueString(),
}
tflog.Info(ctx, "NameUpdateDisk: before calling CloudAPI().Disks().Rename", map[string]any{
"disk_id": plan.Id.ValueString(),
"disk_id": plan.ID.ValueString(),
"disk_name_plan": plan.DiskName.ValueString(),
"req": renameReq,
})
@@ -277,7 +270,7 @@ func NameUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDis
}
tflog.Info(ctx, "NameUpdateDisk: response from CloudAPI().Disks().Rename", map[string]any{
"disk_id": plan.Id.ValueString(),
"disk_id": plan.ID.ValueString(),
"response": res})
return nil
@@ -285,16 +278,16 @@ func NameUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDis
// LimitIOUpdateDisk changes IO limits that user specified in iotune field for updated resource.
// In case of failure returns errors.
-func LimitIOUpdateDisk(ctx context.Context, diskId uint64, plan *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
+func LimitIOUpdateDisk(ctx context.Context, diskID uint64, plan *models.ResourceDiskModel, c *client.Client) diag.Diagnostics {
diags := diag.Diagnostics{}
limitIOReq := disks.LimitIORequest{
-DiskID: diskId,
+DiskID: diskID,
}
var iotunePlan models.IOTuneModel
// plan.IOTune is not null as it was checked before call
tflog.Info(ctx, "LimitIOUpdateDisk: new iotune specified", map[string]any{"disk_id": diskId})
tflog.Info(ctx, "LimitIOUpdateDisk: new iotune specified", map[string]any{"disk_id": diskID})
diags.Append(plan.IOTune.As(ctx, &iotunePlan, basetypes.ObjectAsOptions{})...)
if diags.HasError() {
tflog.Error(ctx, "LimitIOUpdateDisk: cannot populate iotune with plan.IOTune object element")
@@ -320,7 +313,7 @@ func LimitIOUpdateDisk(ctx context.Context, diskId uint64, plan *models.Resource
limitIOReq.WriteIOPSSecMax = uint64(iotunePlan.WriteIOPSSecMax.ValueInt64())
tflog.Info(ctx, "LimitIOUpdateDisk: before calling CloudAPI().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"disk_id": diskID,
"limitIOReq": limitIOReq})
res, err := c.CloudAPI().Disks().LimitIO(ctx, limitIOReq)
if err != nil {
@@ -329,7 +322,7 @@ func LimitIOUpdateDisk(ctx context.Context, diskId uint64, plan *models.Resource
return diags
}
tflog.Info(ctx, "LimitIOUpdateDisk: response from CloudAPI().Disks().LimitIO", map[string]any{
"disk_id": diskId,
"disk_id": diskID,
"response": res})
return nil
@@ -337,34 +330,34 @@ func LimitIOUpdateDisk(ctx context.Context, diskId uint64, plan *models.Resource
// ShareableUpdateDisk shares or unshares disk.
// In case of failure returns errors.
-func ShareableUpdateDisk(ctx context.Context, diskId uint64, share bool, c *client.Client) diag.Diagnostics {
+func ShareableUpdateDisk(ctx context.Context, diskID uint64, share bool, c *client.Client) diag.Diagnostics {
diags := diag.Diagnostics{}
// share
if share {
tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudAPI().Disks().Share", map[string]any{"disk_id": diskId})
res, err := c.CloudAPI().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudAPI().Disks().Share", map[string]any{"disk_id": diskID})
res, err := c.CloudAPI().Disks().Share(ctx, disks.ShareRequest{DiskID: diskID})
if err != nil {
diags.AddError("ShareableUpdateDisk: Unable to share Disk",
err.Error())
return diags
}
tflog.Info(ctx, "ShareableUpdateDisk: response from CloudAPI().Disks().Share", map[string]any{
"disk_id": diskId,
"disk_id": diskID,
"response": res})
}
// unshare
if !share {
tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudAPI().Disks().Unshare", map[string]any{"disk_id": diskId})
res, err := c.CloudAPI().Disks().Unshare(ctx, disks.UnshareRequest{DiskID: diskId})
tflog.Info(ctx, "ShareableUpdateDisk: before calling CloudAPI().Disks().Unshare", map[string]any{"disk_id": diskID})
res, err := c.CloudAPI().Disks().Unshare(ctx, disks.UnshareRequest{DiskID: diskID})
if err != nil {
diags.AddError("ShareableUpdateDisk: Unable to unshare Disk",
err.Error())
return diags
}
tflog.Info(ctx, "ShareableUpdateDisk: response from CloudAPI().Disks().Unshare", map[string]any{
"disk_id": diskId,
"disk_id": diskID,
"response": res})
}
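Finally, a hedged sketch of how an Update flow might drive ShareableUpdateDisk from the plan and state (the Shareable attribute name and the resp handle are illustrative; the caller is not part of this diff).
// Hypothetical call site inside the resource Update method.
if !plan.Shareable.Equal(state.Shareable) {
    resp.Diagnostics.Append(ShareableUpdateDisk(ctx, diskID, plan.Shareable.ValueBool(), c)...)
    if resp.Diagnostics.HasError() {
        return
    }
}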