package kvmvm

import (
	"context"
	"strconv"

	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
	"github.com/hashicorp/terraform-plugin-framework/path"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"

	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/flattens"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/schemas"
	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)

// Compile-time checks that resourceCompute satisfies the required framework interfaces.
var (
	_ resource.Resource                = &resourceCompute{}
	_ resource.ResourceWithImportState = &resourceCompute{}
)

// NewResourceCompute is a helper function to simplify the provider implementation.
func NewResourceCompute() resource.Resource {
	return &resourceCompute{}
}

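// A minimal sketch of how this constructor is typically registered with the
// provider (illustrative only: the actual provider implementation lives
// elsewhere in this repository, and the receiver type name below is assumed):
//
//	func (p *dynamixProvider) Resources(_ context.Context) []func() resource.Resource {
//		return []func() resource.Resource{
//			NewResourceCompute,
//			// ... other resources
//		}
//	}
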
// resourceCompute is the resource implementation.
type resourceCompute struct {
	client *client.Client
}

func (r *resourceCompute) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Get plan to create Compute
	var plan models.ResourceComputeModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceCompute: Error receiving the plan")
		return
	}
	tflog.Info(ctx, "Create resourceCompute: start creating", map[string]any{"name": plan.Name.ValueString()})

	// Set timeouts
	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout30m)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceCompute: Error set timeout")
		return
	}
	tflog.Info(ctx, "Create resourceCompute: set timeouts successfully", map[string]any{
		"name":          plan.Name.ValueString(),
		"createTimeout": createTimeout})

	ctx, cancel := context.WithTimeout(ctx, createTimeout)
	defer cancel()

	// Check that input values are valid in the platform
	tflog.Info(ctx, "Create resourceCompute: starting input checks", map[string]any{"name": plan.Name.ValueString()})
	resp.Diagnostics.Append(resourceComputeInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Create resourceCompute: Error input checks")
		return
	}
	tflog.Info(ctx, "Create resourceCompute: input checks successful", map[string]any{"name": plan.Name.ValueString()})

	// Make the create request and get the response for creation
	ComputeId, diags := utilities.CreateResourceCompute(ctx, &plan, r.client)
	if diags.HasError() {
		resp.Diagnostics.Append(diags...)
		tflog.Error(ctx, "Create resourceCompute: Error response for create resource Compute")
		return
	}
	plan.ID = types.StringValue(strconv.Itoa(int(ComputeId)))

	tflog.Info(ctx, "Create resourceCompute: new simple Compute created", map[string]any{"id": ComputeId, "name": plan.Name.ValueString()})

	// Attach extra disk(s) to the new compute
	if !plan.ExtraDisks.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceExtraDiskCreate(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Create resourceCompute: error when attaching extra disk(s) to a new Compute")
			utilities.CleanupResourceCompute(ctx, ComputeId, r.client)
			plan.ID = types.StringValue("")
			return
		}
	}

	// Additional settings after Compute creation: in case of failures, warnings are added to resp.Diagnostics,
	// because a failure in these additional settings is not critical. If errors were added instead of warnings,
	// the Terraform framework would mark the resource as tainted and delete it, which would be unwanted behaviour.

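	// For illustration, a helper that fails in this phase is expected to report
	// via AddWarning rather than AddError, e.g. (message text is hypothetical):
	//
	//	diags.AddWarning(
	//		"Create resourceCompute: could not apply additional setting",
	//		"compute was created, but the setting failed: "+err.Error(),
	//	)
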
	// Enable or disable Compute; warnings are added to resp.Diagnostics in case of failure.
	resp.Diagnostics.Append(utilities.ComputeResourceEnableDisable(ctx, &plan, r.client)...)

	// Nota bene: we created the compute in a STOPPED state (this is required to properly attach
	// the 1st network interface), so now we need to start it before we report the sequence complete.
	resp.Diagnostics.Append(utilities.ComputeResourceStartStop(ctx, &plan, r.client)...)

	// Add affinity_label if needed; warnings are added to resp.Diagnostics in case of failure.
	if !plan.AffinityLabel.IsUnknown() { // affinity_label is optional
		resp.Diagnostics.Append(utilities.ComputeResourceAffinityLabel(ctx, &plan, r.client)...)
	}

	// Add affinity_rules if needed; warnings are added to resp.Diagnostics in case of failure.
	if !plan.AffinityRules.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceAffinityRules(ctx, &plan, r.client)...)
	}

	// Add anti_affinity_rules if needed; warnings are added to resp.Diagnostics in case of failure.
	if !plan.AntiAffinityRules.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceAntiAffinityRules(ctx, &plan, r.client)...)
	}

	// Add tags if needed; warnings are added to resp.Diagnostics in case of failure.
	if !plan.Tags.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceTags(ctx, &plan, r.client)...)
	}

	// Add port_forwarding if needed; warnings are added to resp.Diagnostics in case of failure.
	if !plan.PortForwarding.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourcePortForwarding(ctx, &plan, r.client)...)
	}

	// Add user_access if needed; warnings are added to resp.Diagnostics in case of failure.
	if !plan.UserAccess.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceUserAccess(ctx, &plan, r.client)...)
	}

	// Add snapshot if needed; warnings are added to resp.Diagnostics in case of failure.
	if !plan.Snapshot.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceSnapshot(ctx, &plan, r.client)...)
	}

	// Insert CD image if needed; warnings are added to resp.Diagnostics in case of failure.
	if !plan.CD.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceCDInsert(ctx, &plan, r.client)...)
	}

	// Attach PCI devices to the compute if needed; warnings are added to resp.Diagnostics in case of failure.
	if !plan.PCIDevices.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourcePCIDevice(ctx, &plan, r.client)...)
	}

	// Pin to stack if needed; warnings are added to resp.Diagnostics in case of failure.
	if plan.PinToStack.ValueBool() {
		resp.Diagnostics.Append(utilities.ComputeResourcePinToStack(ctx, &plan, r.client)...)
	}

	// Set auto_start_w_node only if pin_to_stack == false
	if !plan.PinToStack.ValueBool() && plan.AutoStartWithNode.ValueBool() {
		resp.Diagnostics.Append(utilities.ComputeResourceAutoStartWithNode(ctx, &plan, r.client)...)
	}

	// Pause if needed; warnings are added to resp.Diagnostics in case of failure.
	if !plan.Pause.IsNull() && plan.Pause.ValueBool() {
		resp.Diagnostics.Append(utilities.ComputeResourcePause(ctx, &plan, r.client)...)
	}

	tflog.Info(ctx, "Create resourceCompute: resource creation is completed", map[string]any{"id": ComputeId})

	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.ComputeResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}

func (r *resourceCompute) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Get current state
	var state models.ResourceComputeModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceCompute: Error get state")
		return
	}
	tflog.Info(ctx, "Read resourceCompute: got state successfully", map[string]any{"ID": state.ID.ValueString()})

	// Set timeouts
	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceCompute: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read resourceCompute: set timeouts successfully", map[string]any{
		"ID":          state.ID.ValueString(),
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Read the Compute status and restore the Compute if necessary
	resp.Diagnostics.Append(utilities.ComputeReadStatus(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read status or restore")
		return
	}

	// Overwrite items with refreshed state
	resp.Diagnostics.Append(flattens.ComputeResource(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceCompute: Error flatten")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read resourceCompute: Error set state")
		return
	}
	tflog.Info(ctx, "End read resourceCompute")
}

func (r *resourceCompute) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	// Retrieve values from plan
	var plan models.ResourceComputeModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceCompute: Error receiving the plan")
		return
	}
	tflog.Info(ctx, "Update resourceCompute: got plan successfully", map[string]any{"name": plan.Name.ValueString()})

	// Retrieve values from state
	var state models.ResourceComputeModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceCompute: Error receiving the state")
		return
	}
	tflog.Info(ctx, "Update resourceCompute: got state successfully", map[string]any{"compute_id": state.ID.ValueString()})

	// Set timeouts
	updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout30m)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error set timeout")
		return
	}
	tflog.Info(ctx, "Update resourceCompute: set timeouts successfully", map[string]any{
		"ID":            state.ID.ValueString(),
		"updateTimeout": updateTimeout})

	ctx, cancel := context.WithTimeout(ctx, updateTimeout)
	defer cancel()

	// Check that input values are valid in the platform
	tflog.Info(ctx, "Update resourceCompute: starting input checks", map[string]any{"compute_id": state.ID.ValueString()})
	resp.Diagnostics.Append(resourceComputeInputChecks(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Update resourceCompute: Error input checks")
		return
	}
	tflog.Info(ctx, "Update resourceCompute: input checks successful", map[string]any{"compute_id": state.ID.ValueString()})

	// Read the Compute status and restore the Compute if necessary
	resp.Diagnostics.Append(utilities.ComputeReadStatus(ctx, &state, r.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Error read status or restore")
		return
	}

	// ID is a computed attribute, so carry it over from state before calling the update helpers.
	plan.ID = state.ID

	// Enable/disable Compute if needed
	if !plan.Enabled.Equal(state.Enabled) {
		resp.Diagnostics.Append(utilities.ComputeResourceEnableDisable(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error enable/disable Compute")
			return
		}
	}

	// Start/stop Compute if needed
	if !plan.Started.Equal(state.Started) {
		resp.Diagnostics.Append(utilities.ComputeResourceStartStop(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error start/stop Compute")
			return
		}
	}

	// Resize Compute if needed
	if !plan.CPU.Equal(state.CPU) || !plan.RAM.Equal(state.RAM) {
		resp.Diagnostics.Append(utilities.ComputeResourceResize(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error resize Compute")
			return
		}
	}

	// Resize boot disk if needed
	if !plan.BootDiskSize.Equal(state.BootDiskSize) {
		resp.Diagnostics.Append(utilities.ComputeResourceBootDiskResize(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error resize boot disk")
			return
		}
	}

	// Update extra disk(s) if needed
	if !plan.ExtraDisks.Equal(state.ExtraDisks) {
		resp.Diagnostics.Append(utilities.ComputeResourceExtraDiskUpdate(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update extra disk list")
			return
		}
	}

	// Update network(s) if needed
	if !plan.Network.Equal(state.Network) && !plan.Network.IsUnknown() {
		resp.Diagnostics.Append(utilities.ComputeResourceNetworkUpdate(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update network(s)")
			return
		}
	}

	// Update PCI device(s) if needed
	if !plan.PCIDevices.Equal(state.PCIDevices) {
		resp.Diagnostics.Append(utilities.ComputeResourcePCIDeviceUpdate(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update PCI device(s) list")
			return
		}
	}

	// Update compute parameters if needed
	if (!plan.Description.IsUnknown() && !plan.Description.Equal(state.Description)) || !plan.Name.Equal(state.Name) ||
		!plan.NumaAffinity.Equal(state.NumaAffinity) || !plan.CPUPin.Equal(state.CPUPin) || !plan.HPBacked.Equal(state.HPBacked) ||
		(!plan.Chipset.IsUnknown() && !plan.Chipset.Equal(state.Chipset)) || !plan.AutoStartWithNode.Equal(state.AutoStartWithNode) {
		resp.Diagnostics.Append(utilities.ComputeResourceComputeUpdate(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update compute parameters")
			return
		}
	}

	// Update affinity label if needed
	if !plan.AffinityLabel.Equal(state.AffinityLabel) && !plan.AffinityLabel.IsUnknown() {
		resp.Diagnostics.Append(utilities.ComputeResourceAffinityLabelUpdate(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update affinity label")
			return
		}
	}

	// Update affinity rules if needed
	if !plan.AffinityRules.Equal(state.AffinityRules) {
		resp.Diagnostics.Append(utilities.ComputeResourceAffinityRulesUpdate(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update affinity rules")
			return
		}
	}

	// Update anti-affinity rules if needed
	if !plan.AntiAffinityRules.Equal(state.AntiAffinityRules) {
		resp.Diagnostics.Append(utilities.ComputeResourceAntiAffinityRulesUpdate(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update anti affinity rules")
			return
		}
	}

	// Update tags if needed
	if !plan.Tags.Equal(state.Tags) {
		resp.Diagnostics.Append(utilities.ComputeResourceTagsUpdate(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update tags")
			return
		}
	}

	// Update port forwarding if needed
	if !plan.PortForwarding.Equal(state.PortForwarding) {
		resp.Diagnostics.Append(utilities.ComputeResourcePortForwardingUpdate(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update port forwarding rules")
			return
		}
	}

	// Update user access if needed
	if !plan.UserAccess.Equal(state.UserAccess) {
		resp.Diagnostics.Append(utilities.ComputeResourceUserAccessUpdate(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update user(s) access rules")
			return
		}
	}

	// Update snapshot(s) if needed
	if !plan.Snapshot.Equal(state.Snapshot) {
		resp.Diagnostics.Append(utilities.ComputeResourceSnapshotUpdate(ctx, &state, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update snapshot(s)")
			return
		}
	}

	// Roll back if needed
	if !plan.Rollback.Equal(state.Rollback) && !plan.Rollback.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourceRollback(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error rollback compute")
			return
		}
	}

	// Update CD image if needed
	if !plan.CD.Equal(state.CD) {
		resp.Diagnostics.Append(utilities.ComputeResourceCDUpdate(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update cd image")
			return
		}
	}

	// Pin/unpin to stack if needed
	if !plan.PinToStack.Equal(state.PinToStack) {
		resp.Diagnostics.Append(utilities.ComputeResourcePinToStackUpdate(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error pin/unpin to stack compute")
			return
		}
	}

	// Pause/resume compute if needed
	if !plan.Pause.Equal(state.Pause) && !plan.Pause.IsNull() {
		resp.Diagnostics.Append(utilities.ComputeResourcePauseResumeCompute(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error pause/resume compute")
			return
		}
	}

	// Reset compute if needed
	if !plan.Reset.Equal(state.Reset) && plan.Reset.ValueBool() {
		resp.Diagnostics.Append(utilities.ComputeResourceResetCompute(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error reset compute")
			return
		}
	}

	// Redeploy compute if needed
	if !plan.ImageID.Equal(state.ImageID) {
		resp.Diagnostics.Append(utilities.ComputeResourceRedeploy(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error redeploy compute")
			return
		}
	}

	// Update custom fields if needed
	if !plan.CustomFields.Equal(state.CustomFields) && !plan.CustomFields.IsUnknown() {
		resp.Diagnostics.Append(utilities.ComputeResourceCustomFieldUpdate(ctx, &plan, r.client)...)
		if resp.Diagnostics.HasError() {
			tflog.Error(ctx, "Update resourceCompute: Error update custom fields")
			return
		}
	}

	tflog.Info(ctx, "Update resourceCompute: resource update is completed", map[string]any{"ID": plan.ID.ValueString()})

	// Map response body to schema and populate Computed attribute values
	resp.Diagnostics.Append(flattens.ComputeResource(ctx, &plan, r.client)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// Set state to fully populated data
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
}

func (r *resourceCompute) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// Get current state
	var state models.ResourceComputeModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceCompute: Error get state")
		return
	}
	tflog.Info(ctx, "Delete resourceCompute: got state successfully", map[string]any{"ID": state.ID.ValueString()})

	// Set timeouts
	deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Delete resourceCompute: Error set timeout")
		return
	}
	tflog.Info(ctx, "Delete resourceCompute: set timeouts successfully", map[string]any{
		"id":            state.ID.ValueString(),
		"deleteTimeout": deleteTimeout})

	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
	defer cancel()

	// Delete existing Compute
	delReq := compute.DeleteRequest{
		ComputeID:   uint64(state.ComputeId.ValueInt64()),
		Permanently: state.Permanently.ValueBool(),
		DetachDisks: state.DetachDisks.ValueBool(),
	}

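	// Field semantics here are inferred from the SDK request's field names (an
	// assumption, not verified against the platform docs): Permanently skips the
	// recycle bin, and DetachDisks detaches extra disks instead of deleting them
	// together with the compute.
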
	tflog.Info(ctx, "Delete resourceCompute: calling CloudAPI().Compute().Delete", map[string]any{
		"ID":  state.ID.ValueString(),
		"req": delReq,
	})
	_, err := r.client.CloudAPI().Compute().Delete(ctx, delReq)
	if err != nil {
		resp.Diagnostics.AddError("Delete resourceCompute: Error deleting", err.Error())
		return
	}

	tflog.Info(ctx, "End delete resource Compute", map[string]any{"id": state.ID.ValueString()})
}

func (r *resourceCompute) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: schemas.MakeSchemaResourceCompute(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
		},
	}
}

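// With all four operations enabled in the timeouts block above, practitioners
// can override the provider defaults from configuration, e.g. (the labels and
// durations below are illustrative):
//
//	resource "dynamix_kvmvm" "example" {
//	  # ...
//	  timeouts {
//	    create = "45m"
//	    delete = "10m"
//	  }
//	}
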
func (r *resourceCompute) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	// e.g. "dynamix_kvmvm" when the provider type name is "dynamix"
	resp.TypeName = req.ProviderTypeName + "_kvmvm"
}

// Configure adds the provider-configured client to the resource.
func (r *resourceCompute) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	tflog.Info(ctx, "Start configure resourceCompute")
	r.client = client.Resource(ctx, &req, resp)
	tflog.Info(ctx, "End configure resourceCompute")
}

func (r *resourceCompute) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve the import ID and save it to the id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}
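
// Because import passes the ID straight through to the "id" attribute, an
// existing Compute can be adopted by its numeric ID, e.g. (the resource
// address and ID below are illustrative):
//
//	terraform import dynamix_kvmvm.example 12345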