This commit is contained in: 1.2.1
@@ -50,7 +50,9 @@ func DataSourceCompute(ctx context.Context, state *models.RecordComputeModel, c
AutoStartWithNode: types.BoolValue(computeRecord.AutoStart),
Chipset: types.StringValue(computeRecord.Chipset),
BootDiskSize: types.Int64Value(int64(computeRecord.BootDiskSize)),
BootOrder: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &computeRecord.BootOrder),
CdImageId: types.Int64Value(int64(computeRecord.CdImageId)),
Clones: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &computeRecord.Clones),
CloneReference: types.Int64Value(int64(computeRecord.CloneReference)),
ComputeCIID: types.Int64Value(int64(computeRecord.ComputeCIID)),
CPU: types.Int64Value(int64(computeRecord.CPU)),
@@ -86,11 +88,13 @@ func DataSourceCompute(ctx context.Context, state *models.RecordComputeModel, c
NumaNodeId: types.Int64Value(int64(computeRecord.NumaNodeId)),
OSUsers: flattenOSUsers(ctx, &computeRecord.OSUsers),
Pinned: types.BoolValue(computeRecord.Pinned),
PreferredCPU: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &computeRecord.PreferredCPU),
PCIDevices: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, flattenPCI(ctx, pciDevicesList)),
RAM: types.Int64Value(int64(computeRecord.RAM)),
ReferenceID: types.StringValue(computeRecord.ReferenceID),
Registered: types.BoolValue(computeRecord.Registered),
ResName: types.StringValue(computeRecord.ResName),
ReservedNodeCpus: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &computeRecord.ReservedNodeCpus),
RGID: types.Int64Value(int64(computeRecord.RGID)),
RGName: types.StringValue(computeRecord.RGName),
SnapSets: flattenSnapSet(ctx, &computeRecord.SnapSets),
@@ -105,27 +109,14 @@ func DataSourceCompute(ctx context.Context, state *models.RecordComputeModel, c
VirtualImageID: types.Int64Value(int64(computeRecord.VirtualImageID)),
VirtualImageName: types.StringValue(computeRecord.VirtualImageName),
VNCPassword: types.StringValue(computeRecord.VNCPassword),
VGPUs: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &computeRecord.VGPUs),
}
state.BootOrder, diags = types.ListValueFrom(ctx, types.StringType, computeRecord.BootOrder)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error FlattenDataSourceCompute", diags))
}
state.Clones, diags = types.ListValueFrom(ctx, types.Int64Type, computeRecord.Clones)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error FlattenDataSourceCompute", diags))
}

state.Tags, diags = types.MapValueFrom(ctx, types.StringType, computeRecord.Tags)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error FlattenDataSourceCompute", diags))
}
state.ReservedNodeCpus, diags = types.ListValueFrom(ctx, types.Int64Type, computeRecord.ReservedNodeCpus)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error FlattenDataSourceCompute", diags))
}
state.VGPUs, diags = types.ListValueFrom(ctx, types.Int64Type, computeRecord.VGPUs)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error FlattenDataSourceCompute", diags))
}

tflog.Info(ctx, "End FlattenDataSourceCompute")
return nil
}
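The per-field types.ListValueFrom blocks above are exactly what the new flattens.FlattenSimpleTypeToList calls in the struct literal replace. The helper's implementation is not part of this diff; a minimal sketch of what it could look like, assuming it simply wraps types.ListValueFrom and logs diagnostics (the body below is an assumption, not the provider's actual code):

package flattens

import (
    "context"
    "fmt"

    "github.com/hashicorp/terraform-plugin-framework/attr"
    "github.com/hashicorp/terraform-plugin-framework/types"
    "github.com/hashicorp/terraform-plugin-log/tflog"
)

// FlattenSimpleTypeToList wraps types.ListValueFrom and logs any diagnostics,
// so callers can assign the result directly inside a struct literal.
func FlattenSimpleTypeToList(ctx context.Context, elemType attr.Type, elements any) types.List {
    res, diags := types.ListValueFrom(ctx, elemType, elements)
    if diags.HasError() {
        tflog.Error(ctx, fmt.Sprint("FlattenSimpleTypeToList: ", diags))
    }
    return res
}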
@@ -11,6 +11,7 @@ import (
"github.com/hashicorp/terraform-plugin-log/tflog"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
@@ -74,7 +75,9 @@ func flattenItemsList(ctx context.Context, computes *compute.ListComputes) []mod
Architecture: types.StringValue(item.Architecture),
AutoStartWithNode: types.BoolValue(item.AutoStart),
BootDiskSize: types.Int64Value(int64(item.BootDiskSize)),
BootOrder: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.BootOrder),
CdImageId: types.Int64Value(int64(item.CdImageId)),
Clones: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.Clones),
CloneReference: types.Int64Value(int64(item.CloneReference)),
Chipset: types.StringValue(item.Chipset),
ComputeCIID: types.Int64Value(int64(item.ComputeCIID)),
@@ -105,10 +108,12 @@ func flattenItemsList(ctx context.Context, computes *compute.ListComputes) []mod
NumaAffinity: types.StringValue(item.NumaAffinity),
NumaNodeId: types.Int64Value(int64(item.NumaNodeId)),
Pinned: types.BoolValue(item.Pinned),
PreferredCPU: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.PreferredCPU),
RAM: types.Int64Value(int64(item.RAM)),
ReferenceID: types.StringValue(item.ReferenceID),
Registered: types.BoolValue(item.Registered),
ResName: types.StringValue(item.ResName),
ReservedNodeCpus: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.ReservedNodeCpus),
RGID: types.Int64Value(int64(item.RGID)),
RGName: types.StringValue(item.RGName),
SnapSets: flattenSnapSetInList(ctx, &item.SnapSets),
@@ -122,28 +127,13 @@ func flattenItemsList(ctx context.Context, computes *compute.ListComputes) []mod
UserManaged: types.BoolValue(item.UserManaged),
VINSConnected: types.Int64Value(int64(item.VINSConnected)),
VirtualImageID: types.Int64Value(int64(item.VirtualImageID)),
VGPUs: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.VGPUs),
}

temp.BootOrder, diags = types.ListValueFrom(ctx, types.StringType, item.BootOrder)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
}
temp.Clones, diags = types.ListValueFrom(ctx, types.Int64Type, item.Clones)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
}
temp.Tags, diags = types.MapValueFrom(ctx, types.StringType, item.Tags)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
}
temp.ReservedNodeCpus, diags = types.ListValueFrom(ctx, types.Int64Type, item.ReservedNodeCpus)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
}
temp.VGPUs, diags = types.ListValueFrom(ctx, types.Int64Type, item.VGPUs)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
}
res = append(res, temp)
}
@@ -11,6 +11,7 @@ import (
"github.com/hashicorp/terraform-plugin-log/tflog"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models"
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities"
)
@@ -72,7 +73,9 @@ func flattenItemsListDeleted(ctx context.Context, computes *compute.ListComputes
Architecture: types.StringValue(item.Architecture),
AutoStartWithNode: types.BoolValue(item.AutoStart),
BootDiskSize: types.Int64Value(int64(item.BootDiskSize)),
BootOrder: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.BootOrder),
CdImageId: types.Int64Value(int64(item.CdImageId)),
Clones: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.Clones),
Chipset: types.StringValue(item.Chipset),
CloneReference: types.Int64Value(int64(item.CloneReference)),
ComputeCIID: types.Int64Value(int64(item.ComputeCIID)),
@@ -103,10 +106,12 @@ func flattenItemsListDeleted(ctx context.Context, computes *compute.ListComputes
NumaAffinity: types.StringValue(item.NumaAffinity),
NumaNodeId: types.Int64Value(int64(item.NumaNodeId)),
Pinned: types.BoolValue(item.Pinned),
PreferredCPU: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.PreferredCPU),
RAM: types.Int64Value(int64(item.RAM)),
ReferenceID: types.StringValue(item.ReferenceID),
Registered: types.BoolValue(item.Registered),
ResName: types.StringValue(item.ResName),
ReservedNodeCpus: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.ReservedNodeCpus),
RGID: types.Int64Value(int64(item.RGID)),
RGName: types.StringValue(item.RGName),
SnapSets: flattenSnapSetInListDeleted(ctx, &item.SnapSets),
@@ -120,28 +125,13 @@ func flattenItemsListDeleted(ctx context.Context, computes *compute.ListComputes
UserManaged: types.BoolValue(item.UserManaged),
VINSConnected: types.Int64Value(int64(item.VINSConnected)),
VirtualImageID: types.Int64Value(int64(item.VirtualImageID)),
VGPUs: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.VGPUs),
}

temp.BootOrder, diags = types.ListValueFrom(ctx, types.StringType, item.BootOrder)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenItemsListDeleted", diags))
}
temp.Clones, diags = types.ListValueFrom(ctx, types.Int64Type, item.Clones)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenItemsListDeleted", diags))
}
temp.Tags, diags = types.MapValueFrom(ctx, types.StringType, item.Tags)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenItemsListDeleted", diags))
}
temp.ReservedNodeCpus, diags = types.ListValueFrom(ctx, types.Int64Type, item.ReservedNodeCpus)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenItemsList", diags))
}
temp.VGPUs, diags = types.ListValueFrom(ctx, types.Int64Type, item.VGPUs)
if diags != nil {
tflog.Error(ctx, fmt.Sprint("Error flattenItemsListDeleted", diags))
}
res = append(res, temp)
}
@@ -121,6 +121,7 @@ func ComputeResource(ctx context.Context, plan *models.ResourceComputeModel, c *
NumaNodeId: types.Int64Value(int64(recordItemCompute.NumaNodeId)),
OSUsers: flattenResourceOSUsers(ctx, &recordItemCompute.OSUsers),
Pinned: types.BoolValue(recordItemCompute.Pinned),
PreferredCPU: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordItemCompute.PreferredCPU),
ReferenceID: types.StringValue(recordItemCompute.ReferenceID),
Registered: types.BoolValue(recordItemCompute.Registered),
ResName: types.StringValue(recordItemCompute.ResName),

@@ -61,6 +61,7 @@ type RecordComputeModel struct {
OSUsers []ItemOSUserModel `tfsdk:"os_users"`
PCIDevices types.List `tfsdk:"pci_devices"`
Pinned types.Bool `tfsdk:"pinned"`
PreferredCPU types.List `tfsdk:"preferred_cpu"`
RAM types.Int64 `tfsdk:"ram"`
ReferenceID types.String `tfsdk:"reference_id"`
Registered types.Bool `tfsdk:"registered"`

@@ -73,6 +73,7 @@ type ItemComputeModel struct {
NumaAffinity types.String `tfsdk:"numa_affinity"`
NumaNodeId types.Int64 `tfsdk:"numa_node_id"`
Pinned types.Bool `tfsdk:"pinned"`
PreferredCPU types.List `tfsdk:"preferred_cpu"`
RAM types.Int64 `tfsdk:"ram"`
ReferenceID types.String `tfsdk:"reference_id"`
Registered types.Bool `tfsdk:"registered"`

@@ -71,6 +71,7 @@ type ItemListDeletedComputeModel struct {
NumaAffinity types.String `tfsdk:"numa_affinity"`
NumaNodeId types.Int64 `tfsdk:"numa_node_id"`
Pinned types.Bool `tfsdk:"pinned"`
PreferredCPU types.List `tfsdk:"preferred_cpu"`
RAM types.Int64 `tfsdk:"ram"`
ReferenceID types.String `tfsdk:"reference_id"`
Registered types.Bool `tfsdk:"registered"`

@@ -38,6 +38,7 @@ type ResourceComputeModel struct {
Rollback types.Object `tfsdk:"rollback"`
CD types.Object `tfsdk:"cd"`
PinToStack types.Bool `tfsdk:"pin_to_stack"`
PreferredCPU types.List `tfsdk:"preferred_cpu"`
Description types.String `tfsdk:"description"`
CloudInit types.String `tfsdk:"cloud_init"`
Enabled types.Bool `tfsdk:"enabled"`
@@ -94,6 +94,11 @@ func (r *resourceCompute) Create(ctx context.Context, req resource.CreateRequest
// because additional settings failure is not critical. If errors were added instead of warnings, terraform
// framework would mark resource as tainted and delete it, which would be unwanted behaviour.

// pin to stack if needed, warnings added to resp.Diagnostics in case of failure.
if plan.PinToStack.ValueBool() {
resp.Diagnostics.Append(utilities.ComputeResourcePinToStack(ctx, &plan, r.client)...)
}

// enable or disable Compute, warnings added to resp.Diagnostics in case of failure.
resp.Diagnostics.Append(utilities.ComputeResourceEnableDisable(ctx, &plan, r.client)...)

@@ -146,11 +151,6 @@ func (r *resourceCompute) Create(ctx context.Context, req resource.CreateRequest
resp.Diagnostics.Append(utilities.ComputeResourcePCIDevice(ctx, &plan, r.client)...)
}

// pin to stack if needed, warnings added to resp.Diagnostics in case of failure.
if plan.PinToStack.ValueBool() {
resp.Diagnostics.Append(utilities.ComputeResourcePinToStack(ctx, &plan, r.client)...)
}

// set auto start_w_node if pin_to_stack == false
if !plan.PinToStack.ValueBool() && plan.AutoStartWithNode.ValueBool() {
resp.Diagnostics.Append(utilities.ComputeResourceAutoStartWithNode(ctx, &plan, r.client)...)
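The comments above carry the key design decision in Create: follow-up configuration steps append warnings rather than errors, because an error in resp.Diagnostics after a successful create would make the framework taint and destroy the new resource. A minimal illustration of that convention inside one of the post-create utilities (applyOptionalSetting and the wording are hypothetical, not the provider's code):

// Report the failure but keep the freshly created resource.
if err := applyOptionalSetting(ctx); err != nil { // applyOptionalSetting is a placeholder
    diags.AddWarning(
        "Compute created, but an additional setting could not be applied",
        err.Error(),
    )
    return diags // only warnings: the resource stays in state instead of being tainted
}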
@@ -319,15 +319,6 @@ func (r *resourceCompute) Update(ctx context.Context, req resource.UpdateRequest
}
}

// Network(s) update if needed
if !plan.Network.Equal(state.Network) && !plan.Network.IsUnknown() {
resp.Diagnostics.Append(utilities.ComputeResourceNetworkUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update network(s)")
return
}
}

// PCI device(s) update if needed
if !plan.PCIDevices.Equal(state.PCIDevices) {
resp.Diagnostics.Append(utilities.ComputeResourcePCIDeviceUpdate(ctx, &state, &plan, r.client)...)
@@ -337,10 +328,19 @@ func (r *resourceCompute) Update(ctx context.Context, req resource.UpdateRequest
}
}

// pin to stack if needed
if !plan.PinToStack.Equal(state.PinToStack) {
resp.Diagnostics.Append(utilities.ComputeResourcePinToStackUpdate(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error pin/unpin to stack compute")
return
}
}

// Compute parameters update if needed
if (!plan.Description.IsUnknown() && !plan.Description.Equal(state.Description)) || !plan.Name.Equal(state.Name) ||
!plan.NumaAffinity.Equal(state.NumaAffinity) || !plan.CPUPin.Equal(state.CPUPin) || !plan.HPBacked.Equal(state.HPBacked) || (!plan.Chipset.IsUnknown() && !plan.Chipset.Equal(state.Chipset)) ||
!plan.AutoStartWithNode.Equal(state.AutoStartWithNode) {
!plan.AutoStartWithNode.Equal(state.AutoStartWithNode) || (!plan.PreferredCPU.IsUnknown() && !plan.PreferredCPU.Equal(state.Description)) {
resp.Diagnostics.Append(utilities.ComputeResourceComputeUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update compute parameters")
@@ -348,6 +348,15 @@ func (r *resourceCompute) Update(ctx context.Context, req resource.UpdateRequest
}
}

// Network(s) update if needed
if !plan.Network.Equal(state.Network) && !plan.Network.IsUnknown() {
resp.Diagnostics.Append(utilities.ComputeResourceNetworkUpdate(ctx, &state, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error update network(s)")
return
}
}

// Affinity label update if needed
if !plan.AffinityLabel.Equal(state.AffinityLabel) && !plan.AffinityLabel.IsUnknown() {
resp.Diagnostics.Append(utilities.ComputeResourceAffinityLabelUpdate(ctx, &plan, r.client)...)
@@ -429,15 +438,6 @@ func (r *resourceCompute) Update(ctx context.Context, req resource.UpdateRequest
}
}

// pin to stack if needed
if !plan.PinToStack.Equal(state.PinToStack) {
resp.Diagnostics.Append(utilities.ComputeResourcePinToStackUpdate(ctx, &plan, r.client)...)
if resp.Diagnostics.HasError() {
tflog.Error(ctx, "Update resourceCompute: Error pin/unpin to stack compute")
return
}
}

// compute pause if need
if !plan.Pause.Equal(state.Pause) && !plan.Pause.IsNull() {
resp.Diagnostics.Append(utilities.ComputeResourcePauseResumeCompute(ctx, &plan, r.client)...)
@@ -631,6 +631,10 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute {
"pinned": schema.BoolAttribute{
Computed: true,
},
"preferred_cpu": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"ram": schema.Int64Attribute{
Computed: true,
},

@@ -391,6 +391,10 @@ func MakeSchemaDataSourceComputeList() map[string]schema.Attribute {
"pinned": schema.BoolAttribute{
Computed: true,
},
"preferred_cpu": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"ram": schema.Int64Attribute{
Computed: true,
},

@@ -383,6 +383,10 @@ func MakeSchemaDataSourceComputeListDeleted() map[string]schema.Attribute {
"pinned": schema.BoolAttribute{
Computed: true,
},
"preferred_cpu": schema.ListAttribute{
Computed: true,
ElementType: types.Int64Type,
},
"ram": schema.Int64Attribute{
Computed: true,
},

@@ -390,6 +390,12 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute {
Default: booldefault.StaticBool(false),
Description: "Use Huge Pages to allocate RAM of the virtual machine. The system must be pre-configured by allocating Huge Pages on the physical node.",
},
"preferred_cpu": schema.ListAttribute{
Optional: true,
Computed: true,
ElementType: types.Int64Type,
Description: "Recommended isolated CPUs. Field is ignored if compute.cpupin=False or compute.pinned=False",
},
"pci_devices": schema.SetAttribute{
Optional: true,
ElementType: types.Int64Type,
@@ -191,6 +191,15 @@ func CreateResourceCompute(ctx context.Context, plan *models.ResourceComputeMode
createReqX86.HPBacked = true
}

if !plan.PreferredCPU.IsUnknown() {
preferredCPUList := plan.PreferredCPU.Elements()
preferredCPU := make([]int64, 0, len(preferredCPUList))
for _, elem := range preferredCPUList {
preferredCPU = append(preferredCPU, elem.(types.Int64).ValueInt64())
}
createReqX86.PreferredCPU = preferredCPU
}

tflog.Info(ctx, fmt.Sprintf("CreateResourceCompute: creating Compute of type KVM VM x86"))
id, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86)
if err != nil {
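The element-by-element conversion of plan.PreferredCPU into []int64 recurs in the create, resize and update paths of this change. For reference, the same extraction can be written with the framework's ListValue.ElementsAs helper; this is only an illustrative alternative using the same surrounding variables, not what the commit does:

// Equivalent extraction of preferred_cpu via ElementsAs (illustrative only).
var preferredCPU []int64
if !plan.PreferredCPU.IsUnknown() && !plan.PreferredCPU.IsNull() {
    if diags := plan.PreferredCPU.ElementsAs(ctx, &preferredCPU, false); diags.HasError() {
        tflog.Error(ctx, fmt.Sprint("CreateResourceCompute: cannot read preferred_cpu: ", diags))
    }
}
createReqX86.PreferredCPU = preferredCPU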
@@ -47,6 +47,20 @@ func ComputeResourceResize(ctx context.Context, state *models.ResourceComputeMod
resizeReq.CPU = uint64(plan.CPU.ValueInt64())
}

if resizeReq.CPU != 0 {
if !plan.PreferredCPU.IsUnknown() {
preferredCPUList := plan.PreferredCPU.Elements()
preferredCPU := make([]int64, 0, len(preferredCPUList))
for _, elem := range preferredCPUList {
preferredCPU = append(preferredCPU, elem.(types.Int64).ValueInt64())
}
resizeReq.PreferredCPU = preferredCPU
}
if len(state.PreferredCPU.Elements()) != 0 && len(resizeReq.PreferredCPU) == 0 {
resizeReq.PreferredCPU = []int64{-1}
}
}

if !plan.RAM.Equal(state.RAM) {
resizeReq.RAM = uint64(plan.RAM.ValueInt64())
}
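Both the resize path above and the compute-update path further down fall back to []int64{-1} when the state had preferred CPUs but the plan no longer supplies any. Reading this diff, -1 appears to act as a reset sentinel that tells the platform to drop the previous preference; restated as a standalone helper for clarity (hypothetical name, not part of the commit):

// preferredCPUOrReset mirrors the rule above: keep the planned CPUs, or send -1
// to clear a preference that existed before but is gone from the plan.
func preferredCPUOrReset(planned, previous []int64) []int64 {
    if len(planned) == 0 && len(previous) != 0 {
        return []int64{-1} // reset marker inferred from this change
    }
    return planned
}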
@@ -288,7 +302,7 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom

needStart := false
// need stop to attach first network
if len(detachMap) == len(state.Network.Elements()) || (len(state.Network.Elements()) < 1) && len(attachMap) > 0 || hasDPDKnetwork(attachMap) {
if len(detachMap) == len(state.Network.Elements()) || (len(state.Network.Elements()) < 1) && len(attachMap) > 0 {
tflog.Info(ctx, "ComputeResourceNetworkUpdate: stop compute", map[string]any{"compute_id": computeId})
_, err = c.CloudAPI().Compute().Stop(ctx, compute.StopRequest{ComputeID: computeId})
if err != nil {
@@ -336,6 +350,10 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom
}
}

if diags.HasError() {
return diags
}

if needStart {
diags = ComputeResourceStartStop(ctx, plan, c)
}
@@ -349,15 +367,6 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom
return nil
}

func hasDPDKnetwork(networkAttachMap []map[string]attr.Value) bool {
for _, elem := range networkAttachMap {
if elem["net_type"].(types.String).ValueString() == "DPDK" {
return true
}
}
return false
}

func ComputeResourceComputeUpdate(ctx context.Context, state *models.ResourceComputeModel, plan *models.ResourceComputeModel, c *client.Client) diag.Diagnostics {
tflog.Info(ctx, "ComputeResourceComputeUpdate: start update compute parameters", map[string]any{"compute_id": plan.ID.ValueString()})
diags := diag.Diagnostics{}
@@ -392,11 +401,23 @@ func ComputeResourceComputeUpdate(ctx context.Context, state *models.ResourceCom
req.Chipset = plan.Chipset.ValueString()
}

if !plan.PreferredCPU.IsUnknown() {
preferredCPUList := plan.PreferredCPU.Elements()
preferredCPU := make([]int64, 0, len(preferredCPUList))
for _, elem := range preferredCPUList {
preferredCPU = append(preferredCPU, elem.(types.Int64).ValueInt64())
}
req.PreferredCPU = preferredCPU
}
if len(state.PreferredCPU.Elements()) != 0 && len(req.PreferredCPU) == 0 {
req.PreferredCPU = []int64{-1}
}

// Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
// If STARTED, we need to stop it before update

var isStopRequred bool
if !plan.NumaAffinity.Equal(state.NumaAffinity) || !plan.CPUPin.Equal(state.CPUPin) || !plan.HPBacked.Equal(state.HPBacked) || !plan.Chipset.Equal(state.Chipset) {
if !plan.NumaAffinity.Equal(state.NumaAffinity) || !plan.CPUPin.Equal(state.CPUPin) || !plan.HPBacked.Equal(state.HPBacked) || !plan.Chipset.Equal(state.Chipset) || (len(req.PreferredCPU) != 0) {
isStopRequred = true
tflog.Info(ctx, "ComputeResourceComputeUpdate: stop compute", map[string]any{"compute_id": computeId})
_, err = c.CloudAPI().Compute().Stop(ctx, compute.StopRequest{ComputeID: computeId})
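The "Note bene" comment is why preferred_cpu changes now also force a stop: these parameters are rejected while the compute is in STARTED tech status. A compact sketch of the resulting stop, update, restart flow, reusing the surrounding function's variables (the update call itself is elided, and the exact sequencing is an assumption based on this hunk):

// Stop first when a stop-only parameter changed (isStopRequred is the flag set above).
if isStopRequred {
    if _, err := c.CloudAPI().Compute().Stop(ctx, compute.StopRequest{ComputeID: computeId}); err != nil {
        diags.AddError("ComputeResourceComputeUpdate: cannot stop compute", err.Error())
        return diags
    }
}
// ... send the compute update request here ...
if isStopRequred {
    // bring the compute back up with the provider's own start/stop helper
    diags.Append(ComputeResourceStartStop(ctx, plan, c)...)
}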