4.6.0
@@ -33,7 +33,6 @@ package kvmvm

import (
    "context"
    "strconv"
    // "net/url"

    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
@@ -50,8 +49,10 @@ func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interf
        return diag.FromErr(err)
    }

    d.SetId(strconv.Itoa(int(compFacts.ID)))
    flattenDataCompute(d, compFacts)
    if err = flattenDataCompute(d, compFacts); err != nil {
        return diag.FromErr(err)
    }

    return nil
}

@@ -1,8 +1,8 @@
package kvmvm

import (
    "context"
    "encoding/json"
    "fmt"
    "sort"
    "strconv"

@@ -12,7 +12,7 @@ import (
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)

func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec *compute.RecordCompute) error {
func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) error {
    log.Debugf("flattenCompute: ID %d, RG ID %d", computeRec.ID, computeRec.RGID)

    customFields, _ := json.Marshal(computeRec.CustomFields)
@@ -49,7 +49,7 @@ func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec *com
    d.Set("deleted_time", computeRec.DeletedTime)
    d.Set("description", computeRec.Description)
    d.Set("devices", string(devices))
    err := d.Set("disks", flattenComputeDisks(ctx, d, computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
    err := d.Set("disks", flattenComputeDisks(computeRec.Disks, d.Get("disks").([]interface{}), d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
    if err != nil {
        return err
    }
@@ -58,18 +58,26 @@ func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec *com
    d.Set("guid", computeRec.GUID)
    d.Set("compute_id", computeRec.ID)
    d.Set("image_id", computeRec.ImageID)
    d.Set("image_name", computeRec.ImageName)
    d.Set("interfaces", flattenInterfaces(computeRec.Interfaces))
    d.Set("lock_status", computeRec.LockStatus)
    d.Set("manager_id", computeRec.ManagerID)
    d.Set("manager_type", computeRec.ManagerType)
    d.Set("migrationjob", computeRec.MigrationJob)
    d.Set("milestones", computeRec.Milestones)
    d.Set("natable_vins_id", computeRec.NatableVINSID)
    d.Set("natable_vins_ip", computeRec.NatableVINSIP)
    d.Set("natable_vins_name", computeRec.NatableVINSName)
    d.Set("natable_vins_network", computeRec.NatableVINSNetwork)
    d.Set("natable_vins_network_name", computeRec.NatableVINSNetworkName)
    d.Set("need_reboot", computeRec.NeedReboot)
    d.Set("numa_node_id", computeRec.NumaNodeId)
    d.Set("os_users", flattenOSUsers(computeRec.OSUsers))
    d.Set("pinned", computeRec.Pinned)
    d.Set("reference_id", computeRec.ReferenceID)
    d.Set("registered", computeRec.Registered)
    d.Set("res_name", computeRec.ResName)
    d.Set("reserved_node_cpus", computeRec.ReservedNodeCpus)
    d.Set("rg_name", computeRec.RGName)
    d.Set("snap_sets", flattenSnapSets(computeRec.SnapSets))
    d.Set("stack_id", computeRec.StackID)
@@ -85,7 +93,7 @@ func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec *com
    d.Set("user_managed", computeRec.UserManaged)
    d.Set("vgpus", computeRec.VGPUs)
    d.Set("virtual_image_id", computeRec.VirtualImageID)

    d.Set("virtual_image_name", computeRec.VirtualImageName)
    return nil
}

@@ -140,6 +148,7 @@ func flattenInterfaces(ifaces compute.ListInterfaces) []map[string]interface{} {
            "conn_id": iface.ConnID,
            "conn_type": iface.ConnType,
            "def_gw": iface.DefGW,
            "enabled": iface.Enabled,
            "flip_group_id": iface.FLIPGroupID,
            "guid": iface.GUID,
            "ip_address": iface.IPAddress,
@@ -149,6 +158,7 @@ func flattenInterfaces(ifaces compute.ListInterfaces) []map[string]interface{} {
            "net_id": iface.NetID,
            "netmask": iface.NetMask,
            "net_type": iface.NetType,
            "node_id": iface.NodeID,
            "pci_slot": iface.PCISlot,
            "qos": flattenQOS(iface.QOS),
            "target": iface.Target,
@@ -171,20 +181,30 @@ func flattenQOS(qos compute.QOS) []map[string]interface{} {
    }
}

func flattenComputeDisks(ctx context.Context, d *schema.ResourceData, disksList compute.ListDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
func flattenComputeDisks(disksList compute.ListDisks, disksBlocks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(disksList))

    if len(disksBlocks) == 0 {
        return res
    }

    sort.Slice(disksList, func(i, j int) bool {
        return disksList[i].ID < disksList[j].ID
    })

    indexDataDisks := 0

    for _, disk := range disksList {
        if disk.ID == bootDiskId || findInExtraDisks(uint(disk.ID), extraDisks) { // skip main bootdisk and extraDisks
            continue
        }

        permanently, ok := ctx.Value(DiskKey(strconv.Itoa(int(disk.ID)))).(bool) // get permanently from Create or Update context
        if !ok {
            permanently = getPermanentlyByDiskID(d, disk.ID) // get permanently from state when Read is not after Create/Update
        }
        permanentlyValue := disksBlocks[indexDataDisks].(map[string]interface{})["permanently"].(bool)
        nodeIds := disksBlocks[indexDataDisks].(map[string]interface{})["node_ids"].(*schema.Set)

        temp := map[string]interface{}{
            "disk_name": disk.Name,
            "node_ids": nodeIds,
            "size": disk.SizeMax,
            "sep_id": disk.SEPID,
            "disk_type": disk.Type,
@@ -195,31 +215,15 @@ func flattenComputeDisks(ctx context.Context, d *schema.ResourceData, disksList
            "shareable": disk.Shareable,
            "size_used": disk.SizeUsed,
            "size_max": disk.SizeMax,
            "permanently": permanently,
            "permanently": permanentlyValue,
        }
        res = append(res, temp)
        indexDataDisks++
    }
    sort.Slice(res, func(i, j int) bool {
        return res[i]["disk_id"].(uint64) < res[j]["disk_id"].(uint64)
    })

    return res
}

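The rewritten flattenComputeDisks pairs each remaining API disk with the disks block at the same index (indexDataDisks), which only lines up because sort.Slice establishes ascending-ID order and the state blocks are assumed to follow the same order. A minimal, self-contained sketch of that positional pairing, using hypothetical stand-in types rather than the provider's:

package main

import (
    "fmt"
    "sort"
)

type apiDisk struct{ ID uint64 }

func main() {
    apiDisks := []apiDisk{{ID: 30}, {ID: 10}, {ID: 20}}
    // "disks" blocks from state, assumed to be in ascending disk-ID order.
    blocks := []map[string]interface{}{
        {"disk_id": 10, "permanently": true},
        {"disk_id": 20, "permanently": false},
        {"disk_id": 30, "permanently": true},
    }

    // Sorting the API list by ID makes index i of apiDisks correspond to
    // index i of blocks, which is what indexDataDisks relies on.
    sort.Slice(apiDisks, func(i, j int) bool { return apiDisks[i].ID < apiDisks[j].ID })

    for i, d := range apiDisks {
        fmt.Println(d.ID, "->", blocks[i]["permanently"])
    }
}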
// getPermanentlyByDiskID gets the permanently value of a specific disk (by diskId) from the current disks state
func getPermanentlyByDiskID(d *schema.ResourceData, diskId uint64) bool {
    disks := d.Get("disks").([]interface{})

    for _, diskItem := range disks {
        disk := diskItem.(map[string]interface{})
        if uint64(disk["disk_id"].(int)) == diskId {
            return disk["permanently"].(bool)
        }
    }

    log.Infof("getPermanentlyByDiskID: disk with id %d not found in state", diskId)
    return false
}

func findInExtraDisks(diskId uint, extraDisks []interface{}) bool {
    for _, ExtraDisk := range extraDisks {
        if diskId == uint(ExtraDisk.(int)) {
@@ -262,7 +266,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
            "arch": computeItem.Arch,
            "cd_image_id": computeItem.CdImageId,
            "boot_order": computeItem.BootOrder,
            "boot_disk_size": computeItem.BootDiskSize,
            "bootdisk_size": computeItem.BootDiskSize,
            "clone_reference": computeItem.CloneReference,
            "clones": computeItem.Clones,
            "computeci_id": computeItem.ComputeCIID,
@@ -278,7 +282,9 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
            "driver": computeItem.Driver,
            "gid": computeItem.GID,
            "guid": computeItem.GUID,
            "hp_backed": computeItem.HPBacked,
            "compute_id": computeItem.ID,
            "cpu_pin": computeItem.CPUPin,
            "image_id": computeItem.ImageID,
            "interfaces": flattenInterfaces(computeItem.Interfaces),
            "lock_status": computeItem.LockStatus,
@@ -288,12 +294,15 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
            "milestones": computeItem.Milestones,
            "name": computeItem.Name,
            "need_reboot": computeItem.NeedReboot,
            "numa_affinity": computeItem.NumaAffinity,
            "numa_node_id": computeItem.NumaNodeId,
            "os_users": flattenOSUsers(computeItem.OSUsers),
            "pinned": computeItem.Pinned,
            "ram": computeItem.RAM,
            "reference_id": computeItem.ReferenceID,
            "registered": computeItem.Registered,
            "res_name": computeItem.ResName,
            "reserved_node_cpus": computeItem.ReservedNodeCpus,
            "rg_id": computeItem.RGID,
            "rg_name": computeItem.RGName,
            "snap_sets": flattenSnapSets(computeItem.SnapSets),
@@ -503,23 +512,32 @@ func flattenPCIDevice(deviceList []compute.ItemPCIDevice) []map[string]interface
    return res
}

func flattenVGPU(m []interface{}) []string {
    var output []string
    for _, item := range m {
        switch d := item.(type) {
        case string:
            output = append(output, d)
        case int:
            output = append(output, strconv.Itoa(d))
        case int64:
            output = append(output, strconv.FormatInt(d, 10))
        case float64:
            output = append(output, strconv.FormatInt(int64(d), 10))
        default:
            output = append(output, "")
func flattenVGPU(vgpuList []compute.ItemVGPU) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(vgpuList))
    for _, dev := range vgpuList {
        temp := map[string]interface{}{
            "account_id": dev.AccountID,
            "created_time": dev.CreatedTime,
            "deleted_time": dev.DeletedTime,
            "gid": dev.GID,
            "guid": dev.GUID,
            "vgpu_id": dev.ID,
            "last_claimed_by": dev.LastClaimedBy,
            "last_update_time": dev.LastUpdateTime,
            "mode": dev.Mode,
            "pci_slot": dev.PCISlot,
            "pgpuid": dev.PGPUID,
            "profile_id": dev.ProfileID,
            "ram": dev.RAM,
            "reference_id": dev.ReferenceID,
            "rg_id": dev.RGID,
            "status": dev.Status,
            "type": dev.Type,
            "vm_id": dev.VMID,
        }
        res = append(res, temp)
    }
    return output
    return res
}

func flattenNodes(m []interface{}) []string {
@@ -553,6 +571,8 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
    customFields, _ := json.Marshal(compFacts.CustomFields)
    devices, _ := json.Marshal(compFacts.Devices)
    userData, _ := json.Marshal(compFacts.Userdata)
    // general fields setting
    d.SetId(fmt.Sprintf("%d", compFacts.ID))
    d.Set("account_id", compFacts.AccountID)
    d.Set("account_name", compFacts.AccountName)
    d.Set("acl", flattenListACLInterface(compFacts.ACL))
@@ -562,11 +582,11 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
    d.Set("anti_affinity_rules", flattenAffinityRules(compFacts.AntiAffinityRules))
    d.Set("arch", compFacts.Arch)
    d.Set("boot_order", compFacts.BootOrder)
    d.Set("boot_disk_size", compFacts.BootDiskSize)
    d.Set("cd_image_id", compFacts.CdImageId)
    d.Set("clone_reference", compFacts.CloneReference)
    d.Set("clones", compFacts.Clones)
    d.Set("computeci_id", compFacts.ComputeCIID)
    d.Set("cpu_pin", compFacts.CPUPin)
    d.Set("cpus", compFacts.CPUs)
    d.Set("created_by", compFacts.CreatedBy)
    d.Set("created_time", compFacts.CreatedTime)
@@ -579,7 +599,9 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
    d.Set("driver", compFacts.Driver)
    d.Set("gid", compFacts.GID)
    d.Set("guid", compFacts.GUID)
    d.Set("hp_backed", compFacts.HPBacked)
    d.Set("image_id", compFacts.ImageID)
    d.Set("image_name", compFacts.ImageName)
    d.Set("interfaces", flattenInterfaces(compFacts.Interfaces))
    d.Set("lock_status", compFacts.LockStatus)
    d.Set("manager_id", compFacts.ManagerID)
@@ -587,13 +609,21 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
    d.Set("migrationjob", compFacts.MigrationJob)
    d.Set("milestones", compFacts.Milestones)
    d.Set("name", compFacts.Name)
    d.Set("natable_vins_id", compFacts.NatableVINSID)
    d.Set("natable_vins_ip", compFacts.NatableVINSIP)
    d.Set("natable_vins_name", compFacts.NatableVINSName)
    d.Set("natable_vins_network", compFacts.NatableVINSNetwork)
    d.Set("natable_vins_network_name", compFacts.NatableVINSNetworkName)
    d.Set("need_reboot", compFacts.NeedReboot)
    d.Set("numa_affinity", compFacts.NumaAffinity)
    d.Set("numa_node_id", compFacts.NumaNodeId)
    d.Set("os_users", flattenOSUsers(compFacts.OSUsers))
    d.Set("pinned", compFacts.Pinned)
    d.Set("ram", compFacts.RAM)
    d.Set("reference_id", compFacts.ReferenceID)
    d.Set("registered", compFacts.Registered)
    d.Set("res_name", compFacts.ResName)
    d.Set("reserved_node_cpus", compFacts.ReservedNodeCpus)
    d.Set("rg_id", compFacts.RGID)
    d.Set("rg_name", compFacts.RGName)
    d.Set("snap_sets", flattenSnapSets(compFacts.SnapSets))
@@ -610,6 +640,15 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
    d.Set("user_managed", compFacts.UserManaged)
    d.Set("vgpus", compFacts.VGPUs)
    d.Set("virtual_image_id", compFacts.VirtualImageID)
    d.Set("virtual_image_name", compFacts.VirtualImageName)
    // extra fields setting
    bootDisk := findBootDisk(compFacts.Disks)
    if bootDisk != nil {
        d.Set("boot_disk_size", bootDisk.SizeMax)
        d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations
        d.Set("sep_id", bootDisk.SEPID)
        d.Set("pool", bootDisk.Pool)
    }

    return nil
}
@@ -669,6 +708,7 @@ func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
            "purge_attempts": disk.PurgeAttempts,
            "present_to": disk.PresentTo,
            "purge_time": disk.PurgeTime,
            "replication": flattenDiskReplication(disk.Replication),
            "reality_device_number": disk.RealityDeviceNumber,
            "reference_id": disk.ReferenceID,
            "res_id": disk.ResID,
@@ -689,6 +729,20 @@ func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
    return res
}

func flattenDiskReplication(rep compute.ItemReplication) []map[string]interface{} {
    res := []map[string]interface{}{
        {
            "disk_id": rep.DiskID,
            "pool_id": rep.PoolID,
            "role": rep.Role,
            "self_volume_id": rep.SelfVolumeID,
            "storage_id": rep.StorageID,
            "volume_id": rep.VolumeID,
        },
    }
    return res
}

func flattenIOTune(iot compute.IOTune) []map[string]interface{} {
    res := make([]map[string]interface{}, 0)
    temp := map[string]interface{}{

@@ -2,6 +2,7 @@ package kvmvm

import (
    "context"
    "errors"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -25,12 +26,18 @@ func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *contro
        errs = append(errs, netErrs...)
    }

    if disks, ok := d.GetOk("disks"); ok {
        if err := ic.IsMoreThanOneDisksTypeB(ctx, disks); err != nil {
            errs = append(errs, err)
        }
    }

    return dc.ErrorsToDiagnostics(errs)
}

func existNetworks(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) []error {
    var errs []error
    var vinsIds, extNetIds []uint64
    var vinsIds, extNetIds, vfpoolIds []uint64

    networksIface, ok := d.GetOk("network")
    if !ok {
@@ -46,6 +53,11 @@ func existNetworks(ctx context.Context, d *schema.ResourceData, c *controller.Co
            vinsIds = append(vinsIds, uint64(network["net_id"].(int)))
        case "EXTNET":
            extNetIds = append(extNetIds, uint64(network["net_id"].(int)))
        case "VFNIC":
            if d.Get("driver").(string) == "KVM_PPC" {
                errs = append(errs, errors.New("'VFNIC' net_type is not allowed for driver 'KVM_PPC'"))
            }
            vfpoolIds = append(vfpoolIds, uint64(network["net_id"].(int)))
        default:
            continue
        }
@@ -59,5 +71,9 @@ func existNetworks(ctx context.Context, d *schema.ResourceData, c *controller.Co
        errs = append(errs, extNetErrs...)
    }

    if vfpoolErrs := ic.ExistVFPools(ctx, vfpoolIds, c); vfpoolErrs != nil {
        errs = append(errs, vfpoolErrs...)
    }

    return errs
}

@@ -49,9 +49,6 @@ import (
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)

// DiskKey is a custom string type used as the context key for a Disk ID
type DiskKey string

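DiskKey backed the old flow for carrying each disk's permanently flag from Create/Update into Read via the context; several hunks in this diff retire it in favor of reading the disks blocks directly. For reference, a minimal sketch of the typed-context-key pattern it used:

package main

import (
    "context"
    "fmt"
    "strconv"
)

// A dedicated key type keeps per-disk values from colliding with
// context keys defined by other packages.
type DiskKey string

func main() {
    ctx := context.Background()

    // Stash a disk's "permanently" flag under its ID, as the old
    // Create/Update paths in this diff did.
    diskID := 42
    ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(diskID)), true)

    // Readers type-assert the value back; ok is false when nothing was stored.
    permanently, ok := ctx.Value(DiskKey(strconv.Itoa(diskID))).(bool)
    fmt.Println(permanently, ok) // true true
}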
func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))
    c := m.(*controller.ControllerCfg)
@@ -114,6 +111,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
    interfacesPPC := make([]kvmppc.Interface, 0)
    for _, elem := range ns {
        netInterfaceVal := elem.(map[string]interface{})

        reqInterface := kvmx86.Interface{
            NetType: netInterfaceVal["net_type"].(string),
            NetID: uint64(netInterfaceVal["net_id"].(int)),
@@ -147,6 +145,57 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
        }
    }

    if disks, ok := d.GetOk("disks"); ok {
        disksX86 := make([]kvmx86.DataDisk, 0)
        disksPPC := make([]kvmppc.DataDisk, 0)

        for _, elem := range disks.([]interface{}) {
            diskVal := elem.(map[string]interface{})
            reqDataDisk := kvmx86.DataDisk{
                DiskName: diskVal["disk_name"].(string),
                Size: uint64(diskVal["size"].(int)),
            }
            if sepId, ok := diskVal["sep_id"]; ok {
                reqDataDisk.SepID = uint64(sepId.(int))
            }
            if pool, ok := diskVal["pool"]; ok {
                reqDataDisk.Pool = pool.(string)
            }
            if desc, ok := diskVal["desc"]; ok {
                reqDataDisk.Description = desc.(string)
            }
            if imageID, ok := diskVal["image_id"]; ok {
                reqDataDisk.ImageID = uint64(imageID.(int))
            }
            disksX86 = append(disksX86, reqDataDisk)
        }

        createReqX86.DataDisks = disksX86

        for _, elem := range disks.([]interface{}) {
            diskVal := elem.(map[string]interface{})
            reqDataDisk := kvmppc.DataDisk{
                DiskName: diskVal["disk_name"].(string),
                Size: uint64(diskVal["size"].(int)),
            }
            if sepId, ok := diskVal["sep_id"]; ok {
                reqDataDisk.SepID = uint64(sepId.(int))
            }
            if pool, ok := diskVal["pool"]; ok {
                reqDataDisk.Pool = pool.(string)
            }
            if desc, ok := diskVal["desc"]; ok {
                reqDataDisk.Description = desc.(string)
            }
            if imageID, ok := diskVal["image_id"]; ok {
                reqDataDisk.ImageID = uint64(imageID.(int))
            }
            disksPPC = append(disksPPC, reqDataDisk)
        }

        createReqPPC.DataDisks = disksPPC
    }

    if cloudInit, ok := d.GetOk("cloud_init"); ok {
        userdata := cloudInit.(string)
        if userdata != "" && userdata != "applied" {
@@ -177,7 +226,13 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
    createReqX86.Name = d.Get("name").(string)
    createReqX86.CPU = uint64(d.Get("cpu").(int))
    createReqX86.RAM = uint64(d.Get("ram").(int))
    createReqX86.ImageID = uint64(d.Get("image_id").(int))

    if image, ok := d.GetOk("image_id"); ok {
        createReqX86.ImageID = uint64(image.(int))
    }
    if withoutBootDisk, ok := d.GetOk("without_boot_disk"); ok {
        createReqX86.WithoutBootDisk = withoutBootDisk.(bool)
    }

    createReqX86.Driver = driver

@@ -191,6 +246,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
        createReqX86.CustomField = val
    }

    if numaAffinity, ok := d.GetOk("numa_affinity"); ok {
        createReqX86.NumaAffinity = numaAffinity.(string)
    }
    createReqX86.CPUPin = d.Get("cpu_pin").(bool)
    createReqX86.HPBacked = d.Get("hp_backed").(bool)

    log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
    apiResp, err := c.CloudBroker().KVMX86().Create(ctx, createReqX86)
    if err != nil {
@@ -260,6 +321,9 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
        }
    } else if ok && !start.(bool) {
        req := compute.StopRequest{ComputeID: computeId}
        if depresent, ok := d.Get("depresent").(bool); ok {
            req.Depresent = depresent
        }
        log.Debugf("resourceComputeCreate: stopping Compute ID %d after completing its resource configuration", computeId)
        if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil {
            warnings.Add(err)
@@ -280,45 +344,6 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
        }
    }

    if disks, ok := d.GetOk("disks"); ok {
        log.Debugf("resourceComputeCreate: Create disks on ComputeID: %d", computeId)
        addedDisks := disks.([]interface{})
        if len(addedDisks) > 0 {
            for _, disk := range addedDisks {
                diskConv := disk.(map[string]interface{})
                req := compute.DiskAddRequest{
                    ComputeID: computeId,
                    DiskName: diskConv["disk_name"].(string),
                    Size: uint64(diskConv["size"].(int)),
                }

                if uint64(diskConv["sep_id"].(int)) != 0 {
                    req.SepID = uint64(diskConv["sep_id"].(int))
                }
                if diskConv["disk_type"].(string) != "" {
                    req.DiskType = diskConv["disk_type"].(string)
                }
                if diskConv["pool"].(string) != "" {
                    req.Pool = diskConv["pool"].(string)
                }
                if diskConv["desc"].(string) != "" {
                    req.Description = diskConv["desc"].(string)
                }
                if diskConv["image_id"].(int) != 0 {
                    req.ImageID = uint64(diskConv["image_id"].(int))
                }

                diskId, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
                if err != nil {
                    cleanup = true
                    return diag.FromErr(err)
                }

                ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskId))), diskConv["permanently"].(bool))
            }
        }
    }

    if ars, ok := d.GetOk("affinity_rules"); ok {
        log.Debugf("resourceComputeCreate: Create affinity rules on ComputeID: %d", computeId)
        addedAR := ars.([]interface{})
@@ -393,9 +418,11 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
            req := compute.PFWAddRequest{
                ComputeID: computeId,
                PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
                LocalBasePort: uint64(pfwItem["local_port"].(int)),
                Proto: pfwItem["proto"].(string),
            }
            if pfwItem["local_port"].(int) != 0 {
                req.LocalBasePort = uint64(pfwItem["local_port"].(int))
            }
            if int64(pfwItem["public_port_end"].(int)) != 0 {
                req.PublicPortEnd = int64(pfwItem["public_port_end"].(int))
            }
@@ -494,6 +521,13 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
            warnings.Add(err)
        }
    }

        if _, ok := d.GetOk("disks"); ok {
            err := utilityComputeCreatePresentDisk(ctx, d, m)
            if err != nil {
                warnings.Add(err)
            }
        }
    }

    log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", computeId, d.Get("name").(string))
@@ -560,7 +594,7 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac

    d.SetId(strconv.FormatUint(computeRec.ID, 10))

    if err = flattenCompute(ctx, d, computeRec); err != nil {
    if err = flattenCompute(d, computeRec); err != nil {
        return diag.FromErr(err)
    }

@@ -645,7 +679,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
        }
    }

    if d.HasChanges("description", "name") {
    if d.HasChanges("description", "name", "numa_affinity", "cpu_pin", "hp_backed") {
        if err := utilityComputeUpdate(ctx, d, m); err != nil {
            return diag.FromErr(err)
        }

File diff suppressed because it is too large
@@ -33,7 +33,9 @@ package kvmvm

import (
    "context"
    "errors"
    "fmt"
    "sort"
    "strconv"
    "strings"

@@ -98,6 +100,9 @@ func utilityComputeStarted(ctx context.Context, d *schema.ResourceData, m interf
    if force, ok := d.Get("force_stop").(bool); ok {
        req.Force = force
    }
    if depresent, ok := d.Get("depresent").(bool); ok {
        req.Depresent = depresent
    }
    if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil {
        return err
    }
@@ -193,20 +198,20 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
    addedDisks := make([]interface{}, 0)
    resizedDisks := make([]interface{}, 0)
    renamedDisks := make([]interface{}, 0)

    // save permanently in disks based on disk_id to context
    for _, diskItemInterface := range d.Get("disks").([]interface{}) {
        diskItem := diskItemInterface.(map[string]interface{})
        diskId := diskItem["disk_id"].(int)
        permanently := diskItem["permanently"].(bool)
        ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(diskId)), permanently)
    }
    presentNewDisks := make([]interface{}, 0)
    presentOldDisks := make([]interface{}, 0)

    oldDisks, newDisks := d.GetChange("disks")
    oldConv := oldDisks.([]interface{})
    newConv := newDisks.([]interface{})

    for _, el := range oldConv {
        changeNodes, newEl := isChangeNodesDisk(newConv, el)
        if changeNodes {
            presentNewDisks = append(presentNewDisks, newEl)
            presentOldDisks = append(presentOldDisks, el)
        }
        // !isRenameDisk(newConv, el) && !isResizeDisk(newConv, el) are required in case two or more disks are being created and their disk_id is the same (=0)
        if !isContainsDisk(newConv, el) && !isRenameDisk(newConv, el) && !isResizeDisk(newConv, el) {
            flag := false
            extraDisks := d.Get("extra_disks").(*schema.Set).List()
@@ -246,6 +251,10 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
                Force: false,
            }

            if depresent, ok := d.Get("depresent").(bool); ok {
                stopReq.Depresent = depresent
            }

            _, err := c.CloudBroker().Compute().Stop(ctx, stopReq)
            if err != nil {
                return err
@@ -304,12 +313,30 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
            if diskConv["image_id"].(int) != 0 {
                req.ImageID = uint64(diskConv["image_id"].(int))
            }
            diskId, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
            diskID, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
            if err != nil {
                return err
            }
            if nodeIDs, ok := diskConv["node_ids"]; ok {
                presentIDs := nodeIDs.(*schema.Set).List()
                if len(presentIDs) > 0 {
                    log.Debugf("resourceComputeUpdate: start presenting new disk ID:%d to nodes", diskID)
                }
                for _, presentID := range presentIDs {
                    nodeID := uint64(presentID.(int))
                    req := disks.PresentRequest{
                        DiskID: diskID,
                        NodeID: nodeID,
                    }
                    _, err := c.CloudBroker().Disks().Present(ctx, req)
                    if err != nil {
                        return err
                    }
                }
            }
            if err != nil {
                return err
            }

            ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskId))), diskConv["permanently"].(bool))
        }
    }

@@ -348,6 +375,43 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
        }
    }

    for i := range presentNewDisks {
        newDisk := presentNewDisks[i].(map[string]interface{})
        oldDisk := presentOldDisks[i].(map[string]interface{})
        newArr := newDisk["node_ids"]
        oldArr := oldDisk["node_ids"]
        diskID := uint64(newDisk["disk_id"].(int))
        presentIDs := (newArr.(*schema.Set).Difference(oldArr.(*schema.Set))).List()
        depresentIDs := (oldArr.(*schema.Set).Difference(newArr.(*schema.Set))).List()
        for _, presentID := range presentIDs {
            nodeID := uint64(presentID.(int))

            req := disks.PresentRequest{
                DiskID: diskID,
                NodeID: nodeID,
            }
            log.Debugf("resourceComputeUpdate: start presenting disk ID:%d to node %d", req.DiskID, req.NodeID)
            _, err := c.CloudBroker().Disks().Present(ctx, req)
            if err != nil {
                return err
            }
        }

        for _, depresentID := range depresentIDs {
            nodeID := uint64(depresentID.(int))

            req := disks.DepresentRequest{
                DiskID: diskID,
                NodeID: nodeID,
            }
            log.Debugf("resourceComputeUpdate: start depresenting disk ID:%d from node %d", req.DiskID, req.NodeID)
            _, err := c.CloudBroker().Disks().Depresent(ctx, req)
            if err != nil {
                return err
            }
        }
    }

    return nil
}

@@ -398,6 +462,11 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa
        ComputeID: computeId,
        Force: false,
    }

    if depresent, ok := d.Get("depresent").(bool); ok {
        stopReq.Depresent = depresent
    }

    _, err := c.CloudBroker().Compute().Stop(ctx, stopReq)
    if err != nil {
        return err
@@ -481,7 +550,7 @@ func findBootDisk(disks compute.ListDisks) *compute.ItemDisk {
            return &disk
        }
    }
    return nil
    return &compute.ItemDisk{}
}

func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool {
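With findBootDisk now returning an empty compute.ItemDisk instead of nil, callers such as utilityComputeCreatePresentDisk later in this diff can read bootDisk.ID without a nil check; a missing boot disk surfaces as a zero ID rather than a panic. A minimal sketch of the sentinel pattern, with itemDisk standing in for the SDK type:

package main

import "fmt"

type itemDisk struct {
    ID   uint64
    Type string
}

func findBoot(disks []itemDisk) *itemDisk {
    for _, d := range disks {
        if d.Type == "B" {
            d := d
            return &d
        }
    }
    // Returning an empty struct lets callers read fields unconditionally;
    // a missing boot disk shows up as the zero ID rather than a nil dereference.
    return &itemDisk{}
}

func main() {
    boot := findBoot([]itemDisk{{ID: 7, Type: "D"}})
    fmt.Println(boot.ID) // 0: no boot disk, but no panic either
}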
@@ -560,8 +629,7 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
    needStart := false

    if d.Get("network").(*schema.Set).Len() == 1 || old_set.(*schema.Set).Len() < 1 {
        computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
        if err := utilityComputeStop(ctx, computeId, m); err != nil {
        if err := utilityComputeStop(ctx, d, m); err != nil {
            apiErrCount++
            lastSavedError = err
        }
@@ -624,11 +692,47 @@ func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interfa
    if d.HasChange("description") {
        req.Description = d.Get("description").(string)
    }
    if d.HasChange("numa_affinity") {
        req.NumaAffinity = d.Get("numa_affinity").(string)
    }
    if d.HasChange("cpu_pin") {
        req.CPUPin = d.Get("cpu_pin").(bool)
    }
    if d.HasChange("hp_backed") {
        req.HPBacked = d.Get("hp_backed").(bool)
    }

    // Nota bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for a compute in STARTED tech status.
    // If STARTED, we need to stop it before the update
    var isStopRequired bool
    if d.HasChanges("numa_affinity", "cpu_pin", "hp_backed") && d.Get("started").(bool) {
        isStopRequired = true
    }
    if isStopRequired {
        stopReq := compute.StopRequest{
            ComputeID: computeId,
            Force: false,
        }
        if depresent, ok := d.Get("depresent").(bool); ok {
            stopReq.Depresent = depresent
        }
        if _, err := c.CloudBroker().Compute().Stop(ctx, stopReq); err != nil {
            return err
        }
    }

    // perform the update
    if _, err := c.CloudBroker().Compute().Update(ctx, req); err != nil {
        return err
    }

    // If the compute used to be STARTED, we need to start it again after the update
    if isStopRequired {
        if _, err := c.CloudBroker().Compute().Start(ctx, compute.StartRequest{ComputeID: computeId}); err != nil {
            return err
        }
    }

    return nil
}

@@ -865,11 +969,12 @@ func utilityComputeUpdatePFW(ctx context.Context, d *schema.ResourceData, m inte
            req := compute.PFWDelRequest{
                ComputeID: computeId,
                PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
                LocalBasePort: uint64(pfwItem["local_port"].(int)),
                Proto: pfwItem["proto"].(string),
                RuleID: uint64(pfwItem["rule_id"].(int)),
            }

            if pfwItem["local_port"].(int) != 0 {
                req.LocalBasePort = uint64(pfwItem["local_port"].(int))
            }
            if pfwItem["public_port_end"].(int) == -1 {
                req.PublicPortEnd = req.PublicPortStart
            } else {
@@ -894,10 +999,11 @@ func utilityComputeUpdatePFW(ctx context.Context, d *schema.ResourceData, m inte
                ComputeID: computeId,
                PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
                PublicPortEnd: int64(pfwItem["public_port_end"].(int)),
                LocalBasePort: uint64(pfwItem["local_port"].(int)),
                Proto: pfwItem["proto"].(string),
            }

            if pfwItem["local_port"].(int) != 0 {
                req.LocalBasePort = uint64(pfwItem["local_port"].(int))
            }
            if pfwItem["reason"].(string) != "" {
                req.Reason = pfwItem["reason"].(string)
            }
@@ -1033,6 +1139,9 @@ func utilityComputeRollback(ctx context.Context, d *schema.ResourceData, m inter
        ComputeID: computeId,
        Force: false,
    }
    if depresent, ok := d.Get("depresent").(bool); ok {
        req.Depresent = depresent
    }

    _, err := c.CloudBroker().Compute().Stop(ctx, req)
    if err != nil {
@@ -1194,6 +1303,9 @@ func utilityComputeUpdateImage(ctx context.Context, d *schema.ResourceData, m in
        ComputeID: computeId,
        Force: false,
    }
    if depresent, ok := d.Get("depresent").(bool); ok {
        stopReq.Depresent = depresent
    }

    _, err := c.CloudBroker().Compute().Stop(ctx, stopReq)
    if err != nil {
@@ -1261,14 +1373,17 @@ func utilityComputeUpdateCustomFields(ctx context.Context, d *schema.ResourceDat
    return nil
}

func utilityComputeStop(ctx context.Context, computeID uint64, m interface{}) error {
func utilityComputeStop(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)
    req := compute.StopRequest{
        ComputeID: computeID,
        Force: true,
    }
    req.ComputeID = uint64(d.Get("compute_id").(int))
    if depresent, ok := d.Get("depresent").(bool); ok {
        req.Depresent = depresent
    }

    log.Debugf("utilityComputeNetworksConfigure: stopping compute %d", computeID)
    log.Debugf("utilityComputeStop: stopping compute %d", req.ComputeID)
    _, err := c.CloudBroker().Compute().Stop(ctx, req)
    if err != nil {
        return err
@@ -1288,6 +1403,51 @@ func utilityComputeStart(ctx context.Context, computeID uint64, m interface{}) (
    return 0, nil
}

func utilityComputeCreatePresentDisk(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)
    var errs error
    diskList := d.Get("disks")
    presentArr := make([]interface{}, 0)
    for _, elem := range diskList.([]interface{}) {
        diskVal := elem.(map[string]interface{})
        presentArr = append(presentArr, diskVal["node_ids"])
    }

    computeRec, err := utilityComputeCheckPresence(ctx, d, m)
    if err != nil {
        errs = errors.Join(errs, err)
    }
    bootDisk := findBootDisk(computeRec.Disks)
    computeDisksIDs := getComputeDiskIDs(computeRec.Disks, d.Get("disks").([]interface{}), d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID)

    for i, diskID := range computeDisksIDs {
        if len(presentArr) <= i || presentArr[i] == nil {
            continue
        }
        presentIDs := presentArr[i].(*schema.Set).List()

        if len(presentIDs) > 0 {
            log.Debugf("resourceComputeCreate: start presenting disk ID:%d to nodes", diskID)
        }

        for _, presentID := range presentIDs {
            nodeID := uint64(presentID.(int))

            req := disks.PresentRequest{
                DiskID: diskID.(uint64),
                NodeID: nodeID,
            }

            _, err := c.CloudBroker().Disks().Present(ctx, req)
            if err != nil {
                errs = errors.Join(errs, err)
            }
        }
    }

    return errs
}

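utilityComputeCreatePresentDisk accumulates failures with errors.Join (Go 1.20+) instead of returning on the first one; joining onto the running errs keeps every earlier failure, and a nil result means nothing failed. A small sketch of the pattern:

package main

import (
    "errors"
    "fmt"
)

func main() {
    var errs error
    for i, fail := range []bool{false, true, true} {
        if fail {
            // Joining with the running errs keeps every earlier failure;
            // errors.Join ignores nil operands.
            errs = errors.Join(errs, fmt.Errorf("step %d failed", i))
        }
    }
    fmt.Println(errs != nil) // true
    fmt.Println(errs)        // "step 1 failed" and "step 2 failed", newline-separated
}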
func isResizeDisk(els []interface{}, el interface{}) bool {
    for _, elOld := range els {
        elOldConv := elOld.(map[string]interface{})
@@ -1323,6 +1483,26 @@ func isContainsDisk(els []interface{}, el interface{}) bool {
    return false
}

// isChangeNodesDisk takes the slice of new disk values and the current disk value;
// if the disk's nodes need to change, it returns true and the new disk value, otherwise false and nil
func isChangeNodesDisk(els []interface{}, elOld interface{}) (bool, interface{}) {
    for _, elNew := range els {
        elNewConv := elNew.(map[string]interface{})
        elOldConv := elOld.(map[string]interface{})
        if elOldConv["disk_id"].(int) == elNewConv["disk_id"].(int) {
            newArr := elNewConv["node_ids"]
            oldArr := elOldConv["node_ids"]
            presentID := (newArr.(*schema.Set).Difference(oldArr.(*schema.Set))).List()
            depresentID := (oldArr.(*schema.Set).Difference(newArr.(*schema.Set))).List()
            if len(presentID) > 0 || len(depresentID) > 0 {
                return true, elNew
            }
            return false, nil
        }
    }
    return false, nil
}

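The present/depresent split here and in utilityComputeUpdateDisks falls out of two set differences: nodes only in the new set get the disk presented, nodes only in the old set get it depresented. A minimal sketch with the SDK's HashInt sets:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func main() {
    oldNodes := schema.NewSet(schema.HashInt, []interface{}{1, 2, 3})
    newNodes := schema.NewSet(schema.HashInt, []interface{}{2, 3, 4})

    // Nodes present only in the new set: the disk must be presented there.
    present := newNodes.Difference(oldNodes).List() // [4]
    // Nodes present only in the old set: the disk must be depresented.
    depresent := oldNodes.Difference(newNodes).List() // [1]

    fmt.Println(present, depresent)
}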
func isContainsAR(els []interface{}, el interface{}) bool {
    for _, elOld := range els {
        elOldConv := elOld.(map[string]interface{})
@@ -1337,3 +1517,25 @@ func isContainsAR(els []interface{}, el interface{}) bool {
    }
    return false
}

func getComputeDiskIDs(disksList compute.ListDisks, disksBlocks, extraDisks []interface{}, bootDiskId uint64) []interface{} {
    res := make([]interface{}, 0)

    if len(disksBlocks) == 0 {
        return res
    }

    sort.Slice(disksList, func(i, j int) bool {
        return disksList[i].ID < disksList[j].ID
    })

    for _, disk := range disksList {
        if disk.ID == bootDiskId || findInExtraDisks(uint(disk.ID), extraDisks) { // skip main bootdisk and extraDisks
            continue
        }

        res = append(res, disk.ID)
    }

    return res
}

@@ -78,6 +78,9 @@ func utilityDataComputeListCheckPresence(ctx context.Context, d *schema.Resource
    if includeDeleted, ok := d.GetOk("includedeleted"); ok {
        req.IncludeDeleted = includeDeleted.(bool)
    }
    if sortBy, ok := d.GetOk("sort_by"); ok {
        req.SortBy = sortBy.(string)
    }
    if page, ok := d.GetOk("page"); ok {
        req.Page = uint64(page.(int))
    }

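These list helpers gate every optional filter behind d.GetOk, which reports ok == false for a field's zero value (empty string, 0, false), so unset filters never reach the request. A small test-style sketch of that behaviour, using the SDK's TestResourceDataRaw helper:

package kvmvm

import (
    "testing"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func TestGetOkSkipsZeroValues(t *testing.T) {
    s := map[string]*schema.Schema{
        "sort_by": {Type: schema.TypeString, Optional: true},
        "page":    {Type: schema.TypeInt, Optional: true},
    }
    d := schema.TestResourceDataRaw(t, s, map[string]interface{}{"sort_by": "+name"})

    if v, ok := d.GetOk("sort_by"); !ok || v.(string) != "+name" {
        t.Fatalf("expected sort_by to be set")
    }
    // page was never set; GetOk reports ok == false, so the filter is skipped.
    if _, ok := d.GetOk("page"); ok {
        t.Fatalf("expected page to be unset")
    }
}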
@@ -71,6 +71,9 @@ func utilityDataComputeListDeletedCheckPresence(ctx context.Context, d *schema.R
    if extnetId, ok := d.GetOk("extnet_id"); ok {
        req.ExtNetID = uint64(extnetId.(int))
    }
    if sortBy, ok := d.GetOk("sort_by"); ok {
        req.SortBy = sortBy.(string)
    }
    if page, ok := d.GetOk("page"); ok {
        req.Page = uint64(page.(int))
    }

@@ -51,9 +51,15 @@ func utilityComputePCIDeviceListCheckPresence(ctx context.Context, d *schema.Res
    if devId, ok := d.GetOk("device_id"); ok {
        req.DevID = uint64(devId.(int))
    }
    if name, ok := d.GetOk("name"); ok {
        req.Name = name.(string)
    }
    if status, ok := d.GetOk("status"); ok {
        req.Status = status.(string)
    }
    if sortBy, ok := d.GetOk("sort_by"); ok {
        req.SortBy = sortBy.(string)
    }
    if page, ok := d.GetOk("page"); ok {
        req.Page = uint64(page.(int))
    }

@@ -57,6 +57,9 @@ func utilityComputeVGPUListCheckPresence(ctx context.Context, d *schema.Resource
    if includeDeleted, ok := d.GetOk("includedeleted"); ok {
        req.IncludeDeleted = includeDeleted.(bool)
    }
    if sortBy, ok := d.GetOk("sort_by"); ok {
        req.SortBy = sortBy.(string)
    }
    if page, ok := d.GetOk("page"); ok {
        req.Page = uint64(page.(int))
    }