2024-03-26 12:17:33 +03:00
parent f49d9f8860
commit 91ba361af9
97 changed files with 6127 additions and 5997 deletions


@@ -33,6 +33,7 @@ Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/w
 package kvmvm

 import (
+	"context"
 	"encoding/json"
 	"sort"
 	"strconv"
@@ -163,7 +164,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
"anti_affinity_rules": flattenListRules(compute.AntiAffinityRules),
"arch": compute.Architecture,
"boot_order": compute.BootOrder,
"bootdisk_size": compute.BootDiskSize,
"boot_disk_size": compute.BootDiskSize,
"clone_reference": compute.CloneReference,
"clones": compute.Clones,
"computeci_id": compute.ComputeCIID,
@@ -188,6 +189,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
"migrationjob": compute.MigrationJob,
"milestones": compute.Milestones,
"name": compute.Name,
"need_reboot": compute.NeedReboot,
"pinned": compute.Pinned,
"ram": compute.RAM,
"reference_id": compute.ReferenceID,
@@ -237,24 +239,31 @@ func flattenBootDisk(bootDisk *compute.ItemComputeDisk) []map[string]interface{}
 	return res
 }

-func flattenComputeDisksDemo(disksList compute.ListComputeDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
+func flattenComputeDisksDemo(ctx context.Context, d *schema.ResourceData, disksList compute.ListComputeDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
 	res := make([]map[string]interface{}, 0, len(disksList))
 	for _, disk := range disksList {
 		if disk.ID == bootDiskId || findInExtraDisks(uint(disk.ID), extraDisks) { //skip main bootdisk and extraDisks
 			continue
 		}
+		permanently, ok := ctx.Value(DiskKey(strconv.Itoa(int(disk.ID)))).(bool) // get permanently from Create or Update context
+		if !ok {
+			permanently = getPermanentlyByDiskID(d, disk.ID) // get permanently from state when Read is not after Create/Update
+		}
 		temp := map[string]interface{}{
-			"disk_name": disk.Name,
-			"disk_id":   disk.ID,
-			"disk_type": disk.Type,
-			"sep_id":    disk.SepID,
-			"shareable": disk.Shareable,
-			"size_max":  disk.SizeMax,
-			"size_used": disk.SizeUsed,
-			"pool":      disk.Pool,
-			"desc":      disk.Description,
-			"image_id":  disk.ImageID,
-			"size":      disk.SizeMax,
+			"disk_name":   disk.Name,
+			"disk_id":     disk.ID,
+			"disk_type":   disk.Type,
+			"sep_id":      disk.SepID,
+			"shareable":   disk.Shareable,
+			"size_max":    disk.SizeMax,
+			"size_used":   disk.SizeUsed,
+			"pool":        disk.Pool,
+			"desc":        disk.Description,
+			"image_id":    disk.ImageID,
+			"size":        disk.SizeMax,
+			"permanently": permanently,
 		}
 		res = append(res, temp)
 	}
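Note on the permanently handling added above: flattenComputeDisksDemo now resolves each extra disk's permanently flag from the request context first (under DiskKey(diskID), set during Create/Update) and only falls back to the value recorded in state. A minimal sketch of how the Create/Update path is assumed to seed that context follows; seedDiskPermanently is a hypothetical helper written for illustration, and DiskKey is taken to be the string-based context key type already defined elsewhere in this package (its definition is not part of this diff).

// seedDiskPermanently is a hypothetical illustration, not code from this commit.
// It stores each configured disk's "permanently" flag in the context under
// DiskKey(diskID), which flattenComputeDisksDemo reads back above.
// Assumes the imports already present in this file: context, strconv, schema.
func seedDiskPermanently(ctx context.Context, d *schema.ResourceData) context.Context {
	for _, diskItem := range d.Get("disks").([]interface{}) {
		disk := diskItem.(map[string]interface{})
		key := DiskKey(strconv.Itoa(disk["disk_id"].(int)))
		ctx = context.WithValue(ctx, key, disk["permanently"].(bool))
	}
	return ctx
}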
@@ -265,6 +274,21 @@ func flattenComputeDisksDemo(disksList compute.ListComputeDisks, extraDisks []in
 	return res
 }

+// getPermanentlyByDiskID gets permanently value of specific disk (by diskId) from disks current state
+func getPermanentlyByDiskID(d *schema.ResourceData, diskId uint64) bool {
+	disks := d.Get("disks").([]interface{})
+	for _, diskItem := range disks {
+		disk := diskItem.(map[string]interface{})
+		if uint64(disk["disk_id"].(int)) == diskId {
+			return disk["permanently"].(bool)
+		}
+	}
+	log.Infof("getPermanentlyByDiskID: disk with id %d not found in state", diskId)
+	return false
+}
+
 func flattenNetwork(interfaces compute.ListInterfaces) []map[string]interface{} {
 	res := make([]map[string]interface{}, 0, len(interfaces))
@@ -289,7 +313,7 @@ func findBootDisk(disks compute.ListComputeDisks) *compute.ItemComputeDisk {
 	return nil
 }

-func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) error {
+func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec compute.RecordCompute) error {
 	// This function expects that compFacts string contains response from API compute/get,
 	// i.e. detailed information about compute instance.
 	//
@@ -308,6 +332,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
d.Set("affinity_weight", computeRec.AffinityWeight)
d.Set("arch", computeRec.Architecture)
d.Set("boot_order", computeRec.BootOrder)
// we intentionally use the SizeMax field, do not change it until the BootDiskSize field is fixed on the platform
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("boot_disk", flattenBootDisk(bootDisk))
d.Set("boot_disk_id", bootDisk.ID)
@@ -323,7 +348,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
d.Set("deleted_time", computeRec.DeletedTime)
d.Set("description", computeRec.Description)
d.Set("devices", string(devices))
err := d.Set("disks", flattenComputeDisksDemo(computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
err := d.Set("disks", flattenComputeDisksDemo(ctx, d, computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
if err != nil {
return err
}
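Because flattenCompute now takes a context.Context, its callers must thread their own ctx and d through so that values stored with context.WithValue during Create/Update stay visible when the disks are flattened. A rough sketch of how the resource Read function is assumed to call it is below; resourceComputeRead, utilityComputeCheckPresence and their signatures are assumptions for illustration and are not taken from this diff.

// Hypothetical caller sketch, assuming the terraform-plugin-sdk/v2 diag package
// and a helper that fetches the compute record from the API.
func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	computeRec, err := utilityComputeCheckPresence(ctx, d, m) // assumed helper returning *compute.RecordCompute
	if err != nil {
		return diag.FromErr(err)
	}
	if err := flattenCompute(ctx, d, *computeRec); err != nil { // ctx passed through unchanged
		return diag.FromErr(err)
	}
	return nil
}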
@@ -344,6 +369,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
d.Set("migrationjob", computeRec.MigrationJob)
d.Set("milestones", computeRec.Milestones)
d.Set("name", computeRec.Name)
d.Set("need_reboot", computeRec.NeedReboot)
d.Set("natable_vins_id", computeRec.NatableVINSID)
d.Set("natable_vins_ip", computeRec.NatableVINSIP)
d.Set("natable_vins_name", computeRec.NatableVINSName)
@@ -537,7 +563,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
d.Set("anti_affinity_rules", flattenListRules(computeRec.AntiAffinityRules))
d.Set("arch", computeRec.Architecture)
d.Set("boot_order", computeRec.BootOrder)
d.Set("bootdisk_size", computeRec.BootDiskSize)
d.Set("boot_disk_size", computeRec.BootDiskSize)
d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones)
d.Set("computeci_id", computeRec.ComputeCIID)
@@ -563,6 +589,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
d.Set("migrationjob", computeRec.MigrationJob)
d.Set("milestones", computeRec.Milestones)
d.Set("name", computeRec.Name)
d.Set("need_reboot", computeRec.NeedReboot)
d.Set("natable_vins_id", computeRec.NatableVINSID)
d.Set("natable_vins_ip", computeRec.NatableVINSIP)
d.Set("natable_vins_name", computeRec.NatableVINSName)