commit 91ba361af9
parent f49d9f8860
2024-03-26 12:17:33 +03:00
97 changed files with 6127 additions and 5997 deletions

View File

@@ -615,7 +615,7 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"bootdisk_size": {
"boot_disk_size": {
Type: schema.TypeInt,
Computed: true,
},
@@ -720,6 +720,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"need_reboot" : {
Type: schema.TypeBool,
Computed: true,
},
"natable_vins_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -124,7 +124,7 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"bootdisk_size": {
"boot_disk_size": {
Type: schema.TypeInt,
Computed: true,
},
@@ -233,6 +233,10 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"need_reboot": {
Type: schema.TypeBool,
Computed: true,
},
"pinned": {
Type: schema.TypeBool,
Computed: true,

View File

@@ -33,6 +33,7 @@ Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/w
package kvmvm
import (
"context"
"encoding/json"
"sort"
"strconv"
@@ -163,7 +164,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
"anti_affinity_rules": flattenListRules(compute.AntiAffinityRules),
"arch": compute.Architecture,
"boot_order": compute.BootOrder,
"bootdisk_size": compute.BootDiskSize,
"boot_disk_size": compute.BootDiskSize,
"clone_reference": compute.CloneReference,
"clones": compute.Clones,
"computeci_id": compute.ComputeCIID,
@@ -188,6 +189,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
"migrationjob": compute.MigrationJob,
"milestones": compute.Milestones,
"name": compute.Name,
"need_reboot": compute.NeedReboot,
"pinned": compute.Pinned,
"ram": compute.RAM,
"reference_id": compute.ReferenceID,
@@ -237,24 +239,31 @@ func flattenBootDisk(bootDisk *compute.ItemComputeDisk) []map[string]interface{}
return res
}
func flattenComputeDisksDemo(disksList compute.ListComputeDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
func flattenComputeDisksDemo(ctx context.Context, d *schema.ResourceData, disksList compute.ListComputeDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(disksList))
for _, disk := range disksList {
if disk.ID == bootDiskId || findInExtraDisks(uint(disk.ID), extraDisks) { //skip main bootdisk and extraDisks
continue
}
permanently, ok := ctx.Value(DiskKey(strconv.Itoa(int(disk.ID)))).(bool) // get permanently from Create or Update context
if !ok {
permanently = getPermanentlyByDiskID(d, disk.ID) // get permanently from state when Read is not after Create/Update
}
temp := map[string]interface{}{
"disk_name": disk.Name,
"disk_id": disk.ID,
"disk_type": disk.Type,
"sep_id": disk.SepID,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"pool": disk.Pool,
"desc": disk.Description,
"image_id": disk.ImageID,
"size": disk.SizeMax,
"disk_name": disk.Name,
"disk_id": disk.ID,
"disk_type": disk.Type,
"sep_id": disk.SepID,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"pool": disk.Pool,
"desc": disk.Description,
"image_id": disk.ImageID,
"size": disk.SizeMax,
"permanently": permanently,
}
res = append(res, temp)
}
@@ -265,6 +274,21 @@ func flattenComputeDisksDemo(disksList compute.ListComputeDisks, extraDisks []in
return res
}
// getPermanentlyByDiskID gets the permanently value of a specific disk (by diskId) from the current disks state
func getPermanentlyByDiskID(d *schema.ResourceData, diskId uint64) bool {
disks := d.Get("disks").([]interface{})
for _, diskItem := range disks {
disk := diskItem.(map[string]interface{})
if uint64(disk["disk_id"].(int)) == diskId {
return disk["permanently"].(bool)
}
}
log.Infof("getPermanentlyByDiskID: disk with id %d not found in state", diskId)
return false
}
func flattenNetwork(interfaces compute.ListInterfaces) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(interfaces))
@@ -289,7 +313,7 @@ func findBootDisk(disks compute.ListComputeDisks) *compute.ItemComputeDisk {
return nil
}
func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) error {
func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec compute.RecordCompute) error {
// This function expects computeRec to contain the response from the compute/get API,
// i.e. detailed information about the compute instance.
//
@@ -308,6 +332,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
d.Set("affinity_weight", computeRec.AffinityWeight)
d.Set("arch", computeRec.Architecture)
d.Set("boot_order", computeRec.BootOrder)
// We intentionally use the SizeMax field; do not change this until the BootDiskSize field is fixed on the platform
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("boot_disk", flattenBootDisk(bootDisk))
d.Set("boot_disk_id", bootDisk.ID)
@@ -323,7 +348,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
d.Set("deleted_time", computeRec.DeletedTime)
d.Set("description", computeRec.Description)
d.Set("devices", string(devices))
err := d.Set("disks", flattenComputeDisksDemo(computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
err := d.Set("disks", flattenComputeDisksDemo(ctx, d, computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
if err != nil {
return err
}
@@ -344,6 +369,7 @@ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) er
d.Set("migrationjob", computeRec.MigrationJob)
d.Set("milestones", computeRec.Milestones)
d.Set("name", computeRec.Name)
d.Set("need_reboot", computeRec.NeedReboot)
d.Set("natable_vins_id", computeRec.NatableVINSID)
d.Set("natable_vins_ip", computeRec.NatableVINSIP)
d.Set("natable_vins_name", computeRec.NatableVINSName)
@@ -537,7 +563,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
d.Set("anti_affinity_rules", flattenListRules(computeRec.AntiAffinityRules))
d.Set("arch", computeRec.Architecture)
d.Set("boot_order", computeRec.BootOrder)
d.Set("bootdisk_size", computeRec.BootDiskSize)
d.Set("boot_disk_size", computeRec.BootDiskSize)
d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones)
d.Set("computeci_id", computeRec.ComputeCIID)
@@ -563,6 +589,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
d.Set("migrationjob", computeRec.MigrationJob)
d.Set("milestones", computeRec.Milestones)
d.Set("name", computeRec.Name)
d.Set("need_reboot", computeRec.NeedReboot)
d.Set("natable_vins_id", computeRec.NatableVINSID)
d.Set("natable_vins_ip", computeRec.NatableVINSIP)
d.Set("natable_vins_name", computeRec.NatableVINSName)

View File

@@ -40,6 +40,7 @@ import (
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmppc"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmx86"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
@@ -47,12 +48,16 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
// DiskKey is a custom string type used as a context key for a disk ID
type DiskKey string
func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))
c := m.(*controller.ControllerCfg)
@@ -296,11 +301,13 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
req.ImageID = uint64(diskConv["image_id"].(int))
}
_, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
diskId, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
if err != nil {
cleanup = true
return diag.FromErr(err)
}
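// stash the permanently flag in the context so the Read that follows Create can flatten it into state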
ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskId))), diskConv["permanently"].(bool))
}
}
}
@@ -580,7 +587,7 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac
}
}
if err = flattenCompute(d, computeRec); err != nil {
if err = flattenCompute(ctx, d, computeRec); err != nil {
return diag.FromErr(err)
}
@@ -753,12 +760,18 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
doUpdate := false
resizeReq := compute.ResizeRequest{
ComputeID: computeRec.ID,
Force: true,
}
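// note: GetOk reports ok == false when force_resize equals its zero value (false), so Force stays false unless the flag is set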
forceResize, ok := d.GetOk("force_resize")
if ok {
resizeReq.Force = forceResize.(bool)
}
warnings := dc.Warnings{}
oldCpu, newCpu := d.GetChange("cpu")
if oldCpu.(int) > newCpu.(int) && !forceResize.(bool) {
return diag.Errorf("Cannot resize compute ID %d: enable 'force_resize' to reduce compute vCPUs", computeRec.ID)
}
if oldCpu.(int) != newCpu.(int) {
resizeReq.CPU = uint64(newCpu.(int))
doUpdate = true
@@ -846,14 +859,23 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
if d.HasChange("disks") {
deletedDisks := make([]interface{}, 0)
addedDisks := make([]interface{}, 0)
updatedDisks := make([]interface{}, 0)
resizedDisks := make([]interface{}, 0)
renamedDisks := make([]interface{}, 0)
// save each disk's permanently value to the context, keyed by disk_id
for _, diskItemInterface := range d.Get("disks").([]interface{}) {
diskItem := diskItemInterface.(map[string]interface{})
diskId := diskItem["disk_id"].(int)
permanently := diskItem["permanently"].(bool)
ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(diskId)), permanently)
}
oldDisks, newDisks := d.GetChange("disks")
oldConv := oldDisks.([]interface{})
newConv := newDisks.([]interface{})
for _, el := range oldConv {
if !isContainsDisk(newConv, el) {
if !isContainsDisk(newConv, el) && !isRenameDisk(newConv, el) && !isResizeDisk(newConv, el) {
flag := false
extraDisks := d.Get("extra_disks").(*schema.Set).List()
delDisk := el.(map[string]interface{})
@@ -877,10 +899,12 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
for _, el := range newConv {
if !isContainsDisk(oldConv, el) {
addedDisks = append(addedDisks, el)
} else {
if isChangeDisk(oldConv, el) {
updatedDisks = append(updatedDisks, el)
}
}
if isResizeDisk(oldConv, el) {
resizedDisks = append(resizedDisks, el)
}
if isRenameDisk(oldConv, el) {
renamedDisks = append(renamedDisks, el)
}
}
@@ -951,15 +975,17 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
if diskConv["image_id"].(int) != 0 {
req.ImageID = uint64(diskConv["image_id"].(int))
}
_, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
diskId, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
if err != nil {
return diag.FromErr(err)
}
ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskId))), diskConv["permanently"].(bool))
}
}
if len(updatedDisks) > 0 {
for _, disk := range updatedDisks {
if len(resizedDisks) > 0 {
for _, disk := range resizedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
@@ -976,6 +1002,22 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
}
if len(renamedDisks) > 0 {
for _, disk := range renamedDisks {
diskConv := disk.(map[string]interface{})
req := disks.RenameRequest{
DiskID: uint64(diskConv["disk_id"].(int)),
Name: diskConv["disk_name"].(string),
}
_, err := c.CloudAPI().Disks().Rename(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
}
if d.HasChange("started") {
@@ -1499,7 +1541,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
return append(warnings.Get(), resourceComputeRead(ctx, d, m)...)
}
func isChangeDisk(els []interface{}, el interface{}) bool {
func isResizeDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
@@ -1511,11 +1553,23 @@ func isChangeDisk(els []interface{}, el interface{}) bool {
return false
}
func isRenameDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["disk_id"].(int) == elConv["disk_id"].(int) &&
elOldConv["disk_name"].(string) != elConv["disk_name"].(string) {
return true
}
}
return false
}
func isContainsDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["disk_name"].(string) == elConv["disk_name"].(string) {
if elOldConv["disk_id"].(int) == elConv["disk_id"].(int) {
return true
}
}
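
Together, isContainsDisk (now matching by disk_id), isResizeDisk and isRenameDisk let the Update handler split the planned disk list into added, resized and renamed sets; deletions are the old entries that match none of the three. A reduced sketch of that classification over plain structs (the real code compares map[string]interface{} entries, and the size comparison in isResizeDisk is inferred from the surrounding hunks):

package main

import "fmt"

type disk struct {
	ID   int
	Name string
	Size int
}

// classify mirrors the added/resized/renamed split in resourceComputeUpdate.
func classify(oldDisks, newDisks []disk) (added, resized, renamed []disk) {
	byID := make(map[int]disk, len(oldDisks))
	for _, d := range oldDisks {
		byID[d.ID] = d
	}
	for _, d := range newDisks {
		prev, ok := byID[d.ID]
		if !ok {
			added = append(added, d) // no old disk with this ID
			continue
		}
		if prev.Size != d.Size {
			resized = append(resized, d)
		}
		if prev.Name != d.Name {
			renamed = append(renamed, d) // a disk may be both resized and renamed
		}
	}
	return
}

func main() {
	oldDisks := []disk{{1, "data", 10}, {2, "logs", 5}}
	newDisks := []disk{{1, "data", 20}, {2, "audit", 5}, {3, "tmp", 1}}
	fmt.Println(classify(oldDisks, newDisks))
	// [{3 tmp 1}] [{1 data 20}] [{2 audit 5}]
}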
@@ -1736,10 +1790,13 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Description: "Number of CPUs to allocate to this compute instance.",
},
"ram": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute),
Description: "Amount of RAM in MB to allocate to this compute instance.",
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.All(
validation.IntAtLeast(constants.MinRamPerCompute),
validators.DivisibleBy(constants.RAMDivisibility),
),
Description: "Amount of RAM in MB to allocate to this compute instance.",
},
"image_id": {
Type: schema.TypeInt,
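
The ram field above now chains two checks with validation.All: the existing minimum and a divisibility check from the provider's internal validators package, which this diff does not include. A plausible sketch of such a validator, assuming the SDK's SchemaValidateFunc signature and a positive divisor:

package validators

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// DivisibleBy is a sketch only; the real implementation in
// internal/validators may differ. divisor is assumed to be positive.
func DivisibleBy(divisor int) schema.SchemaValidateFunc {
	return func(i interface{}, k string) (warnings []string, errors []error) {
		v, ok := i.(int)
		if !ok {
			errors = append(errors, fmt.Errorf("expected type of %q to be int", k))
			return
		}
		if v%divisor != 0 {
			errors = append(errors, fmt.Errorf("expected %q (%d) to be divisible by %d", k, v, divisor))
		}
		return
	}
}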
@@ -1788,7 +1845,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
Required: true,
Optional: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@@ -1824,7 +1881,7 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
Required: true,
Optional: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@@ -1997,6 +2054,12 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Flag for redeploy compute",
},
"force_resize": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Flag for resize compute",
},
"data_disks": {
Type: schema.TypeString,
Optional: true,
@@ -2135,6 +2198,10 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"need_reboot": {
Type: schema.TypeBool,
Computed: true,
},
"natable_vins_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -234,7 +234,7 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
}
}
needStart := false
if d.Get("network").(*schema.Set).Len() == 1 || old_set.(*schema.Set).Len() < 1 {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
@@ -272,7 +272,7 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
if needStart {
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if numErr, err := utilityComputeStart(ctx, computeId, m); err != nil {
apiErrCount += numErr
lastSavedError = err
}
}
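
utilityComputeNetworksConfigure deliberately keeps going after individual API failures, tallying them in apiErrCount and retaining only lastSavedError for the final report. A condensed illustration of that accumulate-and-continue pattern; the (count, error) return shape is taken from utilityComputeStart in the diff, while the loop and names below are illustrative:

package main

import (
	"errors"
	"fmt"
)

// start stands in for utilityComputeStart: it reports how many API errors
// occurred and the last of them, instead of aborting the whole operation.
func start(id uint64) (int, error) {
	if id%2 == 0 {
		return 1, errors.New("compute start failed")
	}
	return 0, nil
}

func main() {
	apiErrCount := 0
	var lastSavedError error
	for _, id := range []uint64{1, 2, 3, 4} {
		if numErr, err := start(id); err != nil {
			apiErrCount += numErr // keep going; report the tally at the end
			lastSavedError = err
		}
	}
	if apiErrCount > 0 {
		fmt.Printf("%d API errors, last: %v\n", apiErrCount, lastSavedError)
	}
}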