Commit 91ba361af9 (parent f49d9f8860), 2024-03-26 12:17:33 +03:00
97 changed files with 6127 additions and 5997 deletions

View File

@@ -33,6 +33,7 @@ package kvmvm
import (
"context"
"strconv"
// "net/url"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
@@ -49,10 +50,8 @@ func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err)
}
-if err = flattenDataCompute(d, compFacts); err != nil {
-return diag.FromErr(err)
-}
d.SetId(strconv.Itoa(int(compFacts.ID)))
+flattenDataCompute(d, compFacts)
return nil
}

View File

@@ -1,8 +1,8 @@
package kvmvm
import (
"context"
"encoding/json"
"fmt"
"sort"
"strconv"
@@ -12,7 +12,7 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
-func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) error {
+func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec *compute.RecordCompute) error {
log.Debugf("flattenCompute: ID %d, RG ID %d", computeRec.ID, computeRec.RGID)
customFields, _ := json.Marshal(computeRec.CustomFields)
@@ -20,14 +20,6 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
userData, _ := json.Marshal(computeRec.Userdata)
bootDisk := findBootDisk(computeRec.Disks)
//extra fields setting
-if len(computeRec.Disks) > 0 {
-log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(computeRec.Disks))
-if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(computeRec.Disks)); err != nil {
-return err
-}
-}
if len(computeRec.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(computeRec.Interfaces))
if err := d.Set("network", parseComputeInterfacesToNetworks(computeRec.Interfaces)); err != nil {
@@ -44,7 +36,8 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
d.Set("arch", computeRec.Arch)
d.Set("boot_order", computeRec.BootOrder)
d.Set("boot_disk_id", bootDisk.ID)
d.Set("boot_disk_size", computeRec.BootDiskSize)
// we intentionally use the SizeMax field, do not change it until the BootDiskSize field is fixed on the platform
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("cd_image_id", computeRec.CdImageId)
d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones)
@@ -56,7 +49,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
d.Set("deleted_time", computeRec.DeletedTime)
d.Set("description", computeRec.Description)
d.Set("devices", string(devices))
err := d.Set("disks", flattenComputeDisks(computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
err := d.Set("disks", flattenComputeDisks(ctx, d, computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
if err != nil {
return err
}
@@ -178,24 +171,31 @@ func flattenQOS(qos compute.QOS) []map[string]interface{} {
}
}
-func flattenComputeDisks(disksList compute.ListDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
+func flattenComputeDisks(ctx context.Context, d *schema.ResourceData, disksList compute.ListDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(disksList))
for _, disk := range disksList {
if disk.ID == bootDiskId || findInExtraDisks(uint(disk.ID), extraDisks) { //skip main bootdisk and extraDisks
continue
}
+permanently, ok := ctx.Value(DiskKey(strconv.Itoa(int(disk.ID)))).(bool) // get permanently from Create or Update context
+if !ok {
+permanently = getPermanentlyByDiskID(d, disk.ID) // get permanently from state when Read is not after Create/Update
+}
temp := map[string]interface{}{
"disk_name": disk.Name,
"size": disk.SizeMax,
"sep_id": disk.SEPID,
"disk_type": disk.Type,
"pool": disk.Pool,
"desc": disk.Description,
"image_id": disk.ImageID,
"disk_id": disk.ID,
"shareable": disk.Shareable,
"size_used": disk.SizeUsed,
"size_max": disk.SizeMax,
"disk_name": disk.Name,
"size": disk.SizeMax,
"sep_id": disk.SEPID,
"disk_type": disk.Type,
"pool": disk.Pool,
"desc": disk.Description,
"image_id": disk.ImageID,
"disk_id": disk.ID,
"shareable": disk.Shareable,
"size_used": disk.SizeUsed,
"size_max": disk.SizeMax,
"permanently": permanently,
}
res = append(res, temp)
}
@@ -205,6 +205,21 @@ func flattenComputeDisks(disksList compute.ListDisks, extraDisks []interface{},
return res
}
+// getPermanentlyByDiskID gets the "permanently" value of a specific disk (by diskId) from the current disks state
+func getPermanentlyByDiskID(d *schema.ResourceData, diskId uint64) bool {
+disks := d.Get("disks").([]interface{})
+for _, diskItem := range disks {
+disk := diskItem.(map[string]interface{})
+if uint64(disk["disk_id"].(int)) == diskId {
+return disk["permanently"].(bool)
+}
+}
+log.Infof("getPermanentlyByDiskID: disk with id %d not found in state", diskId)
+return false
+}
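The two lookup paths above are complementary: the typed context value written during Create/Update takes precedence, while the state lookup covers a plain Read such as a refresh. A minimal sketch of the combined resolution, using a hypothetical helper name (the provider inlines this logic in flattenComputeDisks):

    // resolvePermanently is illustrative only. Values stored via
    // context.WithValue(ctx, DiskKey("123"), true) during Create/Update win;
    // otherwise we fall back to the disk's prior Terraform state.
    func resolvePermanently(ctx context.Context, d *schema.ResourceData, diskID uint64) bool {
        if v, ok := ctx.Value(DiskKey(strconv.Itoa(int(diskID)))).(bool); ok {
            return v // fresh value from the in-flight Create/Update
        }
        return getPermanentlyByDiskID(d, diskID) // standalone Read: use state
    }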
func findInExtraDisks(diskId uint, extraDisks []interface{}) bool {
for _, ExtraDisk := range extraDisks {
if diskId == uint(ExtraDisk.(int)) {
@@ -247,7 +262,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
"arch": computeItem.Arch,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"bootdisk_size": computeItem.BootDiskSize,
"boot_disk_size": computeItem.BootDiskSize,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
@@ -538,8 +553,6 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
customFields, _ := json.Marshal(compFacts.CustomFields)
devices, _ := json.Marshal(compFacts.Devices)
userData, _ := json.Marshal(compFacts.Userdata)
// general fields setting
-d.SetId(fmt.Sprintf("%d", compFacts.ID))
d.Set("account_id", compFacts.AccountID)
d.Set("account_name", compFacts.AccountName)
d.Set("acl", flattenListACLInterface(compFacts.ACL))
@@ -549,6 +562,7 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("anti_affinity_rules", flattenAffinityRules(compFacts.AntiAffinityRules))
d.Set("arch", compFacts.Arch)
d.Set("boot_order", compFacts.BootOrder)
d.Set("boot_disk_size", compFacts.BootDiskSize)
d.Set("cd_image_id", compFacts.CdImageId)
d.Set("clone_reference", compFacts.CloneReference)
d.Set("clones", compFacts.Clones)
@@ -597,62 +611,9 @@ func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute
d.Set("vgpus", compFacts.VGPUs)
d.Set("virtual_image_id", compFacts.VirtualImageID)
//extra fields setting
-bootDisk := findBootDisk(compFacts.Disks)
-if bootDisk != nil {
-d.Set("boot_disk_size", bootDisk.SizeMax)
-d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations
-d.Set("sep_id", bootDisk.SEPID)
-d.Set("pool", bootDisk.Pool)
-}
-if len(compFacts.Disks) > 0 {
-log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(compFacts.Disks))
-if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(compFacts.Disks)); err != nil {
-return err
-}
-}
-if len(compFacts.Interfaces) > 0 {
-log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(compFacts.Interfaces))
-if err := d.Set("network", parseComputeInterfacesToNetworks(compFacts.Interfaces)); err != nil {
-return err
-}
-}
return nil
}
-// Parse list of all disks from API compute/get into a list of "extra disks" attached to this compute
-// Extra disks are all compute disks but a boot disk.
-func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} {
-// this return value will be used to d.Set("extra_disks",) item of dataSourceCompute schema,
-// which is a simple list of integer disk IDs excluding boot disk ID
-length := len(disks)
-log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", length)
-if length == 0 || (length == 1 && disks[0].Type == "B") {
-// the disk list is empty (which is kind of strange - diskless compute?), or
-// there is only one disk in the list and it is a boot disk;
-// as we skip boot disks, the result will be of 0 length anyway
-return make([]interface{}, 0)
-}
-result := make([]interface{}, length-1)
-idx := 0
-for _, value := range disks {
-if value.Type == "B" {
-// skip boot disk when iterating over the list of disks
-continue
-}
-result[idx] = value.ID
-idx++
-}
-return result
-}
// Parse the list of interfaces from compute/get response into a list of networks
// attached to this compute
func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} {

View File

@@ -49,6 +49,9 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
+// DiskKey is a custom string type used as a context key for a disk ID
+type DiskKey string
func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))
c := m.(*controller.ControllerCfg)
@@ -305,11 +308,13 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
req.ImageID = uint64(diskConv["image_id"].(int))
}
-_, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
+diskId, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
if err != nil {
cleanup = true
return diag.FromErr(err)
}
+ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskId))), diskConv["permanently"].(bool))
}
}
}
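A defined key type matters here because context keys are compared by dynamic type as well as value: DiskKey("42") cannot collide with a plain string key "42" set elsewhere. A short sketch of that property (otherKey is hypothetical):

    type otherKey string // hypothetical key type from some other package

    func demoKeyIsolation() {
        ctx := context.WithValue(context.Background(), DiskKey("42"), true)
        _, ok := ctx.Value(otherKey("42")).(bool)
        _ = ok // false: same underlying string, but a different key type
    }

Note that context.WithValue returns a derived context, so reassigning ctx inside the loop only exposes the value to calls made with that new ctx later in the same apply, presumably the flattenCompute call reached through resourceComputeRead.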
@@ -555,7 +560,7 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac
d.SetId(strconv.FormatUint(computeRec.ID, 10))
-if err = flattenCompute(d, computeRec); err != nil {
+if err = flattenCompute(ctx, d, computeRec); err != nil {
return diag.FromErr(err)
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
)
func dataSourceComputeSchemaMake() map[string]*schema.Schema {
@@ -145,6 +146,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"boot_disk_size": {
Type: schema.TypeInt,
Computed: true,
},
"cd_image_id": {
Type: schema.TypeInt,
Computed: true,
@@ -769,57 +774,6 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
-//extra parameters
-"boot_disk_size": {
-Type: schema.TypeInt,
-Computed: true,
-},
-"boot_disk_id": {
-Type: schema.TypeInt,
-Computed: true,
-},
-"sep_id": {
-Type: schema.TypeInt,
-Computed: true,
-},
-"pool": {
-Type: schema.TypeString,
-Computed: true,
-},
-"extra_disks": {
-Type: schema.TypeSet,
-Computed: true,
-Elem: &schema.Schema{
-Type: schema.TypeInt,
-},
-Description: "IDs of the extra disk(s) attached to this compute.",
-},
-"network": {
-Type: schema.TypeSet,
-Optional: true,
-Elem: &schema.Resource{
-Schema: map[string]*schema.Schema{
-"net_id": {
-Type: schema.TypeInt,
-Computed: true,
-},
-"net_type": {
-Type: schema.TypeString,
-Computed: true,
-},
-"ip_address": {
-Type: schema.TypeString,
-Computed: true,
-},
-"mac": {
-Type: schema.TypeString,
-Computed: true,
-},
-},
-},
-Description: "Network connection(s) for this compute.",
-},
}
return res
}
@@ -1023,7 +977,7 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
},
},
"bootdisk_size": {
"boot_disk_size": {
Type: schema.TypeInt,
Computed: true,
},
@@ -2577,10 +2531,13 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Description: "Number of CPUs to allocate to this compute instance.",
},
"ram": {
-Type: schema.TypeInt,
-Required: true,
-ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute),
-Description: "Amount of RAM in MB to allocate to this compute instance.",
+Type: schema.TypeInt,
+Required: true,
+ValidateFunc: validation.All(
+validation.IntAtLeast(constants.MinRamPerCompute),
+validators.DivisibleBy(constants.RAMDivisibility),
+),
+Description: "Amount of RAM in MB to allocate to this compute instance.",
},
"image_id": {
Type: schema.TypeInt,
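validators.DivisibleBy in the ram attribute above comes from the provider's internal validators package, whose code is not part of this hunk. A plausible shape for it, assuming it follows the SDK's schema.SchemaValidateFunc contract (fmt and helper/schema imports assumed):

    // DivisibleBy rejects integers that are not a multiple of div.
    func DivisibleBy(div int) schema.SchemaValidateFunc {
        return func(i interface{}, k string) (warnings []string, errors []error) {
            v, ok := i.(int)
            if !ok {
                errors = append(errors, fmt.Errorf("expected type of %q to be int", k))
                return
            }
            if v%div != 0 {
                errors = append(errors, fmt.Errorf("expected %q to be divisible by %d, got: %d", k, div, v))
            }
            return
        }
    }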
@@ -2631,7 +2588,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
"stack_id": {
Type: schema.TypeInt,
Optional: true,
-Default: 0,
+Computed: true,
Description: "ID of stack to start compute",
},
"is": {
@@ -2699,9 +2656,8 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Description: "Set affinity label for compute",
},
"affinity_rules": {
-Type: schema.TypeSet,
+Type: schema.TypeList,
Optional: true,
-Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"topology": {
@@ -2729,7 +2685,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
-Required: true,
+Optional: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@@ -2738,7 +2694,6 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
"anti_affinity_rules": {
Type: schema.TypeList,
Optional: true,
-Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"topology": {
@@ -2766,7 +2721,7 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
},
"value": {
Type: schema.TypeString,
-Required: true,
+Optional: true,
Description: "value that must match the key to be taken into account when analyzing this rule",
},
},
@@ -3015,6 +2970,12 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Default: false,
Description: "Flag for redeploy compute",
},
"force_resize": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Flag for resize compute",
},
"data_disks": {
Type: schema.TypeString,
Optional: true,

View File

@@ -33,11 +33,13 @@ package kvmvm
import (
"context"
"fmt"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -109,11 +111,18 @@ func utilityComputeResize(ctx context.Context, d *schema.ResourceData, m interfa
resizeReq := compute.ResizeRequest{
ComputeID: computeId,
Force: true,
}
+forceResize, ok := d.GetOk("force_resize")
+if ok {
+resizeReq.Force = forceResize.(bool)
+}
doUpdate := false
oldCpu, newCpu := d.GetChange("cpu")
+if oldCpu.(int) > newCpu.(int) && !forceResize.(bool) {
+return fmt.Errorf("cannot resize compute ID %d: enable 'force_resize' to reduce compute vCPUs", computeId)
+}
if oldCpu.(int) != newCpu.(int) {
resizeReq.CPU = uint64(newCpu.(int))
doUpdate = true
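The new guard only fires when vCPUs shrink; growing CPU or RAM proceeds as before. Also note that d.GetOk reports ok == false for a false bool, so resizeReq.Force keeps its initial true unless force_resize is explicitly set to true. A hypothetical helper restating the check, with a worked trace:

    // guardShrink mirrors the guard above (illustrative only).
    func guardShrink(oldCpu, newCpu int, force bool, computeId uint64) error {
        // e.g. cpu 4 -> 2 with force == false returns the error below;
        // setting force_resize = true lets the reduction through.
        if oldCpu > newCpu && !force {
            return fmt.Errorf("cannot resize compute ID %d: enable 'force_resize' to reduce compute vCPUs", computeId)
        }
        return nil
    }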
@@ -182,25 +191,52 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
deletedDisks := make([]interface{}, 0)
addedDisks := make([]interface{}, 0)
-updatedDisks := make([]interface{}, 0)
+resizedDisks := make([]interface{}, 0)
+renamedDisks := make([]interface{}, 0)
+// save each disk's permanently flag to the context, keyed by disk_id
+for _, diskItemInterface := range d.Get("disks").([]interface{}) {
+diskItem := diskItemInterface.(map[string]interface{})
+diskId := diskItem["disk_id"].(int)
+permanently := diskItem["permanently"].(bool)
+ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(diskId)), permanently)
+}
oldDisks, newDisks := d.GetChange("disks")
oldConv := oldDisks.([]interface{})
newConv := newDisks.([]interface{})
for _, el := range oldConv {
-if !isContainsDisk(newConv, el) {
-deletedDisks = append(deletedDisks, el)
+if !isContainsDisk(newConv, el) && !isRenameDisk(newConv, el) && !isResizeDisk(newConv, el) {
+flag := false
+extraDisks := d.Get("extra_disks").(*schema.Set).List()
+delDisk := el.(map[string]interface{})
+delDiskId := delDisk["disk_id"].(int)
+for _, extraDiskId := range extraDisks {
+if extraDiskId.(int) == delDiskId {
+flag = true
+break
+}
+}
+if !flag {
+deletedDisks = append(deletedDisks, el)
+} else {
+log.Debugf("disk %d will not be deleted because it is present in the extra_disks block", delDiskId)
+}
+}
}
}
for _, el := range newConv {
if !isContainsDisk(oldConv, el) {
addedDisks = append(addedDisks, el)
-} else {
-if isChangeDisk(oldConv, el) {
-updatedDisks = append(updatedDisks, el)
-}
}
+if isResizeDisk(oldConv, el) {
+resizedDisks = append(resizedDisks, el)
+}
+if isRenameDisk(oldConv, el) {
+renamedDisks = append(renamedDisks, el)
+}
}
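Taken together, the two loops partition the disk diff into four buckets, with extra_disks membership shielding a disk from deletion. A worked example under hypothetical old and new plans:

    // old: {id:10 size:20} {id:11 name:"data"} {id:12}   new: {id:10 size:30} {id:11 name:"logs"} {id:13}
    //   addedDisks   -> disk 13 (disk_id absent from the old list)
    //   deletedDisks -> disk 12 (absent from the new list, not resized/renamed, not in extra_disks)
    //   resizedDisks -> disk 10 (same disk_id, size changed)
    //   renamedDisks -> disk 11 (same disk_id, disk_name changed)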
@@ -268,15 +304,17 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
if diskConv["image_id"].(int) != 0 {
req.ImageID = uint64(diskConv["image_id"].(int))
}
-_, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
+diskId, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
if err != nil {
return err
}
+ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskId))), diskConv["permanently"].(bool))
}
}
-if len(updatedDisks) > 0 {
-for _, disk := range updatedDisks {
+if len(resizedDisks) > 0 {
+for _, disk := range resizedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
@@ -294,6 +332,22 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
}
}
+if len(renamedDisks) > 0 {
+for _, disk := range renamedDisks {
+diskConv := disk.(map[string]interface{})
+req := disks.RenameRequest{
+DiskID: uint64(diskConv["disk_id"].(int)),
+Name: diskConv["disk_name"].(string),
+}
+_, err := c.CloudBroker().Disks().Rename(ctx, req)
+if err != nil {
+return err
+}
+}
+}
return nil
}
@@ -567,8 +621,8 @@ func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interfa
if d.HasChange("name") {
req.Name = d.Get("name").(string)
}
if d.HasChange("desc") {
req.Description = d.Get("desc").(string)
if d.HasChange("description") {
req.Description = d.Get("description").(string)
}
if _, err := c.CloudBroker().Compute().Update(ctx, req); err != nil {
@@ -1234,7 +1288,7 @@ func utilityComputeStart(ctx context.Context, computeID uint64, m interface{}) (
return 0, nil
}
-func isChangeDisk(els []interface{}, el interface{}) bool {
+func isResizeDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
@@ -1246,11 +1300,23 @@ func isChangeDisk(els []interface{}, el interface{}) bool {
return false
}
+func isRenameDisk(els []interface{}, el interface{}) bool {
+for _, elOld := range els {
+elOldConv := elOld.(map[string]interface{})
+elConv := el.(map[string]interface{})
+if elOldConv["disk_id"].(int) == elConv["disk_id"].(int) &&
+elOldConv["disk_name"].(string) != elConv["disk_name"].(string) {
+return true
+}
+}
+return false
+}
func isContainsDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["disk_name"].(string) == elConv["disk_name"].(string) {
if elOldConv["disk_id"].(int) == elConv["disk_id"].(int) {
return true
}
}