From 8da3f8d3480ffbcd8d9da1ae094fda0122e64de9 Mon Sep 17 00:00:00 2001
From: Sergey Shubin svs1370
Date: Mon, 4 Oct 2021 19:55:03 +0300
Subject: [PATCH] Moving from TypeList to TypeSet for network and extra_disks attributes of compute

---
 decort/data_source_compute.go | 45 ++++++++++++++++++----
 decort/disk_subresource.go    |  1 +
 decort/network_subresource.go | 70 +++++++++++++++++++++++++++++++++++
 decort/resource_compute.go    | 18 ++++-----
 decort/resource_rg.go         | 45 ++++++++++++++++------
 decort/resource_vins.go       |  4 ++
 decort/utility_compute.go     | 35 +++++++++++-------
 7 files changed, 175 insertions(+), 43 deletions(-)

diff --git a/decort/data_source_compute.go b/decort/data_source_compute.go
index c7570db..40677bc 100644
--- a/decort/data_source_compute.go
+++ b/decort/data_source_compute.go
@@ -65,6 +65,8 @@ func parseComputeDisksToExtraDisks(disks []DiskRecord) []interface{} {
     return result
 }
 
+// NOTE: this is a legacy function, which is not used as of rc-1.10
+// Use "parseComputeDisksToExtraDisks" instead
 func parseComputeDisks(disks []DiskRecord) []interface{} {
     // Return value was designed to d.Set("disks",) item of dataSourceCompute schema
     // However, this item was excluded from the schema as it is not directly
@@ -80,21 +82,20 @@ func parseComputeDisks(disks []DiskRecord) []interface{} {
 
     }
     */
 
-    result := make([]interface{}, length)
+    result := []interface{}{}
     if length == 0 {
         return result
     }
 
-    elem := make(map[string]interface{})
-
-    for i, value := range disks {
+    for _, value := range disks {
         /*
         if value.Type == "B" {
             // skip boot disk when parsing the list of disks
             continue
         }
         */
+        elem := make(map[string]interface{})
         // keys in this map should correspond to the Schema definition
         // as returned by dataSourceDiskSchemaMake()
         elem["name"] = value.Name
@@ -111,7 +112,8 @@ func parseComputeDisks(disks []DiskRecord) []interface{} {
         // elem["status"] = value.Status
         // elem["tech_status"] = value.TechStatus
         elem["compute_id"] = value.ComputeID
-        result[i] = elem
+
+        result = append(result, elem)
     }
 
     return result
@@ -149,8 +151,32 @@ func parseBootDiskId(disks []DiskRecord) uint {
 
 // Parse the list of interfaces from compute/get response into a list of networks
 // attached to this compute
+func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []interface{} {
+    // return value will be used to d.Set("network") item of dataSourceCompute schema
+    length := len(ifaces)
+    log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
+
+    result := []interface{}{}
+
+    for _, value := range ifaces {
+        elem := make(map[string]interface{})
+        // Keys in this map should correspond to the Schema definition
+        // as returned by networkSubresourceSchemaMake()
+        elem["net_id"] = value.NetID
+        elem["net_type"] = value.NetType
+        elem["ip_address"] = value.IPAddress
+        elem["mac"] = value.MAC
+
+        // log.Debugf("  element %d: net_id=%d, net_type=%s", i, value.NetID, value.NetType)
+
+        result = append(result, elem)
+    }
+
+    return result
+}
+/*
 func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []map[string]interface{} {
-    // return value will be used to d.Set("network",) item of dataSourceCompute schema
+    // return value will be used to d.Set("network") item of dataSourceCompute schema
     length := len(ifaces)
     log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
 
@@ -172,7 +198,10 @@ func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []map[string]int
 
     return result
 }
 
+*/
+
+// NOTE: this function is retained for historical purposes and actually not used as of rc-1.10
 func parseComputeInterfaces(ifaces []InterfaceRecord) []map[string]interface{} {
     // return value was designed to d.Set("interfaces",) item of dataSourceCompute schema
     // However, this item was excluded from the schema as it is not directly
@@ -374,7 +403,7 @@ func dataSourceCompute() *schema.Resource {
         },
 
         "extra_disks": {
-            Type:     schema.TypeList,
+            Type:     schema.TypeSet,
             Computed: true,
             MaxItems: MaxExtraDisksPerCompute,
             Elem: &schema.Schema {
@@ -395,7 +424,7 @@
         */
 
         "network": {
-            Type:     schema.TypeList,
+            Type:     schema.TypeSet,
             Optional: true,
             MaxItems: MaxNetworksPerCompute,
             Elem: &schema.Resource{
diff --git a/decort/disk_subresource.go b/decort/disk_subresource.go
index 7b5372c..cf3a5c5 100644
--- a/decort/disk_subresource.go
+++ b/decort/disk_subresource.go
@@ -44,6 +44,7 @@ func diskSubresourceSchemaMake() map[string]*schema.Schema {
         "account_id": {
             Type:         schema.TypeInt,
             Computed:     true,
+            ValidateFunc: validation.IntAtLeast(1),
             Description:  "ID of the account this disk belongs to.",
         },
 
diff --git a/decort/network_subresource.go b/decort/network_subresource.go
index 01a7c47..2e8ca3d 100644
--- a/decort/network_subresource.go
+++ b/decort/network_subresource.go
@@ -21,11 +21,15 @@ import (
 
     // "encoding/json"
     // "fmt"
+    "bytes"
+    "hash/fnv"
     log "github.com/sirupsen/logrus"
     // "net/url"
+    "sort"
 
     "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
     "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+    // "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode"
 )
 
 // This is subresource of compute resource used when creating/managing compute network connections
@@ -39,6 +43,71 @@ func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.Reso
     return true // suppress difference
 }
 
+// This function is based on the original Terraform SerializeResourceForHash found
+// in helper/schema/serialize.go
+// It skips network subresource attributes, which are irrelevant for identification
+// of unique network blocks
+func networkSubresourceSerialize(output *bytes.Buffer, val interface{}, resource *schema.Resource) {
+    if val == nil {
+        return
+    }
+
+    rs := resource.Schema
+    m := val.(map[string]interface{})
+
+    var keys []string
+    allComputed := true
+
+    for k, val := range rs {
+        if val.Optional || val.Required {
+            allComputed = false
+        }
+
+        keys = append(keys, k)
+    }
+
+    sort.Strings(keys)
+    for _, k := range keys {
+        // explicitly ignore "ip_address" when hashing
+        if k == "ip_address" {
+            continue
+        }
+
+        subSchema := rs[k]
+        // Skip attributes that are not user-provided. Computed attributes
+        // do not contribute to the hash since their ultimate value cannot
+        // be known at plan/diff time.
+        if !allComputed && !(subSchema.Required || subSchema.Optional) {
+            continue
+        }
+
+        output.WriteString(k)
+        output.WriteRune(':')
+        value := m[k]
+        schema.SerializeValueForHash(output, value, subSchema)
+    }
+}
+
+// HashNetworkSubresource hashes network subresource of compute resource. It uses
+// specially designed networkSubresourceSerialize (see above) to make sure hashing
+// does not involve attributes that we deem irrelevant to the uniqueness of network
+// subresource definitions.
+// It is this function that should be specified as SchemaSetFunc when creating Set
+// from network subresource (e.g. in flattenCompute)
+//
+// This function is based on the original Terraform function HashResource from
+// helper/schema/set.go
+func HashNetworkSubresource(resource *schema.Resource) schema.SchemaSetFunc {
+    return func(v interface{}) int {
+        var serialized bytes.Buffer
+        networkSubresourceSerialize(&serialized, v, resource)
+
+        hs := fnv.New32a()
+        hs.Write(serialized.Bytes())
+        return int(hs.Sum32())
+    }
+}
+
 func networkSubresourceSchemaMake() map[string]*schema.Schema {
     rets := map[string]*schema.Schema{
         "net_type": {
@@ -58,6 +127,7 @@ func networkSubresourceSchemaMake() map[string]*schema.Schema {
         "ip_address": {
             Type:             schema.TypeString,
             Optional:         true,
+            Computed:         true,
             DiffSuppressFunc: networkSubresIPAddreDiffSupperss,
             Description:      "Optional IP address to assign to this connection. This IP should belong to the selected network and free for use.",
         },
diff --git a/decort/resource_compute.go b/decort/resource_compute.go
index 050e9bc..3d4c7cc 100644
--- a/decort/resource_compute.go
+++ b/decort/resource_compute.go
@@ -123,12 +123,12 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
     // Configure data disks if any
     extraDisksOk := true
     argVal, argSet = d.GetOk("extra_disks")
-    if argSet && len(argVal.([]interface{})) > 0 {
+    if argSet && argVal.(*schema.Set).Len() > 0 {
         // urlValues.Add("desc", argVal.(string))
-        log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", len(argVal.([]interface{})))
+        log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", argVal.(*schema.Set).Len())
         err = controller.utilityComputeExtraDisksConfigure(d, false) // do_delta=false, as we are working on a new compute
         if err != nil {
-            log.Errorf("resourceComputeCreate: error when attaching extra disks to a new Compute ID %s: %s", compId, err)
+            log.Errorf("resourceComputeCreate: error when attaching extra disk(s) to a new Compute ID %s: %s", compId, err)
             extraDisksOk = false
         }
     }
@@ -139,8 +139,8 @@ func resourceComputeCreate(d *schema.ResourceData, m interface{}) error {
     // Configure external networks if any
     netsOk := true
     argVal, argSet = d.GetOk("network")
-    if argSet && len(argVal.([]interface{})) > 0 {
-        log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", len(argVal.([]interface{})))
+    if argSet && argVal.(*schema.Set).Len() > 0 {
+        log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", argVal.(*schema.Set).Len())
         err = controller.utilityComputeNetworksConfigure(d, false) // do_delta=false, as we are working on a new compute
         if err != nil {
             log.Errorf("resourceComputeCreate: error when attaching networks to a new Compute ID %d: %s", compId, err)
@@ -398,12 +398,12 @@ func resourceCompute() *schema.Resource {
 
         "boot_disk_size": {
             Type:        schema.TypeInt,
-            Optional:    true,
-            Description: "This compute instance boot disk size in GB.",
+            Required:    true,
+            Description: "This compute instance boot disk size in GB. Make sure it is large enough to accommodate the selected OS image.",
         },
 
         "extra_disks": {
-            Type:     schema.TypeList,
+            Type:     schema.TypeSet,
             Optional: true,
             MaxItems: MaxExtraDisksPerCompute,
             Elem: &schema.Schema{
@@ -413,7 +413,7 @@ func resourceCompute() *schema.Resource {
         },
 
         "network": {
-            Type:     schema.TypeList,
+            Type:     schema.TypeSet,
             Optional: true,
             MaxItems: MaxNetworksPerCompute,
             Elem: &schema.Resource{
diff --git a/decort/resource_rg.go b/decort/resource_rg.go
index c59730d..5935a44 100644
--- a/decort/resource_rg.go
+++ b/decort/resource_rg.go
@@ -146,7 +146,28 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
     log.Debugf("resourceResgroupUpdate: called for RG name %s, account ID %d",
         d.Get("name").(string), d.Get("account_id").(int))
 
-    do_update := false
+    /* NOTE: we do not allow changing the following attributes of an existing RG via terraform:
+       - def_net_type
+       - ipcidr
+       - ext_net_id
+       - ext_ip
+
+       The following code fragment checks if any of these have been changed and generates an error.
+    */
+    for _, attr := range []string{"def_net_type", "ipcidr", "ext_ip"} {
+        attr_old, attr_new := d.GetChange(attr)
+        if attr_new.(string) != attr_old.(string) {
+            return fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing %s for existing RG is not allowed", d.Id(), attr)
+        }
+    }
+
+    attr_old, attr_new := d.GetChange("ext_net_id")
+    if attr_new.(int) != attr_old.(int) {
+        return fmt.Errorf("resourceResgroupUpdate: RG ID %s: changing ext_net_id for existing RG is not allowed", d.Id())
+    }
+
+
+    do_general_update := false // will be true if general RG update is necessary (API rg/update)
 
     controller := m.(*ControllerCfg)
     url_values := &url.Values{}
@@ -157,7 +178,7 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
         log.Debugf("resourceResgroupUpdate: name specified - looking for deltas from the old settings.")
         name_old, _ := d.GetChange("name")
         if name_old.(string) != name_new.(string) {
-            do_update = true
+            do_general_update = true
             url_values.Add("name", name_new.(string))
         }
     }
@@ -170,31 +191,31 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error {
         quotarecord_old, _ := makeQuotaRecord(quota_value_old.([]interface{}))
 
         if quotarecord_new.Cpu != quotarecord_old.Cpu {
-            do_update = true
+            do_general_update = true
             log.Debugf("resourceResgroupUpdate: Cpu diff %d <- %d", quotarecord_new.Cpu, quotarecord_old.Cpu)
             url_values.Add("maxCPUCapacity", fmt.Sprintf("%d", quotarecord_new.Cpu))
         }
 
         if quotarecord_new.Disk != quotarecord_old.Disk {
-            do_update = true
+            do_general_update = true
             log.Debugf("resourceResgroupUpdate: Disk diff %d <- %d", quotarecord_new.Disk, quotarecord_old.Disk)
             url_values.Add("maxVDiskCapacity", fmt.Sprintf("%d", quotarecord_new.Disk))
         }
 
         if quotarecord_new.Ram != quotarecord_old.Ram { // NB: quota on RAM is stored as float32, in units of MB
-            do_update = true
+            do_general_update = true
             log.Debugf("resourceResgroupUpdate: Ram diff %f <- %f", quotarecord_new.Ram, quotarecord_old.Ram)
             url_values.Add("maxMemoryCapacity", fmt.Sprintf("%f", quotarecord_new.Ram))
         }
 
         if quotarecord_new.ExtTraffic != quotarecord_old.ExtTraffic {
-            do_update = true
+            do_general_update = true
             log.Debugf("resourceResgroupUpdate: ExtTraffic diff %d <- %d", quotarecord_new.ExtTraffic, quotarecord_old.ExtTraffic)
             url_values.Add("maxNetworkPeerTransfer", fmt.Sprintf("%d", quotarecord_new.ExtTraffic))
         }
 
         if quotarecord_new.ExtIPs != quotarecord_old.ExtIPs {
-            do_update = true
+            do_general_update = true
log.Debugf("resourceResgroupUpdate: ExtIPs diff %d <- %d", quotarecord_new.ExtIPs, quotarecord_old.ExtIPs) url_values.Add("maxNumPublicIP", fmt.Sprintf("%d", quotarecord_new.ExtIPs)) } @@ -205,12 +226,12 @@ func resourceResgroupUpdate(d *schema.ResourceData, m interface{}) error { log.Debugf("resourceResgroupUpdate: description specified - looking for deltas from the old settings.") desc_old, _ := d.GetChange("description") if desc_old.(string) != desc_new.(string) { - do_update = true + do_general_update = true url_values.Add("desc", desc_new.(string)) } } - if do_update { + if do_general_update { log.Debugf("resourceResgroupUpdate: detected delta between new and old RG specs - updating the RG") _, err := controller.decortAPICall("POST", ResgroupUpdateAPI, url_values) if err != nil { @@ -303,7 +324,7 @@ func resourceResgroup() *schema.Resource { Type: schema.TypeString, Optional: true, Default: "PRIVATE", - // ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false), + ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false), Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.", }, @@ -323,13 +344,13 @@ func resourceResgroup() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 0, - Description: "ID of the external network, which this resource group will use as default for its computes if def_net_type=PUBLIC", + Description: "ID of the external network for default ViNS. Pass 0 if def_net_type=PUBLIC or no external connection required for the defult ViNS when def_net_type=PRIVATE", }, "ext_ip": { Type: schema.TypeString, Optional: true, - Description: "IP address on the external netowrk to request, if def_net_type=PUBLIC", + Description: "IP address on the external netowrk to request when def_net_type=PRIVATE and ext_net_id is not 0", }, /* commented out, as in this version of provider we use default Grid ID diff --git a/decort/resource_vins.go b/decort/resource_vins.go index 09fbd08..caa9b58 100644 --- a/decort/resource_vins.go +++ b/decort/resource_vins.go @@ -33,6 +33,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" // "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) @@ -223,6 +224,7 @@ func resourceVinsSchemaMake() map[string]*schema.Schema { "name": { Type: schema.TypeString, Required: true, + ValidateFunc: validation.StringIsNotEmpty, Description: "Name of the ViNS. Names are case sensitive and unique within the context of an account or resource group.", }, @@ -247,12 +249,14 @@ func resourceVinsSchemaMake() map[string]*schema.Schema { Type: schema.TypeInt, Required: true, ForceNew: true, + ValidateFunc: validation.IntAtLeast(1), Description: "ID of the account, which this ViNS belongs to. For ViNS created at account level, resource group ID is 0.", }, "ext_net_id": { Type: schema.TypeInt, Required: true, + ValidateFunc: validation.IntAtLeast(0), Description: "ID of the external network this ViNS is connected to. Pass 0 if no external connection required.", }, diff --git a/decort/utility_compute.go b/decort/utility_compute.go index 34fbea5..0ff544d 100644 --- a/decort/utility_compute.go +++ b/decort/utility_compute.go @@ -45,8 +45,10 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD // disks via atomic API calls. 
     log.Debugf("utilityComputeExtraDisksConfigure: called for Compute ID %s with do_delta = %b", d.Id(), do_delta)
 
+    // NB: as of rc-1.25 "extra_disks" is a TypeSet with elements of TypeInt
     old_set, new_set := d.GetChange("extra_disks")
 
+    /*
     old_disks := make([]interface{},0,0)
     if old_set != nil {
         old_disks = old_set.([]interface{})
     }
@@ -56,16 +58,17 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
     new_disks := make([]interface{},0,0)
     if new_set != nil {
         new_disks = new_set.([]interface{})
     }
+    */
 
     apiErrCount := 0
     var lastSavedError error
 
     if !do_delta {
-        if len(new_disks) < 1 {
+        if new_set.(*schema.Set).Len() < 1 {
             return nil
         }
 
-        for _, disk := range new_disks {
+        for _, disk := range new_set.(*schema.Set).List() {
             urlValues := &url.Values{}
             urlValues.Add("computeId", d.Id())
             urlValues.Add("diskId", fmt.Sprintf("%d", disk.(int)))
@@ -86,9 +89,10 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
         return nil
     }
 
+    detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set))
+    /*
     var attach_list, detach_list []int
     match := false
-
     for _, oDisk := range old_disks {
         match = false
         for _, nDisk := range new_disks {
@@ -101,8 +105,10 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
             detach_list = append(detach_list, oDisk.(int))
         }
     }
-    log.Debugf("utilityComputeExtraDisksConfigure: detach list has %d items for Compute ID %s", len(detach_list), d.Id())
+    */
+    log.Debugf("utilityComputeExtraDisksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id())
 
+    /*
     for _, nDisk := range new_disks {
         match = false
         for _, oDisk := range old_disks {
@@ -115,29 +121,31 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
             attach_list = append(attach_list, nDisk.(int))
         }
     }
-    log.Debugf("utilityComputeExtraDisksConfigure: attach list has %d items for Compute ID %s", len(attach_list), d.Id())
+    */
+    attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
+    log.Debugf("utilityComputeExtraDisksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
 
-    for _, diskId := range detach_list {
+    for _, diskId := range detach_set.List() {
         urlValues := &url.Values{}
         urlValues.Add("computeId", d.Id())
-        urlValues.Add("diskId", fmt.Sprintf("%d", diskId))
+        urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
         _, err := ctrl.decortAPICall("POST", ComputeDiskDetachAPI, urlValues)
         if err != nil {
             // failed to detach disk - there will be partial resource update
-            log.Debugf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId, d.Id(), err)
+            log.Debugf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId.(int), d.Id(), err)
             apiErrCount++
             lastSavedError = err
         }
     }
 
-    for _, diskId := range attach_list {
+    for _, diskId := range attach_set.List() {
         urlValues := &url.Values{}
         urlValues.Add("computeId", d.Id())
-        urlValues.Add("diskId", fmt.Sprintf("%d", diskId))
+        urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
         _, err := ctrl.decortAPICall("POST", ComputeDiskAttachAPI, urlValues)
         if err != nil {
             // failed to attach disk - there will be partial resource update
-            log.Debugf("utilityComputeExtraDisksConfigure: failed to attach disk ID %d to Compute ID %s: %s", diskId, d.Id(), err)
+            log.Debugf("utilityComputeExtraDisksConfigure: failed to attach disk ID %d to Compute ID %s: %s", diskId.(int), d.Id(), err)
             apiErrCount++
             lastSavedError = err
         }
     }
 
@@ -152,7 +160,6 @@ func (ctrl *ControllerCfg) utilityComputeExtraDisksConfigure(d *schema.ResourceD
     return nil
 }
 
-// TODO: implement do_delta logic
 func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceData, do_delta bool) error {
     // "d" is filled with data according to computeResource schema, so extra networks config is retrieved via "network" key
     // If do_delta is true, this function will identify changes between new and existing specs for network and try to
@@ -170,12 +177,12 @@ func (ctrl *ControllerCfg) utilityComputeNetworksConfigure(d *schema.ResourceDat
 
     oldNets := make([]interface{},0,0)
     if old_set != nil {
-        oldNets = old_set.([]interface{}) // network is ar array of maps; for keys see func networkSubresourceSchemaMake() definition
+        oldNets = old_set.(*schema.Set).List() // network set is an array of maps; for keys see func networkSubresourceSchemaMake() definition
     }
 
     newNets := make([]interface{},0,0)
     if new_set != nil {
-        newNets = new_set.([]interface{}) // network is ar array of maps; for keys see func networkSubresourceSchemaMake() definition
+        newNets = new_set.(*schema.Set).List() // network set is an array of maps; for keys see func networkSubresourceSchemaMake() definition
    }
 
    apiErrCount := 0
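
Reviewer note (not part of the patch): the delta branch of utilityComputeExtraDisksConfigure now derives the attach/detach work entirely from Set.Difference instead of the old hand-rolled list matching. Below is a minimal, self-contained sketch of that semantics, using the same helper/schema package this provider already imports; the disk IDs are made up purely for illustration.

package main

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func main() {
    // Hypothetical extra_disks values before and after a plan.
    oldDisks := schema.NewSet(schema.HashInt, []interface{}{101, 102, 103})
    newDisks := schema.NewSet(schema.HashInt, []interface{}{102, 104})

    // Disks present in the old set but missing from the new one should be detached...
    detachSet := oldDisks.Difference(newDisks)
    // ...and disks present in the new set but missing from the old one should be attached.
    attachSet := newDisks.Difference(oldDisks)

    fmt.Println("detach:", detachSet.List()) // 101 and 103, in the set's internal hash order
    fmt.Println("attach:", attachSet.List()) // 104
}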
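
Also for reviewers: with "network" now a TypeSet hashed by HashNetworkSubresource, the read path is expected to build the set explicitly rather than assigning a list. A hedged sketch of that wiring is below; flattenCompute itself is not part of this diff, so the helper name, the *schema.ResourceData handle d, and the ifaces slice from compute/get are assumptions for illustration only. It would live in the decort package next to the functions added above.

// Sketch only: populate the "network" TypeSet using the custom hash, so that
// ip_address and other computed attributes do not affect element identity.
func setComputeNetworksSketch(d *schema.ResourceData, ifaces []InterfaceRecord) error {
    netSubresource := &schema.Resource{Schema: networkSubresourceSchemaMake()}
    netSet := schema.NewSet(HashNetworkSubresource(netSubresource), parseComputeInterfacesToNetworks(ifaces))
    return d.Set("network", netSet)
}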