4.5.1
@@ -1,5 +1,5 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
@@ -33,12 +33,8 @@ package kvmvm

import (
    "context"
    "fmt"

    // "net/url"

    log "github.com/sirupsen/logrus"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@@ -46,121 +42,6 @@ import (
    // "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)

// Parse the list of all disks from an API compute/get response into the list of "extra disks"
// attached to this compute. Extra disks are all compute disks except the boot disk.
func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} {
    // the return value is passed to d.Set("extra_disks", ...) of the dataSourceCompute schema,
    // which is a simple list of integer disk IDs excluding the boot disk ID
    length := len(disks)
    log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", length)

    if length == 0 || (length == 1 && disks[0].Type == "B") {
        // the disk list is empty (which is kind of strange - a diskless compute?), or
        // there is only one disk in the list and it is a boot disk;
        // as we skip boot disks, the result will be of 0 length anyway
        return make([]interface{}, 0)
    }

    result := make([]interface{}, length-1)
    idx := 0
    for _, value := range disks {
        if value.Type == "B" {
            // skip the boot disk when iterating over the list of disks
            continue
        }

        result[idx] = value.ID
        idx++
    }

    return result
}
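
A standalone sketch of the same boot-disk filtering, using stand-in types rather than the SDK's compute.ListDisks (illustrative only, not part of this commit):

package main

import "fmt"

type disk struct {
    ID   uint64
    Type string // "B" marks the boot disk
}

// extraDiskIDs mirrors parseComputeDisksToExtraDisks: keep every disk except type "B".
func extraDiskIDs(disks []disk) []uint64 {
    res := make([]uint64, 0, len(disks))
    for _, d := range disks {
        if d.Type == "B" {
            continue // the boot disk is tracked by its own schema attributes
        }
        res = append(res, d.ID)
    }
    return res
}

func main() {
    fmt.Println(extraDiskIDs([]disk{{10, "B"}, {11, "D"}, {12, "D"}})) // [11 12]
}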

// Parse the list of interfaces from a compute/get response into the list of networks
// attached to this compute.
func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} {
    // the return value is passed to d.Set("network", ...) of the dataSourceCompute schema
    length := len(ifaces)
    log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)

    result := []interface{}{}

    for _, value := range ifaces {
        elem := make(map[string]interface{})
        // Keys in this map should correspond to the Schema definition
        // as returned by networkSubresourceSchemaMake()
        elem["net_id"] = value.NetID
        elem["net_type"] = value.NetType
        elem["ip_address"] = value.IPAddress
        elem["mac"] = value.MAC

        // log.Debugf("  element %d: net_id=%d, net_type=%s", i, value.NetID, value.NetType)

        result = append(result, elem)
    }

    return result
}

func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error {
    // This function expects that compFacts contains the response from API compute/get,
    // i.e. detailed information about the compute instance.
    //
    // NOTE: this function modifies its ResourceData argument - as such it should never be called
    // from the resourceComputeExists(...) method

    log.Debugf("flattenCompute: ID %d, RG ID %d", compFacts.ID, compFacts.RGID)

    d.SetId(fmt.Sprintf("%d", compFacts.ID))
    d.Set("name", compFacts.Name)
    d.Set("rg_id", compFacts.RGID)
    d.Set("rg_name", compFacts.RGName)
    d.Set("account_id", compFacts.AccountID)
    d.Set("account_name", compFacts.AccountName)
    d.Set("driver", compFacts.Driver)
    d.Set("cpu", compFacts.CPUs)
    d.Set("ram", compFacts.RAM)
    d.Set("image_id", compFacts.ImageID)
    d.Set("description", compFacts.Description)
    d.Set("cloud_init", "applied")

    if compFacts.TechStatus == "STARTED" {
        d.Set("started", true)
    } else {
        d.Set("started", false)
    }

    bootDisk := findBootDisk(compFacts.Disks)

    d.Set("boot_disk_size", bootDisk.SizeMax)
    d.Set("boot_disk_id", bootDisk.ID) // we may need the boot disk ID in resize operations
    d.Set("sep_id", bootDisk.SEPID)
    d.Set("pool", bootDisk.Pool)

    if len(compFacts.Disks) > 0 {
        log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(compFacts.Disks))
        if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(compFacts.Disks)); err != nil {
            return err
        }
    }

    if len(compFacts.Interfaces) > 0 {
        log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(compFacts.Interfaces))
        if err := d.Set("network", parseComputeInterfacesToNetworks(compFacts.Interfaces)); err != nil {
            return err
        }
    }

    if len(compFacts.OSUsers) > 0 {
        log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(compFacts.OSUsers))
        if err := d.Set("os_users", parseOsUsers(compFacts.OSUsers)); err != nil {
            return err
        }
    }

    return nil
}

func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    compFacts, err := utilityComputeCheckPresence(ctx, d, m)
    if compFacts == nil {
@@ -186,143 +67,6 @@ func DataSourceCompute() *schema.Resource {
            Default: &constants.Timeout60s,
        },

        Schema: map[string]*schema.Schema{
            "name": {
                Type:        schema.TypeString,
                Optional:    true,
                Description: "Name of this compute instance. NOTE: this parameter is case sensitive.",
            },

            // TODO: consider removing compute_id from the schema, as it is not practical to call
            // this data provider if the corresponding compute ID is already known
            "compute_id": {
                Type:        schema.TypeInt,
                Optional:    true,
                Description: "ID of the compute instance. If ID is specified, name and resource group ID are ignored.",
            },

            "rg_id": {
                Type:        schema.TypeInt,
                Optional:    true,
                Description: "ID of the resource group where this compute instance is located.",
            },

            "rg_name": {
                Type:        schema.TypeString,
                Computed:    true,
                Description: "Name of the resource group where this compute instance is located.",
            },

            "account_id": {
                Type:        schema.TypeInt,
                Computed:    true,
                Description: "ID of the account this compute instance belongs to.",
            },

            "account_name": {
                Type:        schema.TypeString,
                Computed:    true,
                Description: "Name of the account this compute instance belongs to.",
            },

            "driver": {
                Type:        schema.TypeString,
                Computed:    true,
                Description: "Hardware architecture of this compute instance.",
            },

            "cpu": {
                Type:        schema.TypeInt,
                Computed:    true,
                Description: "Number of CPUs allocated for this compute instance.",
            },

            "ram": {
                Type:        schema.TypeInt,
                Computed:    true,
                Description: "Amount of RAM in MB allocated for this compute instance.",
            },

            "image_id": {
                Type:        schema.TypeInt,
                Computed:    true,
                Description: "ID of the OS image this compute instance is based on.",
            },

            "image_name": {
                Type:        schema.TypeString,
                Computed:    true,
                Description: "Name of the OS image this compute instance is based on.",
            },

            "boot_disk_size": {
                Type:        schema.TypeInt,
                Computed:    true,
                Description: "This compute instance boot disk size in GB.",
            },

            "boot_disk_id": {
                Type:        schema.TypeInt,
                Computed:    true,
                Description: "This compute instance boot disk ID.",
            },

            "extra_disks": {
                Type:     schema.TypeSet,
                Computed: true,
                Elem: &schema.Schema{
                    Type: schema.TypeInt,
                },
                Description: "IDs of the extra disk(s) attached to this compute.",
            },

            /*
                "disks": {
                    Type:     schema.TypeList,
                    Computed: true,
                    Elem: &schema.Resource{
                        Schema: dataSourceDiskSchemaMake(), // ID, type, name, size, account ID, SEP ID, SEP type, pool, status, tech status, compute ID, image ID
                    },
                    Description: "Detailed specification for all disks attached to this compute instance (including the boot disk).",
                },
            */

            // "network": {
            //     Type:     schema.TypeSet,
            //     Optional: true,
            //     MaxItems: constants.MaxNetworksPerCompute,
            //     Elem: &schema.Resource{
            //         Schema: networkSubresourceSchemaMake(),
            //     },
            //     Description: "Network connection(s) for this compute.",
            // },

            "os_users": {
                Type:     schema.TypeList,
                Computed: true,
                Elem: &schema.Resource{
                    Schema: osUsersSubresourceSchemaMake(),
                },
                Description: "Guest OS users provisioned on this compute instance.",
            },

            "description": {
                Type:        schema.TypeString,
                Computed:    true,
                Description: "User-defined text description of this compute instance.",
            },

            "cloud_init": {
                Type:        schema.TypeString,
                Computed:    true,
                Description: "Placeholder for cloud_init parameters.",
            },

            "started": {
                Type:        schema.TypeBool,
                Optional:    true,
                Default:     true,
                Description: "Whether the compute instance is started.",
            },
        },
        Schema: dataSourceComputeSchemaMake(),
    }
}
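
The "extra_disks" attribute is a schema.TypeSet of TypeInt; elements of such a set come back from state as plain int values. A minimal, runnable sketch of that round trip using the SDK's set helpers (disk IDs illustrative):

package main

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func main() {
    // Build a set the way provider state would hold extra_disks.
    s := schema.NewSet(schema.HashInt, []interface{}{4510, 4511})

    // Elements of a TypeInt set are surfaced as plain int.
    ids := make([]uint64, 0, s.Len())
    for _, v := range s.List() {
        ids = append(ids, uint64(v.(int)))
    }
    fmt.Println(ids)
}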

@@ -2,11 +2,14 @@ package kvmvm

import (
    "encoding/json"
    "fmt"
    "sort"
    "strconv"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    log "github.com/sirupsen/logrus"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
    "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)

func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) error {
@@ -14,18 +17,35 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e

    customFields, _ := json.Marshal(computeRec.CustomFields)
    devices, _ := json.Marshal(computeRec.Devices)

    userData, _ := json.Marshal(computeRec.Userdata)
    bootDisk := findBootDisk(computeRec.Disks)

    // extra fields setting
    if len(computeRec.Disks) > 0 {
        log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(computeRec.Disks))
        if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(computeRec.Disks)); err != nil {
            return err
        }
    }

    if len(computeRec.Interfaces) > 0 {
        log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(computeRec.Interfaces))
        if err := d.Set("network", parseComputeInterfacesToNetworks(computeRec.Interfaces)); err != nil {
            return err
        }
    }
    d.Set("account_id", computeRec.AccountID)
    d.Set("account_name", computeRec.AccountName)
    d.Set("acl", flattenListACLInterface(computeRec.ACL))
    d.Set("affinity_label", computeRec.AffinityLabel)
    d.Set("affinity_weight", computeRec.AffinityWeight)
    d.Set("affinity_rules", flattenAffinityRules(computeRec.AffinityRules))
    d.Set("anti_affinity_rules", flattenAffinityRules(computeRec.AntiAffinityRules))
    d.Set("arch", computeRec.Arch)
    d.Set("boot_order", computeRec.BootOrder)
    d.Set("boot_disk_id", bootDisk.ID)
    d.Set("boot_disk_size", computeRec.BootDiskSize)
    d.Set("cd_image_id", computeRec.CdImageId)
    d.Set("clone_reference", computeRec.CloneReference)
    d.Set("clones", computeRec.Clones)
    d.Set("computeci_id", computeRec.ComputeCIID)
@@ -36,13 +56,11 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
    d.Set("deleted_time", computeRec.DeletedTime)
    d.Set("description", computeRec.Description)
    d.Set("devices", string(devices))
    d.Set("disks",
        flattenComputeDisks(
            computeRec.Disks,
            d.Get("extra_disks").(*schema.Set).List(),
            bootDisk.ID,
        ),
    )
    err := d.Set("disks", flattenComputeDisks(computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
    if err != nil {
        return err
    }
    d.Set("driver", computeRec.Driver)
    d.Set("gid", computeRec.GID)
    d.Set("guid", computeRec.GUID)
    d.Set("compute_id", computeRec.ID)
@@ -53,6 +71,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
    d.Set("manager_type", computeRec.ManagerType)
    d.Set("migrationjob", computeRec.MigrationJob)
    d.Set("milestones", computeRec.Milestones)
    d.Set("need_reboot", computeRec.NeedReboot)
    d.Set("os_users", flattenOSUsers(computeRec.OSUsers))
    d.Set("pinned", computeRec.Pinned)
    d.Set("reference_id", computeRec.ReferenceID)
@@ -69,6 +88,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
    d.Set("tech_status", computeRec.TechStatus)
    d.Set("updated_by", computeRec.UpdatedBy)
    d.Set("updated_time", computeRec.UpdatedTime)
    d.Set("user_data", string(userData))
    d.Set("user_managed", computeRec.UserManaged)
    d.Set("vgpus", computeRec.VGPUs)
    d.Set("virtual_image_id", computeRec.VirtualImageID)
@@ -175,6 +195,7 @@ func flattenComputeDisks(disksList compute.ListDisks, extraDisks []interface{},
            "disk_id":   disk.ID,
            "shareable": disk.Shareable,
            "size_used": disk.SizeUsed,
            "size_max":  disk.SizeMax,
        }
        res = append(res, temp)
    }
@@ -193,15 +214,6 @@ func findInExtraDisks(diskId uint, extraDisks []interface{}) bool {
    return false
}

func findBootDisk(disks compute.ListDisks) *compute.ItemDisk {
    for _, disk := range disks {
        if disk.Type == "B" {
            return &disk
        }
    }
    return nil
}

func flattenAffinityRules(rules compute.ListRules) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(rules))

@@ -217,3 +229,541 @@ func flattenAffinityRules(rules compute.ListRules) []map[string]interface{} {

    return res
}

func flattenComputeList(computes *compute.ListComputes) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(computes.Data))
    for _, computeItem := range computes.Data {
        customFields, _ := json.Marshal(computeItem.CustomFields)
        devices, _ := json.Marshal(computeItem.Devices)
        userData, _ := json.Marshal(computeItem.Userdata)
        temp := map[string]interface{}{
            "acl":                 flattenListACLInterface(computeItem.ACL),
            "account_id":          computeItem.AccountID,
            "account_name":        computeItem.AccountName,
            "affinity_label":      computeItem.AffinityLabel,
            "affinity_rules":      flattenListRules(computeItem.AffinityRules),
            "affinity_weight":     computeItem.AffinityWeight,
            "anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
            "arch":                computeItem.Arch,
            "cd_image_id":         computeItem.CdImageId,
            "boot_order":          computeItem.BootOrder,
            "bootdisk_size":       computeItem.BootDiskSize,
            "clone_reference":     computeItem.CloneReference,
            "clones":              computeItem.Clones,
            "computeci_id":        computeItem.ComputeCIID,
            "cpus":                computeItem.CPUs,
            "created_by":          computeItem.CreatedBy,
            "created_time":        computeItem.CreatedTime,
            "custom_fields":       string(customFields),
            "deleted_by":          computeItem.DeletedBy,
            "deleted_time":        computeItem.DeletedTime,
            "desc":                computeItem.Description,
            "devices":             string(devices),
            "disks":               flattenDisks(computeItem.Disks),
            "driver":              computeItem.Driver,
            "gid":                 computeItem.GID,
            "guid":                computeItem.GUID,
            "compute_id":          computeItem.ID,
            "image_id":            computeItem.ImageID,
            "interfaces":          flattenInterfaces(computeItem.Interfaces),
            "lock_status":         computeItem.LockStatus,
            "manager_id":          computeItem.ManagerID,
            "manager_type":        computeItem.ManagerType,
            "migrationjob":        computeItem.MigrationJob,
            "milestones":          computeItem.Milestones,
            "name":                computeItem.Name,
            "need_reboot":         computeItem.NeedReboot,
            "os_users":            flattenOSUsers(computeItem.OSUsers),
            "pinned":              computeItem.Pinned,
            "ram":                 computeItem.RAM,
            "reference_id":        computeItem.ReferenceID,
            "registered":          computeItem.Registered,
            "res_name":            computeItem.ResName,
            "rg_id":               computeItem.RGID,
            "rg_name":             computeItem.RGName,
            "snap_sets":           flattenSnapSets(computeItem.SnapSets),
            "stack_id":            computeItem.StackID,
            "stateless_sep_id":    computeItem.StatelessSEPID,
            "stateless_sep_type":  computeItem.StatelessSEPType,
            "status":              computeItem.Status,
            "tags":                flattenTags(computeItem.Tags),
            "tech_status":         computeItem.TechStatus,
            "total_disk_size":     computeItem.TotalDiskSize,
            "updated_by":          computeItem.UpdatedBy,
            "updated_time":        computeItem.UpdatedTime,
            "user_data":           string(userData),
            "user_managed":        computeItem.UserManaged,
            "vgpus":               computeItem.VGPUs,
            "vins_connected":      computeItem.VINSConnected,
            "virtual_image_id":    computeItem.VirtualImageID,
        }
        res = append(res, temp)
    }
    return res
}

func flattenListACLInterface(listAcl []interface{}) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(listAcl))
    for _, aclInterface := range listAcl {
        acl := aclInterface.(map[string]interface{})
        temp := map[string]interface{}{
            "explicit":      acl["explicit"],
            "guid":          acl["guid"],
            "right":         acl["right"],
            "status":        acl["status"],
            "type":          acl["type"],
            "user_group_id": acl["user_group_id"],
        }
        res = append(res, temp)
    }
    return res
}

func flattenListACL(listAcl compute.ListACL) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(listAcl))
    for _, acl := range listAcl {
        temp := map[string]interface{}{
            "explicit":      acl.Explicit,
            "guid":          acl.GUID,
            "right":         acl.Right,
            "status":        acl.Status,
            "type":          acl.Type,
            "user_group_id": acl.UserGroupID,
        }
        res = append(res, temp)
    }
    return res
}

func flattenListComputeACL(listAcl []compute.ItemComputeACL) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(listAcl))
    for _, acl := range listAcl {
        temp := map[string]interface{}{
            "explicit":      acl.Explicit,
            "guid":          acl.GUID,
            "right":         acl.Right,
            "status":        acl.Status,
            "type":          acl.Type,
            "user_group_id": acl.UserGroupID,
        }
        res = append(res, temp)
    }
    return res
}

func flattenListRules(listRules compute.ListRules) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(listRules))
    for _, rule := range listRules {
        temp := map[string]interface{}{
            "guid":     rule.GUID,
            "key":      rule.Key,
            "mode":     rule.Mode,
            "policy":   rule.Policy,
            "topology": rule.Topology,
            "value":    rule.Value,
        }
        res = append(res, temp)
    }
    return res
}

func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
    res := make([]map[string]interface{}, 0)
    for _, disk := range disks {
        temp := map[string]interface{}{
            "disk_id":  disk.ID,
            "pci_slot": disk.PCISlot,
        }
        res = append(res, temp)
    }
    return res
}

func flattenComputeAudits(computeAudits compute.ListDetailedAudits) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(computeAudits))
    for _, computeAudit := range computeAudits {
        temp := map[string]interface{}{
            "call":         computeAudit.Call,
            "responsetime": computeAudit.ResponseTime,
            "statuscode":   computeAudit.StatusCode,
            "timestamp":    computeAudit.Timestamp,
            "user":         computeAudit.User,
        }
        res = append(res, temp)
    }
    return res
}

func flattenComputeGetAudits(computeAudits compute.ListAudits) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(computeAudits))
    for _, computeAudit := range computeAudits {
        temp := map[string]interface{}{
            "epoch":   computeAudit.Epoch,
            "message": computeAudit.Message,
        }
        res = append(res, temp)
    }
    return res
}

func flattenPfwList(computePfws *compute.ListPFW) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(computePfws.Data))
    for _, computePfw := range computePfws.Data {
        temp := map[string]interface{}{
            "pfw_id":            computePfw.ID,
            "local_ip":          computePfw.LocalIP,
            "local_port":        computePfw.LocalPort,
            "protocol":          computePfw.Protocol,
            "public_port_end":   computePfw.PublicPortEnd,
            "public_port_start": computePfw.PublicPortStart,
            "vm_id":             computePfw.VMID,
        }
        res = append(res, temp)
    }
    return res
}

func flattenUserList(d *schema.ResourceData, userList *compute.ListUsers) {
    d.Set("account_acl", flattenListACL(userList.Data.AccountACL))
    d.Set("compute_acl", flattenListComputeACL(userList.Data.ComputeACL))
    d.Set("rg_acl", flattenListACL(userList.Data.RGACL))
}

func flattenSnapshotList(computeSnapshots *compute.ListSnapShot) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(computeSnapshots.Data))
    for _, snp := range computeSnapshots.Data {
        temp := map[string]interface{}{
            "disks":     snp.Disks,
            "guid":      snp.GUID,
            "label":     snp.Label,
            "timestamp": snp.Timestamp,
        }
        res = append(res, temp)
    }
    return res
}

func flattenAffinityRelations(d *schema.ResourceData, ar *compute.RecordAffinityRelations) {
    d.Set("other_node", flattenNodes(ar.OtherNode))
    d.Set("other_node_indirect", flattenNodes(ar.OtherNodeIndirect))
    d.Set("other_node_indirect_soft", flattenNodes(ar.OtherNodeIndirectSoft))
    d.Set("other_node_soft", flattenNodes(ar.OtherNodeSoft))
    d.Set("same_node", flattenNodes(ar.SameNode))
    d.Set("same_node_soft", flattenNodes(ar.SameNodeSoft))
}

func flattenSnapshotUsage(computeSnapshotUsages compute.ListSnapshotUsage) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(computeSnapshotUsages))
    for _, computeUsage := range computeSnapshotUsages {
        temp := map[string]interface{}{
            "count":     computeUsage.Count,
            "stored":    computeUsage.Stored,
            "label":     computeUsage.Label,
            "timestamp": computeUsage.Timestamp,
        }
        res = append(res, temp)
    }
    return res
}

func flattenPCIDevice(deviceList []compute.ItemPCIDevice) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(deviceList))
    for _, dev := range deviceList {
        temp := map[string]interface{}{
            "ckey":        dev.CKey,
            "meta":        flattens.FlattenMeta(dev.Meta),
            "compute_id":  dev.ComputeID,
            "description": dev.Description,
            "guid":        dev.GUID,
            "hwpath":      dev.HwPath,
            "device_id":   dev.ID,
            "name":        dev.Name,
            "rg_id":       dev.RGID,
            "stack_id":    dev.StackID,
            "status":      dev.Status,
            "system_name": dev.SystemName,
        }
        res = append(res, temp)
    }
    return res
}

func flattenVGPU(m []interface{}) []string {
    var output []string
    for _, item := range m {
        switch d := item.(type) {
        case string:
            output = append(output, d)
        case int:
            output = append(output, strconv.Itoa(d))
        case int64:
            output = append(output, strconv.FormatInt(d, 10))
        case float64:
            output = append(output, strconv.FormatInt(int64(d), 10))
        default:
            output = append(output, "")
        }
    }
    return output
}

func flattenNodes(m []interface{}) []string {
    var output []string
    for _, item := range m {
        switch d := item.(type) {
        case string:
            output = append(output, d)
        case int:
            output = append(output, strconv.Itoa(d))
        case int64:
            output = append(output, strconv.FormatInt(d, 10))
        case float64:
            output = append(output, strconv.FormatInt(int64(d), 10))
        default:
            output = append(output, "")
        }
    }
    return output
}
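
flattenVGPU and flattenNodes above have identical bodies; a possible consolidation (a hypothetical refactor, not part of this commit) would be a single helper reusing the file's strconv import:

// flattenToStrings is a hypothetical helper that could replace both
// flattenVGPU and flattenNodes (their bodies are byte-for-byte identical).
func flattenToStrings(m []interface{}) []string {
    output := make([]string, 0, len(m))
    for _, item := range m {
        switch d := item.(type) {
        case string:
            output = append(output, d)
        case int:
            output = append(output, strconv.Itoa(d))
        case int64:
            output = append(output, strconv.FormatInt(d, 10))
        case float64:
            output = append(output, strconv.FormatInt(int64(d), 10))
        default:
            output = append(output, "")
        }
    }
    return output
}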

func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error {
    // This function expects that compFacts contains the response from API compute/get,
    // i.e. detailed information about the compute instance.
    //
    // NOTE: this function modifies its ResourceData argument - as such it should never be called
    // from the resourceComputeExists(...) method

    log.Debugf("flattenCompute: ID %d, RG ID %d", compFacts.ID, compFacts.RGID)

    customFields, _ := json.Marshal(compFacts.CustomFields)
    devices, _ := json.Marshal(compFacts.Devices)
    userData, _ := json.Marshal(compFacts.Userdata)

    // general fields setting
    d.SetId(fmt.Sprintf("%d", compFacts.ID))
    d.Set("account_id", compFacts.AccountID)
    d.Set("account_name", compFacts.AccountName)
    d.Set("acl", flattenListACLInterface(compFacts.ACL))
    d.Set("affinity_label", compFacts.AffinityLabel)
    d.Set("affinity_rules", flattenAffinityRules(compFacts.AffinityRules))
    d.Set("affinity_weight", compFacts.AffinityWeight)
    d.Set("anti_affinity_rules", flattenAffinityRules(compFacts.AntiAffinityRules))
    d.Set("arch", compFacts.Arch)
    d.Set("boot_order", compFacts.BootOrder)
    d.Set("cd_image_id", compFacts.CdImageId)
    d.Set("clone_reference", compFacts.CloneReference)
    d.Set("clones", compFacts.Clones)
    d.Set("computeci_id", compFacts.ComputeCIID)
    d.Set("cpus", compFacts.CPUs)
    d.Set("created_by", compFacts.CreatedBy)
    d.Set("created_time", compFacts.CreatedTime)
    d.Set("custom_fields", string(customFields))
    d.Set("deleted_by", compFacts.DeletedBy)
    d.Set("deleted_time", compFacts.DeletedTime)
    d.Set("desc", compFacts.Description)
    d.Set("devices", string(devices))
    d.Set("disks", flattenDisk(compFacts.Disks))
    d.Set("driver", compFacts.Driver)
    d.Set("gid", compFacts.GID)
    d.Set("guid", compFacts.GUID)
    d.Set("image_id", compFacts.ImageID)
    d.Set("interfaces", flattenInterfaces(compFacts.Interfaces))
    d.Set("lock_status", compFacts.LockStatus)
    d.Set("manager_id", compFacts.ManagerID)
    d.Set("manager_type", compFacts.ManagerType)
    d.Set("migrationjob", compFacts.MigrationJob)
    d.Set("milestones", compFacts.Milestones)
    d.Set("name", compFacts.Name)
    d.Set("need_reboot", compFacts.NeedReboot)
    d.Set("os_users", flattenOSUsers(compFacts.OSUsers))
    d.Set("pinned", compFacts.Pinned)
    d.Set("ram", compFacts.RAM)
    d.Set("reference_id", compFacts.ReferenceID)
    d.Set("registered", compFacts.Registered)
    d.Set("res_name", compFacts.ResName)
    d.Set("rg_id", compFacts.RGID)
    d.Set("rg_name", compFacts.RGName)
    d.Set("snap_sets", flattenSnapSets(compFacts.SnapSets))
    d.Set("stack_id", compFacts.StackID)
    d.Set("stack_name", compFacts.StackName)
    d.Set("stateless_sep_id", compFacts.StatelessSEPID)
    d.Set("stateless_sep_type", compFacts.StatelessSEPType)
    d.Set("status", compFacts.Status)
    d.Set("tags", flattenTags(compFacts.Tags))
    d.Set("tech_status", compFacts.TechStatus)
    d.Set("updated_by", compFacts.UpdatedBy)
    d.Set("updated_time", compFacts.UpdatedTime)
    d.Set("user_data", string(userData))
    d.Set("user_managed", compFacts.UserManaged)
    d.Set("vgpus", compFacts.VGPUs)
    d.Set("virtual_image_id", compFacts.VirtualImageID)

    // extra fields setting
    bootDisk := findBootDisk(compFacts.Disks)
    if bootDisk != nil {
        d.Set("boot_disk_size", bootDisk.SizeMax)
        d.Set("boot_disk_id", bootDisk.ID) // we may need the boot disk ID in resize operations
        d.Set("sep_id", bootDisk.SEPID)
        d.Set("pool", bootDisk.Pool)
    }

    if len(compFacts.Disks) > 0 {
        log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(compFacts.Disks))
        if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(compFacts.Disks)); err != nil {
            return err
        }
    }

    if len(compFacts.Interfaces) > 0 {
        log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(compFacts.Interfaces))
        if err := d.Set("network", parseComputeInterfacesToNetworks(compFacts.Interfaces)); err != nil {
            return err
        }
    }

    return nil
}

// Parse the list of all disks from an API compute/get response into the list of "extra disks"
// attached to this compute. Extra disks are all compute disks except the boot disk.
func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} {
    // the return value is passed to d.Set("extra_disks", ...) of the dataSourceCompute schema,
    // which is a simple list of integer disk IDs excluding the boot disk ID
    length := len(disks)
    log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", length)

    if length == 0 || (length == 1 && disks[0].Type == "B") {
        // the disk list is empty (which is kind of strange - a diskless compute?), or
        // there is only one disk in the list and it is a boot disk;
        // as we skip boot disks, the result will be of 0 length anyway
        return make([]interface{}, 0)
    }

    result := make([]interface{}, length-1)
    idx := 0
    for _, value := range disks {
        if value.Type == "B" {
            // skip the boot disk when iterating over the list of disks
            continue
        }

        result[idx] = value.ID
        idx++
    }

    return result
}

// Parse the list of interfaces from a compute/get response into the list of networks
// attached to this compute.
func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} {
    // the return value is passed to d.Set("network", ...) of the dataSourceCompute schema
    length := len(ifaces)
    log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)

    result := []interface{}{}

    for _, value := range ifaces {
        elem := make(map[string]interface{})
        // Keys in this map should correspond to the Schema definition for "network"
        elem["net_id"] = value.NetID
        elem["net_type"] = value.NetType
        elem["ip_address"] = value.IPAddress
        elem["mac"] = value.MAC

        result = append(result, elem)
    }

    return result
}

func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
    res := make([]map[string]interface{}, 0, len(diskList))
    for _, disk := range diskList {
        temp := map[string]interface{}{
            "ckey":                  disk.CKey,
            "meta":                  flattens.FlattenMeta(disk.Meta),
            "account_id":            disk.AccountID,
            "boot_partition":        disk.BootPartition,
            "created_time":          disk.CreatedTime,
            "deleted_time":          disk.DeletedTime,
            "desc":                  disk.Description,
            "destruction_time":      disk.DestructionTime,
            "disk_path":             disk.DiskPath,
            "gid":                   disk.GID,
            "guid":                  disk.GUID,
            "disk_id":               disk.ID,
            "image_id":              disk.ImageID,
            "images":                disk.Images,
            "iotune":                flattenIOTune(disk.IOTune),
            "iqn":                   disk.IQN,
            "login":                 disk.Login,
            "milestones":            disk.Milestones,
            "name":                  disk.Name,
            "order":                 disk.Order,
            "params":                disk.Params,
            "parent_id":             disk.ParentID,
            "passwd":                disk.Password,
            "pci_slot":              disk.PCISlot,
            "pool":                  disk.Pool,
            "purge_attempts":        disk.PurgeAttempts,
            "present_to":            disk.PresentTo,
            "purge_time":            disk.PurgeTime,
            "reality_device_number": disk.RealityDeviceNumber,
            "reference_id":          disk.ReferenceID,
            "res_id":                disk.ResID,
            "res_name":              disk.ResName,
            "role":                  disk.Role,
            "sep_id":                disk.SEPID,
            "shareable":             disk.Shareable,
            "size_max":              disk.SizeMax,
            "size_used":             disk.SizeUsed,
            "snapshots":             flattendDiskSnapshotList(disk.Snapshots),
            "status":                disk.Status,
            "tech_status":           disk.TechStatus,
            "type":                  disk.Type,
            "vmid":                  disk.VMID,
        }
        res = append(res, temp)
    }
    return res
}

func flattenIOTune(iot compute.IOTune) []map[string]interface{} {
    res := make([]map[string]interface{}, 0)
    temp := map[string]interface{}{
        "read_bytes_sec":      iot.ReadBytesSec,
        "read_bytes_sec_max":  iot.ReadBytesSecMax,
        "read_iops_sec":       iot.ReadIOPSSec,
        "read_iops_sec_max":   iot.ReadIOPSSecMax,
        "size_iops_sec":       iot.SizeIOPSSec,
        "total_bytes_sec":     iot.TotalBytesSec,
        "total_bytes_sec_max": iot.TotalBytesSecMax,
        "total_iops_sec":      iot.TotalIOPSSec,
        "total_iops_sec_max":  iot.TotalIOPSSecMax,
        "write_bytes_sec":     iot.WriteBytesSec,
        "write_bytes_sec_max": iot.WriteBytesSecMax,
        "write_iops_sec":      iot.WriteIOPSSec,
        "write_iops_sec_max":  iot.WriteIOPSSecMax,
    }

    res = append(res, temp)
    return res
}

func flattendDiskSnapshotList(sl compute.ListDetailedSnapshots) []interface{} {
    res := make([]interface{}, 0)
    for _, snapshot := range sl {
        temp := map[string]interface{}{
            "guid":          snapshot.GUID,
            "label":         snapshot.Label,
            "res_id":        snapshot.ResID,
            "snap_set_guid": snapshot.SnapSetGUID,
            "snap_set_time": snapshot.SnapSetTime,
            "timestamp":     snapshot.TimeStamp,
        }
        res = append(res, temp)
    }

    return res
}
(File diff suppressed because it is too large)
@@ -34,6 +34,7 @@ package kvmvm

import (
    "context"
    "strconv"
    "strings"

    log "github.com/sirupsen/logrus"
    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
@@ -42,6 +43,260 @@ import (
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func utilityComputeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
    enabled := d.Get("enabled").(bool)

    if enabled {
        req := compute.EnableRequest{
            ComputeID: computeId,
        }

        if _, err := c.CloudBroker().Compute().Enable(ctx, req); err != nil {
            return err
        }
    } else {
        req := compute.DisableRequest{
            ComputeID: computeId,
        }

        if _, err := c.CloudBroker().Compute().Disable(ctx, req); err != nil {
            return err
        }
    }
    log.Debugf("resourceComputeUpdate: enable=%v Compute ID %s after completing its resource configuration", enabled, d.Id())

    return nil
}
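
The utility helpers in this file all parse d.Id() with the error discarded; a stricter variant (hypothetical, and assuming "fmt" is imported in this file) would surface a malformed resource ID once:

// parseComputeID is a hypothetical stricter replacement for the
// `computeId, _ := strconv.ParseUint(d.Id(), 10, 64)` pattern used below.
func parseComputeID(d *schema.ResourceData) (uint64, error) {
    id, err := strconv.ParseUint(d.Id(), 10, 64)
    if err != nil {
        return 0, fmt.Errorf("invalid compute ID %q: %w", d.Id(), err)
    }
    return id, nil
}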

func utilityComputeStarted(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    if d.Get("started").(bool) {
        req := compute.StartRequest{
            ComputeID: computeId,
        }
        if altBootId, ok := d.Get("alt_boot_id").(int); ok {
            req.AltBootID = uint64(altBootId)
        }
        if stackId, ok := d.Get("stack_id").(int); ok {
            req.StackID = uint64(stackId)
        }
        if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil {
            return err
        }
    } else {
        req := compute.StopRequest{
            ComputeID: computeId,
        }
        if force, ok := d.Get("force_stop").(bool); ok {
            req.Force = force
        }
        if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil {
            return err
        }
    }
    return nil
}

func utilityComputeResize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)
    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    resizeReq := compute.ResizeRequest{
        ComputeID: computeId,
        Force:     true,
    }
    doUpdate := false

    oldCpu, newCpu := d.GetChange("cpu")
    if oldCpu.(int) != newCpu.(int) {
        resizeReq.CPU = uint64(newCpu.(int))
        doUpdate = true
    } else {
        resizeReq.CPU = 0
    }

    oldRam, newRam := d.GetChange("ram")
    if oldRam.(int) != newRam.(int) {
        resizeReq.RAM = uint64(newRam.(int))
        doUpdate = true
    } else {
        resizeReq.RAM = 0
    }

    if doUpdate {
        log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d",
            oldCpu.(int), newCpu.(int),
            oldRam.(int), newRam.(int))
        _, err := c.CloudBroker().Compute().Resize(ctx, resizeReq)
        if err != nil {
            return err
        }
    }

    return nil
}
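
The resize logic leaves an unchanged dimension at zero, which the code above treats as "keep the current value". A standalone sketch of that request-building convention (types hypothetical, modeled on the logic above):

package main

import "fmt"

type resizeRequest struct {
    CPU uint64 // 0 is taken to mean "do not change the vCPU count"
    RAM uint64 // 0 is taken to mean "do not change the RAM size"
}

// buildResize fills only the dimensions that actually changed and reports
// whether an update call is needed at all.
func buildResize(oldCPU, newCPU, oldRAM, newRAM int) (resizeRequest, bool) {
    req := resizeRequest{}
    changed := false
    if oldCPU != newCPU {
        req.CPU = uint64(newCPU)
        changed = true
    }
    if oldRAM != newRAM {
        req.RAM = uint64(newRAM)
        changed = true
    }
    return req, changed
}

func main() {
    req, doUpdate := buildResize(2, 4, 4096, 4096)
    fmt.Println(req, doUpdate) // {4 0} true - only the CPU field is sent
}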

func utilityComputeBootDiskResize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)
    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    oldSize, newSize := d.GetChange("boot_disk_size")
    if oldSize.(int) < newSize.(int) {
        req := compute.DiskResizeRequest{ComputeID: computeId, Size: uint64(newSize.(int))}
        if diskId, ok := d.GetOk("boot_disk_id"); ok {
            req.DiskID = uint64(diskId.(int))
        } else {
            bootDisk, err := utilityComputeBootDiskCheckPresence(ctx, d, m)
            if err != nil {
                return err
            }

            req.DiskID = bootDisk.ID
        }

        log.Debugf("resourceComputeUpdate: compute ID %s, boot disk ID %d resize %d -> %d",
            d.Id(), d.Get("boot_disk_id").(int), oldSize.(int), newSize.(int))

        _, err := c.CloudBroker().Compute().DiskResize(ctx, req)
        if err != nil {
            return err
        }
    } else if oldSize.(int) > newSize.(int) {
        log.Warnf("resourceComputeUpdate: compute ID %s - shrinking boot disk is not allowed", d.Id())
    }

    return nil
}

func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)
    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    deletedDisks := make([]interface{}, 0)
    addedDisks := make([]interface{}, 0)
    updatedDisks := make([]interface{}, 0)

    oldDisks, newDisks := d.GetChange("disks")
    oldConv := oldDisks.([]interface{})
    newConv := newDisks.([]interface{})

    for _, el := range oldConv {
        if !isContainsDisk(newConv, el) {
            deletedDisks = append(deletedDisks, el)
        }
    }

    for _, el := range newConv {
        if !isContainsDisk(oldConv, el) {
            addedDisks = append(addedDisks, el)
        } else {
            if isChangeDisk(oldConv, el) {
                updatedDisks = append(updatedDisks, el)
            }
        }
    }

    if len(deletedDisks) > 0 {
        stopReq := compute.StopRequest{
            ComputeID: computeId,
            Force:     false,
        }

        _, err := c.CloudBroker().Compute().Stop(ctx, stopReq)
        if err != nil {
            return err
        }

        for _, disk := range deletedDisks {
            diskConv := disk.(map[string]interface{})
            if diskConv["disk_type"].(string) == "B" {
                continue
            }

            req := compute.DiskDelRequest{
                ComputeID:   computeId,
                DiskID:      uint64(diskConv["disk_id"].(int)),
                Permanently: diskConv["permanently"].(bool),
            }

            _, err := c.CloudBroker().Compute().DiskDel(ctx, req)
            if err != nil {
                return err
            }
        }
        req := compute.StartRequest{
            ComputeID: computeId,
            AltBootID: 0,
        }
        _, err = c.CloudBroker().Compute().Start(ctx, req)
        if err != nil {
            return err
        }
    }

    if len(addedDisks) > 0 {
        for _, disk := range addedDisks {
            diskConv := disk.(map[string]interface{})
            if diskConv["disk_type"].(string) == "B" {
                continue
            }
            req := compute.DiskAddRequest{
                ComputeID: computeId,
                DiskName:  diskConv["disk_name"].(string),
                Size:      uint64(diskConv["size"].(int)),
            }
            if diskConv["sep_id"].(int) != 0 {
                req.SepID = uint64(diskConv["sep_id"].(int))
            }
            if diskConv["disk_type"].(string) != "" {
                req.DiskType = diskConv["disk_type"].(string)
            }
            if diskConv["pool"].(string) != "" {
                req.Pool = diskConv["pool"].(string)
            }
            if diskConv["desc"].(string) != "" {
                req.Description = diskConv["desc"].(string)
            }
            if diskConv["image_id"].(int) != 0 {
                req.ImageID = uint64(diskConv["image_id"].(int))
            }
            _, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
            if err != nil {
                return err
            }
        }
    }

    if len(updatedDisks) > 0 {
        for _, disk := range updatedDisks {
            diskConv := disk.(map[string]interface{})
            if diskConv["disk_type"].(string) == "B" {
                continue
            }
            req := compute.DiskResizeRequest{
                ComputeID: computeId,
                DiskID:    uint64(diskConv["disk_id"].(int)),
                Size:      uint64(diskConv["size"].(int)),
            }

            _, err := c.CloudBroker().Compute().DiskResize(ctx, req)
            if err != nil {
                return err
            }
        }
    }

    return nil
}
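
Disk deletion above is bracketed by a stop/start of the compute. A compact, runnable sketch of that orchestration pattern (interface and names hypothetical):

package main

import (
    "context"
    "fmt"
)

// computeAPI abstracts the two lifecycle calls used by the delete path.
type computeAPI interface {
    Stop(ctx context.Context, id uint64, force bool) error
    Start(ctx context.Context, id uint64) error
}

// withComputeStopped stops the compute, runs fn, then starts the compute again.
func withComputeStopped(ctx context.Context, c computeAPI, id uint64, fn func() error) error {
    if err := c.Stop(ctx, id, false); err != nil {
        return err
    }
    if err := fn(); err != nil {
        return err
    }
    return c.Start(ctx, id)
}

type noopAPI struct{}

func (noopAPI) Stop(ctx context.Context, id uint64, force bool) error { return nil }
func (noopAPI) Start(ctx context.Context, id uint64) error            { return nil }

func main() {
    err := withComputeStopped(context.Background(), noopAPI{}, 42, func() error {
        fmt.Println("detach disks here")
        return nil
    })
    fmt.Println(err)
}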

func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
    c := m.(*controller.ControllerCfg)
@@ -154,6 +409,10 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
        req.ComputeID = uint64(d.Get("compute_id").(int))
    }

    if reason, ok := d.GetOk("reason"); ok {
        req.Reason = reason.(string)
    }

    res, err := c.CloudBroker().Compute().Get(ctx, req)
    if err != nil {
        return nil, err
@@ -162,6 +421,15 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
    return res, nil
}

func findBootDisk(disks compute.ListDisks) *compute.ItemDisk {
    for _, disk := range disks {
        if disk.Type == "B" {
            return &disk
        }
    }
    return nil
}
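
Note that findBootDisk returns a pointer to the range variable; that is safe here only because the function returns immediately within the iteration, while holding such a pointer across iterations would not be. A standalone illustration:

package main

import "fmt"

type item struct {
    ID   int
    Type string
}

func firstBoot(items []item) *item {
    for _, it := range items {
        if it.Type == "B" {
            // &it points at the per-iteration copy; returning right away is safe.
            return &it
        }
    }
    return nil
}

func main() {
    fmt.Println(*firstBoot([]item{{1, "D"}, {2, "B"}})) // {2 B}
}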

func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool {
    if newVal != "" && newVal != oldVal {
        log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
@@ -288,6 +556,659 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
    return nil
}

func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    req := compute.UpdateRequest{
        ComputeID: computeId,
    }

    if d.HasChange("name") {
        req.Name = d.Get("name").(string)
    }
    if d.HasChange("desc") {
        req.Description = d.Get("desc").(string)
    }

    if _, err := c.CloudBroker().Compute().Update(ctx, req); err != nil {
        return err
    }

    return nil
}

func utilityComputeUpdateAffinityLabel(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    affinityLabel := d.Get("affinity_label").(string)
    if affinityLabel == "" {
        req := compute.AffinityLabelRemoveRequest{
            ComputeIDs: []uint64{computeId},
        }

        _, err := c.CloudBroker().Compute().AffinityLabelRemove(ctx, req)
        if err != nil {
            return err
        }

        // nothing left to set once the label has been removed
        return nil
    }

    req := compute.AffinityLabelSetRequest{
        ComputeIDs:    []uint64{computeId},
        AffinityLabel: affinityLabel,
    }

    _, err := c.CloudBroker().Compute().AffinityLabelSet(ctx, req)
    if err != nil {
        return err
    }

    return nil
}

func utilityComputeUpdateAffinityRules(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    deletedAR := make([]interface{}, 0)
    addedAR := make([]interface{}, 0)

    oldAR, newAR := d.GetChange("affinity_rules")
    oldConv := oldAR.([]interface{})
    newConv := newAR.([]interface{})

    if len(newConv) == 0 {
        req := compute.AffinityRulesClearRequest{
            ComputeIDs: []uint64{computeId},
        }

        _, err := c.CloudBroker().Compute().AffinityRulesClear(ctx, req)
        if err != nil {
            return err
        }
    } else {
        for _, el := range oldConv {
            if !isContainsAR(newConv, el) {
                deletedAR = append(deletedAR, el)
            }
        }
        for _, el := range newConv {
            if !isContainsAR(oldConv, el) {
                addedAR = append(addedAR, el)
            }
        }

        if len(deletedAR) > 0 {
            for _, ar := range deletedAR {
                arConv := ar.(map[string]interface{})
                req := compute.AffinityRuleRemoveRequest{
                    ComputeIDs: []uint64{computeId},
                    Topology:   arConv["topology"].(string),
                    Policy:     arConv["policy"].(string),
                    Mode:       arConv["mode"].(string),
                    Key:        arConv["key"].(string),
                    Value:      arConv["value"].(string),
                }

                _, err := c.CloudBroker().Compute().AffinityRuleRemove(ctx, req)
                if err != nil {
                    return err
                }
            }
        }
        if len(addedAR) > 0 {
            for _, ar := range addedAR {
                arConv := ar.(map[string]interface{})
                req := compute.AffinityRuleAddRequest{
                    ComputeIDs: []uint64{computeId},
                    Topology:   arConv["topology"].(string),
                    Policy:     arConv["policy"].(string),
                    Mode:       arConv["mode"].(string),
                    Key:        arConv["key"].(string),
                    Value:      arConv["value"].(string),
                }

                _, err := c.CloudBroker().Compute().AffinityRuleAdd(ctx, req)
                if err != nil {
                    return err
                }
            }
        }
    }

    return nil
}

func utilityComputeUpdateAntiAffinityRules(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    deletedAR := make([]interface{}, 0)
    addedAR := make([]interface{}, 0)

    oldAR, newAR := d.GetChange("anti_affinity_rules")
    oldConv := oldAR.([]interface{})
    newConv := newAR.([]interface{})

    if len(newConv) == 0 {
        req := compute.AntiAffinityRulesClearRequest{
            ComputeIDs: []uint64{computeId},
        }

        _, err := c.CloudBroker().Compute().AntiAffinityRulesClear(ctx, req)
        if err != nil {
            return err
        }
    } else {
        for _, el := range oldConv {
            if !isContainsAR(newConv, el) {
                deletedAR = append(deletedAR, el)
            }
        }
        for _, el := range newConv {
            if !isContainsAR(oldConv, el) {
                addedAR = append(addedAR, el)
            }
        }

        if len(deletedAR) > 0 {
            for _, ar := range deletedAR {
                arConv := ar.(map[string]interface{})
                req := compute.AntiAffinityRuleRemoveRequest{
                    ComputeIDs: []uint64{computeId},
                    Topology:   arConv["topology"].(string),
                    Policy:     arConv["policy"].(string),
                    Mode:       arConv["mode"].(string),
                    Key:        arConv["key"].(string),
                    Value:      arConv["value"].(string),
                }

                _, err := c.CloudBroker().Compute().AntiAffinityRuleRemove(ctx, req)
                if err != nil {
                    return err
                }
            }
        }
        if len(addedAR) > 0 {
            for _, ar := range addedAR {
                arConv := ar.(map[string]interface{})
                req := compute.AntiAffinityRuleAddRequest{
                    ComputeIDs: []uint64{computeId},
                    Topology:   arConv["topology"].(string),
                    Policy:     arConv["policy"].(string),
                    Mode:       arConv["mode"].(string),
                    Key:        arConv["key"].(string),
                    Value:      arConv["value"].(string),
                }

                _, err := c.CloudBroker().Compute().AntiAffinityRuleAdd(ctx, req)
                if err != nil {
                    return err
                }
            }
        }
    }

    return nil
}

func utilityComputeUpdateTags(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    oldSet, newSet := d.GetChange("tags")
    deletedTags := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
    if len(deletedTags) > 0 {
        for _, tagInterface := range deletedTags {
            tagItem := tagInterface.(map[string]interface{})
            req := compute.TagRemoveRequest{
                ComputeIDs: []uint64{computeId},
                Key:        tagItem["key"].(string),
            }

            _, err := c.CloudBroker().Compute().TagRemove(ctx, req)
            if err != nil {
                return err
            }
        }
    }

    addedTags := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
    if len(addedTags) > 0 {
        for _, tagInterface := range addedTags {
            tagItem := tagInterface.(map[string]interface{})
            req := compute.TagAddRequest{
                ComputeIDs: []uint64{computeId},
                Key:        tagItem["key"].(string),
                Value:      tagItem["value"].(string),
            }

            _, err := c.CloudBroker().Compute().TagAdd(ctx, req)
            if err != nil {
                return err
            }
        }
    }

    return nil
}
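
The tag, port-forwarding, user-access, snapshot, and CD updaters all rely on the same two-way set difference. In isolation, with illustrative values:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func main() {
    oldSet := schema.NewSet(schema.HashString, []interface{}{"a", "b"})
    newSet := schema.NewSet(schema.HashString, []interface{}{"b", "c"})

    fmt.Println(oldSet.Difference(newSet).List()) // [a] - present before, gone now: remove
    fmt.Println(newSet.Difference(oldSet).List()) // [c] - newly added: create
}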

func utilityComputeUpdatePFW(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    oldSet, newSet := d.GetChange("port_forwarding")
    deletedPfws := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
    if len(deletedPfws) > 0 {
        for _, pfwInterface := range deletedPfws {
            pfwItem := pfwInterface.(map[string]interface{})
            req := compute.PFWDelRequest{
                ComputeID:       computeId,
                PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
                LocalBasePort:   uint64(pfwItem["local_port"].(int)),
                Proto:           pfwItem["proto"].(string),
                RuleID:          uint64(pfwItem["rule_id"].(int)),
            }

            if pfwItem["public_port_end"].(int) == -1 {
                req.PublicPortEnd = req.PublicPortStart
            } else {
                req.PublicPortEnd = uint64(pfwItem["public_port_end"].(int))
            }
            if pfwItem["reason"].(string) != "" {
                req.Reason = pfwItem["reason"].(string)
            }

            _, err := c.CloudBroker().Compute().PFWDel(ctx, req)
            if err != nil {
                return err
            }
        }
    }

    addedPfws := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
    if len(addedPfws) > 0 {
        for _, pfwInterface := range addedPfws {
            pfwItem := pfwInterface.(map[string]interface{})
            req := compute.PFWAddRequest{
                ComputeID:       computeId,
                PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
                PublicPortEnd:   int64(pfwItem["public_port_end"].(int)),
                LocalBasePort:   uint64(pfwItem["local_port"].(int)),
                Proto:           pfwItem["proto"].(string),
            }

            if pfwItem["reason"].(string) != "" {
                req.Reason = pfwItem["reason"].(string)
            }

            pfwID, err := c.CloudBroker().Compute().PFWAdd(ctx, req)
            if err != nil {
                return err
            }
            d.Set("rule_id", pfwID)
        }
    }

    return nil
}

func utilityComputeRestore(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    restoreReq := compute.RestoreRequest{ComputeID: computeId}

    _, err := c.CloudBroker().Compute().Restore(ctx, restoreReq)
    if err != nil {
        return err
    }

    if _, ok := d.GetOk("enabled"); ok {
        if err := utilityComputeEnabled(ctx, d, m); err != nil {
            return err
        }
    }

    if _, ok := d.GetOk("started"); ok {
        if err := utilityComputeStarted(ctx, d, m); err != nil {
            return err
        }
    }

    return nil
}

func utilityComputeUpdateUserAccess(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    oldSet, newSet := d.GetChange("user_access")
    deletedUserAccess := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
    if len(deletedUserAccess) > 0 {
        for _, userAccessInterface := range deletedUserAccess {
            userAccessItem := userAccessInterface.(map[string]interface{})
            req := compute.UserRevokeRequest{
                ComputeID: computeId,
                Username:  userAccessItem["username"].(string),
            }

            _, err := c.CloudBroker().Compute().UserRevoke(ctx, req)
            if err != nil {
                return err
            }
        }
    }

    addedUserAccess := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
    if len(addedUserAccess) > 0 {
        for _, userAccessInterface := range addedUserAccess {
            userAccessItem := userAccessInterface.(map[string]interface{})
            req := compute.UserGrantRequest{
                ComputeID:  computeId,
                Username:   userAccessItem["username"].(string),
                AccessType: userAccessItem["access_type"].(string),
            }

            _, err := c.CloudBroker().Compute().UserGrant(ctx, req)
            if err != nil {
                return err
            }
        }
    }

    return nil
}

func utilityComputeUpdateSnapshot(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    oldSet, newSet := d.GetChange("snapshot")
    deletedSnapshots := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
    if len(deletedSnapshots) > 0 {
        for _, snapshotInterface := range deletedSnapshots {
            snapshotItem := snapshotInterface.(map[string]interface{})
            req := compute.SnapshotDeleteRequest{
                ComputeID: computeId,
                Label:     snapshotItem["label"].(string),
            }

            _, err := c.CloudBroker().Compute().SnapshotDelete(ctx, req)
            if err != nil {
                return err
            }
        }
    }

    addedSnapshots := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
    if len(addedSnapshots) > 0 {
        for _, snapshotInterface := range addedSnapshots {
            snapshotItem := snapshotInterface.(map[string]interface{})
            req := compute.SnapshotCreateRequest{
                ComputeID: computeId,
                Label:     snapshotItem["label"].(string),
            }

            _, err := c.CloudBroker().Compute().SnapshotCreate(ctx, req)
            if err != nil {
                return err
            }
        }
    }

    return nil
}
|
||||
|
||||
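// utilityComputeRollback rolls the compute back to the snapshot named in the
// "rollback" block: the machine is stopped, rolled back by label, and started
// again.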
func utilityComputeRollback(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    if rollback, ok := d.GetOk("rollback"); ok {
        req := compute.StopRequest{
            ComputeID: computeId,
            Force:     false,
        }

        _, err := c.CloudBroker().Compute().Stop(ctx, req)
        if err != nil {
            return err
        }

        rollbackInterface := rollback.(*schema.Set).List()[0]
        rollbackItem := rollbackInterface.(map[string]interface{})

        rollbackReq := compute.SnapshotRollbackRequest{
            ComputeID: computeId,
            Label:     rollbackItem["label"].(string),
        }

        _, err = c.CloudBroker().Compute().SnapshotRollback(ctx, rollbackReq)
        if err != nil {
            return err
        }

        startReq := compute.StartRequest{ComputeID: computeId}

        log.Debugf("utilityComputeRollback: starting compute %d", computeId)

        _, err = c.CloudBroker().Compute().Start(ctx, startReq)
        if err != nil {
            return err
        }
    }

    return nil
}

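// utilityComputeUpdateCD reconciles the "cd" set: it ejects the current CD
// image when the block is removed and inserts the requested CD-ROM image when
// one is added.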
func utilityComputeUpdateCD(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    oldSet, newSet := d.GetChange("cd")
    deletedCd := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
    if len(deletedCd) > 0 {
        req := compute.CDEjectRequest{
            ComputeID: computeId,
        }
        if reason, ok := d.GetOk("reason"); ok {
            req.Reason = reason.(string)
        }

        _, err := c.CloudBroker().Compute().CDEject(ctx, req)
        if err != nil {
            return err
        }
    }

    addedCd := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
    if len(addedCd) > 0 {
        cdItem := addedCd[0].(map[string]interface{})
        req := compute.CDInsertRequest{
            ComputeID: computeId,
            CDROMID:   uint64(cdItem["cdrom_id"].(int)),
        }
        if reason, ok := d.GetOk("reason"); ok {
            req.Reason = reason.(string)
        }

        _, err := c.CloudBroker().Compute().CDInsert(ctx, req)
        if err != nil {
            return err
        }
    }
    return nil
}

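// utilityComputePinToStack pins the compute to, or unpins it from, a stack
// according to how the "pin_to_stack" flag changed, using "target_stack_id"
// and the optional "force_pin" flag when pinning.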
func utilityComputePinToStack(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    oldPin, newPin := d.GetChange("pin_to_stack")
    if oldPin.(bool) && !newPin.(bool) {
        req := compute.UnpinFromStackRequest{
            ComputeID: computeId,
        }

        _, err := c.CloudBroker().Compute().UnpinFromStack(ctx, req)
        if err != nil {
            return err
        }
    }
    if !oldPin.(bool) && newPin.(bool) {
        req := compute.PinToStackRequest{
            ComputeID:     computeId,
            TargetStackID: uint64(d.Get("target_stack_id").(int)),
        }

        if force, ok := d.Get("force_pin").(bool); ok {
            req.Force = force
        }
        _, err := c.CloudBroker().Compute().PinToStack(ctx, req)
        if err != nil {
            return err
        }
    }
    return nil
}

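// utilityComputePause pauses or resumes the compute depending on how the
// "pause" flag changed.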
func utilityComputePause(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    oldPause, newPause := d.GetChange("pause")
    if oldPause.(bool) && !newPause.(bool) {
        req := compute.ResumeRequest{
            ComputeID: computeId,
        }
        _, err := c.CloudBroker().Compute().Resume(ctx, req)
        if err != nil {
            return err
        }
    }
    if !oldPause.(bool) && newPause.(bool) {
        req := compute.PauseRequest{
            ComputeID: computeId,
        }

        _, err := c.CloudBroker().Compute().Pause(ctx, req)
        if err != nil {
            return err
        }
    }
    return nil
}

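// utilityComputeReset performs a hard reset when the "reset" flag flips from
// false to true.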
func utilityComputeReset(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    oldReset, newReset := d.GetChange("reset")
    if !oldReset.(bool) && newReset.(bool) {
        req := compute.ResetRequest{
            ComputeID: computeId,
        }
        _, err := c.CloudBroker().Compute().Reset(ctx, req)
        if err != nil {
            return err
        }
    }
    return nil
}

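// utilityComputeUpdateImage redeploys the compute from a new image: the
// machine is stopped first, then redeployed with the optional boot disk size,
// data disk policy, and start flags taken from the plan.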
func utilityComputeUpdateImage(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    oldImage, newImage := d.GetChange("image_id")
    stopReq := compute.StopRequest{
        ComputeID: computeId,
        Force:     false,
    }

    _, err := c.CloudBroker().Compute().Stop(ctx, stopReq)
    if err != nil {
        return err
    }

    if oldImage.(int) != newImage.(int) {
        req := compute.RedeployRequest{
            ComputeID: computeId,
            ImageID:   uint64(newImage.(int)),
        }

        if diskSize, ok := d.GetOk("boot_disk_size"); ok {
            req.DiskSize = uint64(diskSize.(int))
        }
        if dataDisks, ok := d.GetOk("data_disks"); ok {
            req.DataDisks = dataDisks.(string)
        }
        if autoStart, ok := d.GetOk("auto_start"); ok {
            req.AutoStart = autoStart.(bool)
        }
        if forceStop, ok := d.GetOk("force_stop"); ok {
            req.ForceStop = forceStop.(bool)
        }

        _, err := c.CloudBroker().Compute().Redeploy(ctx, req)
        if err != nil {
            return err
        }
    }
    return nil
}

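// utilityComputeUpdateCustomFields normalizes the "custom_fields" JSON string
// (stripping backslashes, newlines, and tabs) and pushes it to the platform
// when the result is non-empty.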
func utilityComputeUpdateCustomFields(ctx context.Context, d *schema.ResourceData, m interface{}) error {
    c := m.(*controller.ControllerCfg)

    computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

    val := d.Get("custom_fields").(string)
    val = strings.ReplaceAll(val, "\\", "")
    val = strings.ReplaceAll(val, "\n", "")
    val = strings.ReplaceAll(val, "\t", "")
    val = strings.TrimSpace(val)

    if len(val) > 0 {
        req := compute.SetCustomFieldsRequest{
            ComputeID:    computeId,
            CustomFields: val,
        }

        _, err := c.CloudBroker().Compute().SetCustomFields(ctx, req)
        if err != nil {
            return err
        }
    }
    // } else {
    //     req := compute.DeleteCustomFieldsRequest{
    //         ComputeID: computeId,
    //     }
    //
    //     _, err := c.CloudBroker().Compute().DeleteCustomFields(ctx, req)
    //     if err != nil {
    //         return err
    //     }
    // }
    return nil
}

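// isChangeDisk reports whether the disk entry el refers to a disk already
// present in els (matched by disk_id), i.e. a changed rather than a newly
// attached disk.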
func isChangeDisk(els []interface{}, el interface{}) bool {
    for _, elOld := range els {
        elOldConv := elOld.(map[string]interface{})
        // The tail of this function is cut off in the source; the lines below
        // are a plausible reconstruction: a disk counts as "changed" when an
        // entry with the same disk_id already exists in the old set.
        elConv := el.(map[string]interface{})
        if elOldConv["disk_id"].(int) == elConv["disk_id"].(int) {
            return true
        }
    }
    return false
}
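
// A minimal sketch (not part of this file) of how helpers like these are
// typically dispatched from the resource's Update step; the function name
// resourceComputeUpdate and the exact set of keys are assumptions, not code
// from this commit:
//
//	func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
//		if d.HasChange("user_access") {
//			if err := utilityComputeUpdateUserAccess(ctx, d, m); err != nil {
//				return diag.FromErr(err)
//			}
//		}
//		if d.HasChange("snapshot") {
//			if err := utilityComputeUpdateSnapshot(ctx, d, m); err != nil {
//				return diag.FromErr(err)
//			}
//		}
//		// ...further HasChange branches for "cd", "pin_to_stack", "pause", "image_id", etc.
//		return nil
//	}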