4.0.0
@@ -33,13 +33,13 @@ package kvmvm

 import (
 	"context"
-	"encoding/json"
 	"fmt"

 	// "net/url"

-	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
 	log "github.com/sirupsen/logrus"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"

 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -48,7 +48,7 @@ import (

 // Parse list of all disks from API compute/get into a list of "extra disks" attached to this compute
 // Extra disks are all compute disks but a boot disk.
-func parseComputeDisksToExtraDisks(disks []DiskRecord) []interface{} {
+func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} {
 	// this return value will be used to d.Set("extra_disks",) item of dataSourceCompute schema,
 	// which is a simple list of integer disk IDs excluding boot disk ID
 	length := len(disks)
@@ -76,7 +76,7 @@ func parseComputeDisksToExtraDisks(disks []DiskRecord) []interface{} {
 	return result
 }

-func findBootDisk(disks []DiskRecord) *DiskRecord {
+func findBootDisk(disks compute.ListDisks) *compute.ItemDisk {
 	for _, d := range disks {
 		if d.Type == "B" {
 			return &d
@@ -84,12 +84,12 @@ func findBootDisk(disks []DiskRecord) *DiskRecord {
 	}

 	// some computes don't have a boot disk, so...
-	return &DiskRecord{}
+	return &compute.ItemDisk{}
 }

 // Parse the list of interfaces from compute/get response into a list of networks
 // attached to this compute
-func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []interface{} {
+func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} {
 	// return value will be used to d.Set("network") item of dataSourceCompute schema
 	length := len(ifaces)
 	log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
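For orientation, a minimal sketch of what the reworked disk parser plausibly looks like after this change; it is not the committed body, and it assumes compute.ListDisks is a slice whose items carry the ID and Type fields used elsewhere in this diff (boot disks are marked with Type == "B", as findBootDisk shows):

    // Sketch only, not the committed implementation.
    func parseComputeDisksToExtraDisksSketch(disks compute.ListDisks) []interface{} {
        result := []interface{}{}
        for _, disk := range disks {
            if disk.Type == "B" {
                continue // the boot disk is excluded - only "extra" disks are reported
            }
            result = append(result, disk.ID)
        }
        return result
    }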
@@ -113,68 +113,58 @@ func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []interface{}
 	return result
 }

-func flattenCompute(d *schema.ResourceData, compFacts string) error {
+func flattenCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error {
 	// This function expects that compFacts string contains response from API compute/get,
 	// i.e. detailed information about compute instance.
 	//
 	// NOTE: this function modifies ResourceData argument - as such it should never be called
 	// from resourceComputeExists(...) method
-	model := ComputeGetResp{}
-	log.Debugf("flattenCompute: ready to unmarshal string %s", compFacts)
-	err := json.Unmarshal([]byte(compFacts), &model)
-	if err != nil {
-		return err
-	}
-
-	log.Debugf("flattenCompute: ID %d, RG ID %d", model.ID, model.RgID)
+	log.Debugf("flattenCompute: ID %d, RG ID %d", compFacts.ID, compFacts.RGID)

-	d.SetId(fmt.Sprintf("%d", model.ID))
-	// d.Set("compute_id", model.ID) - we should NOT set compute_id in the schema here: if it was set - it is already set, if it wasn't - we shouldn't
-	d.Set("name", model.Name)
-	d.Set("rg_id", model.RgID)
-	d.Set("rg_name", model.RgName)
-	d.Set("account_id", model.AccountID)
-	d.Set("account_name", model.AccountName)
-	d.Set("driver", model.Driver)
-	d.Set("cpu", model.Cpu)
-	d.Set("ram", model.Ram)
-	// d.Set("boot_disk_size", model.BootDiskSize) - bootdiskSize key in API compute/get is always zero, so we set boot_disk_size in another way
-	d.Set("image_id", model.ImageID)
-	d.Set("description", model.Desc)
-	d.Set("cloud_init", "applied") // NOTE: for existing compute we hard-code this value as an indicator for DiffSuppress function
-	// d.Set("status", model.Status)
-	// d.Set("tech_status", model.TechStatus)
+	d.SetId(fmt.Sprintf("%d", compFacts.ID))
+	d.Set("name", compFacts.Name)
+	d.Set("rg_id", compFacts.RGID)
+	d.Set("rg_name", compFacts.RGName)
+	d.Set("account_id", compFacts.AccountID)
+	d.Set("account_name", compFacts.AccountName)
+	d.Set("driver", compFacts.Driver)
+	d.Set("cpu", compFacts.CPUs)
+	d.Set("ram", compFacts.RAM)
+	d.Set("image_id", compFacts.ImageID)
+	d.Set("description", compFacts.Description)
+	d.Set("cloud_init", "applied")

-	if model.TechStatus == "STARTED" {
+	if compFacts.TechStatus == "STARTED" {
 		d.Set("started", true)
 	} else {
 		d.Set("started", false)
 	}

-	bootDisk := findBootDisk(model.Disks)
+	bootDisk := findBootDisk(compFacts.Disks)

 	d.Set("boot_disk_size", bootDisk.SizeMax)
 	d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations
-	d.Set("sep_id", bootDisk.SepID)
+	d.Set("sep_id", bootDisk.SEPID)
 	d.Set("pool", bootDisk.Pool)

-	if len(model.Disks) > 0 {
-		log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks))
-		if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil {
+	if len(compFacts.Disks) > 0 {
+		log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(compFacts.Disks))
+		if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(compFacts.Disks)); err != nil {
 			return err
 		}
 	}

-	if len(model.Interfaces) > 0 {
-		log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces))
-		if err = d.Set("network", parseComputeInterfacesToNetworks(model.Interfaces)); err != nil {
+	if len(compFacts.Interfaces) > 0 {
+		log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(compFacts.Interfaces))
+		if err := d.Set("network", parseComputeInterfacesToNetworks(compFacts.Interfaces)); err != nil {
 			return err
 		}
 	}

-	if len(model.OsUsers) > 0 {
-		log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(model.OsUsers))
-		if err = d.Set("os_users", parseOsUsers(model.OsUsers)); err != nil {
+	if len(compFacts.OSUsers) > 0 {
+		log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(compFacts.OSUsers))
+		if err := d.Set("os_users", parseOsUsers(compFacts.OSUsers)); err != nil {
 			return err
 		}
 	}
@@ -184,10 +174,8 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {

 func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	compFacts, err := utilityComputeCheckPresence(ctx, d, m)
-	if compFacts == "" {
-		// if empty string is returned from utilityComputeCheckPresence then there is no
-		// such Compute and err tells so - just return it to the calling party
-		d.SetId("") // ensure ID is empty
+	if compFacts == nil {
+		d.SetId("")
 		return diag.FromErr(err)
 	}

@@ -277,7 +265,6 @@ func DataSourceCompute() *schema.Resource {
 			Computed:    true,
 			Description: "Name of the OS image this compute instance is based on.",
 		},
-
 		"boot_disk_size": {
 			Type:     schema.TypeInt,
 			Computed: true,
@@ -33,17 +33,18 @@ package kvmvm

 import (
 	log "github.com/sirupsen/logrus"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"

 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )

-func parseOsUsers(logins []OsUserRecord) []interface{} {
+func parseOsUsers(logins compute.ListOSUsers) []interface{} {
 	var result = make([]interface{}, len(logins))

 	for index, value := range logins {
 		elem := make(map[string]interface{})

-		elem["guid"] = value.Guid
+		elem["guid"] = value.GUID
 		elem["login"] = value.Login
 		elem["password"] = value.Password
 		elem["public_key"] = value.PubKey
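The flattened result feeds d.Set("os_users", ...) in flattenCompute; a schema block matching the element keys above would look roughly like this (a hypothetical fragment for illustration only - the actual schema definition is not part of this diff):

    "os_users": {
        Type:     schema.TypeList,
        Computed: true,
        Elem: &schema.Resource{
            Schema: map[string]*schema.Schema{
                "guid":       {Type: schema.TypeString, Computed: true},
                "login":      {Type: schema.TypeString, Computed: true},
                "password":   {Type: schema.TypeString, Computed: true},
                "public_key": {Type: schema.TypeString, Computed: true},
            },
        },
    },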
@@ -33,14 +33,16 @@ package kvmvm

 import (
 	"context"
-	"fmt"
-	"net/url"
 	"strconv"

+	log "github.com/sirupsen/logrus"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/kvmppc"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/kvmx86"
 	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
 	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
 	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
-	log "github.com/sirupsen/logrus"

 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -49,110 +51,112 @@ import (

 func cloudInitDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool {
 	if oldVal == "" && newVal != "applied" {
 		// if old value for "cloud_init" resource is empty string, it means that we are creating new compute
 		// and there is a chance that the user will want custom cloud init parameters - so we check if
 		// cloud_init is explicitly set in TF file by making sure that its new value is different from "applied",
 		// which is a reserved key word.
 		log.Debugf("cloudInitDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
-		return false // there is a difference between stored and new value
+		return false
 	}
 	log.Debugf("cloudInitDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal)
-	return true // suppress difference
+	return true
 }

 func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	// we assume all mandatory parameters it takes to create a compute instance are properly
 	// specified - we rely on schema "Required" attributes to let Terraform validate them for us

 	log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))

 	// create basic Compute (i.e. without extra disks and network connections - those will be attached
 	// by subsequent individual API calls).
 	c := m.(*controller.ControllerCfg)
-	urlValues := &url.Values{}
-	urlValues.Add("rgId", fmt.Sprintf("%d", d.Get("rg_id").(int)))
-	urlValues.Add("name", d.Get("name").(string))
-	urlValues.Add("cpu", fmt.Sprintf("%d", d.Get("cpu").(int)))
-	urlValues.Add("ram", fmt.Sprintf("%d", d.Get("ram").(int)))
-	urlValues.Add("imageId", fmt.Sprintf("%d", d.Get("image_id").(int)))
-	urlValues.Add("bootDisk", fmt.Sprintf("%d", d.Get("boot_disk_size").(int)))
-	urlValues.Add("netType", "NONE") // at the 1st step create isolated compute
-	urlValues.Add("start", "0")      // at the 1st step create compute in a stopped state
+	reqPPC := kvmppc.CreateRequest{}
+	reqX86 := kvmx86.CreateRequest{}
+
+	reqPPC.RGID = uint64(d.Get("rg_id").(int))
+	reqX86.RGID = uint64(d.Get("rg_id").(int))
+
+	reqPPC.Name = d.Get("name").(string)
+	reqX86.Name = d.Get("name").(string)
+
+	reqPPC.CPU = uint64(d.Get("cpu").(int))
+	reqPPC.RAM = uint64(d.Get("ram").(int))
+	reqPPC.ImageID = uint64(d.Get("image_id").(int))
+	reqPPC.BootDisk = uint64(d.Get("boot_disk_size").(int))
+	reqPPC.NetType = "NONE"
+	reqPPC.Start = false
+
+	reqX86.CPU = uint64(d.Get("cpu").(int))
+	reqX86.RAM = uint64(d.Get("ram").(int))
+	reqX86.ImageID = uint64(d.Get("image_id").(int))
+	reqX86.BootDisk = uint64(d.Get("boot_disk_size").(int))
+	reqX86.NetType = "NONE"
+	reqX86.Start = false

 	argVal, argSet := d.GetOk("description")
 	if argSet {
-		urlValues.Add("desc", argVal.(string))
+		reqPPC.Description = argVal.(string)
+		reqX86.Description = argVal.(string)
 	}

 	if sepID, ok := d.GetOk("sep_id"); ok {
-		urlValues.Add("sepId", strconv.Itoa(sepID.(int)))
+		reqPPC.SEPID = uint64(sepID.(int))
+		reqX86.SEPID = uint64(sepID.(int))
 	}

 	if pool, ok := d.GetOk("pool"); ok {
-		urlValues.Add("pool", pool.(string))
+		reqPPC.Pool = pool.(string)
+		reqX86.Pool = pool.(string)
 	}

 	/*
 	sshKeysVal, sshKeysSet := d.GetOk("ssh_keys")
 	if sshKeysSet {
 		// process SSH Key settings and set API values accordingly
 		log.Debugf("resourceComputeCreate: calling makeSshKeysArgString to setup SSH keys for guest login(s)")
 		urlValues.Add("userdata", makeSshKeysArgString(sshKeysVal.([]interface{})))
 	}
 	*/

-	computeCreateAPI := KvmX86CreateAPI
-	driver := d.Get("driver").(string)
-	if driver == "KVM_PPC" {
-		computeCreateAPI = KvmPPCCreateAPI
-		log.Debugf("resourceComputeCreate: creating Compute of type KVM VM PowerPC")
-	} else { // note that we do not validate arch value for explicit "KVM_X86" here
-		log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
-	}
-
 	argVal, argSet = d.GetOk("cloud_init")
 	if argSet {
 		// userdata must not be empty string and must not be a reserved keyword "applied"
 		userdata := argVal.(string)
 		if userdata != "" && userdata != "applied" {
-			urlValues.Add("userdata", userdata)
+			reqPPC.Userdata = userdata
+			reqX86.Userdata = userdata
 		}
 	}

-	apiResp, err := c.DecortAPICall(ctx, "POST", computeCreateAPI, urlValues)
-	if err != nil {
-		return diag.FromErr(err)
-	}
 	// Compute create API returns ID of the new Compute instance on success
-	d.SetId(apiResp) // update ID of the resource to tell Terraform that the resource exists, albeit partially
-	compId, _ := strconv.Atoi(apiResp)
+	var createdID uint64
+	driver := d.Get("driver").(string)
+	if driver == "KVM_PPC" {
+		log.Debugf("resourceComputeCreate: creating Compute of type KVM VM PowerPC")
+		id, err := c.CloudBroker().KVMPPC().Create(ctx, reqPPC)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		createdID = id
+	} else {
+		log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
+		id, err := c.CloudBroker().KVMX86().Create(ctx, reqX86)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		createdID = id
+	}
+
+	d.SetId(strconv.FormatUint(createdID, 10))

 	cleanup := false
 	defer func() {
 		if cleanup {
-			urlValues := &url.Values{}
-			urlValues.Add("computeId", d.Id())
-			urlValues.Add("permanently", "1")
-			urlValues.Add("detachDisks", "1")
+			req := compute.DeleteRequest{
+				ComputeID:   createdID,
+				Permanently: true,
+				DetachDisks: true,
+			}

-			if _, err := c.DecortAPICall(ctx, "POST", ComputeDeleteAPI, urlValues); err != nil {
+			if _, err := c.CloudBroker().Compute().Delete(ctx, req); err != nil {
 				log.Errorf("resourceComputeCreate: could not delete compute after failed creation: %v", err)
 			}

 			d.SetId("")
 		}
 	}()

-	log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", compId, d.Get("name").(string))
+	log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", createdID, d.Get("name").(string))

 	// Configure data disks if any
 	argVal, argSet = d.GetOk("extra_disks")
 	if argSet && argVal.(*schema.Set).Len() > 0 {
-		// urlValues.Add("desc", argVal.(string))
 		log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", argVal.(*schema.Set).Len())
-		err = utilityComputeExtraDisksConfigure(ctx, d, m, false) // do_delta=false, as we are working on a new compute
+		err := utilityComputeExtraDisksConfigure(ctx, d, m, false) // do_delta=false, as we are working on a new compute
 		if err != nil {
-			log.Errorf("resourceComputeCreate: error when attaching extra disk(s) to a new Compute ID %d: %v", compId, err)
+			log.Errorf("resourceComputeCreate: error when attaching extra disk(s) to a new Compute ID %d: %v", createdID, err)
 			cleanup = true
 			return diag.FromErr(err)
 		}
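For reference, a DiffSuppressFunc like cloudInitDiffSupperss only takes effect once it is referenced from the attribute's schema; that wiring is not part of this diff, so the fragment below is a hypothetical sketch of how it would look:

    "cloud_init": {
        Type:             schema.TypeString,
        Optional:         true,
        DiffSuppressFunc: cloudInitDiffSupperss, // flattenCompute stores "applied", so later plans suppress the diff
    },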
@@ -161,32 +165,28 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
 	argVal, argSet = d.GetOk("network")
 	if argSet && argVal.(*schema.Set).Len() > 0 {
 		log.Debugf("resourceComputeCreate: calling utilityComputeNetworksConfigure to attach %d network(s)", argVal.(*schema.Set).Len())
-		err = utilityComputeNetworksConfigure(ctx, d, m, false) // do_delta=false, as we are working on a new compute
+		err := utilityComputeNetworksConfigure(ctx, d, m, false) // do_delta=false, as we are working on a new compute
 		if err != nil {
-			log.Errorf("resourceComputeCreate: error when attaching networks to a new Compute ID %d: %s", compId, err)
+			log.Errorf("resourceComputeCreate: error when attaching networks to a new Compute ID %d: %s", createdID, err)
 			cleanup = true
 			return diag.FromErr(err)
 		}
 	}

 	// Nota bene: we created compute in a STOPPED state (this is required to properly attach 1st network interface),
 	// now we need to start it before we report the sequence complete
 	if d.Get("started").(bool) {
-		reqValues := &url.Values{}
-		reqValues.Add("computeId", fmt.Sprintf("%d", compId))
-		log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", compId)
-		if _, err := c.DecortAPICall(ctx, "POST", ComputeStartAPI, reqValues); err != nil {
+		req := compute.StartRequest{
+			ComputeID: createdID,
+		}
+
+		log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", createdID)
+		if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil {
 			cleanup = true
 			return diag.FromErr(err)
 		}
 	}

-	log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", compId, d.Get("name").(string))
+	log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", createdID, d.Get("name").(string))

 	// We may reuse dataSourceComputeRead here as we maintain similarity
 	// between Compute resource and Compute data source schemas
 	// Compute read function will also update resource ID on success, so that Terraform
 	// will know the resource exists
 	return dataSourceComputeRead(ctx, d, m)
 }
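Stripped of the Terraform plumbing, the create path above reduces to: fill a driver-specific typed request, call Create, then start the compute through the generic client. A condensed sketch under the same SDK assumptions (rgID, name and the other plain variables stand in for the d.Get(...) schema lookups):

    // Condensed sketch of the create-then-start flow; not the literal handler code.
    req := kvmx86.CreateRequest{
        RGID:     rgID, // resource group to place the VM in
        Name:     name,
        CPU:      cpu,
        RAM:      ram,
        ImageID:  imageID,
        BootDisk: bootDiskSize,
        NetType:  "NONE", // create isolated...
        Start:    false,  // ...and stopped, so the first NIC can be attached cleanly
    }

    id, err := c.CloudBroker().KVMX86().Create(ctx, req)
    if err != nil {
        return diag.FromErr(err)
    }

    // attach extra disks and networks here, then bring the compute up
    _, err = c.CloudBroker().Compute().Start(ctx, compute.StartRequest{ComputeID: id})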
@@ -195,11 +195,11 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac
 		d.Get("name").(string), d.Get("rg_id").(int))

 	compFacts, err := utilityComputeCheckPresence(ctx, d, m)
-	if compFacts == "" {
+	if compFacts == nil {
 		if err != nil {
 			return diag.FromErr(err)
 		}
 		// Compute with such name and RG ID was not found

 		return nil
 	}

@@ -218,6 +218,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
 		d.Id(), d.Get("name").(string), d.Get("rg_id").(int))

 	c := m.(*controller.ControllerCfg)
+	computeID, _ := strconv.ParseUint(d.Id(), 10, 64)

 	/*
 	1. Resize CPU/RAM
@@ -228,32 +229,34 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
 	*/

 	// 1. Resize CPU/RAM
-	params := &url.Values{}
+	resizeReq := compute.ResizeRequest{
+		ComputeID: computeID,
+		Force:     true,
+	}
 	doUpdate := false
-	params.Add("computeId", d.Id())

 	oldCpu, newCpu := d.GetChange("cpu")
 	if oldCpu.(int) != newCpu.(int) {
-		params.Add("cpu", fmt.Sprintf("%d", newCpu.(int)))
+		resizeReq.CPU = uint64(newCpu.(int))
 		doUpdate = true
 	} else {
-		params.Add("cpu", "0") // no change to CPU allocation
+		resizeReq.CPU = 0
 	}

 	oldRam, newRam := d.GetChange("ram")
 	if oldRam.(int) != newRam.(int) {
-		params.Add("ram", fmt.Sprintf("%d", newRam.(int)))
+		resizeReq.RAM = uint64(newRam.(int))
 		doUpdate = true
 	} else {
-		params.Add("ram", "0")
+		resizeReq.RAM = 0
 	}

 	if doUpdate {
 		log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d",
 			oldCpu.(int), newCpu.(int),
 			oldRam.(int), newRam.(int))
-		params.Add("force", "true")
-		_, err := c.DecortAPICall(ctx, "POST", ComputeResizeAPI, params)
+
+		_, err := c.CloudBroker().Compute().Resize(ctx, resizeReq)
 		if err != nil {
 			return diag.FromErr(err)
 		}
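As the old "no change to CPU allocation" comment indicates, a zero CPU or RAM value in the resize call is treated as "leave as is", and this commit keeps that convention in the typed request. Under that assumption, a minimal standalone sketch of growing only RAM might look like:

    // Hypothetical example; zero-valued fields mean "do not change".
    resizeReq := compute.ResizeRequest{
        ComputeID: computeID,
        CPU:       0,    // keep current CPU count
        RAM:       8192, // grow RAM (value is an example)
        Force:     true,
    }
    if _, err := c.CloudBroker().Compute().Resize(ctx, resizeReq); err != nil {
        return diag.FromErr(err)
    }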
@@ -262,12 +265,15 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
 	// 2. Resize (grow) Boot disk
 	oldSize, newSize := d.GetChange("boot_disk_size")
 	if oldSize.(int) < newSize.(int) {
-		bdsParams := &url.Values{}
-		bdsParams.Add("diskId", fmt.Sprintf("%d", d.Get("boot_disk_id").(int)))
-		bdsParams.Add("size", fmt.Sprintf("%d", newSize.(int)))
+		req := disks.ResizeRequest{
+			DiskID: uint64(d.Get("boot_disk_id").(int)),
+			Size:   uint64(newSize.(int)),
+		}
+
 		log.Debugf("resourceComputeUpdate: compute ID %s, boot disk ID %d resize %d -> %d",
 			d.Id(), d.Get("boot_disk_id").(int), oldSize.(int), newSize.(int))
-		_, err := c.DecortAPICall(ctx, "POST", DisksResizeAPI, bdsParams)
+
+		_, err := c.CloudBroker().Disks().Resize(ctx, req)
 		if err != nil {
 			return diag.FromErr(err)
 		}
@@ -288,14 +294,18 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
 	}

 	if d.HasChange("started") {
-		params := &url.Values{}
-		params.Add("computeId", d.Id())
 		if d.Get("started").(bool) {
-			if _, err := c.DecortAPICall(ctx, "POST", ComputeStartAPI, params); err != nil {
+			req := compute.StartRequest{
+				ComputeID: computeID,
+			}
+			if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil {
 				return diag.FromErr(err)
 			}
 		} else {
-			if _, err := c.DecortAPICall(ctx, "POST", ComputeStopAPI, params); err != nil {
+			req := compute.StopRequest{
+				ComputeID: computeID,
+			}
+			if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil {
 				return diag.FromErr(err)
 			}
 		}
@@ -307,21 +317,19 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
 }

 func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	// NOTE: this function destroys target Compute instance "permanently", so
 	// there is no way to restore it.
 	// If compute being destroyed has some extra disks attached, they are
 	// detached from the compute
 	log.Debugf("resourceComputeDelete: called for Compute name %s, RG ID %d",
 		d.Get("name").(string), d.Get("rg_id").(int))

 	c := m.(*controller.ControllerCfg)
+	computeID, _ := strconv.ParseUint(d.Id(), 10, 64)

-	params := &url.Values{}
-	params.Add("computeId", d.Id())
-	params.Add("permanently", "1")
-	params.Add("detachDisks", "1")
+	req := compute.DeleteRequest{
+		ComputeID:   computeID,
+		Permanently: d.Get("permanently").(bool),
+		DetachDisks: d.Get("detach_disks").(bool),
+	}

-	if _, err := c.DecortAPICall(ctx, "POST", ComputeDeleteAPI, params); err != nil {
+	if _, err := c.CloudBroker().Compute().Delete(ctx, req); err != nil {
 		return diag.FromErr(err)
 	}
@@ -33,31 +33,23 @@ package kvmvm

 import (
 	"context"
-	"encoding/json"
-	"fmt"
-	"net/url"
 	"strconv"

-	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
 	log "github.com/sirupsen/logrus"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"

 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )

 func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
-	// d is filled with data according to computeResource schema, so extra disks config is retrieved via "extra_disks" key
-	// If do_delta is true, this function will identify changes between new and existing specs for extra disks and try to
-	// update compute configuration accordingly
-	// Otherwise it will apply whatever is found in the new set of "extra_disks" right away.
-	// Primary use of do_delta=false is when calling this function from compute Create handler.
-
-	// Note that this function will not abort on API errors, but will continue to configure (attach / detach) other individual
-	// disks via atomic API calls. However, it will not retry failed manipulation on the same disk.
 	c := m.(*controller.ControllerCfg)
+	computeID, _ := strconv.ParseUint(d.Id(), 10, 64)

 	log.Debugf("utilityComputeExtraDisksConfigure: called for Compute ID %s with do_delta = %t", d.Id(), do_delta)

 	// NB: as of rc-1.25 "extra_disks" are TypeSet with the elem of TypeInt
 	old_set, new_set := d.GetChange("extra_disks")

 	apiErrCount := 0
@@ -69,12 +61,13 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa
 	}

 	for _, disk := range new_set.(*schema.Set).List() {
-		urlValues := &url.Values{}
-		urlValues.Add("computeId", d.Id())
-		urlValues.Add("diskId", fmt.Sprintf("%d", disk.(int)))
-		_, err := c.DecortAPICall(ctx, "POST", ComputeDiskAttachAPI, urlValues)
+		req := compute.DiskAttachRequest{
+			ComputeID: computeID,
+			DiskID:    uint64(disk.(int)),
+		}
+
+		_, err := c.CloudBroker().Compute().DiskAttach(ctx, req)
 		if err != nil {
 			// failed to attach extra disk - partial resource update
 			apiErrCount++
 			lastSavedError = err
 		}
@@ -92,10 +85,12 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa
 	detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set))
 	log.Debugf("utilityComputeExtraDisksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id())
 	for _, diskId := range detach_set.List() {
-		urlValues := &url.Values{}
-		urlValues.Add("computeId", d.Id())
-		urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
-		_, err := c.DecortAPICall(ctx, "POST", ComputeDiskDetachAPI, urlValues)
+		req := compute.DiskDetachRequest{
+			ComputeID: computeID,
+			DiskID:    uint64(diskId.(int)),
+		}
+
+		_, err := c.CloudBroker().Compute().DiskDetach(ctx, req)
 		if err != nil {
 			// failed to detach disk - there will be partial resource update
 			log.Errorf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId.(int), d.Id(), err)
@@ -107,10 +102,12 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa
 	attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
 	log.Debugf("utilityComputeExtraDisksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
 	for _, diskId := range attach_set.List() {
-		urlValues := &url.Values{}
-		urlValues.Add("computeId", d.Id())
-		urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int)))
-		_, err := c.DecortAPICall(ctx, "POST", ComputeDiskAttachAPI, urlValues)
+		req := compute.DiskAttachRequest{
+			ComputeID: computeID,
+			DiskID:    uint64(diskId.(int)),
+		}
+
+		_, err := c.CloudBroker().Compute().DiskAttach(ctx, req)
 		if err != nil {
 			// failed to attach disk - there will be partial resource update
 			log.Errorf("utilityComputeExtraDisksConfigure: failed to attach disk ID %d to Compute ID %s: %s", diskId.(int), d.Id(), err)
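Both configurators share the same delta technique over Terraform sets: detach what is only in the old set, attach what is only in the new one. A minimal illustration of that core, assuming the same d, computeID and SDK client as in the surrounding code:

    // Delta between stored and desired state of "extra_disks".
    oldSet, newSet := d.GetChange("extra_disks")

    detachSet := oldSet.(*schema.Set).Difference(newSet.(*schema.Set)) // only in old -> detach
    attachSet := newSet.(*schema.Set).Difference(oldSet.(*schema.Set)) // only in new -> attach

    for _, diskID := range detachSet.List() {
        // errors are logged but do not abort the loop, mirroring the code above
        _, _ = c.CloudBroker().Compute().DiskDetach(ctx, compute.DiskDetachRequest{
            ComputeID: computeID,
            DiskID:    uint64(diskID.(int)),
        })
    }
    for _, diskID := range attachSet.List() {
        _, _ = c.CloudBroker().Compute().DiskAttach(ctx, compute.DiskAttachRequest{
            ComputeID: computeID,
            DiskID:    uint64(diskID.(int)),
        })
    }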
@@ -129,13 +126,8 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa
 }

 func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
-	// "d" is filled with data according to computeResource schema, so extra networks config is retrieved via "network" key
-	// If do_delta is true, this function will identify changes between new and existing specs for network and try to
-	// update compute configuration accordingly
-	// Otherwise it will apply whatever is found in the new set of "network" right away.
-	// Primary use of do_delta=false is when calling this function from compute Create handler.
-
 	c := m.(*controller.ControllerCfg)
+	computeID, _ := strconv.ParseUint(d.Id(), 10, 64)

 	old_set, new_set := d.GetChange("network")

@@ -148,18 +140,19 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
 	}

 	for _, runner := range new_set.(*schema.Set).List() {
 		net_data := runner.(map[string]interface{})
-		urlValues := &url.Values{}
-		urlValues.Add("computeId", d.Id())
-		urlValues.Add("netType", net_data["net_type"].(string))
-		urlValues.Add("netId", fmt.Sprintf("%d", net_data["net_id"].(int)))
-		ipaddr, ipSet := net_data["ip_address"] // "ip_address" key is optional
-		if ipSet {
-			urlValues.Add("ipAddr", ipaddr.(string))
-		}
-		_, err := c.DecortAPICall(ctx, "POST", ComputeNetAttachAPI, urlValues)
+		req := compute.NetAttachRequest{
+			ComputeID: computeID,
+			NetType:   net_data["net_type"].(string),
+			NetID:     uint64(net_data["net_id"].(int)),
+		}
+
+		ipaddr, ipSet := net_data["ip_address"]
+		if ipSet {
+			req.IPAddr = ipaddr.(string)
+		}
+		_, err := c.CloudBroker().Compute().NetAttach(ctx, req)
 		if err != nil {
 			// failed to attach network - partial resource update
 			apiErrCount++
 			lastSavedError = err
 		}
@@ -176,12 +169,14 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
 	detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set))
 	log.Debugf("utilityComputeNetworksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id())
 	for _, runner := range detach_set.List() {
 		net_data := runner.(map[string]interface{})
-		urlValues := &url.Values{}
-		urlValues.Add("computeId", d.Id())
-		urlValues.Add("ipAddr", net_data["ip_address"].(string))
-		urlValues.Add("mac", net_data["mac"].(string))
-		_, err := c.DecortAPICall(ctx, "POST", ComputeNetDetachAPI, urlValues)
+		req := compute.NetDetachRequest{
+			ComputeID: computeID,
+			IPAddr:    net_data["ip_address"].(string),
+			MAC:       net_data["mac"].(string),
+		}
+
+		_, err := c.CloudBroker().Compute().NetDetach(ctx, req)
 		if err != nil {
 			// failed to detach this network - there will be partial resource update
 			log.Errorf("utilityComputeNetworksConfigure: failed to detach net ID %d of type %s from Compute ID %s: %s",
@@ -194,15 +189,18 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
 	attach_set := new_set.(*schema.Set).Difference(old_set.(*schema.Set))
 	log.Debugf("utilityComputeNetworksConfigure: attach set has %d items for Compute ID %s", attach_set.Len(), d.Id())
 	for _, runner := range attach_set.List() {
 		net_data := runner.(map[string]interface{})
-		urlValues := &url.Values{}
-		urlValues.Add("computeId", d.Id())
-		urlValues.Add("netId", fmt.Sprintf("%d", net_data["net_id"].(int)))
-		urlValues.Add("netType", net_data["net_type"].(string))
-		if net_data["ip_address"].(string) != "" {
-			urlValues.Add("ipAddr", net_data["ip_address"].(string))
-		}
-		_, err := c.DecortAPICall(ctx, "POST", ComputeNetAttachAPI, urlValues)
+		req := compute.NetAttachRequest{
+			ComputeID: computeID,
+			NetID:     uint64(net_data["net_id"].(int)),
+			NetType:   net_data["net_type"].(string),
+		}
+
+		if net_data["ip_address"].(string) != "" {
+			req.IPAddr = net_data["ip_address"].(string)
+		}
+
+		_, err := c.CloudBroker().Compute().NetAttach(ctx, req)
 		if err != nil {
 			// failed to attach this network - there will be partial resource update
 			log.Errorf("utilityComputeNetworksConfigure: failed to attach net ID %d of type %s to Compute ID %s: %s",
@@ -221,31 +219,15 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
 	return nil
 }

-func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
-	// This function tries to locate Compute by one of the following approaches:
-	// - if compute_id is specified - locate by compute ID
-	// - if compute_name is specified - locate by a combination of compute name and resource
-	// group ID
-	//
-	// If succeeded, it returns non-empty string that contains JSON formatted facts about the
-	// Compute as returned by compute/get API call.
-	// Otherwise it returns empty string and meaningful error.
-	//
-	// This function does not modify its ResourceData argument, so it is safe to use it as core
-	// method for resource's Exists method.
-	//
-
+func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*compute.RecordCompute, error) {
 	c := m.(*controller.ControllerCfg)
-	urlValues := &url.Values{}

 	// make it possible to use "read" & "check presence" functions with compute ID set so
 	// that Import of Compute resource is possible
 	idSet := false
-	theId, err := strconv.Atoi(d.Id())
-	if err != nil || theId <= 0 {
+	computeID, err := strconv.ParseUint(d.Id(), 10, 64)
+	if err != nil || computeID <= 0 {
 		computeId, argSet := d.GetOk("compute_id") // NB: compute_id is NOT present in computeResource schema!
 		if argSet {
-			theId = computeId.(int)
+			computeID = uint64(computeId.(int))
 			idSet = true
 		}
 	} else {
@@ -254,11 +236,14 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m

 	if idSet {
 		// compute ID is specified, try to get compute instance straight by this ID
-		log.Debugf("utilityComputeCheckPresence: locating compute by its ID %d", theId)
-		urlValues.Add("computeId", fmt.Sprintf("%d", theId))
-		computeFacts, err := c.DecortAPICall(ctx, "POST", ComputeGetAPI, urlValues)
+		log.Debugf("utilityComputeCheckPresence: locating compute by its ID %d", computeID)
+		req := compute.GetRequest{
+			ComputeID: computeID,
+		}
+
+		computeFacts, err := c.CloudBroker().Compute().Get(ctx, req)
 		if err != nil {
-			return "", err
+			return nil, err
 		}
 		return computeFacts, nil
 	}
@@ -267,44 +252,40 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
 	// and RG ID
 	computeName, argSet := d.GetOk("name")
 	if !argSet {
-		return "", fmt.Errorf("Cannot locate compute instance if name is empty and no compute ID specified")
+		return nil, fmt.Errorf("Cannot locate compute instance if name is empty and no compute ID specified")
 	}

 	rgId, argSet := d.GetOk("rg_id")
 	if !argSet {
-		return "", fmt.Errorf("Cannot locate compute by name %s if no resource group ID is set", computeName.(string))
+		return nil, fmt.Errorf("Cannot locate compute by name %s if no resource group ID is set", computeName.(string))
 	}

-	urlValues.Add("rgId", fmt.Sprintf("%d", rgId))
-	apiResp, err := c.DecortAPICall(ctx, "POST", RgListComputesAPI, urlValues)
+	rgListComputesReq := rg.ListComputesRequest{
+		RGID: uint64(rgId.(int)),
+	}
+
+	computeList, err := c.CloudBroker().RG().ListComputes(ctx, rgListComputesReq)
 	if err != nil {
-		return "", err
+		return nil, err
 	}

-	log.Debugf("utilityComputeCheckPresence: ready to unmarshal string %s", apiResp)
-
-	computeList := RgListComputesResp{}
-	err = json.Unmarshal([]byte(apiResp), &computeList)
-	if err != nil {
-		return "", err
-	}
-
-	// log.Printf("%#v", computeList)
 	log.Debugf("utilityComputeCheckPresence: traversing decoded JSON of length %d", len(computeList))
 	for index, item := range computeList {
 		// need to match Compute by name, skip Computes with the same name in DESTROYED status
 		if item.Name == computeName.(string) && item.Status != "DESTROYED" {
 			log.Debugf("utilityComputeCheckPresence: index %d, matched name %s", index, item.Name)
 			// we found the Compute we need - now get detailed information via compute/get API
-			cgetValues := &url.Values{}
-			cgetValues.Add("computeId", fmt.Sprintf("%d", item.ID))
-			apiResp, err = c.DecortAPICall(ctx, "POST", ComputeGetAPI, cgetValues)
+			req := compute.GetRequest{
+				ComputeID: item.ID,
+			}
+
+			apiResp, err := c.CloudBroker().Compute().Get(ctx, req)
 			if err != nil {
-				return "", err
+				return nil, err
 			}
 			return apiResp, nil
 		}
 	}

-	return "", nil // there should be no error if Compute does not exist
+	return nil, nil
 }
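The net effect on callers: utilityComputeCheckPresence now returns a typed record instead of a JSON string, with a three-way contract. A sketch of the handling the read functions above follow:

    compFacts, err := utilityComputeCheckPresence(ctx, d, m)
    if compFacts == nil {
        if err != nil {
            return diag.FromErr(err) // lookup failed
        }
        return nil // no such Compute; for a read this is not an error
    }
    // compFacts is a *compute.RecordCompute, ready to be passed to flattenCompute(d, compFacts)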