commit 4d865ae921 (parent a355247845)
Author: stSolo
Date:   2023-01-24 17:50:38 +03:00

48 changed files with 1218 additions and 468 deletions

View File

@@ -45,7 +45,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
@@ -125,10 +124,10 @@ func findInExtraDisks(DiskId uint, ExtraDisks []interface{}) bool {
 	return false
 }
 
-func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
+func flattenDataComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
 	res := make([]map[string]interface{}, 0)
 	for _, disk := range disksList {
-		if disk.Name == "bootdisk" || findInExtraDisks(disk.ID, extraDisks) { // skip the main boot disk and extra disks
+		if findInExtraDisks(disk.ID, extraDisks) { // skip extra disks
 			continue
 		}
 		temp := map[string]interface{}{
@@ -136,6 +135,9 @@ func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
 			"disk_id":   disk.ID,
 			"disk_type": disk.Type,
 			"sep_id":    disk.SepID,
+			"shareable": disk.Shareable,
+			"size_max":  disk.SizeMax,
+			"size_used": disk.SizeUsed,
 			"pool":      disk.Pool,
 			"desc":      disk.Desc,
 			"image_id":  disk.ImageID,
@@ -146,7 +148,7 @@ func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
 	return res
 }
 
-func flattenCompute(d *schema.ResourceData, compFacts string) error {
+func flattenDataCompute(d *schema.ResourceData, compFacts string) error {
 	// This function expects that compFacts string contains response from API compute/get,
 	// i.e. detailed information about compute instance.
 	//
@@ -219,7 +221,7 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
 		}
 	}
 
-	err = d.Set("disks", flattenComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List()))
+	err = d.Set("disks", flattenDataComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List()))
 	if err != nil {
 		return err
 	}
@@ -236,7 +238,7 @@ func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 		return diag.FromErr(err)
 	}
 
-	if err = flattenCompute(d, compFacts); err != nil {
+	if err = flattenDataCompute(d, compFacts); err != nil {
 		return diag.FromErr(err)
 	}
@@ -353,48 +355,53 @@ func DataSourceCompute() *schema.Resource {
 			"disks": {
 				Type:     schema.TypeList,
 				Computed: true,
-				Optional: true,
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
 						"disk_name": {
 							Type:        schema.TypeString,
-							Required:    true,
+							Computed:    true,
 							Description: "Name for disk",
 						},
 						"size": {
 							Type:        schema.TypeInt,
-							Required:    true,
+							Computed:    true,
 							Description: "Disk size in GiB",
 						},
 						"disk_type": {
-							Type:         schema.TypeString,
-							Computed:     true,
-							Optional:     true,
-							ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false),
-							Description:  "The type of disk in terms of its role in compute: 'B=Boot, D=Data'",
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'",
 						},
 						"sep_id": {
 							Type:        schema.TypeInt,
 							Computed:    true,
-							Optional:    true,
 							Description: "Storage endpoint provider ID; by default the same as the boot disk",
 						},
+						"shareable": {
+							Type:     schema.TypeBool,
+							Computed: true,
+						},
+						"size_max": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"size_used": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
 						"pool": {
 							Type:        schema.TypeString,
 							Computed:    true,
-							Optional:    true,
 							Description: "Pool name; by default will be chosen automatically",
 						},
 						"desc": {
 							Type:        schema.TypeString,
 							Computed:    true,
-							Optional:    true,
 							Description: "Optional description",
 						},
 						"image_id": {
 							Type:        schema.TypeInt,
 							Computed:    true,
-							Optional:    true,
 							Description: "Specify image ID to create the disk from a template",
 						},
 						"disk_id": {
@@ -404,8 +411,7 @@ func DataSourceCompute() *schema.Resource {
 						},
 						"permanently": {
 							Type:        schema.TypeBool,
-							Optional:    true,
-							Default:     false,
+							Computed:    true,
 							Description: "Disk deletion status",
 						},
 					},
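Note on the pattern behind this hunk: a data source only reports state, so every user-settable marker (Optional, Required, Default, ValidateFunc) is dropped in favor of Computed. A minimal standalone sketch of the distinction follows; the function and field names are illustrative, not from this commit.

package kvmvm

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// demoDiskAttrs (hypothetical) contrasts the two attribute modes touched by
// this change: an Optional field accepts user configuration (resource-style
// input), while a Computed-only field is populated by the provider at read
// time and is rejected if it appears in configuration (data-source output).
func demoDiskAttrs() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"desc_input": { // resource-style: user may set it
			Type:     schema.TypeString,
			Optional: true,
		},
		"desc": { // data-source-style: read-only, as in this hunk
			Type:     schema.TypeString,
			Computed: true,
		},
	}
}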

View File

@@ -0,0 +1,115 @@
package kvmvm

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/rudecs/terraform-provider-decort/internal/status"

	log "github.com/sirupsen/logrus"
)

func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
	res := make([]map[string]interface{}, 0)
	for _, disk := range disksList {
		if disk.Name == "bootdisk" || findInExtraDisks(disk.ID, extraDisks) { // skip the main boot disk and extra disks
			continue
		}
		temp := map[string]interface{}{
			"disk_name": disk.Name,
			"disk_id":   disk.ID,
			"disk_type": disk.Type,
			"sep_id":    disk.SepID,
			"shareable": disk.Shareable,
			"size_max":  disk.SizeMax,
			"size_used": disk.SizeUsed,
			"pool":      disk.Pool,
			"desc":      disk.Desc,
			"image_id":  disk.ImageID,
			"size":      disk.SizeMax,
		}
		res = append(res, temp)
	}
	return res
}

func flattenCompute(d *schema.ResourceData, compFacts string) error {
	// This function expects the compFacts string to contain the response from the
	// API compute/get call, i.e. detailed information about the compute instance.
	//
	// NOTE: this function modifies its ResourceData argument - as such it should
	// never be called from the resourceComputeExists(...) method.
	model := ComputeGetResp{}
	log.Debugf("flattenCompute: ready to unmarshal string %s", compFacts)
	err := json.Unmarshal([]byte(compFacts), &model)
	if err != nil {
		return err
	}

	log.Debugf("flattenCompute: ID %d, RG ID %d", model.ID, model.RgID)

	d.SetId(fmt.Sprintf("%d", model.ID))
	// d.Set("compute_id", model.ID) - we should NOT set compute_id in the schema here:
	// if it was set before, it is already set; if it wasn't, we shouldn't set it now
	d.Set("name", model.Name)
	d.Set("rg_id", model.RgID)
	d.Set("rg_name", model.RgName)
	d.Set("account_id", model.AccountID)
	d.Set("account_name", model.AccountName)
	d.Set("driver", model.Driver)
	d.Set("cpu", model.Cpu)
	d.Set("ram", model.Ram)
	// d.Set("boot_disk_size", model.BootDiskSize) - the bootdiskSize key in the API
	// compute/get response is always zero, so boot_disk_size is set differently below
	if model.VirtualImageID != 0 {
		d.Set("image_id", model.VirtualImageID)
	} else {
		d.Set("image_id", model.ImageID)
	}
	d.Set("description", model.Desc)
	d.Set("enabled", false)
	if model.Status == status.Enabled {
		d.Set("enabled", true)
	}

	// d.Set("cloud_init", "applied") // NOTE: for existing compute we hard-code this value as an indicator for the DiffSuppress function
	// d.Set("status", model.Status)
	// d.Set("tech_status", model.TechStatus)
	d.Set("started", false)
	if model.TechStatus == "STARTED" {
		d.Set("started", true)
	}

	bootDisk := findBootDisk(model.Disks)

	d.Set("boot_disk_size", bootDisk.SizeMax)
	d.Set("boot_disk_id", bootDisk.ID) // we may need the boot disk ID in resize operations
	d.Set("sep_id", bootDisk.SepID)
	d.Set("pool", bootDisk.Pool)

	// if len(model.Disks) > 0 {
	// 	log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks))
	// 	if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil {
	// 		return err
	// 	}
	// }

	if len(model.Interfaces) > 0 {
		log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces))
		if err = d.Set("network", parseComputeInterfacesToNetworks(model.Interfaces)); err != nil {
			return err
		}
	}

	if len(model.OsUsers) > 0 {
		log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(model.OsUsers))
		if err = d.Set("os_users", parseOsUsers(model.OsUsers)); err != nil {
			return err
		}
	}

	err = d.Set("disks", flattenComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List()))
	if err != nil {
		return err
	}

	return nil
}
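For orientation: this new file carries the resource-side flatteners, while the data source variants above were renamed with the Data infix. A hedged sketch of how flattenCompute is typically wired into a read function, mirroring the dataSourceComputeRead pattern earlier in this commit; utilityComputeCheckPresence is an assumed helper (not shown in this diff) returning the compute/get response as a JSON string, and imports of context and the SDK diag package are assumed.

// Sketch only - mirrors the data source read shown earlier in this commit.
func resourceComputeReadSketch(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	// assumed helper: fetches compute/get facts as a JSON string
	compFacts, err := utilityComputeCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}
	if err = flattenCompute(d, compFacts); err != nil {
		return diag.FromErr(err)
	}
	return nil
}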

View File

@@ -72,6 +72,7 @@ type DiskRecord struct {
 	Role      string           `json:"role"`
 	SepType   string           `json:"sepType"`
 	SepID     int              `json:"sepId"` // NOTE: absent from compute/get output
+	Shareable bool             `json:"shareable"`
 	SizeMax   int              `json:"sizeMax"`
 	SizeUsed  int              `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
 	Snapshots []SnapshotRecord `json:"snapshots"`
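The new Shareable field picks up the "shareable" key of the disk JSON, which flattenComputeDisksDemo above then copies into state. A standalone round-trip sketch; the struct subset and sample payload are invented for illustration, not real API output.

package main

import (
	"encoding/json"
	"fmt"
)

// diskSubset trims DiskRecord down to the fields this commit touches.
type diskSubset struct {
	Shareable bool `json:"shareable"`
	SizeMax   int  `json:"sizeMax"`
	SizeUsed  int  `json:"sizeUsed"`
}

func main() {
	raw := []byte(`{"shareable":true,"sizeMax":100,"sizeUsed":17}`) // sample payload
	var d diskSubset
	if err := json.Unmarshal(raw, &d); err != nil {
		panic(err)
	}
	fmt.Printf("shareable=%v sizeMax=%d sizeUsed=%d\n", d.Shareable, d.SizeMax, d.SizeUsed)
	// Output: shareable=true sizeMax=100 sizeUsed=17
}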

View File

@@ -694,6 +694,18 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
 				Optional:    true,
 				Description: "Storage endpoint provider ID; by default the same as the boot disk",
 			},
+			"shareable": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"size_max": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"size_used": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
 			"pool": {
 				Type:     schema.TypeString,
 				Computed: true,
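Design note: these three Computed-only entries now appear verbatim in both the data source and the resource schemas, and currently ship without Description strings. One possible way to keep the two copies from drifting apart is a shared helper; this is a sketch, not part of this commit, and the function name is hypothetical.

// Hypothetical helper: shared computed-only disk attributes that both
// DataSourceCompute and ResourceComputeSchemaMake could merge into their
// per-disk sub-schemas, so the definitions stay in sync.
func sharedDiskComputedAttrs() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"shareable": {Type: schema.TypeBool, Computed: true},
		"size_max":  {Type: schema.TypeInt, Computed: true},
		"size_used": {Type: schema.TypeInt, Computed: true},
	}
}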