stSolo
2023-01-24 17:50:38 +03:00
parent a355247845
commit 4d865ae921
48 changed files with 1218 additions and 468 deletions

View File

@@ -141,6 +141,22 @@ func flattenAccResources(r Resources) []map[string]interface{} {
return res
}
func flattenAccountSeps(seps map[string]map[string]ResourceSep) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for sepKey, sepVal := range seps {
for dataKey, dataVal := range sepVal {
temp := map[string]interface{}{
"sep_id": sepKey,
"data_name": dataKey,
"disk_size": dataVal.DiskSize,
"disk_size_max": dataVal.DiskSizeMax,
}
res = append(res, temp)
}
}
return res
}
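A minimal usage sketch of flattenAccountSeps for review, assuming it runs in the same package; the SEP ID, data name and sizes below are invented:
// The API reports SEP usage as map[sepID]map[dataName]ResourceSep;
// the helper emits one flat entry per (sep_id, data_name) pair.
seps := map[string]map[string]ResourceSep{
"1": {"vmstor": {DiskSize: 10.5, DiskSizeMax: 100}},
}
flat := flattenAccountSeps(seps)
// flat == []map[string]interface{}{
//   {"sep_id": "1", "data_name": "vmstor", "disk_size": 10.5, "disk_size_max": 100},
// }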
func flattenAccResource(r Resource) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
@@ -150,6 +166,7 @@ func flattenAccResource(r Resource) []map[string]interface{} {
"exttraffic": r.Exttraffic,
"gpu": r.GPU,
"ram": r.RAM,
"seps": flattenAccountSeps(r.SEPs),
}
res = append(res, temp)
return res
@@ -161,6 +178,7 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Required: true,
},
"dc_location": {
Type: schema.TypeString,
Computed: true,
@@ -199,6 +217,30 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
@@ -231,6 +273,30 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},

View File

@@ -47,6 +47,7 @@ func flattenAccountDisksList(adl AccountDisksList) []map[string]interface{} {
"disk_name": ad.Name,
"pool": ad.Pool,
"sep_id": ad.SepId,
"shareable": ad.Shareable,
"size_max": ad.SizeMax,
"type": ad.Type,
}
@@ -98,6 +99,10 @@ func dataSourceAccountDisksListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -89,13 +89,19 @@ type AccountCloudApi struct {
type AccountCloudApiList []AccountCloudApi
type ResourceSep struct {
DiskSize float64 `json:"disksize"`
DiskSizeMax int `json:"disksizemax"`
}
type Resource struct {
CPU int `json:"cpu"`
Disksize int `json:"disksize"`
Extips int `json:"extips"`
Exttraffic int `json:"exttraffic"`
GPU int `json:"gpu"`
RAM int `json:"ram"`
CPU int `json:"cpu"`
Disksize int `json:"disksize"`
Extips int `json:"extips"`
Exttraffic int `json:"exttraffic"`
GPU int `json:"gpu"`
RAM int `json:"ram"`
SEPs map[string]map[string]ResourceSep `json:"seps"`
}
type Resources struct {
@@ -147,12 +153,13 @@ type AccountCompute struct {
type AccountComputesList []AccountCompute
type AccountDisk struct {
ID int `json:"id"`
Name string `json:"name"`
Pool string `json:"pool"`
SepId int `json:"sepId"`
SizeMax int `json:"sizeMax"`
Type string `json:"type"`
ID int `json:"id"`
Name string `json:"name"`
Pool string `json:"pool"`
SepId int `json:"sepId"`
Shareable bool `json:"shareable"`
SizeMax int `json:"sizeMax"`
Type string `json:"type"`
}
type AccountDisksList []AccountDisk

View File

@@ -593,6 +593,30 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
@@ -625,6 +649,30 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},

View File

@@ -47,4 +47,6 @@ const (
disksSnapshotDeleteAPI = "/restmachine/cloudapi/disks/snapshotDelete"
disksSnapshotRollbackAPI = "/restmachine/cloudapi/disks/snapshotRollback"
disksShareAPI = "/restmachine/cloudapi/disks/share"
disksUnshareAPI = "/restmachine/cloudapi/disks/unshare"
)
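A hedged sketch of how the new endpoints are invoked through the controller, mirroring the pattern used in the disk resource below; ctx, c (*controller.ControllerCfg) and the disk ID are assumed from the surrounding resource function:
urlValues := &url.Values{}
urlValues.Add("diskId", "1234") // hypothetical disk ID
// disks/share enables sharing; disksUnshareAPI is called the same way to revert it.
if _, err := c.DecortAPICall(ctx, "POST", disksShareAPI, urlValues); err != nil {
return diag.FromErr(err)
}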

View File

@@ -60,8 +60,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl))
d.Set("boot_partition", disk.BootPartition)
d.Set("compute_id", disk.ComputeID)
d.Set("compute_name", disk.ComputeName)
d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("created_time", disk.CreatedTime)
d.Set("deleted_time", disk.DeletedTime)
d.Set("desc", disk.Desc)
@@ -84,6 +83,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
d.Set("passwd", disk.Passwd)
d.Set("pci_slot", disk.PciSlot)
d.Set("pool", disk.Pool)
d.Set("present_to", disk.PresentTo)
d.Set("purge_attempts", disk.PurgeAttempts)
d.Set("purge_time", disk.PurgeTime)
d.Set("reality_device_number", disk.RealityDeviceNumber)
@@ -93,6 +93,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
d.Set("role", disk.Role)
d.Set("sep_id", disk.SepID)
d.Set("sep_type", disk.SepType)
d.Set("shareable", disk.Shareable)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
@@ -130,15 +131,21 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Number of disk partitions",
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Compute ID",
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
Description: "Compute name",
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
@@ -316,6 +323,13 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Pool for disk location",
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
@@ -361,6 +375,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -42,6 +42,18 @@ import (
"github.com/rudecs/terraform-provider-decort/internal/constants"
)
func flattenDiskComputes(computes map[string]string) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for computeKey, computeVal := range computes {
temp := map[string]interface{}{
"compute_id": computeKey,
"compute_name": computeVal,
}
res = append(res, temp)
}
return res
}
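A small usage sketch of flattenDiskComputes with an invented compute ID and name, assuming the same package:
// disks/get now returns computes as map[computeID]computeName.
computes := map[string]string{"5012": "demo-vm"}
flat := flattenDiskComputes(computes)
// flat == []map[string]interface{}{{"compute_id": "5012", "compute_name": "demo-vm"}}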
func flattenIOTune(iot IOTune) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
@@ -72,9 +84,8 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
"computes": flattenDiskComputes(disk.Computes),
"boot_partition": disk.BootPartition,
"compute_id": disk.ComputeID,
"compute_name": disk.ComputeName,
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"desc": disk.Desc,
@@ -99,6 +110,7 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
"passwd": disk.Passwd,
"pci_slot": disk.PciSlot,
"pool": disk.Pool,
"present_to": disk.PresentTo,
"purge_attempts": disk.PurgeAttempts,
"purge_time": disk.PurgeTime,
"reality_device_number": disk.RealityDeviceNumber,
@@ -108,6 +120,7 @@ func flattenDiskList(dl DisksList) []map[string]interface{} {
"role": disk.Role,
"sep_id": disk.SepID,
"sep_type": disk.SepType,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"snapshots": flattenDiskSnapshotList(disk.Snapshots),
@@ -199,15 +212,21 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Number of disk partitions",
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Compute ID",
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
Description: "Compute name",
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
@@ -400,6 +419,13 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Pool for disk location",
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
@@ -445,6 +471,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -37,9 +37,8 @@ type Disk struct {
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
BootPartition int `json:"bootPartition"`
Computes map[string]string `json:"computes"`
CreatedTime uint64 `json:"creationTime"`
ComputeID int `json:"computeId"`
ComputeName string `json:"computeName"`
DeletedTime uint64 `json:"deletionTime"`
DeviceName string `json:"devicename"`
Desc string `json:"desc"`
@@ -63,6 +62,7 @@ type Disk struct {
ParentId int `json:"parentId"`
PciSlot int `json:"pciSlot"`
Pool string `json:"pool"`
PresentTo []int `json:"presentTo"`
PurgeTime uint64 `json:"purgeTime"`
PurgeAttempts uint64 `json:"purgeAttempts"`
RealityDeviceNumber int `json:"realityDeviceNumber"`
@@ -71,6 +71,7 @@ type Disk struct {
ResName string `json:"resName"`
Role string `json:"role"`
SepType string `json:"sepType"`
Shareable bool `json:"shareable"`
SepID int `json:"sepId"` // NOTE: absent from compute/get output
SizeMax int `json:"sizeMax"`
SizeUsed int `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space

View File

@@ -113,6 +113,15 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
urlValues = &url.Values{}
}
if shareable := d.Get("shareable"); shareable.(bool) == true {
urlValues.Add("diskId", diskId)
_, err := c.DecortAPICall(ctx, "POST", disksShareAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
dgn := resourceDiskRead(ctx, d, m)
if dgn != nil {
return dgn
@@ -167,8 +176,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl))
d.Set("boot_partition", disk.BootPartition)
d.Set("compute_id", disk.ComputeID)
d.Set("compute_name", disk.ComputeName)
d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("created_time", disk.CreatedTime)
d.Set("deleted_time", disk.DeletedTime)
d.Set("desc", disk.Desc)
@@ -191,6 +199,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
d.Set("passwd", disk.Passwd)
d.Set("pci_slot", disk.PciSlot)
d.Set("pool", disk.Pool)
d.Set("present_to", disk.PresentTo)
d.Set("purge_attempts", disk.PurgeAttempts)
d.Set("purge_time", disk.PurgeTime)
d.Set("reality_device_number", disk.RealityDeviceNumber)
@@ -202,6 +211,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
d.Set("sep_type", disk.SepType)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
d.Set("shareable", disk.Shareable)
d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
d.Set("status", disk.Status)
d.Set("tech_status", disk.TechStatus)
@@ -277,6 +287,24 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
urlValues = &url.Values{}
}
if d.HasChange("shareable") {
oldShare, newShare := d.GetChange("shareable")
urlValues = &url.Values{}
urlValues.Add("diskId", d.Id())
if oldShare.(bool) == false && newShare.(bool) == true {
_, err := c.DecortAPICall(ctx, "POST", disksShareAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
}
if oldShare.(bool) == true && newShare.(bool) == false {
_, err := c.DecortAPICall(ctx, "POST", disksUnshareAPI, urlValues)
if err != nil {
return diag.FromErr(err)
}
}
}
return resourceDiskRead(ctx, d, m)
}
@@ -335,6 +363,13 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Pool for disk location",
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
@@ -354,7 +389,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false),
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"detach": {
Type: schema.TypeBool,
Optional: true,
@@ -373,6 +407,12 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Default: "",
Description: "Reason for deletion",
},
"shareable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Default: false,
},
"disk_id": {
Type: schema.TypeInt,
@@ -393,15 +433,21 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Number of disk partitions",
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Compute ID",
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
Description: "Compute name",
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -80,6 +81,7 @@ func flattenImage(d *schema.ResourceData, img *ImageExtend) {
d.Set("pool_name", img.Pool)
d.Set("provider_name", img.ProviderName)
d.Set("purge_attempts", img.PurgeAttempts)
d.Set("present_to", img.PresentTo)
d.Set("res_id", img.ResId)
d.Set("rescuecd", img.RescueCD)
d.Set("sep_id", img.SepId)

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -161,6 +162,13 @@ func dataSourceImageExtendSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"res_id": {
Type: schema.TypeString,
Computed: true,

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -134,6 +135,7 @@ type ImageExtend struct {
Password string `json:"password"`
Pool string `json:"pool"`
ProviderName string `json:"provider_name"`
PresentTo []int `json:"presentTo"`
PurgeAttempts int `json:"purgeAttempts"`
ResId string `json:"resId"`
RescueCD bool `json:"rescuecd"`

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -49,6 +49,8 @@ type K8sNodeRecord struct {
ID int `json:"id"`
Name string `json:"name"`
} `json:"detailedInfo"`
SepID int `json:"SepId"`
SepPool string `json:"SepPool"`
}
//K8sRecord represents k8s instance

View File

@@ -56,10 +56,12 @@ func parseNode(nodeList []interface{}) K8sNodeRecord {
node := nodeList[0].(map[string]interface{})
return K8sNodeRecord{
Num: node["num"].(int),
Cpu: node["cpu"].(int),
Ram: node["ram"].(int),
Disk: node["disk"].(int),
Num: node["num"].(int),
Cpu: node["cpu"].(int),
Ram: node["ram"].(int),
Disk: node["disk"].(int),
SepID: node["sep_id"].(int),
SepPool: node["sep_pool"].(string),
}
}
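A sketch of the single-element list Terraform passes to parseNode after this change; the values are invented and assume sep_id/sep_pool are set in the masters or workers block:
nodeList := []interface{}{
map[string]interface{}{
"num": 1, "cpu": 2, "ram": 4096, "disk": 10,
"sep_id": 1, "sep_pool": "vmstor",
},
}
node := parseNode(nodeList)
// node.SepID == 1 and node.SepPool == "vmstor" are then forwarded as
// masterSepId/masterSepPool (or workerSepId/workerSepPool) in k8s/create.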
@@ -112,6 +114,14 @@ func mastersSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Number of nodes to create.",
}
masters["sep_id"] = &schema.Schema{
Type: schema.TypeInt,
Optional: true,
}
masters["sep_pool"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
}
masters["cpu"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
@@ -140,6 +150,14 @@ func workersSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Number of nodes to create.",
}
workers["sep_id"] = &schema.Schema{
Type: schema.TypeInt,
Optional: true,
}
workers["sep_pool"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
}
workers["cpu"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,

View File

@@ -69,6 +69,8 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
urlValues.Add("masterCpu", strconv.Itoa(masterNode.Cpu))
urlValues.Add("masterRam", strconv.Itoa(masterNode.Ram))
urlValues.Add("masterDisk", strconv.Itoa(masterNode.Disk))
urlValues.Add("masterSepId", strconv.Itoa(masterNode.SepID))
urlValues.Add("masterSepPool", masterNode.SepPool)
var workerNode K8sNodeRecord
if workers, ok := d.GetOk("workers"); ok {
@@ -80,6 +82,29 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
urlValues.Add("workerCpu", strconv.Itoa(workerNode.Cpu))
urlValues.Add("workerRam", strconv.Itoa(workerNode.Ram))
urlValues.Add("workerDisk", strconv.Itoa(workerNode.Disk))
urlValues.Add("workerSepId", strconv.Itoa(workerNode.SepID))
urlValues.Add("workerSepPool", workerNode.SepPool)
if labels, ok := d.GetOk("labels"); ok {
labels := labels.([]interface{})
for _, label := range labels {
urlValues.Add("labels", label.(string))
}
}
if taints, ok := d.GetOk("taints"); ok {
taints := taints.([]interface{})
for _, taint := range taints {
urlValues.Add("taints", taint.(string))
}
}
if annotations, ok := d.GetOk("annotations"); ok {
annotations := annotations.([]interface{})
for _, annotation := range annotations {
urlValues.Add("annotations", annotation.(string))
}
}
if withLB, ok := d.GetOk("with_lb"); ok {
urlValues.Add("withLB", strconv.FormatBool(withLB.(bool)))
@@ -133,7 +158,6 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
}
func resourceK8sRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
//log.Debugf("resourceK8sRead: called with id %s, rg %d", d.Id(), d.Get("rg_id").(int))
k8s, err := utilityDataK8sCheckPresence(ctx, d, m)
if err != nil {
@@ -282,28 +306,45 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Required: true,
Description: "Name of the cluster.",
},
"rg_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "Resource group ID that this instance belongs to.",
},
"k8sci_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "ID of the k8s catalog item to base this instance on.",
},
"wg_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Name for first worker group created with cluster.",
},
"labels": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"taints": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"annotations": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"masters": {
Type: schema.TypeList,
Optional: true,
@@ -315,7 +356,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
},
Description: "Master node(s) configuration.",
},
"workers": {
Type: schema.TypeList,
Optional: true,
@@ -326,7 +366,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
},
Description: "Worker node(s) configuration.",
},
"with_lb": {
Type: schema.TypeBool,
Optional: true,
@@ -334,7 +373,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Default: true,
Description: "Create k8s with load balancer if true.",
},
"extnet_id": {
Type: schema.TypeInt,
Optional: true,
@@ -342,7 +380,6 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
ForceNew: true,
Description: "ID of the external network to connect workers to. If omitted network will be chosen by the platfom.",
},
"desc": {
Type: schema.TypeString,
Optional: true,
@@ -417,13 +454,11 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"default_wg_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of default workers group for this instace.",
},
"kubeconfig": {
Type: schema.TypeString,
Computed: true,

View File

@@ -45,7 +45,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
@@ -125,10 +124,10 @@ func findInExtraDisks(DiskId uint, ExtraDisks []interface{}) bool {
return false
}
func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
func flattenDataComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, disk := range disksList {
if disk.Name == "bootdisk" || findInExtraDisks(disk.ID, extraDisks) { //skip main bootdisk and extraDisks
if findInExtraDisks(disk.ID, extraDisks) { // skip disks already listed in extra_disks
continue
}
temp := map[string]interface{}{
@@ -136,6 +135,9 @@ func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) [
"disk_id": disk.ID,
"disk_type": disk.Type,
"sep_id": disk.SepID,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"pool": disk.Pool,
"desc": disk.Desc,
"image_id": disk.ImageID,
@@ -146,7 +148,7 @@ func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) [
return res
}
func flattenCompute(d *schema.ResourceData, compFacts string) error {
func flattenDataCompute(d *schema.ResourceData, compFacts string) error {
// This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance.
//
@@ -219,7 +221,7 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error {
}
}
err = d.Set("disks", flattenComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List()))
err = d.Set("disks", flattenDataComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List()))
if err != nil {
return err
}
@@ -236,7 +238,7 @@ func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interf
return diag.FromErr(err)
}
if err = flattenCompute(d, compFacts); err != nil {
if err = flattenDataCompute(d, compFacts); err != nil {
return diag.FromErr(err)
}
@@ -353,48 +355,53 @@ func DataSourceCompute() *schema.Resource {
"disks": {
Type: schema.TypeList,
Computed: true,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_name": {
Type: schema.TypeString,
Required: true,
Computed: true,
Description: "Name for disk",
},
"size": {
Type: schema.TypeInt,
Required: true,
Computed: true,
Description: "Disk size in GiB",
},
"disk_type": {
Type: schema.TypeString,
Computed: true,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false),
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'",
Type: schema.TypeString,
Computed: true,
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'",
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
Optional: true,
Description: "Storage endpoint provider ID; by default the same with boot disk",
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_used": {
Type: schema.TypeInt,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Computed: true,
Optional: true,
Description: "Pool name; by default will be chosen automatically",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Optional: true,
Description: "Optional description",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Optional: true,
Description: "Specify image id for create disk from template",
},
"disk_id": {
@@ -404,8 +411,7 @@ func DataSourceCompute() *schema.Resource {
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Computed: true,
Description: "Disk deletion status",
},
},

View File

@@ -0,0 +1,115 @@
package kvmvm
import (
"encoding/json"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/rudecs/terraform-provider-decort/internal/status"
log "github.com/sirupsen/logrus"
)
func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, disk := range disksList {
if disk.Name == "bootdisk" || findInExtraDisks(disk.ID, extraDisks) { //skip main bootdisk and extraDisks
continue
}
temp := map[string]interface{}{
"disk_name": disk.Name,
"disk_id": disk.ID,
"disk_type": disk.Type,
"sep_id": disk.SepID,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"pool": disk.Pool,
"desc": disk.Desc,
"image_id": disk.ImageID,
"size": disk.SizeMax,
}
res = append(res, temp)
}
return res
}
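A usage sketch with one invented data disk, assuming the same package; bootdisk entries and disks already present in extra_disks are skipped by the helper:
disks := []DiskRecord{{
ID: 7, Name: "data1", Type: "D", SepID: 1, Pool: "vmstor",
Shareable: true, SizeMax: 20, SizeUsed: 3,
}}
flat := flattenComputeDisksDemo(disks, nil)
// flat[0]["shareable"] == true, flat[0]["size_max"] == 20,
// flat[0]["size_used"] == 3, flat[0]["size"] == 20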
func flattenCompute(d *schema.ResourceData, compFacts string) error {
// This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance.
//
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceComputeExists(...) method
model := ComputeGetResp{}
log.Debugf("flattenCompute: ready to unmarshal string %s", compFacts)
err := json.Unmarshal([]byte(compFacts), &model)
if err != nil {
return err
}
log.Debugf("flattenCompute: ID %d, RG ID %d", model.ID, model.RgID)
d.SetId(fmt.Sprintf("%d", model.ID))
// d.Set("compute_id", model.ID) - we should NOT set compute_id in the schema here: if it was set - it is already set, if it wasn't - we shouldn't
d.Set("name", model.Name)
d.Set("rg_id", model.RgID)
d.Set("rg_name", model.RgName)
d.Set("account_id", model.AccountID)
d.Set("account_name", model.AccountName)
d.Set("driver", model.Driver)
d.Set("cpu", model.Cpu)
d.Set("ram", model.Ram)
// d.Set("boot_disk_size", model.BootDiskSize) - bootdiskSize key in API compute/get is always zero, so we set boot_disk_size in another way
if model.VirtualImageID != 0 {
d.Set("image_id", model.VirtualImageID)
} else {
d.Set("image_id", model.ImageID)
}
d.Set("description", model.Desc)
d.Set("enabled", false)
if model.Status == status.Enabled {
d.Set("enabled", true)
}
//d.Set("cloud_init", "applied") // NOTE: for existing compute we hard-code this value as an indicator for DiffSuppress fucntion
//d.Set("status", model.Status)
//d.Set("tech_status", model.TechStatus)
d.Set("started", false)
if model.TechStatus == "STARTED" {
d.Set("started", true)
}
bootDisk := findBootDisk(model.Disks)
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations
d.Set("sep_id", bootDisk.SepID)
d.Set("pool", bootDisk.Pool)
//if len(model.Disks) > 0 {
//log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks))
//if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil {
//return err
//}
//}
if len(model.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces))
if err = d.Set("network", parseComputeInterfacesToNetworks(model.Interfaces)); err != nil {
return err
}
}
if len(model.OsUsers) > 0 {
log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(model.OsUsers))
if err = d.Set("os_users", parseOsUsers(model.OsUsers)); err != nil {
return err
}
}
err = d.Set("disks", flattenComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List()))
if err != nil {
return err
}
return nil
}

View File

@@ -72,6 +72,7 @@ type DiskRecord struct {
Role string `json:"role"`
SepType string `json:"sepType"`
SepID int `json:"sepId"` // NOTE: absent from compute/get output
Shareable bool `json:"shareable"`
SizeMax int `json:"sizeMax"`
SizeUsed int `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
Snapshots []SnapshotRecord `json:"snapshots"`

View File

@@ -694,6 +694,18 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Storage endpoint provider ID; by default the same with boot disk",
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_used": {
Type: schema.TypeInt,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Computed: true,

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -34,10 +35,11 @@ package rg
import (
"context"
"encoding/json"
"fmt"
"net/url"
"strconv"
"github.com/rudecs/terraform-provider-decort/internal/constants"
log "github.com/sirupsen/logrus"
"github.com/rudecs/terraform-provider-decort/internal/controller"
// "net/url"
@@ -45,53 +47,31 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func flattenResgroup(d *schema.ResourceData, rg_facts string) error {
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceRsgroupExists(...) method
// log.Debugf("%s", rg_facts)
log.Debugf("flattenResgroup: ready to decode response body from API")
details := ResgroupGetResp{}
err := json.Unmarshal([]byte(rg_facts), &details)
func utilityDataResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*ResgroupGetResp, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
rgData := &ResgroupGetResp{}
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
rgRaw, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues)
if err != nil {
return err
return nil, err
}
log.Debugf("flattenResgroup: decoded RG name %q / ID %d, account ID %d",
details.Name, details.ID, details.AccountID)
d.SetId(fmt.Sprintf("%d", details.ID))
d.Set("rg_id", details.ID)
d.Set("name", details.Name)
d.Set("account_name", details.AccountName)
d.Set("account_id", details.AccountID)
// d.Set("grid_id", details.GridID)
d.Set("description", details.Desc)
d.Set("status", details.Status)
d.Set("def_net_type", details.DefaultNetType)
d.Set("def_net_id", details.DefaultNetID)
/*
d.Set("vins", details.Vins)
d.Set("computes", details.Computes)
*/
log.Debugf("flattenResgroup: calling flattenQuota()")
if err = d.Set("quota", parseQuota(details.Quota)); err != nil {
return err
err = json.Unmarshal([]byte(rgRaw), rgData)
if err != nil {
return nil, err
}
return nil
return rgData, nil
}
func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
rg_facts, err := utilityResgroupCheckPresence(ctx, d, m)
if rg_facts == "" {
// if empty string is returned from utilityResgroupCheckPresence then there is no
// such resource group and err tells so - just return it to the calling party
rg, err := utilityDataResgroupCheckPresence(ctx, d, m)
if err != nil {
d.SetId("") // ensure ID is empty in this case
return diag.FromErr(err)
}
return diag.FromErr(flattenResgroup(d, rg_facts))
return diag.FromErr(flattenDataResgroup(d, *rg))
}
func DataSourceResgroup() *schema.Resource {
@@ -126,7 +106,7 @@ func DataSourceResgroup() *schema.Resource {
"account_id": {
Type: schema.TypeInt,
Required: true,
Optional: true,
Description: "Unique ID of the account, which this resource group belongs to.",
},
@@ -135,15 +115,11 @@ func DataSourceResgroup() *schema.Resource {
Computed: true,
Description: "User-defined text description of this resource group.",
},
/* commented out, as in this version of provider we use default Grid ID
"grid_id": {
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "Unique ID of the grid, where this resource group is deployed.",
},
*/
"quota": {
Type: schema.TypeList,
Computed: true,
@@ -165,32 +141,150 @@ func DataSourceResgroup() *schema.Resource {
Description: "ID of the default network for this resource group (if any).",
},
/*
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current status of this resource group.",
},
"vins": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
MaxItems: LimitMaxVinsPerResgroup,
Elem: &schema.Schema{
Type: schema.TypeInt,
"resources": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"current": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
},
Description: "List of VINs deployed in this resource group.",
},
},
"computes": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of computes deployed in this resource group.",
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current status of this resource group.",
},
"vins": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
*/
Description: "List of VINs deployed in this resource group.",
},
"vms": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of computes deployed in this resource group.",
},
},
}
}

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -0,0 +1,145 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://github.com/rudecs/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/
package rg
import (
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
)
func flattenAccountSeps(seps map[string]map[string]ResourceSep) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for sepKey, sepVal := range seps {
for dataKey, dataVal := range sepVal {
temp := map[string]interface{}{
"sep_id": sepKey,
"data_name": dataKey,
"disk_size": dataVal.DiskSize,
"disk_size_max": dataVal.DiskSizeMax,
}
res = append(res, temp)
}
}
return res
}
func flattenAccResource(r Resource) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"cpu": r.CPU,
"disksize": r.Disksize,
"extips": r.Extips,
"exttraffic": r.Exttraffic,
"gpu": r.GPU,
"ram": r.RAM,
"seps": flattenAccountSeps(r.SEPs),
}
res = append(res, temp)
return res
}
func flattenRgResources(r Resources) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"current": flattenAccResource(r.Current),
"reserved": flattenAccResource(r.Reserved),
}
res = append(res, temp)
return res
}
func flattenDataResgroup(d *schema.ResourceData, details ResgroupGetResp) error {
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceRsgroupExists(...) method
// log.Debugf("%s", rg_facts)
log.Debugf("flattenResgroup: decoded RG name %q / ID %d, account ID %d",
details.Name, details.ID, details.AccountID)
d.SetId(fmt.Sprintf("%d", details.ID))
d.Set("rg_id", details.ID)
d.Set("name", details.Name)
d.Set("account_name", details.AccountName)
d.Set("account_id", details.AccountID)
d.Set("gid", details.GridID)
d.Set("description", details.Desc)
d.Set("status", details.Status)
d.Set("def_net_type", details.DefaultNetType)
d.Set("def_net_id", details.DefaultNetID)
d.Set("resources", flattenRgResources(details.Resources))
d.Set("vins", details.Vins)
d.Set("vms", details.Computes)
log.Debugf("flattenResgroup: calling flattenQuota()")
if err := d.Set("quota", parseQuota(details.Quota)); err != nil {
return err
}
return nil
}
func flattenResgroup(d *schema.ResourceData, details ResgroupGetResp) error {
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceRsgroupExists(...) method
// log.Debugf("%s", rg_facts)
//log.Debugf("flattenResgroup: ready to decode response body from API")
//details := ResgroupGetResp{}
//err := json.Unmarshal([]byte(rg_facts), &details)
//if err != nil {
//return err
//}
log.Debugf("flattenResgroup: decoded RG name %q / ID %d, account ID %d",
details.Name, details.ID, details.AccountID)
d.SetId(fmt.Sprintf("%d", details.ID))
d.Set("rg_id", details.ID)
d.Set("name", details.Name)
d.Set("account_name", details.AccountName)
d.Set("account_id", details.AccountID)
d.Set("gid", details.GridID)
d.Set("description", details.Desc)
d.Set("status", details.Status)
d.Set("def_net_type", details.DefaultNetType)
d.Set("def_net_id", details.DefaultNetID)
d.Set("resources", flattenRgResources(details.Resources))
d.Set("vins", details.Vins)
d.Set("vms", details.Computes)
log.Debugf("flattenResgroup: calling flattenQuota()")
if err := d.Set("quota", parseQuota(details.Quota)); err != nil {
return err
}
return nil
}

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -91,27 +92,28 @@ type AccountAclRecord struct {
}
type ResgroupGetResp struct {
ACLs []UserAclRecord `json:"ACLs"`
Usage UsageRecord `json:"Resources"`
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
GridID int `json:"gid"`
CreatedBy string `json:"createdBy"`
CreatedTime uint64 `json:"createdTime"`
DefaultNetID int `json:"def_net_id"`
DefaultNetType string `json:"def_net_type"`
DeletedBy string `json:"deletedBy"`
DeletedTime uint64 `json:"deletedTime"`
Desc string `json:"desc"`
ID uint `json:"id"`
LockStatus string `json:"lockStatus"`
Name string `json:"name"`
Quota QuotaRecord `json:"resourceLimits"`
Status string `json:"status"`
UpdatedBy string `json:"updatedBy"`
UpdatedTime uint64 `json:"updatedTime"`
Vins []int `json:"vins"`
Computes []int `json:"vms"`
Resources Resources `json:"Resources"`
ACLs []UserAclRecord `json:"ACLs"`
//Usage UsageRecord `json:"Resources"`
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
GridID int `json:"gid"`
CreatedBy string `json:"createdBy"`
CreatedTime uint64 `json:"createdTime"`
DefaultNetID int `json:"def_net_id"`
DefaultNetType string `json:"def_net_type"`
DeletedBy string `json:"deletedBy"`
DeletedTime uint64 `json:"deletedTime"`
Desc string `json:"desc"`
ID uint `json:"id"`
LockStatus string `json:"lockStatus"`
Name string `json:"name"`
Quota QuotaRecord `json:"resourceLimits"`
Status string `json:"status"`
UpdatedBy string `json:"updatedBy"`
UpdatedTime uint64 `json:"updatedTime"`
Vins []int `json:"vins"`
Computes []int `json:"vms"`
Ignored map[string]interface{} `json:"-"`
}
@@ -147,3 +149,23 @@ type UsageRecord struct {
Current ResourceRecord `json:"Current"`
Reserved ResourceRecord `json:"Reserved"`
}
type ResourceSep struct {
DiskSize float64 `json:"disksize"`
DiskSizeMax int `json:"disksizemax"`
}
type Resource struct {
CPU int `json:"cpu"`
Disksize int `json:"disksize"`
Extips int `json:"extips"`
Exttraffic int `json:"exttraffic"`
GPU int `json:"gpu"`
RAM int `json:"ram"`
SEPs map[string]map[string]ResourceSep `json:"seps"`
}
type Resources struct {
Current Resource `json:"Current"`
Reserved Resource `json:"Reserved"`
}
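An illustration (field values invented) of the rg/get JSON fragment these new types are meant to decode, assuming encoding/json is imported as elsewhere in this package:
raw := []byte(`{
"Current": {"cpu": 2, "ram": 2048, "disksize": 10, "extips": 1, "exttraffic": 0, "gpu": 0,
"seps": {"1": {"vmstor": {"disksize": 10.5, "disksizemax": 100}}}},
"Reserved": {"cpu": 0, "ram": 0, "disksize": 0, "extips": 0, "exttraffic": 0, "gpu": 0, "seps": {}}
}`)
var res Resources
_ = json.Unmarshal(raw, &res) // error handling omitted in this sketch
// res.Current.SEPs["1"]["vmstor"].DiskSizeMax == 100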

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -33,9 +34,9 @@ package rg
import (
"context"
"encoding/json"
"fmt"
"net/url"
"strconv"
"github.com/rudecs/terraform-provider-decort/internal/constants"
"github.com/rudecs/terraform-provider-decort/internal/controller"
@@ -133,16 +134,11 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter
d.SetId(api_resp) // rg/create API returns the ID of the newly created resource group on success
// rg.ID, _ = strconv.Atoi(api_resp)
if !set_quota {
resp, err := utilityResgroupCheckPresence(ctx, d, m)
rg, err := utilityResgroupCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
rg := ResgroupGetResp{}
if err := json.Unmarshal([]byte(resp), &rg); err != nil {
return diag.FromErr(err)
}
d.Set("quota", parseQuota(rg.Quota))
}
@@ -155,14 +151,14 @@ func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interfa
d.Get("name").(string), d.Get("account_id").(int))
rg_facts, err := utilityResgroupCheckPresence(ctx, d, m)
if rg_facts == "" {
if err != nil {
// if empty string is returned from utilityResgroupCheckPresence then there is no
// such resource group and err tells so - just return it to the calling party
d.SetId("") // ensure ID is empty
return diag.FromErr(err)
}
return diag.FromErr(flattenResgroup(d, rg_facts))
return diag.FromErr(flattenResgroup(d, *rg_facts))
}
func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@@ -272,24 +268,21 @@ func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m inter
log.Debugf("resourceResgroupDelete: called for RG name %s, account ID %d",
d.Get("name").(string), d.Get("account_id").(int))
rg_facts, err := utilityResgroupCheckPresence(ctx, d, m)
if rg_facts == "" {
if err != nil {
return diag.FromErr(err)
}
// the target RG does not exist - in this case according to Terraform best practice
// we exit from Destroy method without error
return nil
c := m.(*controller.ControllerCfg)
url_values := &url.Values{}
url_values.Add("rgId", d.Id())
if force, ok := d.GetOk("force"); ok {
url_values.Add("force", strconv.FormatBool(force.(bool)))
}
if permanently, ok := d.GetOk("permanently"); ok {
url_values.Add("permanently", strconv.FormatBool(permanently.(bool)))
}
if reason, ok := d.GetOk("reason"); ok {
url_values.Add("reason", reason.(string))
}
url_values := &url.Values{}
url_values.Add("rgId", d.Id())
url_values.Add("force", "1")
url_values.Add("permanently", "1")
url_values.Add("reason", "Destroyed by DECORT Terraform provider")
c := m.(*controller.ControllerCfg)
_, err = c.DecortAPICall(ctx, "POST", ResgroupDeleteAPI, url_values)
_, err := c.DecortAPICall(ctx, "POST", ResgroupDeleteAPI, url_values)
if err != nil {
return diag.FromErr(err)
}
@@ -297,6 +290,257 @@ func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m inter
return nil
}
func ResourceRgSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "Unique ID of the account, which this resource group belongs to.",
},
"gid": {
Type: schema.TypeInt,
Required: true,
ForceNew: true, // change of Grid ID will require new RG
Description: "Unique ID of the grid, where this resource group is deployed.",
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of this resource group. Names are case sensitive and unique within the context of a account.",
},
"def_net_type": {
Type: schema.TypeString,
Optional: true,
Default: "PRIVATE",
ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false),
Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.",
},
"def_net_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the default network for this resource group (if any).",
},
"ipcidr": {
Type: schema.TypeString,
Optional: true,
Description: "Address of the netowrk inside the private network segment (aka ViNS) if def_net_type=PRIVATE",
},
"ext_net_id": {
Type: schema.TypeInt,
Optional: true,
Default: 0,
Description: "ID of the external network for default ViNS. Pass 0 if def_net_type=PUBLIC or no external connection required for the defult ViNS when def_net_type=PRIVATE",
},
"ext_ip": {
Type: schema.TypeString,
Optional: true,
Description: "IP address on the external netowrk to request when def_net_type=PRIVATE and ext_net_id is not 0",
},
"quota": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: quotaRgSubresourceSchemaMake(),
},
Description: "Quota settings for this resource group.",
},
"description": {
Type: schema.TypeString,
Optional: true,
Description: "User-defined text description of this resource group.",
},
"force": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Set to True if you want force delete non-empty RG",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Set to True if you want force delete non-empty RG",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Description: "Set to True if you want force delete non-empty RG",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the account, which this resource group belongs to.",
},
"resources": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"current": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
},
},
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current status of this resource group.",
},
"vins": {
Type: schema.TypeList, //this is a list of ints
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of VINs deployed in this resource group.",
},
"vms": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of computes deployed in this resource group.",
},
"computes": {
Type: schema.TypeList, //this is a list of ints
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of computes deployed in this resource group.",
},
}
}
func ResourceResgroup() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
@@ -318,113 +562,6 @@ func ResourceResgroup() *schema.Resource {
Default: &constants.Timeout300s,
},
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of this resource group. Names are case sensitive and unique within the context of a account.",
},
"account_id": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "Unique ID of the account, which this resource group belongs to.",
},
"def_net_type": {
Type: schema.TypeString,
Optional: true,
Default: "PRIVATE",
ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false),
Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.",
},
"def_net_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the default network for this resource group (if any).",
},
"ipcidr": {
Type: schema.TypeString,
Optional: true,
Description: "Address of the netowrk inside the private network segment (aka ViNS) if def_net_type=PRIVATE",
},
"ext_net_id": {
Type: schema.TypeInt,
Optional: true,
Default: 0,
Description: "ID of the external network for default ViNS. Pass 0 if def_net_type=PUBLIC or no external connection required for the defult ViNS when def_net_type=PRIVATE",
},
"ext_ip": {
Type: schema.TypeString,
Optional: true,
Description: "IP address on the external netowrk to request when def_net_type=PRIVATE and ext_net_id is not 0",
},
/* commented out, as in this version of provider we use default Grid ID
"grid_id": {
Type: schema.TypeInt,
Optional: true,
Default: 0, // if 0 is passed, default Grid ID will be used
// DefaultFunc: utilityResgroupGetDefaultGridID,
ForceNew: true, // change of Grid ID will require new RG
Description: "Unique ID of the grid, where this resource group is deployed.",
},
*/
"quota": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: quotaRgSubresourceSchemaMake(),
},
Description: "Quota settings for this resource group.",
},
"description": {
Type: schema.TypeString,
Optional: true,
Description: "User-defined text description of this resource group.",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the account, which this resource group belongs to.",
},
/*
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Current status of this resource group.",
},
"vins": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
MaxItems: LimitMaxVinsPerResgroup,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of VINs deployed in this resource group.",
},
"computes": {
Type: schema.TypeList, // this is a list of ints
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of computes deployed in this resource group.",
},
*/
},
Schema: ResourceRgSchemaMake(),
}
}

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -34,19 +35,17 @@ package rg
import (
"context"
"encoding/json"
"fmt"
"net/url"
"strconv"
"github.com/rudecs/terraform-provider-decort/internal/controller"
log "github.com/sirupsen/logrus"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// On success this function returns a pointer to a ResgroupGetResp structure
// populated from the rg/get API response
func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*ResgroupGetResp, error) {
// This function tries to locate resource group by one of the following algorithms depending
// on the parameters passed:
// - if resource group ID is specified -> by RG ID
@@ -67,73 +66,21 @@ func utilityResgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
// make it possible to use "read" & "check presence" functions with RG ID set so
// that Import of RG resource is possible
idSet := false
theId, err := strconv.Atoi(d.Id())
if err != nil || theId <= 0 {
rgId, argSet := d.GetOk("rg_id")
if argSet {
theId = rgId.(int)
idSet = true
}
if d.Id() != "" {
urlValues.Add("rgId", d.Id())
} else {
idSet = true
urlValues.Add("rgId", strconv.Itoa(d.Get("rg_id").(int)))
}
if idSet {
// go straight for the RG by its ID
log.Debugf("utilityResgroupCheckPresence: locating RG by its ID %d", theId)
urlValues.Add("rgId", fmt.Sprintf("%d", theId))
rgFacts, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues)
if err != nil {
return "", err
}
return rgFacts, nil
}
rgName, argSet := d.GetOk("name")
if !argSet {
// no RG ID and no RG name - we cannot locate resource group in this case
return "", fmt.Errorf("Cannot check resource group presence if name is empty and no resource group ID specified")
}
// Valid account ID is required to locate a resource group
// obtain Account ID by account name - it should not be zero on success
urlValues.Add("includedeleted", "false")
apiResp, err := c.DecortAPICall(ctx, "POST", ResgroupListAPI, urlValues)
rgData := &ResgroupGetResp{}
rgRaw, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, urlValues)
if err != nil {
return "", err
return nil, err
}
// log.Debugf("%s", apiResp)
log.Debugf("utilityResgroupCheckPresence: ready to decode response body from %s", ResgroupListAPI)
model := ResgroupListResp{}
err = json.Unmarshal([]byte(apiResp), &model)
err = json.Unmarshal([]byte(rgRaw), rgData)
if err != nil {
return "", err
return nil, err
}
log.Debugf("utilityResgroupCheckPresence: traversing decoded Json of length %d", len(model))
for index, item := range model {
// match by RG name & account ID
if item.Name == rgName.(string) && item.AccountID == d.Get("account_id").(int) {
log.Debugf("utilityResgroupCheckPresence: match RG name %s / ID %d, account ID %d at index %d",
item.Name, item.ID, item.AccountID, index)
// not all required information is returned by rg/list API, so we need to initiate one more
// call to rg/get to obtain extra data to complete Resource population.
// Namely, we need resource quota settings
reqValues := &url.Values{}
reqValues.Add("rgId", fmt.Sprintf("%d", item.ID))
apiResp, err := c.DecortAPICall(ctx, "POST", ResgroupGetAPI, reqValues)
if err != nil {
return "", err
}
return apiResp, nil
}
}
return "", fmt.Errorf("Cannot find RG name %s owned by account ID %d", rgName, d.Get("account_id").(int))
return rgData, nil
}

View File

@@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.