KasimBaybikov
2023-05-04 10:08:25 +03:00
parent 9bad8a6947
commit 8ca233dd32
288 changed files with 6645 additions and 11464 deletions

View File

@@ -1,52 +0,0 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package disks
const (
disksCreateAPI = "/restmachine/cloudapi/disks/create"
disksGetAPI = "/restmachine/cloudapi/disks/get"
disksListAPI = "/restmachine/cloudapi/disks/list"
disksResizeAPI = "/restmachine/cloudapi/disks/resize2"
disksRenameAPI = "/restmachine/cloudapi/disks/rename"
disksDeleteAPI = "/restmachine/cloudapi/disks/delete"
disksIOLimitAPI = "/restmachine/cloudapi/disks/limitIO"
disksRestoreAPI = "/restmachine/cloudapi/disks/restore"
disksListTypesAPI = "/restmachine/cloudapi/disks/listTypes"
disksListDeletedAPI = "/restmachine/cloudapi/disks/listDeleted"
disksListUnattachedAPI = "/restmachine/cloudapi/disks/listUnattached"
disksSnapshotDeleteAPI = "/restmachine/cloudapi/disks/snapshotDelete"
disksSnapshotRollbackAPI = "/restmachine/cloudapi/disks/snapshotRollback"
disksShareAPI = "/restmachine/cloudapi/disks/share"
disksUnshareAPI = "/restmachine/cloudapi/disks/unshare"
)
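A minimal sketch of the call pattern that replaces these endpoint constants, assuming the provider's *controller.ControllerCfg is available as c (the helper name exampleGetDisk and the trimmed import list are illustrative, not part of the commit): the typed SDK client builds the /restmachine/cloudapi/disks/* URLs internally, so the per-endpoint strings are no longer needed.

package disks

import (
	"context"

	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)

// exampleGetDisk is a sketch of the new call shape: a typed request replaces
// url.Values, and the SDK performs the HTTP call to the disks endpoint itself.
func exampleGetDisk(ctx context.Context, c *controller.ControllerCfg, diskID uint64) (*disks.RecordDisk, error) {
	req := disks.GetRequest{DiskID: diskID} // instead of urlValues.Add("diskId", ...)
	return c.CloudAPI().Disks().Get(ctx, req)
}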

View File

@@ -34,7 +34,6 @@ package disks
import (
"context"
"encoding/json"
// "net/url"
@@ -54,53 +53,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
id := uuid.New()
d.SetId(id.String())
diskAcl, _ := json.Marshal(disk.Acl)
d.Set("account_id", disk.AccountID)
d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl))
d.Set("boot_partition", disk.BootPartition)
d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("created_time", disk.CreatedTime)
d.Set("deleted_time", disk.DeletedTime)
d.Set("desc", disk.Desc)
d.Set("destruction_time", disk.DestructionTime)
d.Set("devicename", disk.DeviceName)
d.Set("disk_path", disk.DiskPath)
d.Set("gid", disk.GridID)
d.Set("guid", disk.GUID)
d.Set("disk_id", disk.ID)
d.Set("image_id", disk.ImageID)
d.Set("images", disk.Images)
d.Set("iotune", flattenIOTune(disk.IOTune))
d.Set("iqn", disk.IQN)
d.Set("login", disk.Login)
d.Set("milestones", disk.Milestones)
d.Set("disk_name", disk.Name)
d.Set("order", disk.Order)
d.Set("params", disk.Params)
d.Set("parent_id", disk.ParentId)
d.Set("passwd", disk.Passwd)
d.Set("pci_slot", disk.PciSlot)
d.Set("pool", disk.Pool)
d.Set("present_to", disk.PresentTo)
d.Set("purge_attempts", disk.PurgeAttempts)
d.Set("purge_time", disk.PurgeTime)
d.Set("reality_device_number", disk.RealityDeviceNumber)
d.Set("reference_id", disk.ReferenceId)
d.Set("res_id", disk.ResID)
d.Set("res_name", disk.ResName)
d.Set("role", disk.Role)
d.Set("sep_id", disk.SepID)
d.Set("sep_type", disk.SepType)
d.Set("shareable", disk.Shareable)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
d.Set("status", disk.Status)
d.Set("tech_status", disk.TechStatus)
d.Set("type", disk.Type)
d.Set("vmid", disk.VMID)
flattenDisk(d, disk)
return nil
}
@@ -126,11 +79,11 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of disk partitions",
},
// "boot_partition": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Number of disk partitions",
// },
"computes": {
Type: schema.TypeList,
Computed: true,
@@ -172,21 +125,21 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Name of the device",
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
Description: "Disk path",
},
// "disk_path": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Disk path",
// },
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk ID on the storage side",
},
// "guid": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Disk ID on the storage side",
// },
"image_id": {
Type: schema.TypeInt,
Computed: true,
@@ -273,21 +226,21 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
Description: "Disk IQN",
},
"login": {
Type: schema.TypeString,
Computed: true,
Description: "Login to access the disk",
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
Description: "Milestones",
},
// "iqn": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Disk IQN",
// },
// "login": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Login to access the disk",
// },
// "milestones": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Milestones",
// },
"disk_name": {
Type: schema.TypeString,
Computed: true,
@@ -308,11 +261,11 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "ID of the parent disk",
},
"passwd": {
Type: schema.TypeString,
Computed: true,
Description: "Password to access the disk",
},
// "passwd": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Password to access the disk",
// },
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
@@ -330,26 +283,26 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of deletion attempts",
},
// "purge_attempts": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Number of deletion attempts",
// },
"purge_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of the last deletion attempt",
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
Description: "Reality device number",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the reference to the disk",
},
// "reality_device_number": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Reality device number",
// },
// "reference_id": {
// Type: schema.TypeString,
// Computed: true,
// Description: "ID of the reference to the disk",
// },
"res_id": {
Type: schema.TypeString,
Computed: true,

View File

@@ -42,7 +42,7 @@ import (
)
func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskList, err := utilityDiskListCheckPresence(ctx, d, m, disksListAPI)
diskList, err := utilityDiskListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
@@ -95,11 +95,11 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of disk partitions",
},
// "boot_partition": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Number of disk partitions",
// },
"computes": {
Type: schema.TypeList,
Computed: true,
@@ -141,21 +141,21 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Name of the device",
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
Description: "Disk path",
},
// "disk_path": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Disk path",
// },
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk ID on the storage side",
},
// "guid": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Disk ID on the storage side",
// },
"disk_id": {
Type: schema.TypeInt,
Computed: true,
@@ -247,16 +247,16 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
Description: "Disk IQN",
},
"login": {
Type: schema.TypeString,
Computed: true,
Description: "Login to access the disk",
},
// "iqn": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Disk IQN",
// },
// "login": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Login to access the disk",
// },
"machine_id": {
Type: schema.TypeInt,
Computed: true,
@@ -267,11 +267,11 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Machine name",
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
Description: "Milestones",
},
// "milestones": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Milestones",
// },
"disk_name": {
Type: schema.TypeString,
Computed: true,
@@ -292,11 +292,11 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "ID of the parent disk",
},
"passwd": {
Type: schema.TypeString,
Computed: true,
Description: "Password to access the disk",
},
// "passwd": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Password to access the disk",
// },
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
@@ -314,26 +314,26 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of deletion attempts",
},
// "purge_attempts": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Number of deletion attempts",
// },
"purge_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of the last deletion attempt",
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
Description: "Reality device number",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the reference to the disk",
},
// "reality_device_number": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Reality device number",
// },
// "reference_id": {
// Type: schema.TypeString,
// Computed: true,
// Description: "ID of the reference to the disk",
// },
"res_id": {
Type: schema.TypeString,
Computed: true,

View File

@@ -41,24 +41,26 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func flattenDiskListTypesDetailed(tld TypesDetailedList) []map[string]interface{} {
func flattenDiskListTypesDetailed(tld []interface{}) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, typeListDetailed := range tld {
toMap := typeListDetailed.(map[string]interface{})
temp := map[string]interface{}{
"pools": flattenListTypesDetailedPools(typeListDetailed.Pools),
"sep_id": typeListDetailed.SepID,
"pools": flattenListTypesDetailedPools(toMap["pools"].([]interface{})),
"sep_id": toMap["sepId"].(float64),
}
res = append(res, temp)
}
return res
}
func flattenListTypesDetailedPools(pools PoolList) []interface{} {
func flattenListTypesDetailedPools(pools []interface{}) []interface{} {
res := make([]interface{}, 0)
for _, pool := range pools {
toMap := pool.(map[string]interface{})
temp := map[string]interface{}{
"name": pool.Name,
"types": pool.Types,
"name": toMap["name"].(string),
"types": toMap["types"].([]interface{}),
}
res = append(res, temp)
}
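The flatteners above now take the detailed types list as untyped []interface{} (as returned by the SDK's ListTypes call when Detailed is true), so each element is type-asserted; JSON numbers arrive as float64. A hedged sketch of the input shape they assume, with hypothetical values:

// Sketch only: one detailed type entry in the shape the flatteners expect.
tld := []interface{}{
	map[string]interface{}{
		"sepId": float64(1), // hypothetical SEP ID; float64 because it is decoded from JSON
		"pools": []interface{}{
			map[string]interface{}{
				"name":  "data01",                // hypothetical pool name
				"types": []interface{}{"D", "B"}, // hypothetical disk types
			},
		},
	},
}
flattened := flattenDiskListTypesDetailed(tld) // []map[string]interface{} ready for d.Set
_ = flattened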

View File

@@ -34,92 +34,13 @@ package disks
import (
"context"
"encoding/json"
"net/url"
"strconv"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
log "github.com/sirupsen/logrus"
)
func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (UnattachedList, error) {
unattachedList := UnattachedList{}
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
if accountId, ok := d.GetOk("accountId"); ok {
urlValues.Add("accountId", strconv.Itoa(accountId.(int)))
}
log.Debugf("utilityDiskListUnattachedCheckPresence: load disk Unattached list")
unattachedListRaw, err := c.DecortAPICall(ctx, "POST", disksListUnattachedAPI, urlValues)
if err != nil {
return nil, err
}
err = json.Unmarshal([]byte(unattachedListRaw), &unattachedList)
if err != nil {
return nil, err
}
return unattachedList, nil
}
func flattenDiskListUnattached(ul UnattachedList) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, unattachedDisk := range ul {
unattachedDiskAcl, _ := json.Marshal(unattachedDisk.Acl)
tmp := map[string]interface{}{
"_ckey": unattachedDisk.Ckey,
"_meta": flattens.FlattenMeta(unattachedDisk.Meta),
"account_id": unattachedDisk.AccountID,
"account_name": unattachedDisk.AccountName,
"acl": string(unattachedDiskAcl),
"boot_partition": unattachedDisk.BootPartition,
"created_time": unattachedDisk.CreatedTime,
"deleted_time": unattachedDisk.DeletedTime,
"desc": unattachedDisk.Desc,
"destruction_time": unattachedDisk.DestructionTime,
"disk_path": unattachedDisk.DiskPath,
"gid": unattachedDisk.GridID,
"guid": unattachedDisk.GUID,
"disk_id": unattachedDisk.ID,
"image_id": unattachedDisk.ImageID,
"images": unattachedDisk.Images,
"iotune": flattenIOTune(unattachedDisk.IOTune),
"iqn": unattachedDisk.IQN,
"login": unattachedDisk.Login,
"milestones": unattachedDisk.Milestones,
"disk_name": unattachedDisk.Name,
"order": unattachedDisk.Order,
"params": unattachedDisk.Params,
"parent_id": unattachedDisk.ParentID,
"passwd": unattachedDisk.Passwd,
"pci_slot": unattachedDisk.PciSlot,
"pool": unattachedDisk.Pool,
"purge_attempts": unattachedDisk.PurgeAttempts,
"purge_time": unattachedDisk.PurgeTime,
"reality_device_number": unattachedDisk.RealityDeviceNumber,
"reference_id": unattachedDisk.ReferenceID,
"res_id": unattachedDisk.ResID,
"res_name": unattachedDisk.ResName,
"role": unattachedDisk.Role,
"sep_id": unattachedDisk.SepID,
"size_max": unattachedDisk.SizeMax,
"size_used": unattachedDisk.SizeUsed,
"snapshots": flattenDiskSnapshotList(unattachedDisk.Snapshots),
"status": unattachedDisk.Status,
"tech_status": unattachedDisk.TechStatus,
"type": unattachedDisk.Type,
"vmid": unattachedDisk.VMID,
}
res = append(res, tmp)
}
return res
}
func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m)
if err != nil {

View File

@@ -38,21 +38,19 @@ import (
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
if err != nil {
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := Snapshot{}
var snapshot disks.ItemSnapshot
label := d.Get("label").(string)
for _, sn := range snapshots {
for _, sn := range disk.Snapshots {
if label == sn.Label {
snapshot = sn
break
@@ -64,11 +62,9 @@ func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m i
id := uuid.New()
d.SetId(id.String())
d.Set("timestamp", snapshot.TimeStamp)
d.Set("guid", snapshot.Guid)
d.Set("res_id", snapshot.ResId)
d.Set("snap_set_guid", snapshot.SnapSetGuid)
d.Set("snap_set_time", snapshot.SnapSetTime)
flattenDiskSnapshot(d, snapshot)
return nil
}

View File

@@ -43,11 +43,8 @@ import (
func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()

View File

@@ -42,7 +42,7 @@ import (
)
func dataSourceDiskListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskList, err := utilityDiskListCheckPresence(ctx, d, m, disksListDeletedAPI)
diskList, err := utilityDiskListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}

View File

@@ -4,43 +4,106 @@ import (
"encoding/json"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenDisk(d *schema.ResourceData, disk Disk) {
diskAcl, _ := json.Marshal(disk.Acl)
func flattenDiskSnapshot(d *schema.ResourceData, snapshot disks.ItemSnapshot) {
d.Set("timestamp", snapshot.TimeStamp)
d.Set("guid", snapshot.GUID)
d.Set("res_id", snapshot.ResID)
d.Set("snap_set_guid", snapshot.SnapSetGUID)
d.Set("snap_set_time", snapshot.SnapSetTime)
}
func flattenDiskListUnattached(ul disks.ListDisksUnattached) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, unattachedDisk := range ul {
unattachedDiskAcl, _ := json.Marshal(unattachedDisk.ACL)
tmp := map[string]interface{}{
"_ckey": unattachedDisk.CKey,
"_meta": flattens.FlattenMeta(unattachedDisk.Meta),
"account_id": unattachedDisk.AccountID,
"account_name": unattachedDisk.AccountName,
"acl": string(unattachedDiskAcl),
"boot_partition": unattachedDisk.BootPartition,
"created_time": unattachedDisk.CreatedTime,
"deleted_time": unattachedDisk.DeletedTime,
"desc": unattachedDisk.Description,
"destruction_time": unattachedDisk.DestructionTime,
"disk_path": unattachedDisk.DiskPath,
"gid": unattachedDisk.GID,
"guid": unattachedDisk.GUID,
"disk_id": unattachedDisk.ID,
"image_id": unattachedDisk.ImageID,
"images": unattachedDisk.Images,
"iotune": flattenIOTune(unattachedDisk.IOTune),
"iqn": unattachedDisk.IQN,
"login": unattachedDisk.Login,
"milestones": unattachedDisk.Milestones,
"disk_name": unattachedDisk.Name,
"order": unattachedDisk.Order,
"params": unattachedDisk.Params,
"parent_id": unattachedDisk.ParentID,
"passwd": unattachedDisk.Password,
"pci_slot": unattachedDisk.PCISlot,
"pool": unattachedDisk.Pool,
"purge_attempts": unattachedDisk.PurgeAttempts,
"purge_time": unattachedDisk.PurgeTime,
"reality_device_number": unattachedDisk.RealityDeviceNumber,
"reference_id": unattachedDisk.ReferenceID,
"res_id": unattachedDisk.ResID,
"res_name": unattachedDisk.ResName,
"role": unattachedDisk.Role,
"sep_id": unattachedDisk.SEPID,
"size_max": unattachedDisk.SizeMax,
"size_used": unattachedDisk.SizeUsed,
"snapshots": flattenDiskSnapshotList(unattachedDisk.Snapshots),
"status": unattachedDisk.Status,
"tech_status": unattachedDisk.TechStatus,
"type": unattachedDisk.Type,
"vmid": unattachedDisk.VMID,
}
res = append(res, tmp)
}
return res
}
func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
diskAcl, _ := json.Marshal(disk.ACL)
d.Set("account_id", disk.AccountID)
d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl))
d.Set("boot_partition", disk.BootPartition)
// d.Set("boot_partition", disk.BootPartition)
d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("created_time", disk.CreatedTime)
d.Set("deleted_time", disk.DeletedTime)
d.Set("desc", disk.Desc)
d.Set("desc", disk.Description)
d.Set("destruction_time", disk.DestructionTime)
d.Set("devicename", disk.DeviceName)
d.Set("disk_path", disk.DiskPath)
d.Set("gid", disk.GridID)
d.Set("guid", disk.GUID)
// d.Set("disk_path", disk.DiskPath)
d.Set("gid", disk.GID)
// d.Set("guid", disk.GUID)
d.Set("disk_id", disk.ID)
d.Set("image_id", disk.ImageID)
d.Set("images", disk.Images)
d.Set("iotune", flattenIOTune(disk.IOTune))
d.Set("iqn", disk.IQN)
d.Set("login", disk.Login)
d.Set("milestones", disk.Milestones)
// d.Set("iqn", disk.IQN)
// d.Set("login", disk.Login)
// d.Set("milestones", disk.Milestones)
d.Set("disk_name", disk.Name)
d.Set("order", disk.Order)
d.Set("params", disk.Params)
d.Set("parent_id", disk.ParentId)
d.Set("passwd", disk.Passwd)
d.Set("pci_slot", disk.PciSlot)
d.Set("parent_id", disk.ParentID)
// d.Set("passwd", disk.Passwd)
d.Set("pci_slot", disk.PCISlot)
d.Set("pool", disk.Pool)
d.Set("present_to", disk.PresentTo)
d.Set("purge_attempts", disk.PurgeAttempts)
// d.Set("purge_attempts", disk.PurgeAttempts)
d.Set("purge_time", disk.PurgeTime)
d.Set("reality_device_number", disk.RealityDeviceNumber)
d.Set("reference_id", disk.ReferenceId)
// d.Set("reality_device_number", disk.RealityDeviceNumber)
// d.Set("reference_id", disk.ReferenceID)
d.Set("res_id", disk.ResID)
d.Set("res_name", disk.ResName)
d.Set("role", disk.Role)
@@ -56,14 +119,14 @@ func flattenDisk(d *schema.ResourceData, disk Disk) {
d.Set("vmid", disk.VMID)
}
func flattenDiskSnapshotList(sl SnapshotList) []interface{} {
func flattenDiskSnapshotList(sl disks.ListSnapshots) []interface{} {
res := make([]interface{}, 0)
for _, snapshot := range sl {
temp := map[string]interface{}{
"guid": snapshot.Guid,
"guid": snapshot.GUID,
"label": snapshot.Label,
"res_id": snapshot.ResId,
"snap_set_guid": snapshot.SnapSetGuid,
"res_id": snapshot.ResID,
"snap_set_guid": snapshot.SnapSetGUID,
"snap_set_time": snapshot.SnapSetTime,
"timestamp": snapshot.TimeStamp,
}
@@ -73,80 +136,70 @@ func flattenDiskSnapshotList(sl SnapshotList) []interface{} {
return res
}
func flattenDiskList(dl DisksList) []map[string]interface{} {
func flattenDiskList(dl disks.ListDisks) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, disk := range dl {
diskAcl, _ := json.Marshal(disk.Acl)
diskAcl, _ := json.Marshal(disk.ACL)
temp := map[string]interface{}{
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
"computes": flattenDiskComputes(disk.Computes),
"boot_partition": disk.BootPartition,
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"desc": disk.Desc,
"destruction_time": disk.DestructionTime,
"devicename": disk.DeviceName,
"disk_path": disk.DiskPath,
"gid": disk.GridID,
"guid": disk.GUID,
"disk_id": disk.ID,
"image_id": disk.ImageID,
"images": disk.Images,
"iotune": flattenIOTune(disk.IOTune),
"iqn": disk.IQN,
"login": disk.Login,
"machine_id": disk.MachineId,
"machine_name": disk.MachineName,
"milestones": disk.Milestones,
"disk_name": disk.Name,
"order": disk.Order,
"params": disk.Params,
"parent_id": disk.ParentId,
"passwd": disk.Passwd,
"pci_slot": disk.PciSlot,
"pool": disk.Pool,
"present_to": disk.PresentTo,
"purge_attempts": disk.PurgeAttempts,
"purge_time": disk.PurgeTime,
"reality_device_number": disk.RealityDeviceNumber,
"reference_id": disk.ReferenceId,
"res_id": disk.ResID,
"res_name": disk.ResName,
"role": disk.Role,
"sep_id": disk.SepID,
"sep_type": disk.SepType,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"snapshots": flattenDiskSnapshotList(disk.Snapshots),
"status": disk.Status,
"tech_status": disk.TechStatus,
"type": disk.Type,
"vmid": disk.VMID,
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
"computes": flattenDiskComputes(disk.Computes),
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"desc": disk.Description,
"destruction_time": disk.DestructionTime,
"devicename": disk.DeviceName,
"gid": disk.GID,
"disk_id": disk.ID,
"image_id": disk.ImageID,
"images": disk.Images,
"iotune": flattenIOTune(disk.IOTune),
"machine_id": disk.MachineID,
"machine_name": disk.MachineName,
"disk_name": disk.Name,
"order": disk.Order,
"params": disk.Params,
"parent_id": disk.ParentID,
"pci_slot": disk.PCISlot,
"pool": disk.Pool,
"present_to": disk.PresentTo,
"purge_time": disk.PurgeTime,
"res_id": disk.ResID,
"res_name": disk.ResName,
"role": disk.Role,
"sep_id": disk.SepID,
"sep_type": disk.SepType,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"snapshots": flattenDiskSnapshotList(disk.Snapshots),
"status": disk.Status,
"tech_status": disk.TechStatus,
"type": disk.Type,
"vmid": disk.VMID,
}
res = append(res, temp)
}
return res
}
func flattenIOTune(iot IOTune) []map[string]interface{} {
func flattenIOTune(iot disks.IOTune) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"read_bytes_sec": iot.ReadBytesSec,
"read_bytes_sec_max": iot.ReadBytesSecMax,
"read_iops_sec": iot.ReadIopsSec,
"read_iops_sec_max": iot.ReadIopsSecMax,
"size_iops_sec": iot.SizeIopsSec,
"read_iops_sec": iot.ReadIOPSSec,
"read_iops_sec_max": iot.ReadIOPSSecMax,
"size_iops_sec": iot.SizeIOPSSec,
"total_bytes_sec": iot.TotalBytesSec,
"total_bytes_sec_max": iot.TotalBytesSecMax,
"total_iops_sec": iot.TotalIopsSec,
"total_iops_sec_max": iot.TotalIopsSecMax,
"total_iops_sec": iot.TotalIOPSSec,
"total_iops_sec_max": iot.TotalIOPSSecMax,
"write_bytes_sec": iot.WriteBytesSec,
"write_bytes_sec_max": iot.WriteBytesSecMax,
"write_iops_sec": iot.WriteIopsSec,
"write_iops_sec_max": iot.WriteIopsSecMax,
"write_iops_sec": iot.WriteIOPSSec,
"write_iops_sec_max": iot.WriteIOPSSecMax,
}
res = append(res, temp)
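These flatten helpers centralize what used to be long runs of d.Set calls in each read function. A simplified sketch of how a read path consumes them, mirroring dataSourceDiskRead above (error handling trimmed):

// Sketch only: read functions delegate schema population to the flatteners.
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
	return diag.FromErr(err)
}
d.SetId(uuid.New().String())
flattenDisk(d, disk) // one call replaces the former block of ~40 d.Set(...) lines
return nil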

View File

@@ -1,181 +0,0 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package disks
type Disk struct {
Acl map[string]interface{} `json:"acl"`
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
BootPartition int `json:"bootPartition"`
Computes map[string]string `json:"computes"`
CreatedTime uint64 `json:"creationTime"`
DeletedTime uint64 `json:"deletionTime"`
DeviceName string `json:"devicename"`
Desc string `json:"desc"`
DestructionTime uint64 `json:"destructionTime"`
DiskPath string `json:"diskPath"`
GridID int `json:"gid"`
GUID int `json:"guid"`
ID uint `json:"id"`
ImageID int `json:"imageId"`
Images []int `json:"images"`
IOTune IOTune `json:"iotune"`
IQN string `json:"iqn"`
Login string `json:"login"`
Name string `json:"name"`
MachineId int `json:"machineId"`
MachineName string `json:"machineName"`
Milestones uint64 `json:"milestones"`
Order int `json:"order"`
Params string `json:"params"`
Passwd string `json:"passwd"`
ParentId int `json:"parentId"`
PciSlot int `json:"pciSlot"`
Pool string `json:"pool"`
PresentTo []int `json:"presentTo"`
PurgeTime uint64 `json:"purgeTime"`
PurgeAttempts uint64 `json:"purgeAttempts"`
RealityDeviceNumber int `json:"realityDeviceNumber"`
ReferenceId string `json:"referenceId"`
ResID string `json:"resId"`
ResName string `json:"resName"`
Role string `json:"role"`
SepType string `json:"sepType"`
Shareable bool `json:"shareable"`
SepID int `json:"sepId"` // NOTE: absent from compute/get output
SizeMax int `json:"sizeMax"`
SizeUsed float64 `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
Snapshots []Snapshot `json:"snapshots"`
Status string `json:"status"`
TechStatus string `json:"techStatus"`
Type string `json:"type"`
UpdateBy uint64 `json:"updateBy"`
VMID int `json:"vmid"`
}
type Snapshot struct {
Guid string `json:"guid"`
Label string `json:"label"`
ResId string `json:"resId"`
SnapSetGuid string `json:"snapSetGuid"`
SnapSetTime uint64 `json:"snapSetTime"`
TimeStamp uint64 `json:"timestamp"`
}
type SnapshotList []Snapshot
type DisksList []Disk
type IOTune struct {
ReadBytesSec int `json:"read_bytes_sec"`
ReadBytesSecMax int `json:"read_bytes_sec_max"`
ReadIopsSec int `json:"read_iops_sec"`
ReadIopsSecMax int `json:"read_iops_sec_max"`
SizeIopsSec int `json:"size_iops_sec"`
TotalBytesSec int `json:"total_bytes_sec"`
TotalBytesSecMax int `json:"total_bytes_sec_max"`
TotalIopsSec int `json:"total_iops_sec"`
TotalIopsSecMax int `json:"total_iops_sec_max"`
WriteBytesSec int `json:"write_bytes_sec"`
WriteBytesSecMax int `json:"write_bytes_sec_max"`
WriteIopsSec int `json:"write_iops_sec"`
WriteIopsSecMax int `json:"write_iops_sec_max"`
}
type Pool struct {
Name string `json:"name"`
Types []string `json:"types"`
}
type PoolList []Pool
type TypeDetailed struct {
Pools []Pool `json:"pools"`
SepID int `json:"sepId"`
}
type TypesDetailedList []TypeDetailed
type TypesList []string
type Unattached struct {
Ckey string `json:"_ckey"`
Meta []interface{} `json:"_meta"`
AccountID int `json:"accountId"`
AccountName string `json:"accountName"`
Acl map[string]interface{} `json:"acl"`
BootPartition int `json:"bootPartition"`
CreatedTime int `json:"createdTime"`
DeletedTime int `json:"deletedTime"`
Desc string `json:"desc"`
DestructionTime int `json:"destructionTime"`
DiskPath string `json:"diskPath"`
GridID int `json:"gid"`
GUID int `json:"guid"`
ID int `json:"id"`
ImageID int `json:"imageId"`
Images []int `json:"images"`
IOTune IOTune `json:"iotune"`
IQN string `json:"iqn"`
Login string `json:"login"`
Milestones int `json:"milestones"`
Name string `json:"name"`
Order int `json:"order"`
Params string `json:"params"`
ParentID int `json:"parentId"`
Passwd string `json:"passwd"`
PciSlot int `json:"pciSlot"`
Pool string `json:"pool"`
PurgeAttempts int `json:"purgeAttempts"`
PurgeTime int `json:"purgeTime"`
RealityDeviceNumber int `json:"realityDeviceNumber"`
ReferenceID string `json:"referenceId"`
ResID string `json:"resId"`
ResName string `json:"resName"`
Role string `json:"role"`
SepID int `json:"sepId"`
SizeMax int `json:"sizeMax"`
SizeUsed float64 `json:"sizeUsed"`
Snapshots []Snapshot `json:"snapshots"`
Status string `json:"status"`
TechStatus string `json:"techStatus"`
Type string `json:"type"`
VMID int `json:"vmid"`
}
type UnattachedList []Unattached
type Pair struct {
intPort int
extPortStart int
}

View File

@@ -2,76 +2,35 @@ package disks
import (
"context"
"encoding/json"
"net/url"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/locations"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
accountId := uint64(d.Get("account_id").(int))
req := account.ListRequest{}
urlValues := &url.Values{}
accountList := []struct {
ID int `json:"id"`
}{}
accountListAPI := "/restmachine/cloudapi/account/list"
accountListRaw, err := c.DecortAPICall(ctx, "POST", accountListAPI, urlValues)
accountList, err := c.CloudAPI().Account().List(ctx, req)
if err != nil {
return false, err
}
err = json.Unmarshal([]byte(accountListRaw), &accountList)
if err != nil {
return false, err
}
haveAccount := false
myAccount := d.Get("account_id").(int)
for _, account := range accountList {
if account.ID == myAccount {
haveAccount = true
break
}
}
return haveAccount, nil
return len(accountList.FilterByID(accountId)) != 0, nil
}
func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
gid := uint64(d.Get("gid").(int))
req := locations.ListRequest{}
urlValues := &url.Values{}
locationList := []struct {
GID int `json:"gid"`
}{}
locationsListAPI := "/restmachine/cloudapi/locations/list"
locationListRaw, err := c.DecortAPICall(ctx, "POST", locationsListAPI, urlValues)
locationList, err := c.CloudAPI().Locations().List(ctx, req)
if err != nil {
return false, err
}
err = json.Unmarshal([]byte(locationListRaw), &locationList)
if err != nil {
return false, err
}
haveGID := false
gid := d.Get("gid").(int)
for _, location := range locationList {
if location.GID == gid {
haveGID = true
break
}
}
return haveGID, nil
return len(locationList.FilterByGID(gid)) != 0, nil
}
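Both existence checks now lean on the SDK list responses and their FilterBy* helpers instead of a raw list call plus json.Unmarshal. A sketch of how a call site uses the guard, following the pattern in resourceDiskCreate below (the error message wording here is illustrative):

haveAccount, err := existAccountID(ctx, d, m)
if err != nil {
	return diag.FromErr(err)
}
if !haveAccount {
	return diag.Errorf("account %d is not allowed or does not exist", d.Get("account_id").(int))
}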

View File

@@ -35,11 +35,11 @@ package disks
import (
"context"
"fmt"
"net/url"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
@@ -52,7 +52,7 @@ import (
func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
req := disks.CreateRequest{}
haveAccount, err := existAccountID(ctx, d, m)
if err != nil {
@@ -70,83 +70,78 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
return diag.Errorf("resourceDiskCreate: can't create Disk because GID %d is not allowed or does not exist", d.Get("gid").(int))
}
urlValues.Add("accountId", strconv.Itoa(d.Get("account_id").(int)))
urlValues.Add("gid", strconv.Itoa(d.Get("gid").(int)))
urlValues.Add("name", d.Get("disk_name").(string))
urlValues.Add("size", strconv.Itoa(d.Get("size_max").(int)))
req.AccountID = uint64(d.Get("account_id").(int))
req.GID = uint64(d.Get("gid").(int))
req.Name = d.Get("disk_name").(string)
req.Size = uint64(d.Get("size_max").(int))
if typeRaw, ok := d.GetOk("type"); ok {
urlValues.Add("type", strings.ToUpper(typeRaw.(string)))
req.Type = strings.ToUpper(typeRaw.(string))
} else {
urlValues.Add("type", "D")
req.Type = "D"
}
if sepId, ok := d.GetOk("sep_id"); ok {
urlValues.Add("sep_id", strconv.Itoa(sepId.(int)))
req.SEPID = uint64(sepId.(int))
}
if poolName, ok := d.GetOk("pool"); ok {
urlValues.Add("pool", poolName.(string))
req.Pool = poolName.(string)
}
argVal, argSet := d.GetOk("desc")
if argSet {
urlValues.Add("description", argVal.(string))
req.Description = argVal.(string)
}
diskId, err := c.DecortAPICall(ctx, "POST", disksCreateAPI, urlValues)
diskId, err := c.CloudAPI().Disks().Create(ctx, req)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
d.SetId(diskId)
d.SetId(strconv.FormatUint(diskId, 10))
if iotuneRaw, ok := d.GetOk("iotune"); ok {
iot := iotuneRaw.([]interface{})[0]
iotune := iot.(map[string]interface{})
urlValues.Add("diskId", diskId)
urlValues.Add("iops", strconv.Itoa(iotune["total_iops_sec"].(int)))
urlValues.Add("read_bytes_sec", strconv.Itoa(iotune["read_bytes_sec"].(int)))
urlValues.Add("read_bytes_sec_max", strconv.Itoa(iotune["read_bytes_sec_max"].(int)))
urlValues.Add("read_iops_sec", strconv.Itoa(iotune["read_iops_sec"].(int)))
urlValues.Add("read_iops_sec_max", strconv.Itoa(iotune["read_iops_sec_max"].(int)))
urlValues.Add("size_iops_sec", strconv.Itoa(iotune["size_iops_sec"].(int)))
urlValues.Add("total_bytes_sec", strconv.Itoa(iotune["total_bytes_sec"].(int)))
urlValues.Add("total_bytes_sec_max", strconv.Itoa(iotune["total_bytes_sec_max"].(int)))
urlValues.Add("total_iops_sec_max", strconv.Itoa(iotune["total_iops_sec_max"].(int)))
urlValues.Add("write_bytes_sec", strconv.Itoa(iotune["write_bytes_sec"].(int)))
urlValues.Add("write_bytes_sec_max", strconv.Itoa(iotune["write_bytes_sec_max"].(int)))
urlValues.Add("write_iops_sec", strconv.Itoa(iotune["write_iops_sec"].(int)))
urlValues.Add("write_iops_sec_max", strconv.Itoa(iotune["write_iops_sec_max"].(int)))
req := disks.LimitIORequest{
DiskID: diskId,
IOPS: uint64(iotune["total_iops_sec"].(int)),
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
_, err := c.DecortAPICall(ctx, "POST", disksIOLimitAPI, urlValues)
_, err := c.CloudAPI().Disks().LimitIO(ctx, req)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
if shareable := d.Get("shareable"); shareable.(bool) == true {
urlValues.Add("diskId", diskId)
_, err := c.DecortAPICall(ctx, "POST", disksShareAPI, urlValues)
req := disks.ShareRequest{
DiskID: diskId,
}
_, err := c.CloudAPI().Disks().Share(ctx, req)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
dgn := resourceDiskRead(ctx, d, m)
if dgn != nil {
return dgn
}
return nil
return resourceDiskRead(ctx, d, m)
}
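Condensed, the new create flow above reduces to a typed request, a uint64 disk ID in response, and the decimal form of that ID as the Terraform resource ID; optional iotune and shareable settings become follow-up LimitIO and Share requests. A hedged sketch with illustrative values:

// Sketch only: core of the new create flow (validation and optional settings trimmed).
req := disks.CreateRequest{
	AccountID: 777,         // uint64(d.Get("account_id").(int)), illustrative value
	GID:       212,         // uint64(d.Get("gid").(int)), illustrative value
	Name:      "data-disk", // d.Get("disk_name").(string)
	Size:      20,          // uint64(d.Get("size_max").(int)), size in GB
	Type:      "D",         // default disk type when "type" is not set
}
diskID, err := c.CloudAPI().Disks().Create(ctx, req) // returns the new disk ID as uint64
if err != nil {
	return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(diskID, 10))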
func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
urlValues := &url.Values{}
c := m.(*controller.ControllerCfg)
warnings := dc.Warnings{}
@@ -161,13 +156,21 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
switch disk.Status {
case status.Destroyed, status.Purged:
d.Set("disk_id", 0)
d.SetId("")
return resourceDiskCreate(ctx, d, m)
case status.Deleted:
hasChangeState = true
urlValues.Add("diskId", d.Id())
urlValues.Add("reason", d.Get("reason").(string))
req := disks.RestoreRequest{
DiskID: disk.ID,
}
_, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues)
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudAPI().Disks().Restore(ctx, req)
if err != nil {
warnings.Add(err)
}
@@ -188,7 +191,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
}
}
flattenDisk(d, *disk)
flattenDisk(d, disk)
return warnings.Get()
}
@@ -196,7 +199,6 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
warnings := dc.Warnings{}
urlValues := &url.Values{}
haveAccount, err := existAccountID(ctx, d, m)
if err != nil {
@@ -224,13 +226,21 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
switch disk.Status {
case status.Destroyed, status.Purged:
d.Set("disk_id", 0)
d.SetId("")
return resourceDiskCreate(ctx, d, m)
case status.Deleted:
hasChangeState = true
urlValues.Add("diskId", d.Id())
urlValues.Add("reason", d.Get("reason").(string))
req := disks.RestoreRequest{
DiskID: disk.ID,
}
_, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues)
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudAPI().Disks().Restore(ctx, req)
if err != nil {
warnings.Add(err)
}
@@ -256,68 +266,75 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
if oldSize.(int) < newSize.(int) {
log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB",
d.Id(), oldSize.(int), newSize.(int))
urlValues.Add("diskId", d.Id())
urlValues.Add("size", strconv.Itoa(newSize.(int)))
_, err := c.DecortAPICall(ctx, "POST", disksResizeAPI, urlValues)
req := disks.ResizeRequest{
DiskID: disk.ID,
Size: uint64(newSize.(int)),
}
_, err := c.CloudAPI().Disks().Resize(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.Set("size_max", newSize)
} else if oldSize.(int) > newSize.(int) {
return diag.FromErr(fmt.Errorf("resourceDiskUpdate: Disk ID %s - reducing disk size is not allowed", d.Id()))
}
urlValues = &url.Values{}
}
if d.HasChange("disk_name") {
urlValues.Add("diskId", d.Id())
urlValues.Add("name", d.Get("disk_name").(string))
_, err := c.DecortAPICall(ctx, "POST", disksRenameAPI, urlValues)
req := disks.RenameRequest{
DiskID: disk.ID,
Name: d.Get("disk_name").(string),
}
_, err := c.CloudAPI().Disks().Rename(ctx, req)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
if d.HasChange("iotune") {
iot := d.Get("iotune").([]interface{})[0]
iotune := iot.(map[string]interface{})
urlValues.Add("diskId", d.Id())
urlValues.Add("iops", strconv.Itoa(iotune["total_iops_sec"].(int)))
urlValues.Add("read_bytes_sec", strconv.Itoa(iotune["read_bytes_sec"].(int)))
urlValues.Add("read_bytes_sec_max", strconv.Itoa(iotune["read_bytes_sec_max"].(int)))
urlValues.Add("read_iops_sec", strconv.Itoa(iotune["read_iops_sec"].(int)))
urlValues.Add("read_iops_sec_max", strconv.Itoa(iotune["read_iops_sec_max"].(int)))
urlValues.Add("size_iops_sec", strconv.Itoa(iotune["size_iops_sec"].(int)))
urlValues.Add("total_bytes_sec", strconv.Itoa(iotune["total_bytes_sec"].(int)))
urlValues.Add("total_bytes_sec_max", strconv.Itoa(iotune["total_bytes_sec_max"].(int)))
urlValues.Add("total_iops_sec_max", strconv.Itoa(iotune["total_iops_sec_max"].(int)))
urlValues.Add("write_bytes_sec", strconv.Itoa(iotune["write_bytes_sec"].(int)))
urlValues.Add("write_bytes_sec_max", strconv.Itoa(iotune["write_bytes_sec_max"].(int)))
urlValues.Add("write_iops_sec", strconv.Itoa(iotune["write_iops_sec"].(int)))
urlValues.Add("write_iops_sec_max", strconv.Itoa(iotune["write_iops_sec_max"].(int)))
req := disks.LimitIORequest{
DiskID: disk.ID,
IOPS: uint64(iotune["total_iops_sec"].(int)),
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
_, err := c.DecortAPICall(ctx, "POST", disksIOLimitAPI, urlValues)
_, err := c.CloudAPI().Disks().LimitIO(ctx, req)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
if d.HasChange("shareable") {
oldShare, newShare := d.GetChange("shareable")
urlValues = &url.Values{}
urlValues.Add("diskId", d.Id())
if oldShare.(bool) == false && newShare.(bool) == true {
_, err := c.DecortAPICall(ctx, "POST", disksShareAPI, urlValues)
req := disks.ShareRequest{DiskID: disk.ID}
_, err := c.CloudAPI().Disks().Share(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
if oldShare.(bool) == true && newShare.(bool) == false {
_, err := c.DecortAPICall(ctx, "POST", disksUnshareAPI, urlValues)
req := disks.UnshareRequest{DiskID: disk.ID}
_, err := c.CloudAPI().Disks().Unshare(ctx, req)
if err != nil {
return diag.FromErr(err)
}
@@ -336,17 +353,22 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
if disk.Status == status.Destroyed || disk.Status == status.Purged {
return nil
}
params := &url.Values{}
params.Add("diskId", d.Id())
params.Add("detach", strconv.FormatBool(d.Get("detach").(bool)))
params.Add("permanently", strconv.FormatBool(d.Get("permanently").(bool)))
params.Add("reason", d.Get("reason").(string))
req := disks.DeleteRequest{
DiskID: disk.ID,
Detach: d.Get("detach").(bool),
Permanently: d.Get("permanently").(bool),
Reason: d.Get("reason").(string),
}
c := m.(*controller.ControllerCfg)
_, err = c.DecortAPICall(ctx, "POST", disksDeleteAPI, params)
_, err = c.CloudAPI().Disks().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
@@ -531,11 +553,11 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of disk partitions",
},
// "boot_partition": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Number of disk partitions",
// },
"computes": {
Type: schema.TypeList,
Computed: true,
@@ -572,16 +594,16 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "Name of the device",
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
Description: "Disk path",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk ID on the storage side",
},
// "disk_path": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Disk path",
// },
// "guid": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Disk ID on the storage side",
// },
"image_id": {
Type: schema.TypeInt,
Computed: true,
@@ -595,22 +617,21 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
},
Description: "IDs of images using the disk",
},
"iqn": {
Type: schema.TypeString,
Computed: true,
Description: "Disk IQN",
},
"login": {
Type: schema.TypeString,
Computed: true,
Description: "Login to access the disk",
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
Description: "Milestones",
},
// "iqn": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Disk IQN",
// },
// "login": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Login to access the disk",
// },
// "milestones": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Milestones",
// },
"order": {
Type: schema.TypeInt,
Computed: true,
@@ -626,36 +647,36 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Computed: true,
Description: "ID of the parent disk",
},
"passwd": {
Type: schema.TypeString,
Computed: true,
Description: "Password to access the disk",
},
// "passwd": {
// Type: schema.TypeString,
// Computed: true,
// Description: "Password to access the disk",
// },
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the pci slot to which the disk is connected",
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of deletion attempts",
},
// "purge_attempts": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Number of deletion attempts",
// },
"purge_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of the last deletion attempt",
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
Description: "Reality device number",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the reference to the disk",
},
// "reality_device_number": {
// Type: schema.TypeInt,
// Computed: true,
// Description: "Reality device number",
// },
// "reference_id": {
// Type: schema.TypeString,
// Computed: true,
// Description: "ID of the reference to the disk",
// },
"res_id": {
Type: schema.TypeString,
Computed: true,

View File

@@ -34,18 +34,16 @@ package disks
import (
"context"
"net/url"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
urlValues := &url.Values{}
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
@@ -54,7 +52,7 @@ func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m i
}
snapshots := disk.Snapshots
snapshot := Snapshot{}
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
@@ -65,30 +63,31 @@ func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m i
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
if rollback := d.Get("rollback").(bool); rollback {
urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int)))
urlValues.Add("label", label)
urlValues.Add("timestamp", strconv.Itoa(d.Get("timestamp").(int)))
req := disks.SnapshotRollbackRequest{
DiskID: disk.ID,
Label: label,
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskCreate: Snapshot rollback with label", label)
_, err := c.DecortAPICall(ctx, "POST", disksSnapshotRollbackAPI, urlValues)
_, err := c.CloudAPI().Disks().SnapshotRollback(ctx, req)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
return resourceDiskSnapshotRead(ctx, d, m)
}
func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
if err != nil {
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := Snapshot{}
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
@@ -100,27 +99,20 @@ func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m int
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
d.SetId(d.Get("label").(string))
d.Set("timestamp", snapshot.TimeStamp)
d.Set("guid", snapshot.Guid)
d.Set("res_id", snapshot.ResId)
d.Set("snap_set_guid", snapshot.SnapSetGuid)
d.Set("snap_set_time", snapshot.SnapSetTime)
flattenDiskSnapshot(d, snapshot)
return nil
}
func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
urlValues := &url.Values{}
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil {
if err != nil {
return diag.FromErr(err)
}
return nil
if err != nil {
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := Snapshot{}
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
@@ -128,19 +120,23 @@ func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m i
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
if d.HasChange("rollback") && d.Get("rollback").(bool) == true {
urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int)))
urlValues.Add("label", label)
urlValues.Add("timestamp", strconv.Itoa(d.Get("timestamp").(int)))
req := disks.SnapshotRollbackRequest{
DiskID: disk.ID,
Label: label,
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskUpdtae: Snapshot rollback with label", label)
_, err := c.DecortAPICall(ctx, "POST", disksSnapshotRollbackAPI, urlValues)
_, err := c.CloudAPI().Disks().SnapshotRollback(ctx, req)
if err != nil {
return diag.FromErr(err)
}
urlValues = &url.Values{}
}
return resourceDiskSnapshotRead(ctx, d, m)
@@ -150,22 +146,23 @@ func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m i
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
if disk == nil { // if the disk does not exist, we can't call snapshotDelete
if err != nil {
d.SetId("")
if err != nil {
return diag.FromErr(err)
}
return nil
return diag.FromErr(err)
}
params := &url.Values{}
params.Add("diskId", strconv.Itoa(d.Get("disk_id").(int)))
params.Add("label", d.Get("label").(string))
req := disks.SnapshotDeleteRequest{
DiskID: disk.ID,
Label: d.Get("label").(string),
}
_, err = c.DecortAPICall(ctx, "POST", disksSnapshotDeleteAPI, params)
_, err = c.CloudAPI().Disks().SnapshotDelete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}

View File

@@ -34,39 +34,33 @@ package disks
import (
"context"
"encoding/json"
"net/url"
"strconv"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityDiskCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*Disk, error) {
func utilityDiskCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.RecordDisk, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
disk := &Disk{}
req := disks.GetRequest{}
if d.Get("disk_id") != nil {
if d.Get("disk_id").(int) == 0 {
urlValues.Add("diskId", d.Id())
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.DiskID = id
} else {
urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int)))
req.DiskID = uint64(d.Get("disk_id").(int))
}
} else {
urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int)))
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.DiskID = id
}
log.Debugf("utilityDiskCheckPresence: load disk")
diskRaw, err := c.DecortAPICall(ctx, "POST", disksGetAPI, urlValues)
if err != nil {
return nil, err
}
err = json.Unmarshal([]byte(diskRaw), disk)
disk, err := c.CloudAPI().Disks().Get(ctx, req)
if err != nil {
return nil, err
}

View File

@@ -34,42 +34,34 @@ package disks
import (
"context"
"encoding/json"
"net/url"
"strconv"
"strings"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}, api string) (DisksList, error) {
diskList := DisksList{}
func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (disks.ListDisks, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
req := disks.ListRequest{}
if page, ok := d.GetOk("page"); ok {
urlValues.Add("page", strconv.Itoa(page.(int)))
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
urlValues.Add("size", strconv.Itoa(size.(int)))
req.Size = uint64(size.(int))
}
if diskType, ok := d.GetOk("type"); ok {
urlValues.Add("type", strings.ToUpper(diskType.(string)))
req.Type = strings.ToUpper(diskType.(string))
}
if accountId, ok := d.GetOk("accountId"); ok {
urlValues.Add("accountId", strconv.Itoa(accountId.(int)))
req.AccountID = uint64(accountId.(int))
}
log.Debugf("utilityDiskListCheckPresence: load disk list")
diskListRaw, err := c.DecortAPICall(ctx, "POST", api, urlValues)
if err != nil {
return nil, err
}
err = json.Unmarshal([]byte(diskListRaw), &diskList)
diskList, err := c.CloudAPI().Disks().List(ctx, req)
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,26 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (disks.ListDisksUnattached, error) {
c := m.(*controller.ControllerCfg)
req := disks.ListUnattachedRequest{}
if accountId, ok := d.GetOk("accountId"); ok {
req.AccountID = uint64(accountId.(int))
}
log.Debugf("utilityDiskListUnattachedCheckPresence: load disk Unattached list")
unattachedList, err := c.CloudAPI().Disks().ListUnattached(ctx, req)
if err != nil {
return nil, err
}
return unattachedList, nil
}
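This helper pairs with flattenDiskListUnattached from the flattens file; a sketch of the read path that ties them together, mirroring dataSourceDiskListUnattachedRead above (the "items" schema key is an assumption, not shown in this diff):

// Sketch only: expected use of the unattached-disk list helper in a data source read.
diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m)
if err != nil {
	return diag.FromErr(err)
}
d.SetId(uuid.New().String())
d.Set("items", flattenDiskListUnattached(diskListUnattached)) // "items" is an assumed field name
return nil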

View File

@@ -34,26 +34,21 @@ package disks
import (
"context"
"encoding/json"
"net/url"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (TypesDetailedList, error) {
listTypesDetailed := TypesDetailedList{}
func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) ([]interface{}, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("detailed", "true")
log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed")
diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListTypesAPI, urlValues)
if err != nil {
return nil, err
req := disks.ListTypesRequest{
Detailed: true,
}
err = json.Unmarshal([]byte(diskListRaw), &listTypesDetailed)
log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed")
listTypesDetailed, err := c.CloudAPI().Disks().ListTypes(ctx, req)
if err != nil {
return nil, err
}

View File

@@ -34,26 +34,21 @@ package disks
import (
"context"
"encoding/json"
"net/url"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (TypesList, error) {
typesList := TypesList{}
func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) ([]interface{}, error) {
c := m.(*controller.ControllerCfg)
urlValues := &url.Values{}
urlValues.Add("detailed", "false")
log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed")
diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListTypesAPI, urlValues)
if err != nil {
return nil, err
req := disks.ListTypesRequest{
Detailed: false,
}
err = json.Unmarshal([]byte(diskListRaw), &typesList)
log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed")
typesList, err := c.CloudAPI().Disks().ListTypes(ctx, req)
if err != nil {
return nil, err
}