This commit is contained in:
2026-04-23 12:46:24 +03:00
parent 156b0a2d0c
commit af79f6ab3e
16 changed files with 624 additions and 53 deletions

View File

@@ -335,6 +335,7 @@ func flattenComputeDisksDemo(disksList compute.ListComputeDisks, disksBlocks, ex
"permanently": pernamentlyValue,
"cache": disk.Cache,
"blk_discard": disk.BLKDiscard,
"iotune": flattenIotune(disk.IOTune),
}
res = append(res, temp)
indexDataDisks++

View File

@@ -390,6 +390,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
}
}
if _, ok := d.GetOk("disks"); ok {
if err := utilityComputeCreateIOTune(ctx, d, m); err != nil {
warnings.Add(err)
}
}
if !cleanup {
if enabled, ok := d.GetOk("enabled"); ok {
@@ -1124,6 +1130,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
resizedDisks := make([]interface{}, 0)
renamedDisks := make([]interface{}, 0)
changeStoragePolicyDisks := make([]interface{}, 0)
iotuneUpdatedDisks := make([]interface{}, 0)
oldDisks, newDisks := d.GetChange("disks")
oldConv := oldDisks.([]interface{})
@@ -1164,6 +1171,9 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
if isChangeStoragePolicy(oldConv, el) {
changeStoragePolicyDisks = append(changeStoragePolicyDisks, el)
}
if isChangeIOTuneDisk(oldConv, el) {
iotuneUpdatedDisks = append(iotuneUpdatedDisks, el)
}
}
if len(deletedDisks) > 0 {
@@ -1216,10 +1226,33 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
if diskConv["image_id"].(int) != 0 {
req.ImageID = uint64(diskConv["image_id"].(int))
}
_, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
diskID, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
if err != nil {
return diag.FromErr(err)
}
if iotuneRaw, ok := diskConv["iotune"].([]interface{}); ok && len(iotuneRaw) > 0 {
iotuneMap := iotuneRaw[0].(map[string]interface{})
limitReq := disks.LimitIORequest{
DiskID: diskID,
ReadBytesSec: uint64(iotuneMap["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotuneMap["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotuneMap["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotuneMap["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotuneMap["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotuneMap["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotuneMap["total_bytes_sec_max"].(int)),
TotalIOPSSec: uint64(iotuneMap["total_iops_sec"].(int)),
TotalIOPSSecMax: uint64(iotuneMap["total_iops_sec_max"].(int)),
WriteBytesSec: uint64(iotuneMap["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotuneMap["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotuneMap["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotuneMap["write_iops_sec_max"].(int)),
}
_, err = c.CloudAPI().Disks().LimitIO(ctx, limitReq)
if err != nil {
return diag.FromErr(err)
}
}
}
}
@@ -1273,6 +1306,44 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
}
}
}
if len(iotuneUpdatedDisks) > 0 {
for _, disk := range iotuneUpdatedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
}
diskID := uint64(diskConv["disk_id"].(int))
if diskID == 0 {
continue
}
iotuneRaw, ok := diskConv["iotune"].([]interface{})
if !ok || len(iotuneRaw) == 0 {
continue
}
iotuneMap := iotuneRaw[0].(map[string]interface{})
req := disks.LimitIORequest{
DiskID: diskID,
ReadBytesSec: uint64(iotuneMap["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotuneMap["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotuneMap["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotuneMap["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotuneMap["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotuneMap["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotuneMap["total_bytes_sec_max"].(int)),
TotalIOPSSec: uint64(iotuneMap["total_iops_sec"].(int)),
TotalIOPSSecMax: uint64(iotuneMap["total_iops_sec_max"].(int)),
WriteBytesSec: uint64(iotuneMap["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotuneMap["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotuneMap["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotuneMap["write_iops_sec_max"].(int)),
}
_, err := c.CloudAPI().Disks().LimitIO(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
}
if d.HasChange("affinity_label") {
@@ -1862,6 +1933,40 @@ func isChangeStoragePolicy(els []interface{}, el interface{}) bool {
return false
}
// isChangeIOTuneDisk reports whether the disk element el (a "disks" schema
// map) carries iotune settings that differ from its old counterpart in els,
// matched by "disk_id". Returns true when the iotune block was added or
// removed on one side, or when any of the 13 limit fields differ. Returns
// false when both sides are unset, when values are equal, or when no old
// element shares the disk_id (a brand-new disk, handled by the add path).
func isChangeIOTuneDisk(els []interface{}, el interface{}) bool {
	elConv := el.(map[string]interface{})
	for _, elOld := range els {
		elOldConv := elOld.(map[string]interface{})
		if elOldConv["disk_id"].(int) != elConv["disk_id"].(int) {
			continue
		}
		oldIOTune := elOldConv["iotune"].([]interface{})
		newIOTune := elConv["iotune"].([]interface{})
		// Neither side configures limits: nothing to change.
		if len(oldIOTune) == 0 && len(newIOTune) == 0 {
			return false
		}
		// The block appeared or disappeared on one side: that is a change.
		if len(oldIOTune) == 0 || len(newIOTune) == 0 {
			return true
		}
		oldMap := oldIOTune[0].(map[string]interface{})
		newMap := newIOTune[0].(map[string]interface{})
		// Data-driven comparison replaces 13 copy-pasted field checks and
		// keeps the key list in one place.
		for _, field := range []string{
			"read_bytes_sec", "read_bytes_sec_max",
			"read_iops_sec", "read_iops_sec_max",
			"size_iops_sec",
			"total_bytes_sec", "total_bytes_sec_max",
			"total_iops_sec", "total_iops_sec_max",
			"write_bytes_sec", "write_bytes_sec_max",
			"write_iops_sec", "write_iops_sec_max",
		} {
			if oldMap[field].(int) != newMap[field].(int) {
				return true
			}
		}
		return false
	}
	// No old disk with this disk_id: not an iotune update.
	return false
}
func isContainsDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
@@ -1967,6 +2072,81 @@ func disksSubresourceSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Disk deletion status",
},
"iotune": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
},
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -43,6 +43,7 @@ import (
"github.com/hashicorp/go-cty/cty"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -628,3 +629,84 @@ func enabledNetwork(rawNetworkConfig cty.Value, netID uint64, netType string) bo
return false
}
// getComputeDiskIDsAPI returns the IDs (as interface{} values wrapping
// uint64) of the compute's data disks, ascending by ID, with the boot disk
// and any extra_disks filtered out. An empty slice is returned when the
// "disks" configuration has no blocks.
// NOTE: sorts disksList in place, so the caller's slice order changes.
func getComputeDiskIDsAPI(disksList compute.ListComputeDisks, disksBlocks, extraDisks []interface{}, bootDiskId uint64) []interface{} {
	ids := make([]interface{}, 0)
	if len(disksBlocks) == 0 {
		return ids
	}
	sort.Slice(disksList, func(a, b int) bool { return disksList[a].ID < disksList[b].ID })
	for _, d := range disksList {
		if d.ID == bootDiskId {
			continue // boot disk is managed separately
		}
		if findInExtraDisks(uint(d.ID), extraDisks) {
			continue // attached via extra_disks, not via "disks" blocks
		}
		ids = append(ids, d.ID)
	}
	return ids
}
// utilityComputeCreateIOTune applies per-disk iotune (I/O limit) settings
// from the "disks" configuration blocks right after compute creation, via
// the CloudAPI Disks().LimitIO endpoint. Disks without an iotune block are
// skipped, and the function is a cheap no-op (no presence lookup) when no
// disk configures limits. Returns the first API error encountered.
//
// NOTE(review): iotune blocks are paired with disks positionally — iotuneArr
// follows the "disks" config-block order, while getComputeDiskIDsAPI returns
// IDs sorted ascending with boot/extra disks excluded. If those two orders
// ever diverge, limits could land on the wrong disk — TODO confirm the
// ordering invariant holds for all configurations.
func utilityComputeCreateIOTune(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	diskList := d.Get("disks").([]interface{})

	// Collect every disk's iotune sub-block in config order; remember
	// whether any disk actually sets limits.
	iotuneArr := make([]interface{}, 0, len(diskList))
	hasAny := false
	for _, elem := range diskList {
		diskVal := elem.(map[string]interface{})
		iotune := diskVal["iotune"].([]interface{})
		iotuneArr = append(iotuneArr, iotune)
		if len(iotune) > 0 {
			hasAny = true
		}
	}
	if !hasAny {
		return nil
	}

	computeRec, err := utilityComputeCheckPresence(ctx, d, m)
	if err != nil {
		return err
	}
	bootDisk := findBootDisk(computeRec.Disks)
	computeDisksIDs := getComputeDiskIDsAPI(computeRec.Disks, diskList, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID)
	for i, diskID := range computeDisksIDs {
		if i >= len(iotuneArr) {
			continue
		}
		iotune, ok := iotuneArr[i].([]interface{})
		if !ok || len(iotune) == 0 {
			continue
		}
		req := makeLimitIOReq(diskID.(uint64), iotune[0].(map[string]interface{}))
		if _, err := c.CloudAPI().Disks().LimitIO(ctx, req); err != nil {
			return err
		}
	}
	return nil
}

// makeLimitIOReq maps one iotune schema map onto a disks.LimitIORequest,
// keeping the 14-field conversion in a single place.
func makeLimitIOReq(diskID uint64, m map[string]interface{}) disks.LimitIORequest {
	return disks.LimitIORequest{
		DiskID:            diskID,
		ReadBytesSec:      uint64(m["read_bytes_sec"].(int)),
		ReadBytesSecMax:   uint64(m["read_bytes_sec_max"].(int)),
		ReadIOPSSec:       uint64(m["read_iops_sec"].(int)),
		ReadIOPSSecMax:    uint64(m["read_iops_sec_max"].(int)),
		SizeIOPSSec:       uint64(m["size_iops_sec"].(int)),
		TotalBytesSec:     uint64(m["total_bytes_sec"].(int)),
		TotalBytesSecMax:  uint64(m["total_bytes_sec_max"].(int)),
		TotalIOPSSec:      uint64(m["total_iops_sec"].(int)),
		TotalIOPSSecMax:   uint64(m["total_iops_sec_max"].(int)),
		WriteBytesSec:     uint64(m["write_bytes_sec"].(int)),
		WriteBytesSecMax:  uint64(m["write_bytes_sec_max"].(int)),
		WriteIOPSSec:      uint64(m["write_iops_sec"].(int)),
		WriteIOPSSecMax:   uint64(m["write_iops_sec_max"].(int)),
	}
}

View File

@@ -289,6 +289,7 @@ func flattenComputeDisks(disksList compute.ListDisks, disksBlocks, extraDisks []
"update_time": disk.UpdatedTime,
"cache": disk.Cache,
"blk_discard": disk.BLKDiscard,
"iotune": flattenIOTune(disk.IOTune),
}
res = append(res, temp)
indexDataDisks++

View File

@@ -677,6 +677,9 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if err != nil {
warnings.Add(err)
}
if err := utilityComputeCreateIOTune(ctx, d, m); err != nil {
warnings.Add(err)
}
}
if readOnly, ok := d.GetOk("read_only"); ok {

View File

@@ -3531,22 +3531,22 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
DiffSuppressFunc: networkSubresIPAddreDiffSupperss,
Description: "unique_identifier of LogicalPort on SDN side",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "network enable flag",
},
"net_mask": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Subnet mask, used only for DPDK and VFNIC network types",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "network enable flag",
},
"net_mask": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Subnet mask, used only for DPDK and VFNIC network types",
},
},
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
},
"libvirt_settings": {
Type: schema.TypeSet,
@@ -3793,6 +3793,81 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Disk deletion status",
},
"iotune": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
},
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -262,6 +262,7 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
changeStoragePolicyDisks := make([]interface{}, 0)
cacheUpdatedDisks := make([]interface{}, 0)
blkDiscardUpdatedDisks := make([]interface{}, 0)
iotuneUpdatedDisks := make([]interface{}, 0)
migratedDisks := make([]interface{}, 0)
presentNewDisks := make([]interface{}, 0)
presentOldDisks := make([]interface{}, 0)
@@ -320,6 +321,10 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
if isChangeBLKDiscardDisk(oldConv, el) {
blkDiscardUpdatedDisks = append(blkDiscardUpdatedDisks, el)
}
if isChangeIOTuneDisk(oldConv, el) {
iotuneUpdatedDisks = append(iotuneUpdatedDisks, el)
}
}
if len(deletedDisks) > 0 {
@@ -393,9 +398,30 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
}
}
}
if err != nil {
return err
if iotuneRaw, ok := diskConv["iotune"].([]interface{}); ok && len(iotuneRaw) > 0 {
iotuneMap := iotuneRaw[0].(map[string]interface{})
limitReq := disks.LimitIORequest{
DiskID: diskID,
ReadBytesSec: uint64(iotuneMap["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotuneMap["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotuneMap["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotuneMap["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotuneMap["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotuneMap["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotuneMap["total_bytes_sec_max"].(int)),
TotalIOPSSec: uint64(iotuneMap["total_iops_sec"].(int)),
TotalIOPSSecMax: uint64(iotuneMap["total_iops_sec_max"].(int)),
WriteBytesSec: uint64(iotuneMap["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotuneMap["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotuneMap["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotuneMap["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, limitReq)
if err != nil {
return err
}
}
}
}
@@ -495,6 +521,44 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
}
}
if len(iotuneUpdatedDisks) > 0 {
for _, disk := range iotuneUpdatedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
}
diskID := uint64(diskConv["disk_id"].(int))
if diskID == 0 {
continue
}
iotuneRaw, ok := diskConv["iotune"].([]interface{})
if !ok || len(iotuneRaw) == 0 {
continue
}
iotuneMap := iotuneRaw[0].(map[string]interface{})
req := disks.LimitIORequest{
DiskID: diskID,
ReadBytesSec: uint64(iotuneMap["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotuneMap["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotuneMap["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotuneMap["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotuneMap["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotuneMap["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotuneMap["total_bytes_sec_max"].(int)),
TotalIOPSSec: uint64(iotuneMap["total_iops_sec"].(int)),
TotalIOPSSecMax: uint64(iotuneMap["total_iops_sec_max"].(int)),
WriteBytesSec: uint64(iotuneMap["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotuneMap["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotuneMap["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotuneMap["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
if err != nil {
return err
}
}
}
if len(migratedDisks) > 0 {
if err := utilityComputeMigrateDisks(ctx, d, m, migratedDisks, oldConv); err != nil {
return err
@@ -2077,6 +2141,100 @@ func isChangeBLKDiscardDisk(els []interface{}, el interface{}) bool {
return false
}
// isChangeIOTuneDisk reports whether disk element el (a "disks" schema map)
// has iotune settings differing from its old counterpart in els, matched by
// "disk_id". An iotune block added or removed on one side counts as a
// change; with both sides present, any differing limit field counts. When
// no old element shares the disk_id the disk is new and false is returned
// (the add path handles it).
func isChangeIOTuneDisk(els []interface{}, el interface{}) bool {
	newConv := el.(map[string]interface{})
	newID := newConv["disk_id"].(int)
	// Single source of truth for the compared limit keys — replaces the
	// previous 13 duplicated comparisons.
	iotuneKeys := [...]string{
		"read_bytes_sec", "read_bytes_sec_max",
		"read_iops_sec", "read_iops_sec_max",
		"size_iops_sec",
		"total_bytes_sec", "total_bytes_sec_max",
		"total_iops_sec", "total_iops_sec_max",
		"write_bytes_sec", "write_bytes_sec_max",
		"write_iops_sec", "write_iops_sec_max",
	}
	for _, elOld := range els {
		oldConv := elOld.(map[string]interface{})
		if oldConv["disk_id"].(int) != newID {
			continue
		}
		oldIOTune := oldConv["iotune"].([]interface{})
		newIOTune := newConv["iotune"].([]interface{})
		switch {
		case len(oldIOTune) == 0 && len(newIOTune) == 0:
			// Neither side sets limits.
			return false
		case len(oldIOTune) == 0 || len(newIOTune) == 0:
			// Block appeared or disappeared.
			return true
		}
		oldMap := oldIOTune[0].(map[string]interface{})
		newMap := newIOTune[0].(map[string]interface{})
		for _, k := range iotuneKeys {
			if oldMap[k].(int) != newMap[k].(int) {
				return true
			}
		}
		return false
	}
	// New disk without an old counterpart: handled by the disk-add path.
	return false
}
// utilityComputeCreateIOTune applies per-disk iotune (I/O limit) settings
// from the "disks" configuration blocks right after compute creation, via
// the CloudBroker Disks().LimitIO endpoint. Disks without an iotune block
// are skipped, and the function is a cheap no-op (no presence lookup) when
// no disk configures limits. Returns the first API error encountered.
//
// NOTE(review): pairing is positional — iotuneArr keeps the "disks" config
// order while getComputeDiskIDs returns IDs sorted ascending with the boot
// and extra disks excluded. If those orders ever diverge, limits could be
// applied to the wrong disk — TODO confirm the ordering invariant.
func utilityComputeCreateIOTune(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	diskList := d.Get("disks").([]interface{})

	// Collect every disk's iotune sub-block in config order; remember
	// whether any disk actually sets limits.
	iotuneArr := make([]interface{}, 0, len(diskList))
	hasAny := false
	for _, elem := range diskList {
		diskVal := elem.(map[string]interface{})
		iotune := diskVal["iotune"].([]interface{})
		iotuneArr = append(iotuneArr, iotune)
		if len(iotune) > 0 {
			hasAny = true
		}
	}
	if !hasAny {
		return nil
	}

	computeRec, err := utilityComputeCheckPresence(ctx, d, m)
	if err != nil {
		return err
	}
	bootDisk := findBootDisk(computeRec.Disks)
	computeDisksIDs := getComputeDiskIDs(computeRec.Disks, diskList, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID)
	for i, diskID := range computeDisksIDs {
		if i >= len(iotuneArr) {
			continue
		}
		iotune, ok := iotuneArr[i].([]interface{})
		if !ok || len(iotune) == 0 {
			continue
		}
		req := buildLimitIOReq(diskID.(uint64), iotune[0].(map[string]interface{}))
		if _, err := c.CloudBroker().Disks().LimitIO(ctx, req); err != nil {
			return err
		}
	}
	return nil
}

// buildLimitIOReq maps one iotune schema map onto a disks.LimitIORequest,
// centralizing the 14-field conversion.
func buildLimitIOReq(diskID uint64, m map[string]interface{}) disks.LimitIORequest {
	return disks.LimitIORequest{
		DiskID:            diskID,
		ReadBytesSec:      uint64(m["read_bytes_sec"].(int)),
		ReadBytesSecMax:   uint64(m["read_bytes_sec_max"].(int)),
		ReadIOPSSec:       uint64(m["read_iops_sec"].(int)),
		ReadIOPSSecMax:    uint64(m["read_iops_sec_max"].(int)),
		SizeIOPSSec:       uint64(m["size_iops_sec"].(int)),
		TotalBytesSec:     uint64(m["total_bytes_sec"].(int)),
		TotalBytesSecMax:  uint64(m["total_bytes_sec_max"].(int)),
		TotalIOPSSec:      uint64(m["total_iops_sec"].(int)),
		TotalIOPSSecMax:   uint64(m["total_iops_sec_max"].(int)),
		WriteBytesSec:     uint64(m["write_bytes_sec"].(int)),
		WriteBytesSecMax:  uint64(m["write_bytes_sec_max"].(int)),
		WriteIOPSSec:      uint64(m["write_iops_sec"].(int)),
		WriteIOPSSecMax:   uint64(m["write_iops_sec_max"].(int)),
	}
}
func isChangeStoragePolicy(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})