This commit is contained in:
2026-04-23 12:29:28 +03:00
parent 754b4e1143
commit 6b82f23445
15 changed files with 637 additions and 9 deletions

View File

@@ -289,6 +289,7 @@ func flattenComputeDisks(disksList compute.ListDisks, disksBlocks, extraDisks []
"delete_by": disk.DeletedBy,
"delete_time": disk.DeletedTime,
"update_time": disk.UpdatedTime,
"iotune": flattenIOTune(disk.IOTune),
}
res = append(res, temp)
indexDataDisks++

View File

@@ -656,6 +656,9 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
if err != nil {
warnings.Add(err)
}
if err := utilityComputeCreateIOTune(ctx, d, m); err != nil {
warnings.Add(err)
}
}
}

View File

@@ -3764,6 +3764,81 @@ func resourceComputeSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "Disk deletion status",
},
"iotune": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
},
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,

View File

@@ -235,6 +235,7 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
resizedDisks := make([]interface{}, 0)
renamedDisks := make([]interface{}, 0)
changeStoragePolicyDisks := make([]interface{}, 0)
iotuneUpdatedDisks := make([]interface{}, 0)
presentNewDisks := make([]interface{}, 0)
presentOldDisks := make([]interface{}, 0)
@@ -283,6 +284,9 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
if isChangeStoragePolicy(oldConv, el) {
changeStoragePolicyDisks = append(changeStoragePolicyDisks, el)
}
if isChangeIOTuneDisk(oldConv, el) {
iotuneUpdatedDisks = append(iotuneUpdatedDisks, el)
}
}
if len(deletedDisks) > 0 {
@@ -353,9 +357,33 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
}
}
}
if err != nil {
return err
if iotuneRaw, ok := diskConv["iotune"].([]interface{}); ok && len(iotuneRaw) > 0 {
if diskConv["disk_type"].(string) == "B" {
continue
}
iotuneMap := iotuneRaw[0].(map[string]interface{})
limitReq := disks.LimitIORequest{
DiskID: diskID,
ReadBytesSec: uint64(iotuneMap["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotuneMap["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotuneMap["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotuneMap["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotuneMap["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotuneMap["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotuneMap["total_bytes_sec_max"].(int)),
TotalIOPSSec: uint64(iotuneMap["total_iops_sec"].(int)),
TotalIOPSSecMax: uint64(iotuneMap["total_iops_sec_max"].(int)),
WriteBytesSec: uint64(iotuneMap["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotuneMap["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotuneMap["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotuneMap["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, limitReq)
if err != nil {
return err
}
}
}
}
@@ -409,6 +437,44 @@ func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m in
}
}
if len(iotuneUpdatedDisks) > 0 {
for _, disk := range iotuneUpdatedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
}
diskID := uint64(diskConv["disk_id"].(int))
if diskID == 0 {
continue
}
iotuneRaw, ok := diskConv["iotune"].([]interface{})
if !ok || len(iotuneRaw) == 0 {
continue
}
iotuneMap := iotuneRaw[0].(map[string]interface{})
req := disks.LimitIORequest{
DiskID: diskID,
ReadBytesSec: uint64(iotuneMap["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotuneMap["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotuneMap["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotuneMap["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotuneMap["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotuneMap["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotuneMap["total_bytes_sec_max"].(int)),
TotalIOPSSec: uint64(iotuneMap["total_iops_sec"].(int)),
TotalIOPSSecMax: uint64(iotuneMap["total_iops_sec_max"].(int)),
WriteBytesSec: uint64(iotuneMap["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotuneMap["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotuneMap["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotuneMap["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
if err != nil {
return err
}
}
}
for i := range presentNewDisks {
newDisk := presentNewDisks[i].(map[string]interface{})
oldDisk := presentOldDisks[i].(map[string]interface{})
@@ -1928,6 +1994,100 @@ func isContainsDisk(els []interface{}, el interface{}) bool {
return false
}
// isChangeIOTuneDisk reports whether the disk el carries iotune settings
// that differ from the old disk entry with the same disk_id in els.
// A disk with no matching old entry (a newly attached disk) yields false —
// new disks are handled by the attach path, not the iotune-update path.
//
// Compared to the hand-written 13-way comparison, the field names live in a
// single slice and all type assertions use the comma-ok form, so a missing
// or nil "iotune"/"disk_id" key is treated as empty instead of panicking.
func isChangeIOTuneDisk(els []interface{}, el interface{}) bool {
	// Keys of the iotune block; must match the schema field names.
	iotuneKeys := []string{
		"read_bytes_sec", "read_bytes_sec_max",
		"read_iops_sec", "read_iops_sec_max",
		"size_iops_sec",
		"total_bytes_sec", "total_bytes_sec_max",
		"total_iops_sec", "total_iops_sec_max",
		"write_bytes_sec", "write_bytes_sec_max",
		"write_iops_sec", "write_iops_sec_max",
	}

	elConv, ok := el.(map[string]interface{})
	if !ok {
		return false
	}
	newID, ok := elConv["disk_id"].(int)
	if !ok {
		return false
	}

	for _, elOld := range els {
		elOldConv, ok := elOld.(map[string]interface{})
		if !ok {
			continue
		}
		oldID, ok := elOldConv["disk_id"].(int)
		if !ok || oldID != newID {
			continue
		}

		// Missing or mistyped iotune blocks are treated as empty lists.
		oldIOTune, _ := elOldConv["iotune"].([]interface{})
		newIOTune, _ := elConv["iotune"].([]interface{})

		if len(oldIOTune) == 0 && len(newIOTune) == 0 {
			return false
		}
		// Block added on one side only: that is a change.
		if len(oldIOTune) == 0 || len(newIOTune) == 0 {
			return true
		}

		oldMap, okOld := oldIOTune[0].(map[string]interface{})
		newMap, okNew := newIOTune[0].(map[string]interface{})
		if !okOld || !okNew {
			return false
		}
		for _, key := range iotuneKeys {
			// Values are ints from the schema; interface equality compares
			// them correctly and cannot panic on an absent key.
			if oldMap[key] != newMap[key] {
				return true
			}
		}
		return false
	}
	return false
}
// utilityComputeCreateIOTune applies per-disk IO tuning limits (the "iotune"
// schema block) right after compute creation by issuing one LimitIO request
// per disk that carries a non-empty block. It is a no-op when no planned disk
// has an iotune block, avoiding the extra read-back of the compute.
//
// NOTE(review): disk IDs and planned disks are paired by slice index, which
// assumes getComputeDiskIDs returns IDs in the same order as diskList —
// confirm against that helper's implementation.
func utilityComputeCreateIOTune(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)

	diskList := d.Get("disks").([]interface{})

	// Collect each planned disk's iotune block, preserving positions so they
	// can be matched back to the created disk IDs by index.
	iotuneArr := make([]interface{}, 0, len(diskList))
	hasAny := false
	for _, elem := range diskList {
		diskVal := elem.(map[string]interface{})
		// comma-ok: a missing/mistyped iotune key becomes an empty block
		// instead of panicking mid-create.
		iotune, _ := diskVal["iotune"].([]interface{})
		iotuneArr = append(iotuneArr, iotune)
		if len(iotune) > 0 {
			hasAny = true
		}
	}
	if !hasAny {
		return nil
	}

	computeRec, err := utilityComputeCheckPresence(ctx, d, m)
	if err != nil {
		return err
	}

	bootDisk := findBootDisk(computeRec.Disks)
	computeDisksIDs := getComputeDiskIDs(computeRec.Disks, diskList, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID)

	for i, rawID := range computeDisksIDs {
		if i >= len(iotuneArr) {
			continue
		}
		iotune, ok := iotuneArr[i].([]interface{})
		if !ok || len(iotune) == 0 {
			continue
		}
		// Guard the assertion: skip an entry whose ID is not the expected
		// uint64 rather than panicking (the original asserted unchecked).
		diskID, ok := rawID.(uint64)
		if !ok {
			continue
		}
		iotuneMap := iotune[0].(map[string]interface{})
		req := disks.LimitIORequest{
			DiskID:           diskID,
			ReadBytesSec:     uint64(iotuneMap["read_bytes_sec"].(int)),
			ReadBytesSecMax:  uint64(iotuneMap["read_bytes_sec_max"].(int)),
			ReadIOPSSec:      uint64(iotuneMap["read_iops_sec"].(int)),
			ReadIOPSSecMax:   uint64(iotuneMap["read_iops_sec_max"].(int)),
			SizeIOPSSec:      uint64(iotuneMap["size_iops_sec"].(int)),
			TotalBytesSec:    uint64(iotuneMap["total_bytes_sec"].(int)),
			TotalBytesSecMax: uint64(iotuneMap["total_bytes_sec_max"].(int)),
			TotalIOPSSec:     uint64(iotuneMap["total_iops_sec"].(int)),
			TotalIOPSSecMax:  uint64(iotuneMap["total_iops_sec_max"].(int)),
			WriteBytesSec:    uint64(iotuneMap["write_bytes_sec"].(int)),
			WriteBytesSecMax: uint64(iotuneMap["write_bytes_sec_max"].(int)),
			WriteIOPSSec:     uint64(iotuneMap["write_iops_sec"].(int)),
			WriteIOPSSecMax:  uint64(iotuneMap["write_iops_sec_max"].(int)),
		}
		if _, err := c.CloudBroker().Disks().LimitIO(ctx, req); err != nil {
			return err
		}
	}
	return nil
}
// isChangeNodesDisk get slice of new disks values and current value disk,
// if need change nodes on disk returns true and new disk value, else return false and nil
func isChangeNodesDisk(els []interface{}, elOld interface{}) (bool, interface{}) {