Add files
internal/service/cloudbroker/disks/api.go (new file, 41 lines)
@@ -0,0 +1,41 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://github.com/rudecs/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/

package disks

const disksCreateAPI = "/restmachine/cloudbroker/disks/create"
const disksGetAPI = "/restmachine/cloudbroker/disks/get"
const disksListAPI = "/restmachine/cloudbroker/disks/list"
const disksResizeAPI = "/restmachine/cloudbroker/disks/resize2"
const disksRenameAPI = "/restmachine/cloudbroker/disks/rename"
const disksDeleteAPI = "/restmachine/cloudbroker/disks/delete"
const disksIOLimitAPI = "/restmachine/cloudbroker/disks/limitIO"
const disksRestoreAPI = "/restmachine/cloudbroker/disks/restore"
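Every endpoint constant above is consumed through the same calling convention: build a url.Values set and post it via the provider's controller client, as the resource and utility files later in this commit do. A minimal sketch of that pattern, with the imports ("context", "net/url", "strconv", internal/controller) and the disk ID assumed for illustration:

// getDiskRaw is an illustrative sketch, not part of this commit; it only shows how the
// endpoint constants above pair with the controller client used throughout the package.
func getDiskRaw(ctx context.Context, c *controller.ControllerCfg, diskID int) (string, error) {
    urlValues := &url.Values{}
    urlValues.Add("diskId", strconv.Itoa(diskID)) // hypothetical disk ID supplied by the caller
    // DecortAPICall returns the raw JSON payload, normally unmarshalled into the Disk model.
    return c.DecortAPICall(ctx, "POST", disksGetAPI, urlValues)
}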
internal/service/cloudbroker/disks/data_source_disk.go (new file, 389 lines)
@@ -0,0 +1,389 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://github.com/rudecs/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/

package disks

import (
    "context"
    "encoding/json"

    // "net/url"

    "github.com/google/uuid"
    "github.com/rudecs/terraform-provider-decort/internal/constants"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    disk, err := utilityDiskCheckPresence(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

    id := uuid.New()
    d.SetId(id.String())

    diskAcl, _ := json.Marshal(disk.Acl)

    d.Set("account_id", disk.AccountID)
    d.Set("account_name", disk.AccountName)
    d.Set("acl", string(diskAcl))
    d.Set("boot_partition", disk.BootPartition)
    d.Set("compute_id", disk.ComputeID)
    d.Set("compute_name", disk.ComputeName)
    d.Set("created_time", disk.CreatedTime)
    d.Set("deleted_time", disk.DeletedTime)
    d.Set("desc", disk.Desc)
    d.Set("destruction_time", disk.DestructionTime)
    d.Set("devicename", disk.DeviceName)
    d.Set("disk_path", disk.DiskPath)
    d.Set("gid", disk.GridID)
    d.Set("guid", disk.GUID)
    d.Set("disk_id", disk.ID)
    d.Set("image_id", disk.ImageID)
    d.Set("images", disk.Images)
    d.Set("iotune", flattenIOTune(disk.IOTune))
    d.Set("iqn", disk.IQN)
    d.Set("login", disk.Login)
    d.Set("milestones", disk.Milestones)
    d.Set("disk_name", disk.Name)
    d.Set("order", disk.Order)
    d.Set("params", disk.Params)
    d.Set("parent_id", disk.ParentId)
    d.Set("passwd", disk.Passwd)
    d.Set("pci_slot", disk.PciSlot)
    d.Set("pool", disk.Pool)
    d.Set("purge_attempts", disk.PurgeAttempts)
    d.Set("purge_time", disk.PurgeTime)
    d.Set("reality_device_number", disk.RealityDeviceNumber)
    d.Set("reference_id", disk.ReferenceId)
    d.Set("res_id", disk.ResID)
    d.Set("res_name", disk.ResName)
    d.Set("role", disk.Role)
    d.Set("sep_id", disk.SepID)
    d.Set("sep_type", disk.SepType)
    d.Set("size_max", disk.SizeMax)
    d.Set("size_used", disk.SizeUsed)
    d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots))
    d.Set("status", disk.Status)
    d.Set("tech_status", disk.TechStatus)
    d.Set("type", disk.Type)
    d.Set("vmid", disk.VMID)

    return nil
}

func dataSourceDiskSchemaMake() map[string]*schema.Schema {
    rets := map[string]*schema.Schema{
        "disk_id": {
            Type:     schema.TypeInt,
            Required: true,
        },
        "account_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "account_name": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "acl": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "boot_partition": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "compute_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "compute_name": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "created_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "deleted_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "desc": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "destruction_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "devicename": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "disk_path": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "gid": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "guid": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "image_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "images": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Schema{
                Type: schema.TypeString,
            },
        },
        "iotune": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
                Schema: map[string]*schema.Schema{
                    "read_bytes_sec": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "read_bytes_sec_max": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "read_iops_sec": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "read_iops_sec_max": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "size_iops_sec": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "total_bytes_sec": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "total_bytes_sec_max": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "total_iops_sec": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "total_iops_sec_max": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "write_bytes_sec": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "write_bytes_sec_max": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "write_iops_sec": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "write_iops_sec_max": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                },
            },
        },
        "iqn": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "login": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "milestones": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "disk_name": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "order": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "params": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "parent_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "passwd": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "pci_slot": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "pool": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "purge_attempts": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "purge_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "reality_device_number": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "reference_id": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "res_id": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "res_name": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "role": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "sep_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "sep_type": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "size_max": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "size_used": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "snapshots": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
                Schema: map[string]*schema.Schema{
                    "guid": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "label": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "res_id": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "snap_set_guid": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "snap_set_time": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "timestamp": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                },
            },
        },
        "status": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "tech_status": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "type": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "vmid": {
            Type:     schema.TypeInt,
            Computed: true,
        },
    }

    return rets
}

func DataSourceDisk() *schema.Resource {
    return &schema.Resource{
        SchemaVersion: 1,

        ReadContext: dataSourceDiskRead,

        Timeouts: &schema.ResourceTimeout{
            Read:    &constants.Timeout30s,
            Default: &constants.Timeout60s,
        },

        Schema: dataSourceDiskSchemaMake(),
    }
}
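The "iotune" and "snapshots" attributes above use the usual SDK v2 idiom of a TypeList with a nested Resource element to model structured objects; "iotune" in particular is written into state as a one-element list of maps by flattenIOTune (defined in data_source_disk_list.go below). A hedged sketch of reading that value back, assuming a populated *schema.ResourceData in this package:

// readIOTuneSample is an illustrative helper, not part of this commit: it only shows the
// one-element-list-of-maps encoding used for the "iotune" attribute.
func readIOTuneSample(d *schema.ResourceData) int {
    if raw, ok := d.GetOk("iotune"); ok {
        iot := raw.([]interface{})[0].(map[string]interface{})
        return iot["total_iops_sec"].(int) // value originally taken from disk.IOTune.TotalIopsSec
    }
    return 0
}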
internal/service/cloudbroker/disks/data_source_disk_list.go (new file, 473 lines)
@@ -0,0 +1,473 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://github.com/rudecs/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/

package disks

import (
    "context"
    "encoding/json"

    "github.com/google/uuid"
    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/rudecs/terraform-provider-decort/internal/constants"
)

func flattenIOTune(iot IOTune) []map[string]interface{} {
    res := make([]map[string]interface{}, 0)
    temp := map[string]interface{}{
        "read_bytes_sec":      iot.ReadBytesSec,
        "read_bytes_sec_max":  iot.ReadBytesSecMax,
        "read_iops_sec":       iot.ReadIopsSec,
        "read_iops_sec_max":   iot.ReadIopsSecMax,
        "size_iops_sec":       iot.SizeIopsSec,
        "total_bytes_sec":     iot.TotalBytesSec,
        "total_bytes_sec_max": iot.TotalBytesSecMax,
        "total_iops_sec":      iot.TotalIopsSec,
        "total_iops_sec_max":  iot.TotalIopsSecMax,
        "write_bytes_sec":     iot.WriteBytesSec,
        "write_bytes_sec_max": iot.WriteBytesSecMax,
        "write_iops_sec":      iot.WriteIopsSec,
        "write_iops_sec_max":  iot.WriteIopsSecMax,
    }

    res = append(res, temp)
    return res
}

func flattenDiskList(dl DisksList) []map[string]interface{} {
    res := make([]map[string]interface{}, 0)
    for _, disk := range dl {
        diskAcl, _ := json.Marshal(disk.Acl)
        temp := map[string]interface{}{
            "account_id":            disk.AccountID,
            "account_name":          disk.AccountName,
            "acl":                   string(diskAcl),
            "boot_partition":        disk.BootPartition,
            "compute_id":            disk.ComputeID,
            "compute_name":          disk.ComputeName,
            "created_time":          disk.CreatedTime,
            "deleted_time":          disk.DeletedTime,
            "desc":                  disk.Desc,
            "destruction_time":      disk.DestructionTime,
            "devicename":            disk.DeviceName,
            "disk_path":             disk.DiskPath,
            "gid":                   disk.GridID,
            "guid":                  disk.GUID,
            "disk_id":               disk.ID,
            "image_id":              disk.ImageID,
            "images":                disk.Images,
            "iotune":                flattenIOTune(disk.IOTune),
            "iqn":                   disk.IQN,
            "login":                 disk.Login,
            "machine_id":            disk.MachineId,
            "machine_name":          disk.MachineName,
            "milestones":            disk.Milestones,
            "disk_name":             disk.Name,
            "order":                 disk.Order,
            "params":                disk.Params,
            "parent_id":             disk.ParentId,
            "passwd":                disk.Passwd,
            "pci_slot":              disk.PciSlot,
            "pool":                  disk.Pool,
            "purge_attempts":        disk.PurgeAttempts,
            "purge_time":            disk.PurgeTime,
            "reality_device_number": disk.RealityDeviceNumber,
            "reference_id":          disk.ReferenceId,
            "res_id":                disk.ResID,
            "res_name":              disk.ResName,
            "role":                  disk.Role,
            "sep_id":                disk.SepID,
            "sep_type":              disk.SepType,
            "size_max":              disk.SizeMax,
            "size_used":             disk.SizeUsed,
            "snapshots":             flattendDiskSnapshotList(disk.Snapshots),
            "status":                disk.Status,
            "tech_status":           disk.TechStatus,
            "type":                  disk.Type,
            "vmid":                  disk.VMID,
        }
        res = append(res, temp)
    }
    return res
}

func flattendDiskSnapshotList(sl SnapshotList) []interface{} {
    res := make([]interface{}, 0)
    for _, snapshot := range sl {
        temp := map[string]interface{}{
            "guid":          snapshot.Guid,
            "label":         snapshot.Label,
            "res_id":        snapshot.ResId,
            "snap_set_guid": snapshot.SnapSetGuid,
            "snap_set_time": snapshot.SnapSetTime,
            "timestamp":     snapshot.TimeStamp,
        }
        res = append(res, temp)
    }

    return res
}

func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    diskList, err := utilityDiskListCheckPresence(ctx, d, m)
    if err != nil {
        return diag.FromErr(err)
    }

    id := uuid.New()
    d.SetId(id.String())
    d.Set("items", flattenDiskList(diskList))

    return nil
}

func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
    res := map[string]*schema.Schema{
        "account_id": {
            Type:        schema.TypeInt,
            Optional:    true,
            Description: "ID of the account the disks belong to",
        },
        "type": {
            Type:        schema.TypeString,
            Optional:    true,
            Description: "type of the disks",
        },
        "page": {
            Type:        schema.TypeInt,
            Optional:    true,
            Description: "Page number",
        },
        "size": {
            Type:        schema.TypeInt,
            Optional:    true,
            Description: "Page size",
        },
        "items": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
                Schema: map[string]*schema.Schema{
                    "account_id": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "account_name": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "acl": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "boot_partition": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "compute_id": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "compute_name": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "created_time": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "deleted_time": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "desc": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "destruction_time": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "devicename": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "disk_path": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "gid": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "guid": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "disk_id": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "image_id": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "images": {
                        Type:     schema.TypeList,
                        Computed: true,
                        Elem: &schema.Schema{
                            Type: schema.TypeString,
                        },
                    },
                    "iotune": {
                        Type:     schema.TypeList,
                        Computed: true,
                        Elem: &schema.Resource{
                            Schema: map[string]*schema.Schema{
                                "read_bytes_sec": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "read_bytes_sec_max": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "read_iops_sec": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "read_iops_sec_max": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "size_iops_sec": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "total_bytes_sec": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "total_bytes_sec_max": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "total_iops_sec": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "total_iops_sec_max": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "write_bytes_sec": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "write_bytes_sec_max": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "write_iops_sec": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "write_iops_sec_max": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                            },
                        },
                    },
                    "iqn": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "login": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "machine_id": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "machine_name": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "milestones": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "disk_name": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "order": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "params": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "parent_id": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "passwd": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "pci_slot": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "pool": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "purge_attempts": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "purge_time": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "reality_device_number": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "reference_id": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "res_id": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "res_name": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "role": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "sep_id": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "sep_type": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "size_max": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "size_used": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "snapshots": {
                        Type:     schema.TypeList,
                        Computed: true,
                        Elem: &schema.Resource{
                            Schema: map[string]*schema.Schema{
                                "guid": {
                                    Type:     schema.TypeString,
                                    Computed: true,
                                },
                                "label": {
                                    Type:     schema.TypeString,
                                    Computed: true,
                                },
                                "res_id": {
                                    Type:     schema.TypeString,
                                    Computed: true,
                                },
                                "snap_set_guid": {
                                    Type:     schema.TypeString,
                                    Computed: true,
                                },
                                "snap_set_time": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                                "timestamp": {
                                    Type:     schema.TypeInt,
                                    Computed: true,
                                },
                            },
                        },
                    },
                    "status": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "tech_status": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "type": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "vmid": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                },
            },
        },
    }
    return res
}

func DataSourceDiskList() *schema.Resource {
    return &schema.Resource{
        SchemaVersion: 1,

        ReadContext: dataSourceDiskListRead,

        Timeouts: &schema.ResourceTimeout{
            Read:    &constants.Timeout30s,
            Default: &constants.Timeout60s,
        },

        Schema: dataSourceDiskListSchemaMake(),
    }
}
internal/service/cloudbroker/disks/models.go (new file, 111 lines)
@@ -0,0 +1,111 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://github.com/rudecs/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/

package disks

type Disk struct {
    Acl                 map[string]interface{} `json:"acl"`
    AccountID           int                    `json:"accountId"`
    AccountName         string                 `json:"accountName"`
    BootPartition       int                    `json:"bootPartition"`
    CreatedTime         uint64                 `json:"creationTime"`
    ComputeID           int                    `json:"computeId"`
    ComputeName         string                 `json:"computeName"`
    DeletedTime         uint64                 `json:"deletionTime"`
    DeviceName          string                 `json:"devicename"`
    Desc                string                 `json:"desc"`
    DestructionTime     uint64                 `json:"destructionTime"`
    DiskPath            string                 `json:"diskPath"`
    GridID              int                    `json:"gid"`
    GUID                int                    `json:"guid"`
    ID                  uint                   `json:"id"`
    ImageID             int                    `json:"imageId"`
    Images              []int                  `json:"images"`
    IOTune              IOTune                 `json:"iotune"`
    IQN                 string                 `json:"iqn"`
    Login               string                 `json:"login"`
    Name                string                 `json:"name"`
    MachineId           int                    `json:"machineId"`
    MachineName         string                 `json:"machineName"`
    Milestones          uint64                 `json:"milestones"`
    Order               int                    `json:"order"`
    Params              string                 `json:"params"`
    Passwd              string                 `json:"passwd"`
    ParentId            int                    `json:"parentId"`
    PciSlot             int                    `json:"pciSlot"`
    Pool                string                 `json:"pool"`
    PurgeTime           uint64                 `json:"purgeTime"`
    PurgeAttempts       uint64                 `json:"purgeAttempts"`
    RealityDeviceNumber int                    `json:"realityDeviceNumber"`
    ReferenceId         string                 `json:"referenceId"`
    ResID               string                 `json:"resId"`
    ResName             string                 `json:"resName"`
    Role                string                 `json:"role"`
    SepType             string                 `json:"sepType"`
    SepID               int                    `json:"sepId"` // NOTE: absent from compute/get output
    SizeMax             int                    `json:"sizeMax"`
    SizeUsed            int                    `json:"sizeUsed"` // sum over all snapshots of this disk to report total consumed space
    Snapshots           []Snapshot             `json:"snapshots"`
    Status              string                 `json:"status"`
    TechStatus          string                 `json:"techStatus"`
    Type                string                 `json:"type"`
    UpdateBy            uint64                 `json:"updateBy"`
    VMID                int                    `json:"vmid"`
}

type Snapshot struct {
    Guid        string `json:"guid"`
    Label       string `json:"label"`
    ResId       string `json:"resId"`
    SnapSetGuid string `json:"snapSetGuid"`
    SnapSetTime uint64 `json:"snapSetTime"`
    TimeStamp   uint64 `json:"timestamp"`
}

type SnapshotList []Snapshot

type DisksList []Disk

type IOTune struct {
    ReadBytesSec     int `json:"read_bytes_sec"`
    ReadBytesSecMax  int `json:"read_bytes_sec_max"`
    ReadIopsSec      int `json:"read_iops_sec"`
    ReadIopsSecMax   int `json:"read_iops_sec_max"`
    SizeIopsSec      int `json:"size_iops_sec"`
    TotalBytesSec    int `json:"total_bytes_sec"`
    TotalBytesSecMax int `json:"total_bytes_sec_max"`
    TotalIopsSec     int `json:"total_iops_sec"`
    TotalIopsSecMax  int `json:"total_iops_sec_max"`
    WriteBytesSec    int `json:"write_bytes_sec"`
    WriteBytesSecMax int `json:"write_bytes_sec_max"`
    WriteIopsSec     int `json:"write_iops_sec"`
    WriteIopsSecMax  int `json:"write_iops_sec_max"`
}
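The json tags above mirror the camelCase keys of the cloudbroker API responses. A minimal hedged sketch of how a payload decodes into the Disk model, assuming it sits in package disks with "encoding/json" imported; the payload values are made up for illustration:

// decodeDiskSample is an illustrative helper, not part of this commit; it only demonstrates
// the tag-to-field mapping of the Disk model.
func decodeDiskSample() (*Disk, error) {
    raw := []byte(`{"id": 42, "name": "data-disk", "sizeMax": 10, "iotune": {"total_iops_sec": 2000}}`)
    disk := &Disk{}
    if err := json.Unmarshal(raw, disk); err != nil {
        return nil, err
    }
    // disk.ID == 42, disk.Name == "data-disk", disk.SizeMax == 10, disk.IOTune.TotalIopsSec == 2000
    return disk, nil
}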
internal/service/cloudbroker/disks/resource_disk.go (new file, 638 lines)
@@ -0,0 +1,638 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://github.com/rudecs/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/

package disks

import (
    "context"
    "encoding/json"
    "fmt"
    "net/url"
    "strconv"
    "strings"

    "github.com/rudecs/terraform-provider-decort/internal/constants"
    "github.com/rudecs/terraform-provider-decort/internal/controller"
    log "github.com/sirupsen/logrus"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    c := m.(*controller.ControllerCfg)
    urlValues := &url.Values{}

    urlValues.Add("accountId", fmt.Sprintf("%d", d.Get("account_id").(int)))
    urlValues.Add("gid", fmt.Sprintf("%d", d.Get("gid").(int)))
    urlValues.Add("name", d.Get("disk_name").(string))
    urlValues.Add("size", fmt.Sprintf("%d", d.Get("size_max").(int)))
    if typeRaw, ok := d.GetOk("type"); ok {
        urlValues.Add("type", strings.ToUpper(typeRaw.(string)))
    } else {
        urlValues.Add("type", "D")
    }

    if sepId, ok := d.GetOk("sep_id"); ok {
        urlValues.Add("sep_id", strconv.Itoa(sepId.(int)))
    }

    if poolName, ok := d.GetOk("pool"); ok {
        urlValues.Add("pool", poolName.(string))
    }

    argVal, argSet := d.GetOk("desc")
    if argSet {
        urlValues.Add("description", argVal.(string))
    }

    diskId, err := c.DecortAPICall(ctx, "POST", disksCreateAPI, urlValues)
    if err != nil {
        return diag.FromErr(err)
    }

    urlValues = &url.Values{}

    d.SetId(diskId) // update ID of the resource to tell Terraform that the disk resource exists

    if iotuneRaw, ok := d.GetOk("iotune"); ok {
        iot := iotuneRaw.([]interface{})[0]
        iotune := iot.(map[string]interface{})
        urlValues.Add("diskId", diskId)
        urlValues.Add("iops", strconv.Itoa(iotune["total_iops_sec"].(int)))
        urlValues.Add("read_bytes_sec", strconv.Itoa(iotune["read_bytes_sec"].(int)))
        urlValues.Add("read_bytes_sec_max", strconv.Itoa(iotune["read_bytes_sec_max"].(int)))
        urlValues.Add("read_iops_sec", strconv.Itoa(iotune["read_iops_sec"].(int)))
        urlValues.Add("read_iops_sec_max", strconv.Itoa(iotune["read_iops_sec_max"].(int)))
        urlValues.Add("size_iops_sec", strconv.Itoa(iotune["size_iops_sec"].(int)))
        urlValues.Add("total_bytes_sec", strconv.Itoa(iotune["total_bytes_sec"].(int)))
        urlValues.Add("total_bytes_sec_max", strconv.Itoa(iotune["total_bytes_sec_max"].(int)))
        urlValues.Add("total_iops_sec_max", strconv.Itoa(iotune["total_iops_sec_max"].(int)))
        urlValues.Add("write_bytes_sec", strconv.Itoa(iotune["write_bytes_sec"].(int)))
        urlValues.Add("write_bytes_sec_max", strconv.Itoa(iotune["write_bytes_sec_max"].(int)))
        urlValues.Add("write_iops_sec", strconv.Itoa(iotune["write_iops_sec"].(int)))
        urlValues.Add("write_iops_sec_max", strconv.Itoa(iotune["write_iops_sec_max"].(int)))

        _, err := c.DecortAPICall(ctx, "POST", disksIOLimitAPI, urlValues)
        if err != nil {
            return diag.FromErr(err)
        }

        urlValues = &url.Values{}
    }

    dgn := resourceDiskRead(ctx, d, m)
    if dgn != nil {
        return dgn
    }

    return nil
}

func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    disk, err := utilityDiskCheckPresence(ctx, d, m)
    if disk == nil {
        d.SetId("")
        if err != nil {
            return diag.FromErr(err)
        }
        return nil
    }

    diskAcl, _ := json.Marshal(disk.Acl)

    d.Set("account_id", disk.AccountID)
    d.Set("account_name", disk.AccountName)
    d.Set("acl", string(diskAcl))
    d.Set("boot_partition", disk.BootPartition)
    d.Set("compute_id", disk.ComputeID)
    d.Set("compute_name", disk.ComputeName)
    d.Set("created_time", disk.CreatedTime)
    d.Set("deleted_time", disk.DeletedTime)
    d.Set("desc", disk.Desc)
    d.Set("destruction_time", disk.DestructionTime)
    d.Set("devicename", disk.DeviceName)
    d.Set("disk_path", disk.DiskPath)
    d.Set("gid", disk.GridID)
    d.Set("guid", disk.GUID)
    d.Set("disk_id", disk.ID)
    d.Set("image_id", disk.ImageID)
    d.Set("images", disk.Images)
    d.Set("iotune", flattenIOTune(disk.IOTune))
    d.Set("iqn", disk.IQN)
    d.Set("login", disk.Login)
    d.Set("milestones", disk.Milestones)
    d.Set("disk_name", disk.Name)
    d.Set("order", disk.Order)
    d.Set("params", disk.Params)
    d.Set("parent_id", disk.ParentId)
    d.Set("passwd", disk.Passwd)
    d.Set("pci_slot", disk.PciSlot)
    d.Set("pool", disk.Pool)
    d.Set("purge_attempts", disk.PurgeAttempts)
    d.Set("purge_time", disk.PurgeTime)
    d.Set("reality_device_number", disk.RealityDeviceNumber)
    d.Set("reference_id", disk.ReferenceId)
    d.Set("res_id", disk.ResID)
    d.Set("res_name", disk.ResName)
    d.Set("role", disk.Role)
    d.Set("sep_id", disk.SepID)
    d.Set("sep_type", disk.SepType)
    d.Set("size_max", disk.SizeMax)
    d.Set("size_used", disk.SizeUsed)
    d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots))
    d.Set("status", disk.Status)
    d.Set("tech_status", disk.TechStatus)
    d.Set("type", disk.Type)
    d.Set("vmid", disk.VMID)

    return nil
}

func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    c := m.(*controller.ControllerCfg)
    urlValues := &url.Values{}

    if d.HasChange("size_max") {
        oldSize, newSize := d.GetChange("size_max")
        if oldSize.(int) < newSize.(int) {
            log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB",
                d.Id(), oldSize.(int), newSize.(int))
            urlValues.Add("diskId", d.Id())
            urlValues.Add("size", fmt.Sprintf("%d", newSize.(int)))
            _, err := c.DecortAPICall(ctx, "POST", disksResizeAPI, urlValues)
            if err != nil {
                return diag.FromErr(err)
            }
            d.Set("size_max", newSize)
        } else if oldSize.(int) > newSize.(int) {
            return diag.FromErr(fmt.Errorf("resourceDiskUpdate: Disk ID %s - reducing disk size is not allowed", d.Id()))
        }
        urlValues = &url.Values{}
    }

    if d.HasChange("disk_name") {
        urlValues.Add("diskId", d.Id())
        urlValues.Add("name", d.Get("disk_name").(string))
        _, err := c.DecortAPICall(ctx, "POST", disksRenameAPI, urlValues)
        if err != nil {
            return diag.FromErr(err)
        }

        urlValues = &url.Values{}
    }

    if d.HasChange("iotune") {
        iot := d.Get("iotune").([]interface{})[0]
        iotune := iot.(map[string]interface{})
        urlValues.Add("diskId", d.Id())
        urlValues.Add("iops", strconv.Itoa(iotune["total_iops_sec"].(int)))
        urlValues.Add("read_bytes_sec", strconv.Itoa(iotune["read_bytes_sec"].(int)))
        urlValues.Add("read_bytes_sec_max", strconv.Itoa(iotune["read_bytes_sec_max"].(int)))
        urlValues.Add("read_iops_sec", strconv.Itoa(iotune["read_iops_sec"].(int)))
        urlValues.Add("read_iops_sec_max", strconv.Itoa(iotune["read_iops_sec_max"].(int)))
        urlValues.Add("size_iops_sec", strconv.Itoa(iotune["size_iops_sec"].(int)))
        urlValues.Add("total_bytes_sec", strconv.Itoa(iotune["total_bytes_sec"].(int)))
        urlValues.Add("total_bytes_sec_max", strconv.Itoa(iotune["total_bytes_sec_max"].(int)))
        urlValues.Add("total_iops_sec_max", strconv.Itoa(iotune["total_iops_sec_max"].(int)))
        urlValues.Add("write_bytes_sec", strconv.Itoa(iotune["write_bytes_sec"].(int)))
        urlValues.Add("write_bytes_sec_max", strconv.Itoa(iotune["write_bytes_sec_max"].(int)))
        urlValues.Add("write_iops_sec", strconv.Itoa(iotune["write_iops_sec"].(int)))
        urlValues.Add("write_iops_sec_max", strconv.Itoa(iotune["write_iops_sec_max"].(int)))

        _, err := c.DecortAPICall(ctx, "POST", disksIOLimitAPI, urlValues)
        if err != nil {
            return diag.FromErr(err)
        }

        urlValues = &url.Values{}
    }

    if d.HasChange("restore") {
        if d.Get("restore").(bool) {
            urlValues.Add("diskId", d.Id())
            urlValues.Add("reason", d.Get("reason").(string))

            _, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues)
            if err != nil {
                return diag.FromErr(err)
            }

            urlValues = &url.Values{}
        }
    }

    return resourceDiskRead(ctx, d, m)
}

func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
    disk, err := utilityDiskCheckPresence(ctx, d, m)
    if disk == nil {
        if err != nil {
            return diag.FromErr(err)
        }
        return nil
    }

    params := &url.Values{}
    params.Add("diskId", d.Id())
    params.Add("detach", strconv.FormatBool(d.Get("detach").(bool)))
    params.Add("permanently", strconv.FormatBool(d.Get("permanently").(bool)))
    params.Add("reason", d.Get("reason").(string))

    c := m.(*controller.ControllerCfg)
    _, err = c.DecortAPICall(ctx, "POST", disksDeleteAPI, params)
    if err != nil {
        return diag.FromErr(err)
    }

    return nil
}

func resourceDiskExists(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
    diskFacts, err := utilityDiskCheckPresence(ctx, d, m)
    if diskFacts == nil {
        if err != nil {
            return false, err
        }
        return false, nil
    }
    return true, nil
}

func resourceDiskSchemaMake() map[string]*schema.Schema {
    rets := map[string]*schema.Schema{
        "account_id": {
            Type:     schema.TypeInt,
            Required: true,
        },
        "disk_name": {
            Type:     schema.TypeString,
            Required: true,
        },
        "size_max": {
            Type:     schema.TypeInt,
            Required: true,
        },
        "gid": {
            Type:     schema.TypeInt,
            Required: true,
        },
        "pool": {
            Type:     schema.TypeString,
            Optional: true,
            Computed: true,
        },
        "sep_id": {
            Type:     schema.TypeInt,
            Optional: true,
            Computed: true,
        },
        "desc": {
            Type:     schema.TypeString,
            Optional: true,
            Computed: true,
        },
        "type": {
            Type:         schema.TypeString,
            Optional:     true,
            Computed:     true,
            ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false),
        },

        "detach": {
            Type:        schema.TypeBool,
            Optional:    true,
            Default:     false,
            Description: "detach the disk from the machine before deleting it",
        },
        "permanently": {
            Type:        schema.TypeBool,
            Optional:    true,
            Default:     false,
            Description: "whether to delete the disk permanently; works only for disks that are not attached",
        },
        "reason": {
            Type:        schema.TypeString,
            Optional:    true,
            Default:     "",
            Description: "reason for the action",
        },
        "restore": {
            Type:        schema.TypeBool,
            Optional:    true,
            Default:     false,
            Description: "restore a deleted disk",
        },

        "disk_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "account_name": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "acl": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "boot_partition": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "compute_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "compute_name": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "created_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "deleted_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "destruction_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "devicename": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "disk_path": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "guid": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "image_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "images": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Schema{
                Type: schema.TypeString,
            },
        },
        "iotune": {
            Type:     schema.TypeList,
            Optional: true,
            MaxItems: 1,
            Computed: true,
            Elem: &schema.Resource{
                Schema: map[string]*schema.Schema{
                    "read_bytes_sec": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "read_bytes_sec_max": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "read_iops_sec": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "read_iops_sec_max": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "size_iops_sec": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "total_bytes_sec": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "total_bytes_sec_max": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "total_iops_sec": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "total_iops_sec_max": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "write_bytes_sec": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "write_bytes_sec_max": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "write_iops_sec": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                    "write_iops_sec_max": {
                        Type:     schema.TypeInt,
                        Optional: true,
                        Computed: true,
                    },
                },
            },
        },
        "iqn": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "login": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "milestones": {
            Type:     schema.TypeInt,
            Computed: true,
        },

        "order": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "params": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "parent_id": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "passwd": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "pci_slot": {
            Type:     schema.TypeInt,
            Computed: true,
        },

        "purge_attempts": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "purge_time": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "reality_device_number": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "reference_id": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "res_id": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "res_name": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "role": {
            Type:     schema.TypeString,
            Computed: true,
        },

        "sep_type": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "size_used": {
            Type:     schema.TypeInt,
            Computed: true,
        },
        "snapshots": {
            Type:     schema.TypeList,
            Computed: true,
            Elem: &schema.Resource{
                Schema: map[string]*schema.Schema{
                    "guid": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "label": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "res_id": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "snap_set_guid": {
                        Type:     schema.TypeString,
                        Computed: true,
                    },
                    "snap_set_time": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                    "timestamp": {
                        Type:     schema.TypeInt,
                        Computed: true,
                    },
                },
            },
        },
        "status": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "tech_status": {
            Type:     schema.TypeString,
            Computed: true,
        },
        "vmid": {
            Type:     schema.TypeInt,
            Computed: true,
        },
    }

    return rets
}

func ResourceDisk() *schema.Resource {
    return &schema.Resource{
        SchemaVersion: 1,

        CreateContext: resourceDiskCreate,
        ReadContext:   resourceDiskRead,
        UpdateContext: resourceDiskUpdate,
        DeleteContext: resourceDiskDelete,

        Importer: &schema.ResourceImporter{
            State: schema.ImportStatePassthrough,
        },

        Timeouts: &schema.ResourceTimeout{
            Create:  &constants.Timeout180s,
            Read:    &constants.Timeout30s,
            Update:  &constants.Timeout180s,
            Delete:  &constants.Timeout60s,
            Default: &constants.Timeout60s,
        },

        Schema: resourceDiskSchemaMake(),
    }
}
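ResourceDisk, together with DataSourceDisk and DataSourceDiskList above, is a constructor meant to be wired into the provider's resource maps. That wiring is not part of this commit, and the resource type names below are assumptions; a hedged sketch of how it might look:

package provider // hypothetical location; the actual provider wiring is not in this commit

import (
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

    "github.com/rudecs/terraform-provider-decort/internal/service/cloudbroker/disks"
)

// newProvider sketches how the constructors from this package could be exposed to Terraform.
// The "decort_cb_*" type names are assumptions, not taken from the commit.
func newProvider() *schema.Provider {
    return &schema.Provider{
        ResourcesMap: map[string]*schema.Resource{
            "decort_cb_disk": disks.ResourceDisk(),
        },
        DataSourcesMap: map[string]*schema.Resource{
            "decort_cb_disk":      disks.DataSourceDisk(),
            "decort_cb_disk_list": disks.DataSourceDiskList(),
        },
    }
}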
internal/service/cloudbroker/disks/utility_disk.go (new file, 70 lines)
@@ -0,0 +1,70 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://github.com/rudecs/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/

package disks

import (
    "context"
    "encoding/json"
    "net/url"
    "strconv"

    "github.com/rudecs/terraform-provider-decort/internal/controller"
    log "github.com/sirupsen/logrus"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func utilityDiskCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*Disk, error) {
    c := m.(*controller.ControllerCfg)
    urlValues := &url.Values{}

    disk := &Disk{}

    if d.Get("disk_id").(int) == 0 {
        urlValues.Add("diskId", d.Id())
    } else {
        urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int)))
    }

    log.Debugf("utilityDiskCheckPresence: load disk")
    diskRaw, err := c.DecortAPICall(ctx, "POST", disksGetAPI, urlValues)
    if err != nil {
        return nil, err
    }

    err = json.Unmarshal([]byte(diskRaw), disk)
    if err != nil {
        return nil, err
    }

    return disk, nil
}
internal/service/cloudbroker/disks/utility_disk_list.go (new file, 77 lines)
@@ -0,0 +1,77 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://github.com/rudecs/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
*/

package disks

import (
    "context"
    "encoding/json"
    "net/url"
    "strconv"
    "strings"

    "github.com/rudecs/terraform-provider-decort/internal/controller"
    log "github.com/sirupsen/logrus"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (DisksList, error) {
    diskList := DisksList{}
    c := m.(*controller.ControllerCfg)
    urlValues := &url.Values{}

    if page, ok := d.GetOk("page"); ok {
        urlValues.Add("page", strconv.Itoa(page.(int)))
    }
    if size, ok := d.GetOk("size"); ok {
        urlValues.Add("size", strconv.Itoa(size.(int)))
    }
    if diskType, ok := d.GetOk("type"); ok {
        urlValues.Add("type", strings.ToUpper(diskType.(string)))
    }
    // read the filter from the schema key "account_id" and pass it to the API as "accountId"
    if accountId, ok := d.GetOk("account_id"); ok {
        urlValues.Add("accountId", strconv.Itoa(accountId.(int)))
    }

    log.Debugf("utilityDiskListCheckPresence: load disk list")
    diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListAPI, urlValues)
    if err != nil {
        return nil, err
    }

    err = json.Unmarshal([]byte(diskListRaw), &diskList)
    if err != nil {
        return nil, err
    }

    return diskList, nil
}