This commit is contained in:
2023-12-18 18:36:55 +03:00
parent 294680282e
commit e2ee45ee14
155 changed files with 10125 additions and 17209 deletions

View File

@@ -46,6 +46,7 @@ import (
func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@@ -57,299 +58,6 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
return nil
}
func dataSourceDiskSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"iotune": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
},
"order": {
Type: schema.TypeInt,
Computed: true,
},
"params": {
Type: schema.TypeString,
Computed: true,
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
},
"shareable": {
Type: schema.TypeString,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"label": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
},
}
return rets
}
func DataSourceDisk() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

View File

@@ -1,442 +1,71 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskList, err := utilityDiskListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDiskList(diskList))
d.Set("entry_count", diskList.EntryCount)
return nil
}
func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by name",
},
"account_name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by account name",
},
"disk_max_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by max disk size",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"shared": {
Type: schema.TypeBool,
Optional: true,
Description: "Find by shared field",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the account the disks belong to",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "type of the disks",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by sep id",
},
"pool": {
Type: schema.TypeString,
Optional: true,
Description: "Find by pool name",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"iotune": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"machine_id": {
Type: schema.TypeInt,
Computed: true,
},
"machine_name": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
},
"order": {
Type: schema.TypeInt,
Computed: true,
},
"params": {
Type: schema.TypeString,
Computed: true,
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"label": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceDiskList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDiskListSchemaMake(),
}
}
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskList, err := utilityDiskListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDiskList(diskList))
d.Set("entry_count", diskList.EntryCount)
return nil
}
func DataSourceDiskList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDiskListSchemaMake(),
}
}

View File

@@ -12,6 +12,7 @@ import (
func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@@ -23,24 +24,6 @@ func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m
return nil
}
func dataSourceDiskListTypesSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceDiskListTypes() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

View File

@@ -12,6 +12,7 @@ import (
func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@@ -32,53 +33,6 @@ func DataSourceDiskListTypesDetailed() *schema.Resource {
Default: &constants.Timeout60s,
},
Schema: map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"pools": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Pool name",
},
"system": {
Type: schema.TypeString,
Computed: true,
},
"types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
},
},
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Storage endpoint provider ID to create disk",
},
"sep_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
},
Schema: dataSourceDiskListTypesDetailedSchemaMake(),
}
}

View File

@@ -45,6 +45,7 @@ import (
func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@@ -70,401 +71,3 @@ func DataSourceDiskListUnattached() *schema.Resource {
Schema: dataSourceDiskListUnattachedSchemaMake(),
}
}
func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"account_name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by account name",
},
"disk_max_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by max disk size",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "type of the disks",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the account the disks belong to",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of SEP",
},
"pool": {
Type: schema.TypeString,
Optional: true,
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"_ckey": {
Type: schema.TypeString,
Computed: true,
Description: "CKey",
},
"_meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "Meta parameters",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the account the disks belong to",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "The name of the subscriber '(account') to whom this disk belongs",
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of disk partitions",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Deleted time",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Description: "Description of disk",
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of final deletion",
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
Description: "Disk path",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk ID on the storage side",
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Image ID",
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "IDs of images using the disk",
},
"iotune": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of bytes to read per second",
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of bytes to read",
},
"read_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of io read operations per second",
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of io read operations",
},
"size_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Size of io operations",
},
"total_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Total size bytes per second",
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum total size of bytes per second",
},
"total_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Total number of io operations per second",
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum total number of io operations per second",
},
"write_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of bytes to write per second",
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of bytes to write per second",
},
"write_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of write operations per second",
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of write operations per second",
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
Description: "Disk IQN",
},
"login": {
Type: schema.TypeString,
Computed: true,
Description: "Login to access the disk",
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
Description: "Milestones",
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of disk",
},
"order": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk order",
},
"params": {
Type: schema.TypeString,
Computed: true,
Description: "Disk params",
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the parent disk",
},
"passwd": {
Type: schema.TypeString,
Computed: true,
Description: "Password to access the disk",
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the pci slot to which the disk is connected",
},
"pool": {
Type: schema.TypeString,
Computed: true,
Description: "Pool for disk location",
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of deletion attempts",
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of the last deletion attempt",
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
Description: "Reality device number",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the reference to the disk",
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Resource ID",
},
"res_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the resource",
},
"role": {
Type: schema.TypeString,
Computed: true,
Description: "Disk role",
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Storage endpoint provider ID to create disk",
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Size in GB",
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
Description: "Number of used space, in GB",
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"label": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Disk status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "Technical status of the disk",
},
"type": {
Type: schema.TypeString,
Computed: true,
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
Description: "Virtual Machine ID (Deprecated)",
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}

View File

@@ -45,6 +45,7 @@ import (
func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@@ -82,48 +83,3 @@ func DataSourceDiskSnapshot() *schema.Resource {
Schema: dataSourceDiskSnapshotSchemaMake(),
}
}
func dataSourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": {
Type: schema.TypeString,
Required: true,
Description: "Name of the snapshot",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
}
return rets
}

View File

@@ -44,6 +44,7 @@ import (
func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@@ -67,56 +68,3 @@ func DataSourceDiskSnapshotList() *schema.Resource {
Schema: dataSourceDiskSnapshotListSchemaMake(),
}
}
func dataSourceDiskSnapshotListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"label": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the snapshot",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
},
},
},
}
return rets
}

View File

@@ -106,12 +106,11 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
for _, disk := range dl.Data {
diskAcl, _ := json.Marshal(disk.ACL)
temp := map[string]interface{}{
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
"boot_partition": disk.BootPartition,
// "compute_id": disk.MachineID,
// "compute_name": disk.MachineName,
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
"boot_partition": disk.BootPartition,
"computes": flattenDiskComputes(disk.Computes),
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"desc": disk.Description,

View File

@@ -2,54 +2,27 @@ package disks
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
var errs []error
accountID := uint64(d.Get("account_id").(int))
accountList, err := c.CloudBroker().Account().List(ctx, account.ListRequest{})
if err != nil {
return err
}
if len(accountList.FilterByID(accountID).Data) == 0 {
return fmt.Errorf("resourceDiskCreate: can't create/update Disk because AccountID %d is not allowed or does not exist", accountID)
}
return nil
}
func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
gid := uint64(d.Get("gid").(int))
gidList, err := c.CloudBroker().Grid().List(ctx, grid.ListRequest{})
if err != nil {
return err
if err := ic.ExistAccount(ctx, accountID, c); err != nil {
errs = append(errs, err)
}
for _, elem := range gidList.Data {
if elem.GID == gid {
return nil
}
if err := ic.ExistGID(ctx, gid, c); err != nil {
errs = append(errs, err)
}
return fmt.Errorf("resourceDiskCreate: can't create/update Disk because GID %d is not allowed or does not exist", gid)
}
func checkParamsExists(ctx context.Context, d *schema.ResourceData, m interface{}) error {
err := existAccountID(ctx, d, m)
if err != nil {
return err
}
return existGID(ctx, d, m)
return dc.ErrorsToDiagnostics(errs)
}
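The refactored check above collects validation failures into a slice and hands them to dc.ErrorsToDiagnostics, which is not shown in this diff. A minimal sketch of such a helper, assuming it simply maps every collected error to an error-severity diagnostic and returns nil when there is nothing to report; the real implementation in internal/dc may differ:

package dc

import "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

// ErrorsToDiagnostics converts collected errors into Terraform diagnostics.
// Returning nil when the slice is empty lets callers write
// `if diags := checkParamsExistence(...); diags != nil { return diags }`.
func ErrorsToDiagnostics(errs []error) diag.Diagnostics {
	if len(errs) == 0 {
		return nil
	}
	diags := make(diag.Diagnostics, 0, len(errs))
	for _, err := range errs {
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Error,
			Summary:  err.Error(),
		})
	}
	return diags
}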

View File

@@ -44,16 +44,15 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus"
)
func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDiskCreate: called for disk %s", d.Get("disk_name").(string))
c := m.(*controller.ControllerCfg)
err := checkParamsExists(ctx, d, m)
if err != nil {
return diag.FromErr(err)
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
req := disks.CreateRequest{
@@ -72,6 +71,10 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
req.SSDSize = uint64(ssdSize.(int))
}
if iops, ok := d.GetOk("iops"); ok {
req.IOPS = uint64(iops.(int))
}
if sepID, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepID.(int))
}
@@ -82,45 +85,23 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
diskID, err := c.CloudBroker().Disks().Create(ctx, req)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(diskID, 10))
d.Set("disk_id", diskID)
w := dc.Warnings{}
if iotuneRaw, ok := d.GetOk("iotune"); ok {
iot := iotuneRaw.([]interface{})[0]
iotune := iot.(map[string]interface{})
req := disks.LimitIORequest{
DiskID: diskID,
IOPS: uint64(iotune["total_iops_sec"].(int)),
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
if err != nil {
if _, ok := d.GetOk("iotune"); ok {
if err := resourceDiskChangeIotune(ctx, d, m); err != nil {
w.Add(err)
}
}
if shareable := d.Get("shareable"); shareable.(bool) {
_, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{
DiskID: diskID,
})
if err != nil {
if err := resourceDiskChangeShareable(ctx, d, m); err != nil {
w.Add(err)
}
}
@@ -129,7 +110,7 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
}
func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
log.Debugf("resourceDiskRead: called for disk_id %d", d.Get("disk_id").(int))
w := dc.Warnings{}
disk, err := utilityDiskCheckPresence(ctx, d, m)
@@ -144,23 +125,13 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
case status.Destroyed, status.Purged:
d.Set("disk_id", 0)
d.SetId("")
return resourceDiskCreate(ctx, d, m)
return diag.Errorf("The resource cannot be read because it has been destroyed")
//return resourceDiskCreate(ctx, d, m)
case status.Deleted:
hasChangeState = true
req := disks.RestoreRequest{
DiskID: disk.ID,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
if err != nil {
w.Add(err)
}
//hasChangeState = true
//if err := resourceDiskRestore(ctx, d, m); err != nil {
// w.Add(err)
//}
case status.Assigned:
case status.Modeled:
return diag.Errorf("The disk is in status: %s, please, contact support for more information", disk.Status)
@@ -173,6 +144,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
if hasChangeState {
disk, err = utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
}
@@ -183,12 +155,12 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
}
func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDiskUpdate: called for disk_id %d", d.Get("disk_id").(int))
c := m.(*controller.ControllerCfg)
w := dc.Warnings{}
err := checkParamsExists(ctx, d, m)
if err != nil {
return diag.FromErr(err)
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
disk, err := utilityDiskCheckPresence(ctx, d, m)
@@ -203,21 +175,11 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
case status.Destroyed, status.Purged:
d.Set("disk_id", 0)
d.SetId("")
return resourceDiskCreate(ctx, d, m)
return diag.Errorf("The resource cannot be updated because it has been destroyed")
//return resourceDiskCreate(ctx, d, m)
case status.Deleted:
hasChangeState = true
req := disks.RestoreRequest{
DiskID: disk.ID,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
if err != nil {
if err := resourceDiskRestore(ctx, d, m); err != nil {
return diag.FromErr(err)
}
case status.Assigned:
@@ -239,75 +201,32 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
if d.HasChange("size_max") {
oldSize, newSize := d.GetChange("size_max")
if oldSize.(int) < newSize.(int) {
log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB",
d.Id(), oldSize.(int), newSize.(int))
_, err := c.CloudBroker().Disks().Resize(ctx, disks.ResizeRequest{
DiskID: disk.ID,
Size: uint64(newSize.(int)),
})
if err != nil {
w.Add(err)
}
} else if oldSize.(int) > newSize.(int) {
if oldSize.(int) > newSize.(int) {
return diag.FromErr(fmt.Errorf("resourceDiskUpdate: Disk ID %s - reducing disk size is not allowed", d.Id()))
}
log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB",
d.Id(), oldSize.(int), newSize.(int))
if err := resourceDiskChangeSize(ctx, d, m); err != nil {
w.Add(err)
}
}
if d.HasChange("disk_name") {
_, err := c.CloudBroker().Disks().Rename(ctx, disks.RenameRequest{
DiskID: disk.ID,
Name: d.Get("disk_name").(string),
})
if err != nil {
if err := resourceDiskChangeDiskName(ctx, d, m); err != nil {
w.Add(err)
}
}
if d.HasChange("iotune") {
iot := d.Get("iotune").([]interface{})[0]
iotune := iot.(map[string]interface{})
req := disks.LimitIORequest{
IOPS: uint64(iotune["total_iops_sec"].(int)),
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
if err != nil {
if err := resourceDiskChangeIotune(ctx, d, m); err != nil {
w.Add(err)
}
}
if d.HasChange("shareable") {
old, new := d.GetChange("shareable")
if !old.(bool) && new.(bool) && !disk.Shareable {
_, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{
DiskID: disk.ID,
})
if err != nil {
w.Add(err)
}
}
if old.(bool) && !new.(bool) && disk.Shareable {
_, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{
DiskID: disk.ID,
})
if err != nil {
w.Add(err)
}
if err := resourceDiskChangeShareable(ctx, d, m); err != nil {
w.Add(err)
}
}
@@ -315,6 +234,7 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
}
func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDiskDelete: called for disk_id %d", d.Get("disk_id").(int))
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
@@ -340,349 +260,91 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
return nil
}
func resourceDiskSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"gid": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"disk_name": {
Type: schema.TypeString,
Required: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false),
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Required: true,
},
"ssd_size": {
Type: schema.TypeInt,
Optional: true,
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"detach": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "detach disk from machine first",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the disk, works only with non attached disks",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: "reason for an action",
},
"shareable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"restore": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "restore deleting disk",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"iotune": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
func resourceDiskChangeIotune(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
"order": {
Type: schema.TypeInt,
Computed: true,
},
"params": {
Type: schema.TypeString,
Computed: true,
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Computed: true,
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"label": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
},
iotuneRaw := d.Get("iotune")
diskId := uint64(d.Get("disk_id").(int))
iot := iotuneRaw.([]interface{})[0]
iotune := iot.(map[string]interface{})
req := disks.LimitIORequest{
DiskID: diskId,
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
return rets
if _, ok := iotune["total_iops_sec"]; ok {
req.IOPS = uint64(iotune["total_iops_sec"].(int))
} else if _, ok := d.GetOk("iops"); ok {
req.IOPS = uint64(d.Get("iops").(int))
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
return err
}
func resourceDiskChangeShareable(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
diskId := uint64(d.Get("disk_id").(int))
shareable := d.Get("shareable").(bool)
if shareable {
_, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
return err
}
_, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{DiskID: diskId})
return err
}
func resourceDiskRestore(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := disks.RestoreRequest{
DiskID: uint64(d.Get("disk_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
return err
}
func resourceDiskChangeDiskName(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
_, err := c.CloudBroker().Disks().Rename(ctx, disks.RenameRequest{
DiskID: uint64(d.Get("disk_id").(int)),
Name: d.Get("disk_name").(string),
})
return err
}
func resourceDiskChangeSize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
_, err := c.CloudBroker().Disks().Resize2(ctx, disks.ResizeRequest{
DiskID: uint64(d.Get("disk_id").(int)),
Size: uint64(d.Get("size_max").(int)),
})
return err
}
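The create and update flows above accumulate non-fatal failures through dc.Warnings (w.Add(err)); that type is also outside this diff. A minimal sketch of such a collector, assuming it surfaces the accumulated errors as warning-severity diagnostics once the operation finishes; the method names and the real internal/dc implementation are assumptions:

package dc

import "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

// Warnings accumulates non-fatal errors raised during a resource operation.
type Warnings struct {
	errs []error
}

// Add records an error to be surfaced later as a warning.
func (w *Warnings) Add(err error) {
	if err != nil {
		w.errs = append(w.errs, err)
	}
}

// Get converts the collected errors into warning-severity diagnostics,
// so partial failures do not abort the whole apply.
func (w *Warnings) Get() diag.Diagnostics {
	diags := make(diag.Diagnostics, 0, len(w.errs))
	for _, err := range w.errs {
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Warning,
			Summary:  err.Error(),
		})
	}
	return diags
}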
func ResourceDisk() *schema.Resource {

View File

@@ -2,6 +2,7 @@ package disks
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -12,35 +13,21 @@ import (
)
func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
log.Debugf("resourceDiskSnapshotCreate: call for disk_id %d, label %s",
d.Get("disk_id").(int),
d.Get("label").(string))
disk, err := utilityDiskCheckPresence(ctx, d, m)
_, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := disks.ItemSnapshot{}
diskId := uint64(d.Get("disk_id").(int))
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
d.SetId(fmt.Sprintf("%d#%s", diskId, label))
if rollback := d.Get("rollback").(bool); rollback {
req := disks.SnapshotRollbackRequest{
DiskID: disk.ID,
Label: label,
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskCreate: Snapshot rollback with label", label)
_, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req)
err := resourceDiskSnapshotChangeRollback(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
@@ -49,61 +36,30 @@ func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m i
}
func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
log.Debugf("resourceDiskSnapshotRead: snapshot id %s", d.Id())
snapshot, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
flattenDiskSnapshot(d, snapshot)
return nil
}
func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
log.Debugf("resourceDiskSnapshotUpdate: snapshot id %s", d.Id())
_, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
if d.HasChange("rollback") && d.Get("rollback").(bool) == true {
req := disks.SnapshotRollbackRequest{
DiskID: disk.ID,
Label: label,
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskUpdtae: Snapshot rollback with label", label)
_, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req)
if d.HasChange("rollback") {
err := resourceDiskSnapshotChangeRollback(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
@@ -113,16 +69,18 @@ func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m i
}
func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
log.Debugf("resourceDiskSnapshotDelete: snapshot id %s", d.Id())
disk, err := utilityDiskCheckPresence(ctx, d, m)
_, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := disks.SnapshotDeleteRequest{
DiskID: disk.ID,
DiskID: uint64(d.Get("disk_id").(int)),
Label: d.Get("label").(string),
}
@@ -136,6 +94,31 @@ func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m i
return nil
}
func resourceDiskSnapshotChangeRollback(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
rollback := d.Get("rollback").(bool)
if rollback {
label := d.Get("label").(string)
timestamp := uint64(d.Get("timestamp").(int))
diskId := uint64(d.Get("disk_id").(int))
req := disks.SnapshotRollbackRequest{
DiskID: diskId,
Label: label,
TimeStamp: timestamp,
}
log.Debugf("resourceDiskUpdate: Snapshot rollback with label %s", label)
if _, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req); err != nil {
return err
}
}
return nil
}
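The snapshot resource now resolves its object through utilityDiskSnapshotCheckPresence, which this diff does not include. A minimal sketch of what such a lookup might do, reusing utilityDiskCheckPresence from this package and matching snapshots by label; the function name suffix, signature, and SDK import path are assumptions, and the real helper may differ:

package disks

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
)

// utilityDiskSnapshotCheckPresenceSketch loads the disk referenced by the
// resource data and returns the snapshot whose label matches the "label" field.
func utilityDiskSnapshotCheckPresenceSketch(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ItemSnapshot, error) {
	disk, err := utilityDiskCheckPresence(ctx, d, m)
	if err != nil {
		return nil, err
	}
	label := d.Get("label").(string)
	for _, sn := range disk.Snapshots {
		if sn.Label == label {
			snapshot := sn
			return &snapshot, nil
		}
	}
	return nil, fmt.Errorf("snapshot with label %q not found for disk %d", label, disk.ID)
}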
func ResourceDiskSnapshot() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
@@ -160,56 +143,3 @@ func ResourceDiskSnapshot() *schema.Resource {
Schema: resourceDiskSnapshotSchemaMake(),
}
}
func resourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Name of the snapshot",
},
"rollback": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Needed in order to make a snapshot rollback",
},
"timestamp": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Snapshot time",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
}
}

View File

@@ -75,7 +75,7 @@ func utilityDiskListDeletedCheckPresence(ctx context.Context, d *schema.Resource
req.Size = uint64(size.(int))
}
log.Debugf("utilityDiskListDeletedCheckPresence: load disk list")
log.Debugf("utilityDiskListDeletedCheckPresence: load disk list deleted")
diskList, err := c.CloudBroker().Disks().ListDeleted(ctx, req)
if err != nil {
return nil, err

View File

@@ -15,6 +15,13 @@ func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceDa
Detailed: false,
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed")
typesList, err := c.CloudBroker().Disks().ListTypes(ctx, req)
if err != nil {

View File

@@ -12,10 +12,18 @@ import (
func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListTypes, error) {
c := m.(*controller.ControllerCfg)
log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed")
listTypesDetailed, err := c.CloudBroker().Disks().ListTypes(ctx, disks.ListTypesRequest{
req := disks.ListTypesRequest{
Detailed: true,
})
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed")
listTypesDetailed, err := c.CloudBroker().Disks().ListTypes(ctx, req)
log.Debugf("%+v", listTypesDetailed.Data[0].(map[string]interface{}))

View File

@@ -28,7 +28,7 @@ func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.Resou
if diskType, ok := d.GetOk("type"); ok {
req.Type = diskType.(string)
}
if accountId, ok := d.GetOk("accountId"); ok {
if accountId, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(accountId.(int))
}
if sepId, ok := d.GetOk("sep_id"); ok {