4.5.0-alpha

Nikita Sorokin
2023-11-07 18:26:09 +03:00
parent 2453a32d01
commit 2bc0fbae9a
198 changed files with 18877 additions and 4003 deletions

View File

@@ -79,6 +79,22 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
@@ -222,6 +238,13 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
@@ -258,6 +281,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
@@ -279,6 +306,10 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,

View File

@@ -49,12 +49,43 @@ func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m inter
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDiskList(diskList))
d.Set("entry_count", diskList.EntryCount)
return nil
}
func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by name",
},
"account_name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by account name",
},
"disk_max_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by max disk size",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"shared": {
Type: schema.TypeBool,
Optional: true,
Description: "Find by shared field",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
@@ -65,6 +96,16 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Optional: true,
Description: "type of the disks",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by sep id",
},
"pool": {
Type: schema.TypeString,
Optional: true,
Description: "Find by pool name",
},
"page": {
Type: schema.TypeInt,
Optional: true,
@@ -96,6 +137,22 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
@@ -251,6 +308,13 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
@@ -287,6 +351,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
@@ -308,6 +376,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
@@ -346,6 +418,10 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}

View File

@@ -0,0 +1,56 @@
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", listTypes.Data)
d.Set("entry_count", listTypes.EntryCount)
return nil
}
func dataSourceDiskListTypesSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceDiskListTypes() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskListTypesRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDiskListTypesSchemaMake(),
}
}
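
Each new disk data source follows this same constructor pattern, which keeps provider wiring to a one-line map entry per data source. A minimal registration sketch, assuming resource-type names of this shape (the names and the actual wiring are illustrative, not taken from this commit):

// Illustrative only: the resource-type names below are assumptions; the
// real provider map lives elsewhere in the repository.
func newDisksDataSourcesMap() map[string]*schema.Resource {
	return map[string]*schema.Resource{
		"decort_cb_disk_list_types":          DataSourceDiskListTypes(),
		"decort_cb_disk_list_types_detailed": DataSourceDiskListTypesDetailed(),
		"decort_cb_disk_list_unattached":     DataSourceDiskListUnattached(),
	}
}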

View File

@@ -0,0 +1,84 @@
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDiskListTypesDetailed(listTypesDetailed))
d.Set("entry_count", listTypesDetailed.EntryCount)
return nil
}
func DataSourceDiskListTypesDetailed() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskListTypesDetailedRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"pools": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Pool name",
},
"system": {
Type: schema.TypeString,
Computed: true,
},
"types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
},
},
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Storage endpoint provider ID to create disk",
},
"sep_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
},
}
}

View File

@@ -0,0 +1,470 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Nikita Sorokin, <nesorokin@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDiskListUnattached(diskListUnattached))
d.Set("entry_count", diskListUnattached.EntryCount)
return nil
}
func DataSourceDiskListUnattached() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskListUnattachedRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDiskListUnattachedSchemaMake(),
}
}
func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"account_name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by account name",
},
"disk_max_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by max disk size",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "type of the disks",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the account the disks belong to",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of SEP",
},
"pool": {
Type: schema.TypeString,
Optional: true,
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"_ckey": {
Type: schema.TypeString,
Computed: true,
Description: "CKey",
},
"_meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "Meta parameters",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the account the disks belong to",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "The name of the subscriber '(account') to whom this disk belongs",
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of disk partitions",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Deleted time",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Description: "Description of disk",
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of final deletion",
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
Description: "Disk path",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk ID on the storage side",
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Image ID",
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "IDs of images using the disk",
},
"iotune": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of bytes to read per second",
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of bytes to read",
},
"read_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of io read operations per second",
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of io read operations",
},
"size_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Size of io operations",
},
"total_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Total size bytes per second",
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum total size of bytes per second",
},
"total_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Total number of io operations per second",
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum total number of io operations per second",
},
"write_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of bytes to write per second",
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of bytes to write per second",
},
"write_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of write operations per second",
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of write operations per second",
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
Description: "Disk IQN",
},
"login": {
Type: schema.TypeString,
Computed: true,
Description: "Login to access the disk",
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
Description: "Milestones",
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of disk",
},
"order": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk order",
},
"params": {
Type: schema.TypeString,
Computed: true,
Description: "Disk params",
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the parent disk",
},
"passwd": {
Type: schema.TypeString,
Computed: true,
Description: "Password to access the disk",
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the pci slot to which the disk is connected",
},
"pool": {
Type: schema.TypeString,
Computed: true,
Description: "Pool for disk location",
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of deletion attempts",
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of the last deletion attempt",
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
Description: "Reality device number",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the reference to the disk",
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Resource ID",
},
"res_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the resource",
},
"role": {
Type: schema.TypeString,
Computed: true,
Description: "Disk role",
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Storage endpoint provider ID to create disk",
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Size in GB",
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
Description: "Number of used space, in GB",
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"label": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Disk status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "Technical status of the disk",
},
"type": {
Type: schema.TypeString,
Computed: true,
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
Description: "Virtual Machine ID (Deprecated)",
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}

View File

@@ -0,0 +1,129 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
var snapshot disks.ItemSnapshot
label := d.Get("label").(string)
for _, sn := range disk.Snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
id := uuid.New()
d.SetId(id.String())
flattenDiskSnapshot(d, snapshot)
return nil
}
func DataSourceDiskSnapshot() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskSnapshotRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDiskSnapshotSchemaMake(),
}
}
func dataSourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": {
Type: schema.TypeString,
Required: true,
Description: "Name of the snapshot",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
}
return rets
}

View File

@@ -0,0 +1,122 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDiskSnapshotList(disk.Snapshots))
return nil
}
func DataSourceDiskSnapshotList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskSnapshotListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDiskSnapshotListSchemaMake(),
}
}
func dataSourceDiskSnapshotListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"label": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the snapshot",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
},
},
},
}
return rets
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
@@ -14,6 +15,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("account_name", disk.AccountName)
d.Set("acl", string(diskAcl))
d.Set("boot_partition", disk.BootPartition)
d.Set("computes", flattenDiskComputes(disk.Computes))
d.Set("created_time", disk.CreatedTime)
d.Set("deleted_time", disk.DeletedTime)
d.Set("desc", disk.Description)
@@ -37,6 +39,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("pci_slot", disk.PCISlot)
d.Set("pool", disk.Pool)
d.Set("purge_attempts", disk.PurgeAttempts)
d.Set("present_to", disk.PresentTo)
d.Set("purge_time", disk.PurgeTime)
d.Set("reality_device_number", disk.RealityDeviceNumber)
d.Set("reference_id", disk.ReferenceID)
@@ -45,6 +48,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("role", disk.Role)
d.Set("sep_id", disk.SEPID)
d.Set("sep_type", disk.SEPType)
d.Set("shareable", disk.Shareable)
d.Set("size_max", disk.SizeMax)
d.Set("size_used", disk.SizeUsed)
d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots))
@@ -54,6 +58,27 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
d.Set("vmid", disk.VMID)
}
func flattenDiskSnapshot(d *schema.ResourceData, snapshot disks.ItemSnapshot) {
d.Set("timestamp", snapshot.Timestamp)
d.Set("guid", snapshot.GUID)
d.Set("reference_id", snapshot.ReferenceID)
d.Set("res_id", snapshot.ResID)
d.Set("snap_set_guid", snapshot.SnapSetGUID)
d.Set("snap_set_time", snapshot.SnapSetTime)
}
func flattenDiskComputes(computes map[string]string) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computes))
for key, val := range computes {
tmp := map[string]interface{}{
"compute_id": key,
"compute_name": val,
}
res = append(res, tmp)
}
return res
}
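
The SDK returns disk.Computes as a map keyed by compute ID, and flattenDiskComputes above reshapes it into the list-of-objects form the "computes" schema blocks expect. A tiny illustration with hypothetical values:

// Hypothetical input: compute ID -> compute name.
computes := map[string]string{"1463": "alpha-vm"}
flat := flattenDiskComputes(computes)
// flat[0]["compute_id"] == "1463", flat[0]["compute_name"] == "alpha-vm"

Since Go map iteration order is not defined, the order of entries in the resulting list can vary between reads.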
func flattenIOTune(iot disks.IOTune) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
@@ -140,6 +165,7 @@ func flattendDiskSnapshotList(sl disks.ListSnapshots) []interface{} {
temp := map[string]interface{}{
"guid": snapshot.GUID,
"label": snapshot.Label,
"reference_id": snapshot.ReferenceID,
"res_id": snapshot.ResID,
"snap_set_guid": snapshot.SnapSetGUID,
"snap_set_time": snapshot.SnapSetTime,
@@ -151,3 +177,105 @@ func flattendDiskSnapshotList(sl disks.ListSnapshots) []interface{} {
return res
}
func flattenDiskListTypesDetailed(tld *disks.ListTypes) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, typeListDetailed := range tld.Data {
toMap := typeListDetailed.(map[string]interface{})
temp := map[string]interface{}{
"pools": flattenListTypesDetailedPools(toMap["pools"].([]interface{})),
"sep_id": toMap["sepId"].(float64),
"sep_name": toMap["sepName"].(string),
}
res = append(res, temp)
}
return res
}
func flattenListTypesDetailedPools(pools []interface{}) []interface{} {
res := make([]interface{}, 0)
for _, pool := range pools {
toMap := pool.(map[string]interface{})
temp := map[string]interface{}{
"name": toMap["name"].(string),
"system": toMap["system"].(string),
"types": toMap["types"].([]interface{}),
}
res = append(res, temp)
}
return res
}
func flattenDiskListUnattached(ul *disks.ListUnattachedDisks) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, unattachedDisk := range ul.Data {
unattachedDiskAcl, _ := json.Marshal(unattachedDisk.ACL)
tmp := map[string]interface{}{
"_ckey": unattachedDisk.CKey,
"_meta": flattens.FlattenMeta(unattachedDisk.Meta),
"account_id": unattachedDisk.AccountID,
"account_name": unattachedDisk.AccountName,
"acl": string(unattachedDiskAcl),
"boot_partition": unattachedDisk.BootPartition,
"created_time": unattachedDisk.CreatedTime,
"deleted_time": unattachedDisk.DeletedTime,
"desc": unattachedDisk.Description,
"destruction_time": unattachedDisk.DestructionTime,
"disk_path": unattachedDisk.DiskPath,
"gid": unattachedDisk.GID,
"guid": unattachedDisk.GUID,
"disk_id": unattachedDisk.ID,
"image_id": unattachedDisk.ImageID,
"images": unattachedDisk.Images,
"iotune": flattenIOTune(unattachedDisk.IOTune),
"iqn": unattachedDisk.IQN,
"login": unattachedDisk.Login,
"milestones": unattachedDisk.Milestones,
"disk_name": unattachedDisk.Name,
"order": unattachedDisk.Order,
"params": unattachedDisk.Params,
"parent_id": unattachedDisk.ParentID,
"passwd": unattachedDisk.Password,
"pci_slot": unattachedDisk.PCISlot,
"pool": unattachedDisk.Pool,
"present_to": unattachedDisk.PresentTo,
"purge_attempts": unattachedDisk.PurgeAttempts,
"purge_time": unattachedDisk.PurgeTime,
"reality_device_number": unattachedDisk.RealityDeviceNumber,
"reference_id": unattachedDisk.ReferenceID,
"res_id": unattachedDisk.ResID,
"res_name": unattachedDisk.ResName,
"role": unattachedDisk.Role,
"sep_id": unattachedDisk.SEPID,
"shareable": unattachedDisk.Shareable,
"size_max": unattachedDisk.SizeMax,
"size_used": unattachedDisk.SizeUsed,
"snapshots": flattenDiskSnapshotList(unattachedDisk.Snapshots),
"status": unattachedDisk.Status,
"tech_status": unattachedDisk.TechStatus,
"type": unattachedDisk.Type,
"vmid": unattachedDisk.VMID,
}
res = append(res, tmp)
}
return res
}
func flattenDiskSnapshotList(sl disks.ListSnapshots) []interface{} {
res := make([]interface{}, 0)
for _, snapshot := range sl {
temp := map[string]interface{}{
"guid": snapshot.GUID,
"label": snapshot.Label,
"reference_id": snapshot.ReferenceID,
"res_id": snapshot.ResID,
"snap_set_guid": snapshot.SnapSetGUID,
"snap_set_time": snapshot.SnapSetTime,
"timestamp": snapshot.Timestamp,
}
res = append(res, temp)
}
return res
}
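
flattenDiskListTypesDetailed and flattenListTypesDetailedPools rely on each entry of ListTypes.Data decoding to a generic map, which is why they type-assert field by field. An illustrative payload with hypothetical values, showing the shape those assertions expect:

// Hypothetical entry as it might appear in listTypesDetailed.Data; note
// that JSON numbers decode to float64, matching the sepId assertion above.
entry := map[string]interface{}{
	"sepId":   float64(2),
	"sepName": "sep-des-01",
	"pools": []interface{}{
		map[string]interface{}{
			"name":   "data01",
			"system": "False",
			"types":  []interface{}{"B", "D"},
		},
	},
}
// flattenDiskListTypesDetailed turns each such entry into one row with the
// keys "pools", "sep_id" and "sep_name".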

View File

@@ -0,0 +1,55 @@
package disks
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
accountID := uint64(d.Get("account_id").(int))
accountList, err := c.CloudBroker().Account().List(ctx, account.ListRequest{})
if err != nil {
return err
}
if len(accountList.FilterByID(accountID).Data) == 0 {
return fmt.Errorf("resourceDiskCreate: can't create/update Disk because AccountID %d is not allowed or does not exist", accountID)
}
return nil
}
func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
gid := uint64(d.Get("gid").(int))
gidList, err := c.CloudBroker().Grid().List(ctx, grid.ListRequest{})
if err != nil {
return err
}
for _, elem := range gidList.Data {
if elem.GID == gid {
return nil
}
}
return fmt.Errorf("resourceDiskCreate: can't create/update Disk because GID %d is not allowed or does not exist", gid)
}
func checkParamsExists(ctx context.Context, d *schema.ResourceData, m interface{}) error {
err := existAccountID(ctx, d, m)
if err != nil {
return err
}
return existGID(ctx, d, m)
}

View File

@@ -35,129 +35,233 @@ import (
"context"
"fmt"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus"
)
func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
err := checkParamsExists(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
req := disks.CreateRequest{
AccountID: uint64(d.Get("account_id").(int)),
GID: uint64(d.Get("gid").(int)),
Name: d.Get("disk_name").(string),
Size: uint64(d.Get("size_max").(int)),
Type: d.Get("type").(string),
}
if desc, ok := d.GetOk("desc"); ok {
req.Description = desc.(string)
}
if ssdSize, ok := d.GetOk("ssd_size"); ok {
req.SSDSize = uint64(ssdSize.(int))
}
if sepID, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepID.(int))
}
if pool, ok := d.GetOk("pool"); ok {
req.Pool = pool.(string)
}
diskID, err := c.CloudBroker().Disks().Create(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(diskID, 10))
w := dc.Warnings{}
if iotuneRaw, ok := d.GetOk("iotune"); ok {
iot := iotuneRaw.([]interface{})[0]
iotune := iot.(map[string]interface{})
req := disks.LimitIORequest{
DiskID: diskID,
IOPS: uint64(iotune["total_iops_sec"].(int)),
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
if err != nil {
w.Add(err)
}
}
if shareable := d.Get("shareable"); shareable.(bool) {
_, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{
DiskID: diskID,
})
if err != nil {
w.Add(err)
}
}
return append(w.Get(), resourceDiskRead(ctx, d, m)...)
}
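
resourceDiskCreate now treats the post-create steps (I/O limits, sharing) as non-fatal: errors are collected and surfaced as warnings while the final read still runs. A minimal sketch of that accumulator pattern, assuming dc.Warnings exposes the Add/Get pair used above (the real implementation lives in internal/dc):

// Sketch of a warning accumulator with the interface assumed above.
type warnings struct {
	diags diag.Diagnostics
}

// Add records a non-fatal error as a Terraform warning diagnostic.
func (w *warnings) Add(err error) {
	w.diags = append(w.diags, diag.Diagnostic{
		Severity: diag.Warning,
		Summary:  err.Error(),
	})
}

// Get returns everything collected so far.
func (w *warnings) Get() diag.Diagnostics {
	return w.diags
}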
func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
w := dc.Warnings{}
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
hasChangeState := false
switch disk.Status {
case status.Destroyed, status.Purged:
d.Set("disk_id", 0)
d.SetId("")
return resourceDiskCreate(ctx, d, m)
case status.Deleted:
hasChangeState = true
req := disks.RestoreRequest{
DiskID: disk.ID,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
if err != nil {
w.Add(err)
}
case status.Assigned:
case status.Modeled:
return diag.Errorf("The disk is in status: %s, please, contact support for more information", disk.Status)
case status.Creating:
case status.Created:
case status.Allocated:
case status.Unallocated:
}
if hasChangeState {
disk, err = utilityDiskCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
flattenDisk(d, disk)
return w.Get()
}
func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
diskID, _ := strconv.ParseUint(d.Id(), 10, 64)
w := dc.Warnings{}
err := checkParamsExists(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
hasChangeState := false
switch disk.Status {
case status.Destroyed, status.Purged:
d.Set("disk_id", 0)
d.SetId("")
return resourceDiskCreate(ctx, d, m)
case status.Deleted:
hasChangeState = true
req := disks.RestoreRequest{
DiskID: disk.ID,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
if err != nil {
return diag.FromErr(err)
}
case status.Assigned:
case status.Modeled:
return diag.Errorf("The disk is in status: %s, please, contact support for more information", disk.Status)
case status.Creating:
case status.Created:
case status.Allocated:
case status.Unallocated:
}
if hasChangeState {
disk, err = utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
}
if d.HasChange("size_max") {
oldSize, newSize := d.GetChange("size_max")
if oldSize.(int) < newSize.(int) {
log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB",
d.Id(), oldSize.(int), newSize.(int))
_, err := c.CloudBroker().Disks().Resize(ctx, disks.ResizeRequest{
DiskID: disk.ID,
Size: uint64(newSize.(int)),
})
if err != nil {
w.Add(err)
}
d.Set("size_max", newSize)
} else if oldSize.(int) > newSize.(int) {
return diag.FromErr(fmt.Errorf("resourceDiskUpdate: Disk ID %s - reducing disk size is not allowed", d.Id()))
}
}
if d.HasChange("disk_name") {
_, err := c.CloudBroker().Disks().Rename(ctx, disks.RenameRequest{
DiskID: disk.ID,
Name: d.Get("disk_name").(string),
})
if err != nil {
w.Add(err)
}
}
@@ -165,44 +269,49 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
iot := d.Get("iotune").([]interface{})[0]
iotune := iot.(map[string]interface{})
req := disks.LimitIORequest{
DiskID: diskID,
IOPS: uint64(iotune["total_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
if err != nil {
return diag.FromErr(err)
w.Add(err)
}
}
if d.HasChange("restore") {
if d.Get("restore").(bool) {
req := disks.RestoreRequest{
DiskID: diskID,
Reason: d.Get("reason").(string),
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
if d.HasChange("shareable") {
old, new := d.GetChange("shareable")
if !old.(bool) && new.(bool) && !disk.Shareable {
_, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{
DiskID: disk.ID,
})
if err != nil {
w.Add(err)
}
}
if oldShare.(bool) && !newShare.(bool) && disk.Shareable {
_, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{
DiskID: disk.ID,
})
if err != nil {
w.Add(err)
}
}
}
return append(w.Get(), resourceDiskRead(ctx, d, m)...)
}
func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@@ -210,6 +319,7 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@@ -225,6 +335,8 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
return diag.FromErr(err)
}
d.SetId("")
return nil
}
@@ -233,41 +345,46 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
"account_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"gid": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"disk_name": {
Type: schema.TypeString,
Required: true,
},
"size_max": {
Type: schema.TypeInt,
Required: true,
},
"gid": {
Type: schema.TypeInt,
Required: true,
},
"pool": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false),
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"type": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false),
"size_max": {
Type: schema.TypeInt,
Required: true,
},
"ssd_size": {
Type: schema.TypeInt,
Optional: true,
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"detach": {
Type: schema.TypeBool,
Optional: true,
@@ -286,17 +403,17 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Default: "",
Description: "reason for an action",
},
"shareable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"restore": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "restore deleting disk",
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
@@ -309,6 +426,22 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
@@ -333,6 +466,10 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
@@ -452,7 +589,13 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeInt,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
@@ -481,7 +624,6 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
@@ -503,6 +645,10 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,

View File

@@ -0,0 +1,215 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
if rollback := d.Get("rollback").(bool); rollback {
req := disks.SnapshotRollbackRequest{
DiskID: disk.ID,
Label: label,
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskCreate: Snapshot rollback with label", label)
_, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
return resourceDiskSnapshotRead(ctx, d, m)
}
func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
flattenDiskSnapshot(d, snapshot)
return nil
}
func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
if d.HasChange("rollback") && d.Get("rollback").(bool) == true {
req := disks.SnapshotRollbackRequest{
DiskID: disk.ID,
Label: label,
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskUpdtae: Snapshot rollback with label", label)
_, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
return resourceDiskSnapshotRead(ctx, d, m)
}
func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
req := disks.SnapshotDeleteRequest{
DiskID: disk.ID,
Label: d.Get("label").(string),
}
_, err = c.CloudBroker().Disks().SnapshotDelete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func ResourceDiskSnapshot() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceDiskSnapshotCreate,
ReadContext: resourceDiskSnapshotRead,
UpdateContext: resourceDiskSnapshotUpdate,
DeleteContext: resourceDiskSnapshotDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout600s,
Read: &constants.Timeout300s,
Update: &constants.Timeout300s,
Delete: &constants.Timeout300s,
Default: &constants.Timeout300s,
},
Schema: resourceDiskSnapshotSchemaMake(),
}
}
func resourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Name of the snapshot",
},
"rollback": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Needed in order to make a snapshot rollback",
},
"timestamp": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Snapshot time",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
}
}
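
The create, read, and update paths above repeat the same lookup of a snapshot by its label. A hypothetical helper that would consolidate it (not part of this commit):

// Hypothetical consolidation of the repeated label lookup.
func findSnapshotByLabel(snapshots disks.ListSnapshots, label string) (disks.ItemSnapshot, bool) {
	for _, sn := range snapshots {
		if sn.Label == label {
			return sn, true
		}
	}
	return disks.ItemSnapshot{}, false
}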

View File

@@ -46,9 +46,9 @@ func utilityDiskCheckPresence(ctx context.Context, d *schema.ResourceData, m int
c := m.(*controller.ControllerCfg)
req := disks.GetRequest{}
if d.Get("disk_id").(int) == 0 {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.DiskID = id
if d.Id() != "" {
diskID, _ := strconv.ParseUint(d.Id(), 10, 64)
req.DiskID = diskID
} else {
req.DiskID = uint64(d.Get("disk_id").(int))
}

View File

@@ -46,18 +46,42 @@ func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m
c := m.(*controller.ControllerCfg)
req := disks.ListRequest{}
if by_id, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(by_id.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if account_name, ok := d.GetOk("account_name"); ok {
req.AccountName = account_name.(string)
}
if disk_max_size, ok := d.GetOk("disk_max_size"); ok {
req.DiskMaxSize = int64(disk_max_size.(int))
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if shared, ok := d.GetOk("shared"); ok {
req.Shared = shared.(bool)
}
if accountId, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(accountId.(int))
}
if diskType, ok := d.GetOk("type"); ok {
req.Type = strings.ToUpper(diskType.(string))
}
if pool, ok := d.GetOk("pool"); ok {
req.Pool = pool.(string)
}
if sepID, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepID.(int))
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if diskType, ok := d.GetOk("type"); ok {
req.Type = strings.ToUpper(diskType.(string))
}
if accountId, ok := d.GetOk("accountId"); ok {
req.AccountID = uint64(accountId.(int))
}
log.Debugf("utilityDiskListCheckPresence: load disk list")
diskList, err := c.CloudBroker().Disks().List(ctx, req)

View File

@@ -0,0 +1,85 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Tim Tkachev, <tvtkachev@basistech.ru>
Nikita Sorokin, <nesorokin@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityDiskListDeletedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListDisks, error) {
c := m.(*controller.ControllerCfg)
req := disks.ListDeletedRequest{}
if by_id, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(by_id.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if account_name, ok := d.GetOk("account_name"); ok {
req.AccountName = account_name.(string)
}
if disk_max_size, ok := d.GetOk("disk_max_size"); ok {
req.DiskMaxSize = int64(disk_max_size.(int))
}
if shared, ok := d.GetOk("shared"); ok {
req.Shared = shared.(bool)
}
if account_id, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(account_id.(int))
}
if typev, ok := d.GetOk("type"); ok {
req.Type = typev.(string)
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDiskListDeletedCheckPresence: load disk list")
diskList, err := c.CloudBroker().Disks().ListDeleted(ctx, req)
if err != nil {
return nil, err
}
return diskList, nil
}

View File

@@ -0,0 +1,25 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListTypes, error) {
c := m.(*controller.ControllerCfg)
req := disks.ListTypesRequest{
Detailed: false,
}
log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed")
typesList, err := c.CloudBroker().Disks().ListTypes(ctx, req)
if err != nil {
return nil, err
}
return typesList, nil
}

View File

@@ -0,0 +1,27 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListTypes, error) {
c := m.(*controller.ControllerCfg)
log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed")
listTypesDetailed, err := c.CloudBroker().Disks().ListTypes(ctx, disks.ListTypesRequest{
Detailed: true,
})
log.Debugf("%+v", listTypesDetailed.Data[0].(map[string]interface{}))
if err != nil {
return nil, err
}
return listTypesDetailed, nil
}

View File

@@ -0,0 +1,54 @@
package disks
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListUnattachedDisks, error) {
c := m.(*controller.ControllerCfg)
req := disks.ListUnattachedRequest{}
if by_id, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(by_id.(int))
}
if account_name, ok := d.GetOk("account_name"); ok {
req.AccountName = account_name.(string)
}
if disk_max_size, ok := d.GetOk("disk_max_size"); ok {
req.DiskMaxSize = int64(disk_max_size.(int))
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if diskType, ok := d.GetOk("type"); ok {
req.Type = diskType.(string)
}
if accountId, ok := d.GetOk("accountId"); ok {
req.AccountID = uint64(accountId.(int))
}
if sepId, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepId.(int))
}
if pool, ok := d.GetOk("pool"); ok {
req.Pool = pool.(string)
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDiskListUnattachedCheckPresence: load disk Unattached list")
unattachedList, err := c.CloudBroker().Disks().ListUnattached(ctx, req)
if err != nil {
return nil, err
}
return unattachedList, nil
}