From 31be0a0b5467ecb9ed3a4200b2129581af380f8d Mon Sep 17 00:00:00 2001 From: stSolo Date: Mon, 31 Oct 2022 14:06:22 +0300 Subject: [PATCH] v3.2.0 --- CHANGELOG.md | 37 +- Dockerfile | 10 + Makefile | 10 +- entrypoint.sh | 4 + internal/constants/timeouts.go | 2 + .../provider/cloudapi/data_sources_map.go | 6 + internal/provider/cloudapi/resource_map.go | 1 + .../cloudapi/account/resource_account.go | 10 +- .../cloudapi/bservice/resource_bservice.go | 10 +- .../bservice/resource_bservice_group.go | 10 +- internal/service/cloudapi/disks/api.go | 25 +- .../cloudapi/disks/data_source_disk.go | 299 ++++++---- .../cloudapi/disks/data_source_disk_list.go | 313 ++++++---- .../disks/data_source_disk_list_types.go | 82 +++ .../data_source_disk_list_types_detailed.go | 133 +++++ .../disks/data_source_disk_list_unattached.go | 485 +++++++++++++++ .../disks/data_source_disk_snapshot.go | 129 ++++ .../disks/data_source_disk_snapshot_list.go | 121 ++++ .../disks/data_source_list_deleted.go | 69 +++ internal/service/cloudapi/disks/models.go | 64 ++ .../service/cloudapi/disks/resource_disk.go | 420 +++++++------ .../cloudapi/disks/resource_disk_snapshot.go | 246 ++++++++ .../service/cloudapi/disks/utility_disk.go | 1 + .../cloudapi/disks/utility_disk_list.go | 5 +- .../disks/utility_disk_types_detailed_list.go | 62 ++ .../cloudapi/disks/utility_disk_types_list.go | 62 ++ .../service/cloudapi/image/resource_image.go | 10 +- .../cloudapi/image/resource_image_virtual.go | 10 +- internal/service/cloudapi/k8s/resource_k8s.go | 10 +- .../service/cloudapi/k8s/resource_k8s_wg.go | 10 +- internal/service/cloudapi/kvmvm/api.go | 33 +- .../cloudapi/kvmvm/data_source_compute.go | 48 +- internal/service/cloudapi/kvmvm/models.go | 1 + .../cloudapi/kvmvm/network_subresource.go | 1 + .../cloudapi/kvmvm/osusers_subresource.go | 1 + .../cloudapi/kvmvm/resource_compute.go | 561 ++++++++++++------ .../service/cloudapi/kvmvm/utility_compute.go | 32 +- internal/service/cloudapi/lb/resource_lb.go | 10 +- 
.../cloudapi/lb/resource_lb_backend.go | 10 +- .../cloudapi/lb/resource_lb_backend_server.go | 10 +- .../cloudapi/lb/resource_lb_frontend.go | 10 +- .../cloudapi/lb/resource_lb_frontend_bind.go | 10 +- internal/service/cloudapi/pfw/resource_pfw.go | 10 +- internal/service/cloudapi/rg/resource_rg.go | 10 +- .../cloudapi/snapshot/resource_snapshot.go | 10 +- internal/service/cloudapi/vins/api.go | 1 + .../cloudapi/vins/data_source_vins_list.go | 1 + .../service/cloudapi/vins/resource_vins.go | 10 +- internal/status/status.go | 32 + provider.tf | 9 + samples/README.md | 7 + .../cloudapi/data_disk_list_deleted/main.tf | 54 ++ samples/cloudapi/data_disk_list_types/main.tf | 39 ++ .../data_disk_list_types_detailed/main.tf | 52 ++ .../data_disk_list_unattached/main.tf | 39 ++ samples/cloudapi/data_disk_snapshot/main.tf | 44 ++ .../cloudapi/data_disk_snapshot_list/main.tf | 39 ++ .../cloudapi/resource_disk_snapshot/main.tf | 47 ++ samples/cloudapi/resource_kvmvm/main.tf | 44 ++ 59 files changed, 3121 insertions(+), 710 deletions(-) create mode 100644 Dockerfile create mode 100644 entrypoint.sh create mode 100644 internal/service/cloudapi/disks/data_source_disk_list_types.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_list_types_detailed.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_list_unattached.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_snapshot_list.go create mode 100644 internal/service/cloudapi/disks/data_source_list_deleted.go create mode 100644 internal/service/cloudapi/disks/resource_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/utility_disk_types_detailed_list.go create mode 100644 internal/service/cloudapi/disks/utility_disk_types_list.go create mode 100644 internal/status/status.go create mode 100644 provider.tf create mode 100644 samples/cloudapi/data_disk_list_deleted/main.tf create 
mode 100644 samples/cloudapi/data_disk_list_types/main.tf create mode 100644 samples/cloudapi/data_disk_list_types_detailed/main.tf create mode 100644 samples/cloudapi/data_disk_list_unattached/main.tf create mode 100644 samples/cloudapi/data_disk_snapshot/main.tf create mode 100644 samples/cloudapi/data_disk_snapshot_list/main.tf create mode 100644 samples/cloudapi/resource_disk_snapshot/main.tf diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a0c34c..2dba97b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,36 @@ -### Bug fixes +### New data sources -- error naming lb resources +- decort_disk_snapshot_list +- decort_disk_snapshot +- decort_disk_list_deleted +- decort_disk_list_unattached +- decort_disk_list_types +- decort_disk_list_types_detailed + +### New resources + +- decort_disk_snapshot + +### New features + +- add dockerfile for creating an image for the tf provider +- change behaviour of disks: check the disk status during update of the tf state +- add disk block to kvmvm resource + +### New articles on wiki + +- [Сборка terraform провайдера в образ](https://github.com/rudecs/terraform-provider-decort/wiki/04.05-Сборка-terraform-провайдера-в-образ) +- [Массовое создание ресурсов. 
Мета аргументы](https://github.com/rudecs/terraform-provider-decort/wiki/05.04-Массовое-создание-ресурсов.-Мета-аргументы) +- [Удаление ресурсов](https://github.com/rudecs/terraform-provider-decort/wiki/05.05-Удаление-ресурсов) +- [Управление снимком диска](https://github.com/rudecs/terraform-provider-decort/wiki/07.01.19-Resource-функция-decort_disk_snapshot-управление-снимком-диска) +- [Получение списка типов для диска](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.39-Data-функция-decort_disk_list_types-получение-списка-типов-диска) +- [Расширенное получение списка поддерживаемых типов](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.40-Data-функция-decort_disk_list_types_detailed-расширенное-получение-информации-о-поддерживаемых-типах-дисков) +- [Получение информации об удаленных дисках](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.41-Data-функция-decort_disk_list_deleted-получение-информации-об-удаленных-дисках) +- [Получение информации о неподключенных дисках](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.42-Data-функция-decort_disk_list_unattached-получение-информации-о-неподключенных-дисках) +- [Получение списка снимков состояния диска](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.43-Data-функция-decort_disk_snapshot_list-получение-списка-снимков-состояния-диска) +- [Получение информации о снимке состояния диска](https://github.com/rudecs/terraform-provider-decort/wiki/06.01.44-Data-функция-decort_disk_snapshot-получение-информации-о-снимке-состояния) + +### Update articles + +- [Управление дисковыми ресурсами.](https://github.com/rudecs/terraform-provider-decort/wiki/07.01.03-Resource-функция-decort_disk-управление-дисковыми-ресурсами) +- [Управление виртуальными серверами, создаваемыми на базе системы виртуализации KVM](https://github.com/rudecs/terraform-provider-decort/wiki/07.01.01-Resource-функция-decort_kvmvm-управление-виртуальными-машинами-на-базе-KVM) diff --git 
a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..19231c6 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,10 @@ +FROM docker.io/hashicorp/terraform:latest + +WORKDIR /opt/decort/tf/ +COPY provider.tf ./ +COPY terraform-provider-decort ./terraform.d/plugins/digitalenergy.online/decort/decort/3.1.1/linux_amd64/ +RUN terraform init + +WORKDIR /tf +COPY entrypoint.sh / +ENTRYPOINT ["/entrypoint.sh", "/bin/terraform"] diff --git a/Makefile b/Makefile index f445734..e067b0b 100644 --- a/Makefile +++ b/Makefile @@ -6,12 +6,18 @@ NAME=terraform-provider-decort BINARY=${NAME}.exe WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH} MAINPATH = ./cmd/decort/ -VERSION=1.1 +VERSION=3.1.1 #OS_ARCH=darwin_amd64 -OS_ARCH=windows_amd64 +#OS_ARCH=windows_amd64 +#OS_ARCH=linux_amd64 default: install +image: + GOOS=linux GOARCH=amd64 go build -o terraform-provider-decort ./cmd/decort/ + docker build . -t rudecs/tf:3.1.1 + rm terraform-provider-decort + lint: golangci-lint run --timeout 600s diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100644 index 0000000..cb01e56 --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +cp -aL /opt/decort/tf/* /opt/decort/tf/.* ./ +exec "$@" diff --git a/internal/constants/timeouts.go b/internal/constants/timeouts.go index 71cc7d1..b8faaf9 100644 --- a/internal/constants/timeouts.go +++ b/internal/constants/timeouts.go @@ -25,4 +25,6 @@ import "time" var Timeout30s = time.Second * 30 var Timeout60s = time.Second * 60 var Timeout180s = time.Second * 180 +var Timeout300s = time.Second * 300 +var Timeout600s = time.Second * 600 var Timeout20m = time.Minute * 20 diff --git a/internal/provider/cloudapi/data_sources_map.go b/internal/provider/cloudapi/data_sources_map.go index 2b721c9..7b89c1a 100644 --- a/internal/provider/cloudapi/data_sources_map.go +++ b/internal/provider/cloudapi/data_sources_map.go @@ -44,6 +44,12 @@ func NewDataSourcesMap() map[string]*schema.Resource { 
"decort_disk": disks.DataSourceDisk(), "decort_disk_list": disks.DataSourceDiskList(), "decort_rg_list": rg.DataSourceRgList(), + "decort_disk_list_types_detailed": disks.DataSourceDiskListTypesDetailed(), + "decort_disk_list_types": disks.DataSourceDiskListTypes(), + "decort_disk_list_deleted": disks.DataSourceDiskListDeleted(), + "decort_disk_list_unattached": disks.DataSourceDiskListUnattached(), + "decort_disk_snapshot": disks.DataSourceDiskSnapshot(), + "decort_disk_snapshot_list": disks.DataSourceDiskSnapshotList(), "decort_account_list": account.DataSourceAccountList(), "decort_account_computes_list": account.DataSourceAccountComputesList(), "decort_account_disks_list": account.DataSourceAccountDisksList(), diff --git a/internal/provider/cloudapi/resource_map.go b/internal/provider/cloudapi/resource_map.go index d750a57..62bbbbf 100644 --- a/internal/provider/cloudapi/resource_map.go +++ b/internal/provider/cloudapi/resource_map.go @@ -39,6 +39,7 @@ func NewRersourcesMap() map[string]*schema.Resource { "decort_resgroup": rg.ResourceResgroup(), "decort_kvmvm": kvmvm.ResourceCompute(), "decort_disk": disks.ResourceDisk(), + "decort_disk_snapshot": disks.ResourceDiskSnapshot(), "decort_vins": vins.ResourceVins(), "decort_pfw": pfw.ResourcePfw(), "decort_k8s": k8s.ResourceK8s(), diff --git a/internal/service/cloudapi/account/resource_account.go b/internal/service/cloudapi/account/resource_account.go index 8149058..7e79c4f 100644 --- a/internal/service/cloudapi/account/resource_account.go +++ b/internal/service/cloudapi/account/resource_account.go @@ -790,11 +790,11 @@ func ResourceAccount() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: 
&constants.Timeout300s, }, Schema: resourceAccountSchemaMake(), diff --git a/internal/service/cloudapi/bservice/resource_bservice.go b/internal/service/cloudapi/bservice/resource_bservice.go index e97e833..f01fdd5 100644 --- a/internal/service/cloudapi/bservice/resource_bservice.go +++ b/internal/service/cloudapi/bservice/resource_bservice.go @@ -515,11 +515,11 @@ func ResourceBasicService() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: resourceBasicServiceSchemaMake(), diff --git a/internal/service/cloudapi/bservice/resource_bservice_group.go b/internal/service/cloudapi/bservice/resource_bservice_group.go index 56cb12d..584645f 100644 --- a/internal/service/cloudapi/bservice/resource_bservice_group.go +++ b/internal/service/cloudapi/bservice/resource_bservice_group.go @@ -620,11 +620,11 @@ func ResourceBasicServiceGroup() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: resourceBasicServiceGroupSchemaMake(), diff --git a/internal/service/cloudapi/disks/api.go b/internal/service/cloudapi/disks/api.go index 9cc2c71..63d6897 100644 --- a/internal/service/cloudapi/disks/api.go +++ b/internal/service/cloudapi/disks/api.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -31,11 +32,19 @@ Documentation: https://github.com/rudecs/terraform-provider-decort/wiki package disks -const disksCreateAPI = "/restmachine/cloudapi/disks/create" -const disksGetAPI = "/restmachine/cloudapi/disks/get" -const disksListAPI = "/restmachine/cloudapi/disks/list" -const disksResizeAPI = "/restmachine/cloudapi/disks/resize2" -const disksRenameAPI = "/restmachine/cloudapi/disks/rename" -const disksDeleteAPI = "/restmachine/cloudapi/disks/delete" -const disksIOLimitAPI = "/restmachine/cloudapi/disks/limitIO" -const disksRestoreAPI = "/restmachine/cloudapi/disks/restore" +const ( + disksCreateAPI = "/restmachine/cloudapi/disks/create" + disksGetAPI = "/restmachine/cloudapi/disks/get" + disksListAPI = "/restmachine/cloudapi/disks/list" + disksResizeAPI = "/restmachine/cloudapi/disks/resize2" + disksRenameAPI = "/restmachine/cloudapi/disks/rename" + disksDeleteAPI = "/restmachine/cloudapi/disks/delete" + disksIOLimitAPI = "/restmachine/cloudapi/disks/limitIO" + disksRestoreAPI = "/restmachine/cloudapi/disks/restore" + disksListTypesAPI = "/restmachine/cloudapi/disks/listTypes" + disksListDeletedAPI = "/restmachine/cloudapi/disks/listDeleted" + disksListUnattachedAPI = "/restmachine/cloudapi/disks/listUnattached" + + disksSnapshotDeleteAPI = "/restmachine/cloudapi/disks/snapshotDelete" + disksSnapshotRollbackAPI = "/restmachine/cloudapi/disks/snapshotRollback" +) diff --git a/internal/service/cloudapi/disks/data_source_disk.go b/internal/service/cloudapi/disks/data_source_disk.go index f5e1982..838a0ed 100644 --- a/internal/service/cloudapi/disks/data_source_disk.go +++ b/internal/service/cloudapi/disks/data_source_disk.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -94,7 +95,7 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface d.Set("sep_type", disk.SepType) d.Set("size_max", disk.SizeMax) d.Set("size_used", disk.SizeUsed) - d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots)) + d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots)) d.Set("status", disk.Status) d.Set("tech_status", disk.TechStatus) d.Set("type", disk.Type) @@ -106,68 +107,83 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface func dataSourceDiskSchemaMake() map[string]*schema.Schema { rets := map[string]*schema.Schema{ "disk_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "The unique ID of the subscriber-owner of the disk", }, "account_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "The unique ID of the subscriber-owner of the disk", }, "account_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The name of the subscriber '(account') to whom this disk belongs", }, "acl": { Type: schema.TypeString, Computed: true, }, "boot_partition": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of disk partitions", }, "compute_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Compute ID", }, "compute_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Compute name", }, "created_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Created time", }, "deleted_time": { - Type: schema.TypeInt, - Computed: 
true, + Type: schema.TypeInt, + Computed: true, + Description: "Deleted time", }, "desc": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Description of disk", }, "destruction_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Time of final deletion", }, "devicename": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of the device", }, "disk_path": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk path", }, "gid": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "ID of the grid (platform)", }, "guid": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Disk ID on the storage side", }, "image_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Image ID", }, "images": { Type: schema.TypeList, @@ -175,6 +191,7 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "IDs of images using the disk", }, "iotune": { Type: schema.TypeList, @@ -182,143 +199,177 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "read_bytes_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of bytes to read per second", }, "read_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of bytes to read", }, "read_iops_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of io read operations per second", }, "read_iops_sec_max": { - Type: schema.TypeInt, - 
Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of io read operations", }, "size_iops_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Size of io operations", }, "total_bytes_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Total size bytes per second", }, "total_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum total size of bytes per second", }, "total_iops_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Total number of io operations per second", }, "total_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum total number of io operations per second", }, "write_bytes_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of bytes to write per second", }, "write_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of bytes to write per second", }, "write_iops_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of write operations per second", }, "write_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of write operations per second", }, }, }, }, "iqn": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk IQN", }, "login": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Login to access the disk", }, "milestones": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + 
Description: "Milestones", }, "disk_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of disk", }, "order": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Disk order", }, "params": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk params", }, "parent_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "ID of the parent disk", }, "passwd": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Password to access the disk", }, "pci_slot": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "ID of the pci slot to which the disk is connected", }, "pool": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Pool for disk location", }, "purge_attempts": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of deletion attempts", }, "purge_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Time of the last deletion attempt", }, "reality_device_number": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Reality device number", }, "reference_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the reference to the disk", }, "res_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Resource ID", }, "res_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of the resource", }, "role": { - Type: schema.TypeString, - Computed: true, + 
Type: schema.TypeString, + Computed: true, + Description: "Disk role", }, "sep_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Storage endpoint provider ID to create disk", }, "sep_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform", }, "size_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Size in GB", }, "size_used": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of used space, in GB", }, "snapshots": { Type: schema.TypeList, @@ -326,47 +377,57 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "guid": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", }, "label": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of the snapshot", }, "res_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", }, "snap_set_guid": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", }, "snap_set_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "The set time of the snapshot", }, "timestamp": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Snapshot time", }, }, }, }, "status": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk status", }, "tech_status": { - Type: schema.TypeString, - Computed: true, + 
Type: schema.TypeString, + Computed: true, + Description: "Technical status of the disk", }, "type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", }, "vmid": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Virtual Machine ID (Deprecated)", }, } diff --git a/internal/service/cloudapi/disks/data_source_disk_list.go b/internal/service/cloudapi/disks/data_source_disk_list.go index efe16b6..7c43d8a 100644 --- a/internal/service/cloudapi/disks/data_source_disk_list.go +++ b/internal/service/cloudapi/disks/data_source_disk_list.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -109,7 +110,7 @@ func flattenDiskList(dl DisksList) []map[string]interface{} { "sep_type": disk.SepType, "size_max": disk.SizeMax, "size_used": disk.SizeUsed, - "snapshots": flattendDiskSnapshotList(disk.Snapshots), + "snapshots": flattenDiskSnapshotList(disk.Snapshots), "status": disk.Status, "tech_status": disk.TechStatus, "type": disk.Type, @@ -121,7 +122,7 @@ func flattenDiskList(dl DisksList) []map[string]interface{} { } -func flattendDiskSnapshotList(sl SnapshotList) []interface{} { +func flattenDiskSnapshotList(sl SnapshotList) []interface{} { res := make([]interface{}, 0) for _, snapshot := range sl { temp := map[string]interface{}{ @@ -140,7 +141,7 @@ func flattendDiskSnapshotList(sl SnapshotList) []interface{} { } func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - diskList, err := utilityDiskListCheckPresence(ctx, d, m) + diskList, err := utilityDiskListCheckPresence(ctx, d, m, disksListAPI) if err != nil { return diag.FromErr(err) } @@ -180,68 +181,83 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "account_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "The unique ID of the subscriber-owner of the disk", }, "account_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The name of the subscriber '(account') to whom this disk belongs", }, "acl": { Type: schema.TypeString, Computed: true, }, "boot_partition": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of disk partitions", }, "compute_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Compute ID", }, "compute_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Compute 
name", }, "created_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Created time", }, "deleted_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Deleted time", }, "desc": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Description of disk", }, "destruction_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Time of final deletion", }, "devicename": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of the device", }, "disk_path": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk path", }, "gid": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "ID of the grid (platform)", }, "guid": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Disk ID on the storage side", }, "disk_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "The unique ID of the subscriber-owner of the disk", }, "image_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Image ID", }, "images": { Type: schema.TypeList, @@ -249,6 +265,7 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "IDs of images using the disk", }, "iotune": { Type: schema.TypeList, @@ -256,151 +273,187 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "read_bytes_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of bytes to read per second", }, 
"read_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of bytes to read", }, "read_iops_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of io read operations per second", }, "read_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of io read operations", }, "size_iops_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Size of io operations", }, "total_bytes_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Total size bytes per second", }, "total_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum total size of bytes per second", }, "total_iops_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Total number of io operations per second", }, "total_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum total number of io operations per second", }, "write_bytes_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of bytes to write per second", }, "write_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of bytes to write per second", }, "write_iops_sec": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of write operations per second", }, "write_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of write operations per second", }, }, }, }, "iqn": { - 
Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk IQN", }, "login": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Login to access the disk", }, "machine_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Machine ID", }, "machine_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Machine name", }, "milestones": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Milestones", }, "disk_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of disk", }, "order": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Disk order", }, "params": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk params", }, "parent_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "ID of the parent disk", }, "passwd": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Password to access the disk", }, "pci_slot": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "ID of the pci slot to which the disk is connected", }, "pool": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Pool for disk location", }, "purge_attempts": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of deletion attempts", }, "purge_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Time of the last deletion attempt", }, 
"reality_device_number": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Reality device number", }, "reference_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the reference to the disk", }, "res_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Resource ID", }, "res_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of the resource", }, "role": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk role", }, "sep_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Storage endpoint provider ID to create disk", }, "sep_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Type SEP. 
Defines the type of storage system and contains one of the values set in the cloud platform", }, "size_max": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Size in GB", }, "size_used": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of used space, in GB", }, "snapshots": { Type: schema.TypeList, @@ -408,47 +461,57 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "guid": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", }, "label": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of the snapshot", }, "res_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", }, "snap_set_guid": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", }, "snap_set_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "The set time of the snapshot", }, "timestamp": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Snapshot time", }, }, }, }, "status": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk status", }, "tech_status": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Technical status of the disk", }, "type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", }, "vmid": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, 
+ Computed: true, + Description: "Virtual Machine ID (Deprecated)", }, }, }, diff --git a/internal/service/cloudapi/disks/data_source_disk_list_types.go b/internal/service/cloudapi/disks/data_source_disk_list_types.go new file mode 100644 index 0000000..b796961 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_list_types.go @@ -0,0 +1,82 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://github.com/rudecs/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://github.com/rudecs/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("types", listTypes) + return nil +} + +func dataSourceDiskListTypesSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", + }, + } + return res +} + +func DataSourceDiskListTypes() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + ReadContext: dataSourceDiskListTypesRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskListTypesSchemaMake(), + } +} diff --git a/internal/service/cloudapi/disks/data_source_disk_list_types_detailed.go b/internal/service/cloudapi/disks/data_source_disk_list_types_detailed.go new file mode 100644 index 0000000..debd776 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_list_types_detailed.go @@ -0,0 +1,133 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://github.com/rudecs/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://github.com/rudecs/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func flattenDiskListTypesDetailed(tld TypesDetailedList) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, typeListDetailed := range tld { + temp := map[string]interface{}{ + "pools": flattenListTypesDetailedPools(typeListDetailed.Pools), + "sep_id": typeListDetailed.SepID, + } + res = append(res, temp) + } + return res +} + +func flattenListTypesDetailedPools(pools PoolList) []interface{} { + res := make([]interface{}, 0) + for _, pool := range pools { + temp := map[string]interface{}{ + "name": pool.Name, + "types": pool.Types, + } + res = append(res, temp) + } + + return res +} + +func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", 
flattenDiskListTypesDetailed(listTypesDetailed)) + return nil +} + +func dataSourceDiskListTypesDetailedSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pools": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Pool name", + }, + "types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", + }, + }, + }, + }, + "sep_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Storage endpoint provider ID to create disk", + }, + }, + }, + }, + } + return res +} + +func DataSourceDiskListTypesDetailed() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + ReadContext: dataSourceDiskListTypesDetailedRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskListTypesDetailedSchemaMake(), + } +} diff --git a/internal/service/cloudapi/disks/data_source_disk_list_unattached.go b/internal/service/cloudapi/disks/data_source_disk_list_unattached.go new file mode 100644 index 0000000..13ea8a1 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_list_unattached.go @@ -0,0 +1,485 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://github.com/rudecs/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://github.com/rudecs/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" + "github.com/rudecs/terraform-provider-decort/internal/controller" + "github.com/rudecs/terraform-provider-decort/internal/flattens" + log "github.com/sirupsen/logrus" +) + +func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (UnattachedList, error) { + unattachedList := UnattachedList{} + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + if accountId, ok := d.GetOk("account_id"); ok { + urlValues.Add("accountId", strconv.Itoa(accountId.(int))) + } + + log.Debugf("utilityDiskListUnattachedCheckPresence: load disk Unattached list") + unattachedListRaw, err := c.DecortAPICall(ctx, "POST", disksListUnattachedAPI, urlValues) + if err != nil { + return nil, err + } + err = json.Unmarshal([]byte(unattachedListRaw), &unattachedList) + if err != nil { + return nil, err + } + return unattachedList, nil +} + +func
flattenDiskListUnattached(ul UnattachedList) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, unattachedDisk := range ul { + unattachedDiskAcl, _ := json.Marshal(unattachedDisk.Acl) + tmp := map[string]interface{}{ + "_ckey": unattachedDisk.Ckey, + "_meta": flattens.FlattenMeta(unattachedDisk.Meta), + "account_id": unattachedDisk.AccountID, + "account_name": unattachedDisk.AccountName, + "acl": string(unattachedDiskAcl), + "boot_partition": unattachedDisk.BootPartition, + "created_time": unattachedDisk.CreatedTime, + "deleted_time": unattachedDisk.DeletedTime, + "desc": unattachedDisk.Desc, + "destruction_time": unattachedDisk.DestructionTime, + "disk_path": unattachedDisk.DiskPath, + "gid": unattachedDisk.GridID, + "guid": unattachedDisk.GUID, + "disk_id": unattachedDisk.ID, + "image_id": unattachedDisk.ImageID, + "images": unattachedDisk.Images, + "iotune": flattenIOTune(unattachedDisk.IOTune), + "iqn": unattachedDisk.IQN, + "login": unattachedDisk.Login, + "milestones": unattachedDisk.Milestones, + "disk_name": unattachedDisk.Name, + "order": unattachedDisk.Order, + "params": unattachedDisk.Params, + "parent_id": unattachedDisk.ParentID, + "passwd": unattachedDisk.Passwd, + "pci_slot": unattachedDisk.PciSlot, + "pool": unattachedDisk.Pool, + "purge_attempts": unattachedDisk.PurgeAttempts, + "purge_time": unattachedDisk.PurgeTime, + "reality_device_number": unattachedDisk.RealityDeviceNumber, + "reference_id": unattachedDisk.ReferenceID, + "res_id": unattachedDisk.ResID, + "res_name": unattachedDisk.ResName, + "role": unattachedDisk.Role, + "sep_id": unattachedDisk.SepID, + "size_max": unattachedDisk.SizeMax, + "size_used": unattachedDisk.SizeUsed, + "snapshots": flattenDiskSnapshotList(unattachedDisk.Snapshots), + "status": unattachedDisk.Status, + "tech_status": unattachedDisk.TechStatus, + "type": unattachedDisk.Type, + "vmid": unattachedDisk.VMID, + } + res = append(res, tmp) + } + return res +} + +func 
dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenDiskListUnattached(diskListUnattached)) + + return nil +} + +func DataSourceDiskListUnattached() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceDiskListUnattachedRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskListUnattachedSchemaMake(), + } +} + +func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema { + res := map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeInt, + Optional: true, + Description: "ID of the account the disks belong to", + }, + + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "_ckey": { + Type: schema.TypeString, + Computed: true, + Description: "CKey", + }, + "_meta": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Meta parameters", + }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the account the disks belong to", + }, + "account_name": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the subscriber '(account') to whom this disk belongs", + }, + "acl": { + Type: schema.TypeString, + Computed: true, + }, + "boot_partition": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of disk partitions", + }, + "created_time": { + Type: schema.TypeInt, + Computed: true, + Description: "Created time", + }, + "deleted_time": { + Type: schema.TypeInt, + Computed: true, + Description: "Deleted time", + }, + "desc": { + Type: schema.TypeString, + Computed: true, + Description: 
"Description of disk", + }, + "destruction_time": { + Type: schema.TypeInt, + Computed: true, + Description: "Time of final deletion", + }, + "disk_path": { + Type: schema.TypeString, + Computed: true, + Description: "Disk path", + }, + "gid": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the grid (platform)", + }, + "guid": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk ID on the storage side", + }, + "disk_id": { + Type: schema.TypeInt, + Computed: true, + Description: "The unique ID of the subscriber-owner of the disk", + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Image ID", + }, + "images": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "IDs of images using the disk", + }, + "iotune": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "read_bytes_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of bytes to read per second", + }, + "read_bytes_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of bytes to read", + }, + "read_iops_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of io read operations per second", + }, + "read_iops_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of io read operations", + }, + "size_iops_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Size of io operations", + }, + "total_bytes_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Total size bytes per second", + }, + "total_bytes_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum total size of bytes per second", + }, + "total_iops_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Total number of io operations per second", + }, + "total_iops_sec_max": { + Type: schema.TypeInt, + Computed: true, + 
Description: "Maximum total number of io operations per second", + }, + "write_bytes_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of bytes to write per second", + }, + "write_bytes_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of bytes to write per second", + }, + "write_iops_sec": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of write operations per second", + }, + "write_iops_sec_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Maximum number of write operations per second", + }, + }, + }, + }, + "iqn": { + Type: schema.TypeString, + Computed: true, + Description: "Disk IQN", + }, + "login": { + Type: schema.TypeString, + Computed: true, + Description: "Login to access the disk", + }, + "milestones": { + Type: schema.TypeInt, + Computed: true, + Description: "Milestones", + }, + "disk_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of disk", + }, + "order": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk order", + }, + "params": { + Type: schema.TypeString, + Computed: true, + Description: "Disk params", + }, + "parent_id": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the parent disk", + }, + "passwd": { + Type: schema.TypeString, + Computed: true, + Description: "Password to access the disk", + }, + "pci_slot": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the pci slot to which the disk is connected", + }, + "pool": { + Type: schema.TypeString, + Computed: true, + Description: "Pool for disk location", + }, + "purge_attempts": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of deletion attempts", + }, + "purge_time": { + Type: schema.TypeInt, + Computed: true, + Description: "Time of the last deletion attempt", + }, + "reality_device_number": { + Type: schema.TypeInt, + Computed: true, + Description: "Reality device number", + }, + "reference_id": { + Type: 
schema.TypeString, + Computed: true, + Description: "ID of the reference to the disk", + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + Description: "Resource ID", + }, + "res_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the resource", + }, + "role": { + Type: schema.TypeString, + Computed: true, + Description: "Disk role", + }, + "sep_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Storage endpoint provider ID to create disk", + }, + "size_max": { + Type: schema.TypeInt, + Computed: true, + Description: "Size in GB", + }, + "size_used": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of used space, in GB", + }, + "snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", + }, + "label": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the snapshot", + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", + }, + "snap_set_guid": { + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", + }, + "snap_set_time": { + Type: schema.TypeInt, + Computed: true, + Description: "The set time of the snapshot", + }, + "timestamp": { + Type: schema.TypeInt, + Computed: true, + Description: "Snapshot time", + }, + }, + }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "Disk status", + }, + "tech_status": { + Type: schema.TypeString, + Computed: true, + Description: "Technical status of the disk", + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", + }, + "vmid": { + Type: schema.TypeInt, + Computed: true, + Description: "Virtual Machine ID (Deprecated)", + }, + }, + }, + }, + } + return res +} diff --git 
a/internal/service/cloudapi/disks/data_source_disk_snapshot.go b/internal/service/cloudapi/disks/data_source_disk_snapshot.go new file mode 100644 index 0000000..f95c099 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_snapshot.go @@ -0,0 +1,129 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://github.com/rudecs/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://github.com/rudecs/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + disk, err := utilityDiskCheckPresence(ctx, d, m) + if disk == nil { + if err != nil { + return diag.FromErr(err) + } + return nil + } + snapshots := disk.Snapshots + snapshot := Snapshot{} + label := d.Get("label").(string) + for _, sn := range snapshots { + if label == sn.Label { + snapshot = sn + break + } + } + if label != snapshot.Label { + return diag.Errorf("Snapshot with label \"%v\" not found", label) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("timestamp", snapshot.TimeStamp) + d.Set("guid", snapshot.Guid) + d.Set("res_id", snapshot.ResId) + d.Set("snap_set_guid", snapshot.SnapSetGuid) + d.Set("snap_set_time", snapshot.SnapSetTime) + return nil +} + +func DataSourceDiskSnapshot() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceDiskSnapshotRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskSnapshotSchemaMake(), + } +} + +func dataSourceDiskSnapshotSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "disk_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID of the disk", + }, + "label": { + Type: schema.TypeString, + Required: true, + Description: "Name of the snapshot", + }, + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", + }, + "timestamp": { + Type: schema.TypeInt, + Computed: true, + Description: "Snapshot time", + }, + "res_id": { + Type:
schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", + }, + "snap_set_guid": { + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", + }, + "snap_set_time": { + Type: schema.TypeInt, + Computed: true, + Description: "The set time of the snapshot", + }, + } + return rets +} diff --git a/internal/service/cloudapi/disks/data_source_disk_snapshot_list.go b/internal/service/cloudapi/disks/data_source_disk_snapshot_list.go new file mode 100644 index 0000000..9d555e7 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_snapshot_list.go @@ -0,0 +1,121 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://github.com/rudecs/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://github.com/rudecs/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + disk, err := utilityDiskCheckPresence(ctx, d, m) + if disk == nil { + if err != nil { + return diag.FromErr(err) + } + return nil + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenDiskSnapshotList(disk.Snapshots)) + return nil +} + +func DataSourceDiskSnapshotList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceDiskSnapshotListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskSnapshotListSchemaMake(), + } +} + +func dataSourceDiskSnapshotListSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "disk_id": { + Type: schema.TypeInt, + Required: true, + Description: "The unique ID of the subscriber-owner of the disk", + }, + "items": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "label": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the snapshot", + }, + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", + }, + "timestamp": { + Type: schema.TypeInt, + Computed: true, + Description: "Snapshot time", + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", + }, + "snap_set_guid": { + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", + }, + "snap_set_time": { + Type: schema.TypeInt, + Computed: true, + Description: "The set time of 
the snapshot", + }, + }, + }, + }, + } + return rets +} diff --git a/internal/service/cloudapi/disks/data_source_list_deleted.go b/internal/service/cloudapi/disks/data_source_list_deleted.go new file mode 100644 index 0000000..0599310 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_list_deleted.go @@ -0,0 +1,69 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://github.com/rudecs/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://github.com/rudecs/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" +) + +func dataSourceDiskListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + diskList, err := utilityDiskListCheckPresence(ctx, d, m, disksListDeletedAPI) + if err != nil { + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenDiskList(diskList)) + + return nil +} + +func DataSourceDiskListDeleted() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + ReadContext: dataSourceDiskListDeletedRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskListSchemaMake(), + } +} diff --git a/internal/service/cloudapi/disks/models.go b/internal/service/cloudapi/disks/models.go index b5298e0..6ae2f09 100644 --- a/internal/service/cloudapi/disks/models.go +++ b/internal/service/cloudapi/disks/models.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -109,3 +110,66 @@ type IOTune struct { WriteIopsSec int `json:"write_iops_sec"` WriteIopsSecMax int `json:"write_iops_sec_max"` } + +type Pool struct { + Name string `json:"name"` + Types []string `json:"types"` +} + +type PoolList []Pool + +type TypeDetailed struct { + Pools []Pool `json:"pools"` + SepID int `json:"sepId"` +} + +type TypesDetailedList []TypeDetailed + +type TypesList []string + +type Unattached struct { + Ckey string `json:"_ckey"` + Meta []interface{} `json:"_meta"` + AccountID int `json:"accountId"` + AccountName string `json:"accountName"` + Acl map[string]interface{} `json:"acl"` + BootPartition int `json:"bootPartition"` + CreatedTime int `json:"createdTime"` + DeletedTime int `json:"deletedTime"` + Desc string `json:"desc"` + DestructionTime int `json:"destructionTime"` + DiskPath string `json:"diskPath"` + GridID int `json:"gid"` + GUID int `json:"guid"` + ID int `json:"id"` + ImageID int `json:"imageId"` + Images []int `json:"images"` + IOTune IOTune `json:"iotune"` + IQN string `json:"iqn"` + Login string `json:"login"` + Milestones int `json:"milestones"` + Name string `json:"name"` + Order int `json:"order"` + Params string `json:"params"` + ParentID int `json:"parentId"` + Passwd string `json:"passwd"` + PciSlot int `json:"pciSlot"` + Pool string `json:"pool"` + PurgeAttempts int `json:"purgeAttempts"` + PurgeTime int `json:"purgeTime"` + RealityDeviceNumber int `json:"realityDeviceNumber"` + ReferenceID string `json:"referenceId"` + ResID string `json:"resId"` + ResName string `json:"resName"` + Role string `json:"role"` + SepID int `json:"sepId"` + SizeMax int `json:"sizeMax"` + SizeUsed int `json:"sizeUsed"` + Snapshots []Snapshot `json:"snapshots"` + Status string `json:"status"` + TechStatus string `json:"techStatus"` + Type string `json:"type"` + VMID int `json:"vmid"` +} + +type UnattachedList []Unattached diff --git a/internal/service/cloudapi/disks/resource_disk.go b/internal/service/cloudapi/disks/resource_disk.go index 
872e2a4..50d2f20 100644 --- a/internal/service/cloudapi/disks/resource_disk.go +++ b/internal/service/cloudapi/disks/resource_disk.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -41,6 +42,7 @@ import ( "github.com/rudecs/terraform-provider-decort/internal/constants" "github.com/rudecs/terraform-provider-decort/internal/controller" + "github.com/rudecs/terraform-provider-decort/internal/status" log "github.com/sirupsen/logrus" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -119,6 +121,9 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface } func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + urlValues := &url.Values{} + c := m.(*controller.ControllerCfg) + disk, err := utilityDiskCheckPresence(ctx, d, m) if disk == nil { d.SetId("") @@ -128,6 +133,28 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{} return nil } + if disk.Status == status.Destroyed || disk.Status == status.Purged { + d.Set("disk_id", 0) + return resourceDiskCreate(ctx, d, m) + } else if disk.Status == status.Deleted { + urlValues.Add("diskId", d.Id()) + urlValues.Add("reason", d.Get("reason").(string)) + + _, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + urlValues = &url.Values{} + disk, err = utilityDiskCheckPresence(ctx, d, m) + if disk == nil { + d.SetId("") + if err != nil { + return diag.FromErr(err) + } + return nil + } + } + diskAcl, _ := json.Marshal(disk.Acl) d.Set("account_id", disk.AccountID) @@ -169,7 +196,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{} d.Set("sep_type", disk.SepType) d.Set("size_max", disk.SizeMax) 
d.Set("size_used", disk.SizeUsed) - d.Set("snapshots", flattendDiskSnapshotList(disk.Snapshots)) + d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots)) d.Set("status", disk.Status) d.Set("tech_status", disk.TechStatus) d.Set("type", disk.Type) @@ -179,9 +206,27 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{} } func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - c := m.(*controller.ControllerCfg) urlValues := &url.Values{} + disk, err := utilityDiskCheckPresence(ctx, d, m) + if disk == nil { + if err != nil { + return diag.FromErr(err) + } + return nil + } + if disk.Status == status.Destroyed || disk.Status == status.Purged { + return resourceDiskCreate(ctx, d, m) + } else if disk.Status == status.Deleted { + urlValues.Add("diskId", d.Id()) + urlValues.Add("reason", d.Get("reason").(string)) + + _, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + urlValues = &url.Values{} + } if d.HasChange("size_max") { oldSize, newSize := d.GetChange("size_max") @@ -238,26 +283,10 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface urlValues = &url.Values{} } - if d.HasChange("restore") { - if d.Get("restore").(bool) { - urlValues.Add("diskId", d.Id()) - urlValues.Add("reason", d.Get("reason").(string)) - - _, err := c.DecortAPICall(ctx, "POST", disksRestoreAPI, urlValues) - if err != nil { - return diag.FromErr(err) - } - - urlValues = &url.Values{} - } - - } - return resourceDiskRead(ctx, d, m) } func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - disk, err := utilityDiskCheckPresence(ctx, d, m) if disk == nil { if err != nil { @@ -265,7 +294,9 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface } return nil } - + if disk.Status == status.Destroyed || disk.Status == status.Purged { + return nil + } params 
:= &url.Values{} params.Add("diskId", d.Id()) params.Add("detach", strconv.FormatBool(d.Get("detach").(bool))) @@ -277,126 +308,141 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface if err != nil { return diag.FromErr(err) } - return nil } func resourceDiskSchemaMake() map[string]*schema.Schema { rets := map[string]*schema.Schema{ "account_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "The unique ID of the subscriber-owner of the disk", }, "disk_name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of disk", }, "size_max": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "Size in GB", }, "gid": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "ID of the grid (platform)", }, "pool": { - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Pool for disk location", }, "sep_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Storage endpoint provider ID to create disk", }, "desc": { - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Description of disk", }, "type": { Type: schema.TypeString, Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false), + Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", }, "detach": { Type: schema.TypeBool, Optional: true, Default: false, - Description: "detach disk from machine first", + Description: "Detaching the disk from compute", }, "permanently": { Type: schema.TypeBool, Optional: true, 
Default: false, - Description: "whether to completely delete the disk, works only with non attached disks", + Description: "Whether to completely delete the disk, works only with non attached disks", }, "reason": { Type: schema.TypeString, Optional: true, Default: "", - Description: "reason for an action", - }, - "restore": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "restore deleting disk", + Description: "Reason for deletion", }, "disk_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Disk ID. Duplicates the value of the ID parameter", }, "account_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The name of the subscriber '(account') to whom this disk belongs", }, "acl": { Type: schema.TypeString, Computed: true, }, "boot_partition": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of disk partitions", }, "compute_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Compute ID", }, "compute_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Compute name", }, "created_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Created time", }, "deleted_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Deleted time", }, "destruction_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Time of final deletion", }, "devicename": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of the device", }, "disk_path": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: 
"Disk path", }, "guid": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Disk ID on the storage side", }, "image_id": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Image ID", }, "images": { Type: schema.TypeList, @@ -404,6 +450,7 @@ func resourceDiskSchemaMake() map[string]*schema.Schema { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "IDs of images using the disk", }, "iotune": { Type: schema.TypeList, @@ -413,143 +460,171 @@ func resourceDiskSchemaMake() map[string]*schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "read_bytes_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Number of bytes to read per second", }, "read_bytes_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Maximum number of bytes to read", }, "read_iops_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Number of io read operations per second", }, "read_iops_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Maximum number of io read operations", }, "size_iops_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Size of io operations", }, "total_bytes_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Total size bytes per second", }, "total_bytes_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + 
Description: "Maximum total size of bytes per second", }, "total_iops_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Total number of io operations per second", }, "total_iops_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Maximum total number of io operations per second", }, "write_bytes_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Number of bytes to write per second", }, "write_bytes_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Maximum number of bytes to write per second", }, "write_iops_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Number of write operations per second", }, "write_iops_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Maximum number of write operations per second", }, }, }, }, "iqn": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk IQN", }, "login": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Login to access the disk", }, "milestones": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Milestones", }, "order": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Disk order", }, "params": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk params", }, "parent_id": { - Type: 
schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "ID of the parent disk", }, "passwd": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Password to access the disk", }, "pci_slot": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "ID of the pci slot to which the disk is connected", }, - "purge_attempts": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of deletion attempts", }, "purge_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Time of the last deletion attempt", }, "reality_device_number": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Reality device number", }, "reference_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the reference to the disk", }, "res_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Resource ID", }, "res_name": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of the resource", }, "role": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk role", }, - "sep_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Type SEP. 
Defines the type of storage system and contains one of the values set in the cloud platform", }, "size_used": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Number of used space, in GB", }, "snapshots": { Type: schema.TypeList, @@ -557,43 +632,52 @@ func resourceDiskSchemaMake() map[string]*schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "guid": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", }, "label": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Name of the snapshot", }, "res_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", }, "snap_set_guid": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", }, "snap_set_time": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "The set time of the snapshot", }, "timestamp": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Snapshot time", }, }, }, }, "status": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Disk status", }, "tech_status": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Technical status of the disk", }, "vmid": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Description: "Virtual Machine ID (Deprecated)", }, } @@ -614,11 +698,11 @@ func ResourceDisk() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout180s, - Read: &constants.Timeout30s, - Update: &constants.Timeout180s, - Delete: &constants.Timeout60s, - Default: 
&constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: resourceDiskSchemaMake(), diff --git a/internal/service/cloudapi/disks/resource_disk_snapshot.go b/internal/service/cloudapi/disks/resource_disk_snapshot.go new file mode 100644 index 0000000..891d65b --- /dev/null +++ b/internal/service/cloudapi/disks/resource_disk_snapshot.go @@ -0,0 +1,246 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://github.com/rudecs/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://github.com/rudecs/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + "net/url" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/constants" + "github.com/rudecs/terraform-provider-decort/internal/controller" + log "github.com/sirupsen/logrus" +) + +func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + urlValues := &url.Values{} + c := m.(*controller.ControllerCfg) + disk, err := utilityDiskCheckPresence(ctx, d, m) + if disk == nil { + if err != nil { + return diag.FromErr(err) + } + return nil + } + snapshots := disk.Snapshots + snapshot := Snapshot{} + label := d.Get("label").(string) + for _, sn := range snapshots { + if label == sn.Label { + snapshot = sn + break + } + } + if label != snapshot.Label { + return diag.Errorf("Snapshot with label \"%v\" not found", label) + } + if rollback := d.Get("rollback").(bool); rollback { + urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int))) + urlValues.Add("label", label) + urlValues.Add("timestamp", strconv.Itoa(d.Get("timestamp").(int))) + log.Debugf("resourceDiskSnapshotCreate: Snapshot rollback with label %s", label) + _, err := c.DecortAPICall(ctx, "POST", disksSnapshotRollbackAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + urlValues = &url.Values{} + } + return resourceDiskSnapshotRead(ctx, d, m) +} + +func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + disk, err := utilityDiskCheckPresence(ctx, d, m) + if disk == nil { + if err != nil { + return diag.FromErr(err) + } + return nil + } + snapshots := disk.Snapshots + snapshot := Snapshot{} + label := d.Get("label").(string) + for _, sn := range snapshots { + if label == sn.Label { + snapshot = sn + break + } + } + if label != snapshot.Label { + 
return diag.Errorf("Snapshot with label \"%v\" not found", label) + } + + d.SetId(d.Get("label").(string)) + d.Set("timestamp", snapshot.TimeStamp) + d.Set("guid", snapshot.Guid) + d.Set("res_id", snapshot.ResId) + d.Set("snap_set_guid", snapshot.SnapSetGuid) + d.Set("snap_set_time", snapshot.SnapSetTime) + return nil +} + +func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + urlValues := &url.Values{} + c := m.(*controller.ControllerCfg) + disk, err := utilityDiskCheckPresence(ctx, d, m) + if disk == nil { + if err != nil { + return diag.FromErr(err) + } + return nil + } + snapshots := disk.Snapshots + snapshot := Snapshot{} + label := d.Get("label").(string) + for _, sn := range snapshots { + if label == sn.Label { + snapshot = sn + break + } + } + if label != snapshot.Label { + return diag.Errorf("Snapshot with label \"%v\" not found", label) + } + if d.HasChange("rollback") && d.Get("rollback").(bool) == true { + urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int))) + urlValues.Add("label", label) + urlValues.Add("timestamp", strconv.Itoa(d.Get("timestamp").(int))) + log.Debugf("resourceDiskSnapshotUpdate: Snapshot rollback with label %s", label) + _, err := c.DecortAPICall(ctx, "POST", disksSnapshotRollbackAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + urlValues = &url.Values{} + } + + return resourceDiskSnapshotRead(ctx, d, m) +} + +func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*controller.ControllerCfg) + + disk, err := utilityDiskCheckPresence(ctx, d, m) + if disk == nil { // if disk does not exist, can't call snapshotDelete + d.SetId("") + if err != nil { + return diag.FromErr(err) + } + return nil + } + + params := &url.Values{} + params.Add("diskId", strconv.Itoa(d.Get("disk_id").(int))) + params.Add("label", d.Get("label").(string)) + + _, err = c.DecortAPICall(ctx, "POST", disksSnapshotDeleteAPI, params) + if 
err != nil { + return diag.FromErr(err) + } + return nil +} + +func resourceDiskSnapshotSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "disk_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "The unique ID of the subscriber-owner of the disk", + }, + "label": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the snapshot", + }, + "rollback": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Needed in order to make a snapshot rollback", + }, + "guid": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the snapshot", + }, + "timestamp": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Snapshot time", + }, + "res_id": { + Type: schema.TypeString, + Computed: true, + Description: "Reference to the snapshot", + }, + "snap_set_guid": { + Type: schema.TypeString, + Computed: true, + Description: "The set snapshot ID", + }, + "snap_set_time": { + Type: schema.TypeInt, + Computed: true, + Description: "The set time of the snapshot", + }, + } + return rets +} + +func ResourceDiskSnapshot() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceDiskSnapshotCreate, + ReadContext: resourceDiskSnapshotRead, + UpdateContext: resourceDiskSnapshotUpdate, + DeleteContext: resourceDiskSnapshotDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, + }, + + Schema: resourceDiskSnapshotSchemaMake(), + } +} diff --git a/internal/service/cloudapi/disks/utility_disk.go b/internal/service/cloudapi/disks/utility_disk.go index 1fb4544..0851490 100644 --- a/internal/service/cloudapi/disks/utility_disk.go +++ 
b/internal/service/cloudapi/disks/utility_disk.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/internal/service/cloudapi/disks/utility_disk_list.go b/internal/service/cloudapi/disks/utility_disk_list.go index 1782ac3..65d8d7e 100644 --- a/internal/service/cloudapi/disks/utility_disk_list.go +++ b/internal/service/cloudapi/disks/utility_disk_list.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -44,7 +45,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (DisksList, error) { +func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}, api string) (DisksList, error) { diskList := DisksList{} c := m.(*controller.ControllerCfg) urlValues := &url.Values{} @@ -63,7 +64,7 @@ func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m } log.Debugf("utilityDiskListCheckPresence: load disk list") - diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListAPI, urlValues) + diskListRaw, err := c.DecortAPICall(ctx, "POST", api, urlValues) if err != nil { return nil, err } diff --git a/internal/service/cloudapi/disks/utility_disk_types_detailed_list.go b/internal/service/cloudapi/disks/utility_disk_types_detailed_list.go new file mode 100644 index 0000000..ea9f205 --- /dev/null +++ b/internal/service/cloudapi/disks/utility_disk_types_detailed_list.go @@ -0,0 +1,62 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud 
Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://github.com/rudecs/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://github.com/rudecs/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" + log "github.com/sirupsen/logrus" +) + +func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (TypesDetailedList, error) { + listTypesDetailed := TypesDetailedList{} + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + urlValues.Add("detailed", "true") + log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed") + diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListTypesAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(diskListRaw), &listTypesDetailed) + if err != nil { + return nil, err + } + + return listTypesDetailed, nil +} diff --git a/internal/service/cloudapi/disks/utility_disk_types_list.go 
b/internal/service/cloudapi/disks/utility_disk_types_list.go new file mode 100644 index 0000000..485bfad --- /dev/null +++ b/internal/service/cloudapi/disks/utility_disk_types_list.go @@ -0,0 +1,62 @@ +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://github.com/rudecs/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://github.com/rudecs/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/rudecs/terraform-provider-decort/internal/controller" + log "github.com/sirupsen/logrus" +) + +func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (TypesList, error) { + typesList := TypesList{} + c := m.(*controller.ControllerCfg) + urlValues := &url.Values{} + urlValues.Add("detailed", "false") + log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed") + diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListTypesAPI, urlValues) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(diskListRaw), &typesList) + if err != nil { + return nil, err + } + + return typesList, nil +} diff --git a/internal/service/cloudapi/image/resource_image.go b/internal/service/cloudapi/image/resource_image.go index eb5bc39..20bea28 100644 --- a/internal/service/cloudapi/image/resource_image.go +++ b/internal/service/cloudapi/image/resource_image.go @@ -248,11 +248,11 @@ func ResourceImage() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: resourceImageSchemaMake(dataSourceImageExtendSchemaMake()), diff --git a/internal/service/cloudapi/image/resource_image_virtual.go b/internal/service/cloudapi/image/resource_image_virtual.go index 51dcb3d..900d071 100644 --- a/internal/service/cloudapi/image/resource_image_virtual.go +++ b/internal/service/cloudapi/image/resource_image_virtual.go @@ -120,11 +120,11 @@ func 
ResourceImageVirtual() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: resourceImageVirtualSchemaMake(dataSourceImageExtendSchemaMake()), diff --git a/internal/service/cloudapi/k8s/resource_k8s.go b/internal/service/cloudapi/k8s/resource_k8s.go index 3c72d93..c3e7d66 100644 --- a/internal/service/cloudapi/k8s/resource_k8s.go +++ b/internal/service/cloudapi/k8s/resource_k8s.go @@ -380,11 +380,11 @@ func ResourceK8s() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout20m, - Read: &constants.Timeout30s, - Update: &constants.Timeout20m, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: resourceK8sSchemaMake(), diff --git a/internal/service/cloudapi/k8s/resource_k8s_wg.go b/internal/service/cloudapi/k8s/resource_k8s_wg.go index 66cb6cb..0834a32 100644 --- a/internal/service/cloudapi/k8s/resource_k8s_wg.go +++ b/internal/service/cloudapi/k8s/resource_k8s_wg.go @@ -232,11 +232,11 @@ func ResourceK8sWg() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout20m, - Read: &constants.Timeout30s, - Update: &constants.Timeout20m, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: resourceK8sWgSchemaMake(), diff --git a/internal/service/cloudapi/kvmvm/api.go 
b/internal/service/cloudapi/kvmvm/api.go index 09305af..1094df0 100644 --- a/internal/service/cloudapi/kvmvm/api.go +++ b/internal/service/cloudapi/kvmvm/api.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -31,17 +32,21 @@ Documentation: https://github.com/rudecs/terraform-provider-decort/wiki package kvmvm -const KvmX86CreateAPI = "/restmachine/cloudapi/kvmx86/create" -const KvmPPCCreateAPI = "/restmachine/cloudapi/kvmppc/create" -const ComputeGetAPI = "/restmachine/cloudapi/compute/get" -const RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes" -const ComputeNetAttachAPI = "/restmachine/cloudapi/compute/netAttach" -const ComputeNetDetachAPI = "/restmachine/cloudapi/compute/netDetach" -const ComputeDiskAttachAPI = "/restmachine/cloudapi/compute/diskAttach" -const ComputeDiskDetachAPI = "/restmachine/cloudapi/compute/diskDetach" -const ComputeStartAPI = "/restmachine/cloudapi/compute/start" -const ComputeStopAPI = "/restmachine/cloudapi/compute/stop" -const ComputeResizeAPI = "/restmachine/cloudapi/compute/resize" -const DisksResizeAPI = "/restmachine/cloudapi/disks/resize2" -const ComputeDeleteAPI = "/restmachine/cloudapi/compute/delete" -const ComputeUpdateAPI = "/restmachine/cloudapi/compute/update" +const ( + KvmX86CreateAPI = "/restmachine/cloudapi/kvmx86/create" + KvmPPCCreateAPI = "/restmachine/cloudapi/kvmppc/create" + ComputeGetAPI = "/restmachine/cloudapi/compute/get" + RgListComputesAPI = "/restmachine/cloudapi/rg/listComputes" + ComputeNetAttachAPI = "/restmachine/cloudapi/compute/netAttach" + ComputeNetDetachAPI = "/restmachine/cloudapi/compute/netDetach" + ComputeDiskAttachAPI = "/restmachine/cloudapi/compute/diskAttach" + ComputeDiskDetachAPI = "/restmachine/cloudapi/compute/diskDetach" + ComputeStartAPI 
= "/restmachine/cloudapi/compute/start" + ComputeStopAPI = "/restmachine/cloudapi/compute/stop" + ComputeResizeAPI = "/restmachine/cloudapi/compute/resize" + DisksResizeAPI = "/restmachine/cloudapi/disks/resize2" + ComputeDeleteAPI = "/restmachine/cloudapi/compute/delete" + ComputeUpdateAPI = "/restmachine/cloudapi/compute/update" + ComputeDiskAddAPI = "/restmachine/cloudapi/compute/diskAdd" + ComputeDiskDeleteAPI = "/restmachine/cloudapi/compute/diskDel" +) diff --git a/internal/service/cloudapi/kvmvm/data_source_compute.go b/internal/service/cloudapi/kvmvm/data_source_compute.go index cd20f87..b662acf 100644 --- a/internal/service/cloudapi/kvmvm/data_source_compute.go +++ b/internal/service/cloudapi/kvmvm/data_source_compute.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -113,6 +114,36 @@ func parseComputeInterfacesToNetworks(ifaces []InterfaceRecord) []interface{} { return result } +func findInExtraDisks(DiskId uint, ExtraDisks []interface{}) bool { + for _, ExtraDisk := range ExtraDisks { + if DiskId == uint(ExtraDisk.(int)) { + return true + } + } + return false +} + +func flattenComputeDisksDemo(disksList []DiskRecord, extraDisks []interface{}) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, disk := range disksList { + if disk.Name == "bootdisk" || findInExtraDisks(disk.ID, extraDisks) { //skip main bootdisk and extraDisks + continue + } + temp := map[string]interface{}{ + "disk_name": disk.Name, + "disk_id": disk.ID, + "disk_type": disk.Type, + "sep_id": disk.SepID, + "pool": disk.Pool, + "desc": disk.Desc, + "image_id": disk.ImageID, + "size": disk.SizeMax, + } + res = append(res, temp) + } + return res +} + func flattenCompute(d *schema.ResourceData, compFacts string) error { // This function expects that compFacts string contains response from API compute/get, // i.e. detailed information about compute instance. 
@@ -162,12 +193,12 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error { d.Set("sep_id", bootDisk.SepID) d.Set("pool", bootDisk.Pool) - if len(model.Disks) > 0 { - log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks)) - if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil { - return err - } - } + //if len(model.Disks) > 0 { + //log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(model.Disks)) + //if err = d.Set("extra_disks", parseComputeDisksToExtraDisks(model.Disks)); err != nil { + //return err + //} + //} if len(model.Interfaces) > 0 { log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(model.Interfaces)) @@ -183,6 +214,11 @@ func flattenCompute(d *schema.ResourceData, compFacts string) error { } } + err = d.Set("disks", flattenComputeDisksDemo(model.Disks, d.Get("extra_disks").(*schema.Set).List())) + if err != nil { + return err + } + return nil } diff --git a/internal/service/cloudapi/kvmvm/models.go b/internal/service/cloudapi/kvmvm/models.go index 220fafd..b967a5a 100644 --- a/internal/service/cloudapi/kvmvm/models.go +++ b/internal/service/cloudapi/kvmvm/models.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/internal/service/cloudapi/kvmvm/network_subresource.go b/internal/service/cloudapi/kvmvm/network_subresource.go index 9bc94b4..d851f6c 100644 --- a/internal/service/cloudapi/kvmvm/network_subresource.go +++ b/internal/service/cloudapi/kvmvm/network_subresource.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/internal/service/cloudapi/kvmvm/osusers_subresource.go b/internal/service/cloudapi/kvmvm/osusers_subresource.go index 2e6787f..cf64f7e 100644 --- a/internal/service/cloudapi/kvmvm/osusers_subresource.go +++ b/internal/service/cloudapi/kvmvm/osusers_subresource.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/internal/service/cloudapi/kvmvm/resource_compute.go b/internal/service/cloudapi/kvmvm/resource_compute.go index 5f03215..a81c496 100644 --- a/internal/service/cloudapi/kvmvm/resource_compute.go +++ b/internal/service/cloudapi/kvmvm/resource_compute.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -131,6 +132,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf if err != nil { return diag.FromErr(err) } + urlValues = &url.Values{} // Compute create API returns ID of the new Compute instance on success d.SetId(apiResp) // update ID of the resource to tell Terraform that the resource exists, albeit partially @@ -148,6 +150,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf log.Errorf("resourceComputeCreate: could not delete compute after failed creation: %v", err) } d.SetId("") + urlValues = &url.Values{} } }() @@ -189,13 +192,50 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf } } + if !cleanup { + if disks, ok := d.GetOk("disks"); ok { + log.Debugf("resourceComputeCreate: Create disks on ComputeID: %d", compId) + addedDisks := disks.([]interface{}) + if len(addedDisks) > 0 { + for _, disk := range addedDisks { + diskConv := disk.(map[string]interface{}) + + urlValues.Add("computeId", d.Id()) + urlValues.Add("diskName", diskConv["disk_name"].(string)) + urlValues.Add("size", strconv.Itoa(diskConv["size"].(int))) + if diskConv["disk_type"].(string) != "" { + urlValues.Add("diskType", diskConv["disk_type"].(string)) + } + if diskConv["sep_id"].(int) != 0 { + urlValues.Add("sepId", strconv.Itoa(diskConv["sep_id"].(int))) + } + if diskConv["pool"].(string) != "" { + urlValues.Add("pool", diskConv["pool"].(string)) + } + if diskConv["desc"].(string) != "" { + urlValues.Add("desc", diskConv["desc"].(string)) + } + if diskConv["image_id"].(int) != 0 { + urlValues.Add("imageId", strconv.Itoa(diskConv["image_id"].(int))) + } + _, err := c.DecortAPICall(ctx, "POST", ComputeDiskAddAPI, urlValues) + if err != nil { + cleanup = true + return diag.FromErr(err) + } + urlValues = &url.Values{} + } + } + } + } + log.Debugf("resourceComputeCreate: new Compute ID %d, name %s creation sequence complete", compId, d.Get("name").(string)) // We may reuse dataSourceComputeRead 
here as we maintain similarity // between Compute resource and Compute data source schemas // Compute read function will also update resource ID on success, so that Terraform // will know the resource exists - return dataSourceComputeRead(ctx, d, m) + return resourceComputeRead(ctx, d, m) } func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -236,32 +276,32 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf */ // 1. Resize CPU/RAM - params := &url.Values{} + urlValues := &url.Values{} doUpdate := false - params.Add("computeId", d.Id()) + urlValues.Add("computeId", d.Id()) oldCpu, newCpu := d.GetChange("cpu") if oldCpu.(int) != newCpu.(int) { - params.Add("cpu", fmt.Sprintf("%d", newCpu.(int))) + urlValues.Add("cpu", fmt.Sprintf("%d", newCpu.(int))) doUpdate = true } else { - params.Add("cpu", "0") // no change to CPU allocation + urlValues.Add("cpu", "0") // no change to CPU allocation } oldRam, newRam := d.GetChange("ram") if oldRam.(int) != newRam.(int) { - params.Add("ram", fmt.Sprintf("%d", newRam.(int))) + urlValues.Add("ram", fmt.Sprintf("%d", newRam.(int))) doUpdate = true } else { - params.Add("ram", "0") + urlValues.Add("ram", "0") } if doUpdate { log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d", oldCpu.(int), newCpu.(int), oldRam.(int), newRam.(int)) - params.Add("force", "true") - _, err := c.DecortAPICall(ctx, "POST", ComputeResizeAPI, params) + urlValues.Add("force", "true") + _, err := c.DecortAPICall(ctx, "POST", ComputeResizeAPI, urlValues) if err != nil { return diag.FromErr(err) } @@ -284,13 +324,15 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf } // 3. 
Calculate and apply changes to data disks - err := utilityComputeExtraDisksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any - if err != nil { - return diag.FromErr(err) + if d.HasChange("extra_disks") { + err := utilityComputeExtraDisksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any + if err != nil { + return diag.FromErr(err) + } } // 4. Calculate and apply changes to network connections - err = utilityComputeNetworksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any + err := utilityComputeNetworksConfigure(ctx, d, m, true) // pass do_delta = true to apply changes, if any if err != nil { return diag.FromErr(err) } @@ -319,9 +361,108 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf } } + urlValues = &url.Values{} + if d.HasChange("disks") { + deletedDisks := make([]interface{}, 0) + addedDisks := make([]interface{}, 0) + + oldDisks, newDisks := d.GetChange("disks") + oldConv := oldDisks.([]interface{}) + newConv := newDisks.([]interface{}) + + for _, el := range oldConv { + if !isContainsDisk(newConv, el) { + deletedDisks = append(deletedDisks, el) + } + } + + for _, el := range newConv { + if !isContainsDisk(oldConv, el) { + addedDisks = append(addedDisks, el) + } + } + + if len(deletedDisks) > 0 { + urlValues.Add("computeId", d.Id()) + urlValues.Add("force", "false") + _, err := c.DecortAPICall(ctx, "POST", ComputeStopAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + urlValues = &url.Values{} + + for _, disk := range deletedDisks { + diskConv := disk.(map[string]interface{}) + if diskConv["disk_name"].(string) == "bootdisk" { + continue + } + urlValues.Add("computeId", d.Id()) + urlValues.Add("diskId", strconv.Itoa(diskConv["disk_id"].(int))) + urlValues.Add("permanently", strconv.FormatBool(diskConv["permanently"].(bool))) + _, err := c.DecortAPICall(ctx, "POST", ComputeDiskDeleteAPI, urlValues) + if err != nil { + return 
diag.FromErr(err) + } + + urlValues = &url.Values{} + } + urlValues.Add("computeId", d.Id()) + urlValues.Add("altBootId", "0") + _, err = c.DecortAPICall(ctx, "POST", ComputeStartAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + urlValues = &url.Values{} + } + + if len(addedDisks) > 0 { + for _, disk := range addedDisks { + diskConv := disk.(map[string]interface{}) + if diskConv["disk_name"].(string) == "bootdisk" { + continue + } + urlValues.Add("computeId", d.Id()) + urlValues.Add("diskName", diskConv["disk_name"].(string)) + urlValues.Add("size", strconv.Itoa(diskConv["size"].(int))) + if diskConv["disk_type"].(string) != "" { + urlValues.Add("diskType", diskConv["disk_type"].(string)) + } + if diskConv["sep_id"].(int) != 0 { + urlValues.Add("sepId", strconv.Itoa(diskConv["sep_id"].(int))) + } + if diskConv["pool"].(string) != "" { + urlValues.Add("pool", diskConv["pool"].(string)) + } + if diskConv["desc"].(string) != "" { + urlValues.Add("desc", diskConv["desc"].(string)) + } + if diskConv["image_id"].(int) != 0 { + urlValues.Add("imageId", strconv.Itoa(diskConv["image_id"].(int))) + } + _, err := c.DecortAPICall(ctx, "POST", ComputeDiskAddAPI, urlValues) + if err != nil { + return diag.FromErr(err) + } + + urlValues = &url.Values{} + } + } + } + // we may reuse dataSourceComputeRead here as we maintain similarity // between Compute resource and Compute data source schemas - return dataSourceComputeRead(ctx, d, m) + return resourceComputeRead(ctx, d, m) +} + +func isContainsDisk(els []interface{}, el interface{}) bool { + for _, elOld := range els { + elOldConv := elOld.(map[string]interface{}) + elConv := el.(map[string]interface{}) + if elOldConv["disk_name"].(string) == elConv["disk_name"].(string) { + return true + } + } + return false } func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -346,199 +487,265 @@ func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m 
interf return nil } -func ResourceCompute() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - CreateContext: resourceComputeCreate, - ReadContext: resourceComputeRead, - UpdateContext: resourceComputeUpdate, - DeleteContext: resourceComputeDelete, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, +func ResourceComputeSchemaMake() map[string]*schema.Schema { + rets := map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.", }, - Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout180s, - Read: &constants.Timeout30s, - Update: &constants.Timeout180s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + "rg_id": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + Description: "ID of the resource group where this compute should be deployed.", }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Name of this compute. 
Compute names are case sensitive and must be unique in the resource group.", - }, - - "rg_id": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(1), - Description: "ID of the resource group where this compute should be deployed.", - }, + "driver": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: statefuncs.StateFuncToUpper, + ValidateFunc: validation.StringInSlice([]string{"KVM_X86", "KVM_PPC"}, false), // observe case while validating + Description: "Hardware architecture of this compute instance.", + }, - "driver": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: statefuncs.StateFuncToUpper, - ValidateFunc: validation.StringInSlice([]string{"KVM_X86", "KVM_PPC"}, false), // observe case while validating - Description: "Hardware architecture of this compute instance.", - }, + "cpu": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute), + Description: "Number of CPUs to allocate to this compute instance.", + }, - "cpu": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute), - Description: "Number of CPUs to allocate to this compute instance.", - }, + "ram": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute), + Description: "Amount of RAM in MB to allocate to this compute instance.", + }, - "ram": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute), - Description: "Amount of RAM in MB to allocate to this compute instance.", - }, + "image_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "ID of the OS image to base this compute instance on.", + }, - "image_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: "ID of the OS image to base this compute instance on.", - }, + 
"boot_disk_size": { + Type: schema.TypeInt, + Required: true, + Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.", + }, - "boot_disk_size": { - Type: schema.TypeInt, - Required: true, - Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.", + "disks": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_name": { + Type: schema.TypeString, + Required: true, + Description: "Name for disk", + }, + "size": { + Type: schema.TypeInt, + Required: true, + Description: "Disk size in GiB", + }, + "disk_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false), + Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'", + }, + "sep_id": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Storage endpoint provider ID; by default the same with boot disk", + }, + "pool": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Pool name; by default will be chosen automatically", + }, + "desc": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Optional description", + }, + "image_id": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Specify image id for create disk from template", + }, + "disk_id": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk ID", + }, + "permanently": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Disk deletion status", + }, + }, }, + }, + "sep_id": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + Description: "ID of SEP to create bootDisk on. 
Uses image's sepId if not set.", + }, - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - Description: "ID of SEP to create bootDisk on. Uses image's sepId if not set.", - }, + "pool": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: "Pool to use if sepId is set, can be also empty if needed to be chosen by system.", + }, - "pool": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: "Pool to use if sepId is set, can be also empty if needed to be chosen by system.", + "extra_disks": { + Type: schema.TypeSet, + Optional: true, + MaxItems: constants.MaxExtraDisksPerCompute, + Elem: &schema.Schema{ + Type: schema.TypeInt, }, + Description: "Optional list of IDs of extra disks to attach to this compute. You may specify several extra disks.", + }, - "extra_disks": { - Type: schema.TypeSet, - Optional: true, - MaxItems: constants.MaxExtraDisksPerCompute, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "Optional list of IDs of extra disks to attach to this compute. You may specify several extra disks.", + "network": { + Type: schema.TypeSet, + Optional: true, + MaxItems: constants.MaxNetworksPerCompute, + Elem: &schema.Resource{ + Schema: networkSubresourceSchemaMake(), }, + Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.", + }, - "network": { - Type: schema.TypeSet, + /* + "ssh_keys": { + Type: schema.TypeList, Optional: true, - MaxItems: constants.MaxNetworksPerCompute, + MaxItems: MaxSshKeysPerCompute, Elem: &schema.Resource{ - Schema: networkSubresourceSchemaMake(), + Schema: sshSubresourceSchemaMake(), }, - Description: "Optional network connection(s) for this compute. 
You may specify several network blocks, one for each connection.", + Description: "SSH keys to authorize on this compute instance.", }, + */ - /* - "ssh_keys": { - Type: schema.TypeList, - Optional: true, - MaxItems: MaxSshKeysPerCompute, - Elem: &schema.Resource{ - Schema: sshSubresourceSchemaMake(), - }, - Description: "SSH keys to authorize on this compute instance.", - }, - */ + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional text description of this compute instance.", + }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: "Optional text description of this compute instance.", - }, + "cloud_init": { + Type: schema.TypeString, + Optional: true, + Default: "applied", + DiffSuppressFunc: cloudInitDiffSupperss, + Description: "Optional cloud_init parameters. Applied when creating new compute instance only, ignored in all other cases.", + }, - "cloud_init": { - Type: schema.TypeString, - Optional: true, - Default: "applied", - DiffSuppressFunc: cloudInitDiffSupperss, - Description: "Optional cloud_init parameters. 
Applied when creating new compute instance only, ignored in all other cases.", - }, + // The rest are Compute properties, which are "computed" once it is created + "rg_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the resource group where this compute instance is located.", + }, - // The rest are Compute properties, which are "computed" once it is created - "rg_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the resource group where this compute instance is located.", - }, + "account_id": { + Type: schema.TypeInt, + Computed: true, + Description: "ID of the account this compute instance belongs to.", + }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the account this compute instance belongs to.", - }, + "account_name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the account this compute instance belongs to.", + }, - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account this compute instance belongs to.", - }, + "boot_disk_id": { + Type: schema.TypeInt, + Computed: true, + Description: "This compute instance boot disk ID.", + }, - "boot_disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "This compute instance boot disk ID.", + "os_users": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: osUsersSubresourceSchemaMake(), }, + Description: "Guest OS users provisioned on this compute instance.", + }, - "os_users": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: osUsersSubresourceSchemaMake(), - }, - Description: "Guest OS users provisioned on this compute instance.", - }, + "started": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Is compute started.", + }, + "detach_disks": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "permanently": { + Type: schema.TypeBool, + 
Optional: true, + Default: true, + }, + "is": { + Type: schema.TypeString, + Optional: true, + Description: "system name", + }, + "ipa_type": { + Type: schema.TypeString, + Optional: true, + Description: "compute purpose", + }, + } + return rets +} - "started": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "Is compute started.", - }, - "detach_disks": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "permanently": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "is": { - Type: schema.TypeString, - Optional: true, - Description: "system name", - }, - "ipa_type": { - Type: schema.TypeString, - Optional: true, - Description: "compute purpose", - }, +func ResourceCompute() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceComputeCreate, + ReadContext: resourceComputeRead, + UpdateContext: resourceComputeUpdate, + DeleteContext: resourceComputeDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, + }, + + Schema: ResourceComputeSchemaMake(), } } diff --git a/internal/service/cloudapi/kvmvm/utility_compute.go b/internal/service/cloudapi/kvmvm/utility_compute.go index 259040b..8be54c1 100644 --- a/internal/service/cloudapi/kvmvm/utility_compute.go +++ b/internal/service/cloudapi/kvmvm/utility_compute.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -91,16 +92,33 @@ func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceDa detach_set := old_set.(*schema.Set).Difference(new_set.(*schema.Set)) log.Debugf("utilityComputeExtraDisksConfigure: detach set has %d items for Compute ID %s", detach_set.Len(), d.Id()) - for _, diskId := range detach_set.List() { + + if detach_set.Len() > 0 { urlValues := &url.Values{} urlValues.Add("computeId", d.Id()) - urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int))) - _, err := c.DecortAPICall(ctx, "POST", ComputeDiskDetachAPI, urlValues) + urlValues.Add("force", "false") + _, err := c.DecortAPICall(ctx, "POST", ComputeStopAPI, urlValues) if err != nil { - // failed to detach disk - there will be partial resource update - log.Errorf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId.(int), d.Id(), err) - apiErrCount++ - lastSavedError = err + return err + } + for _, diskId := range detach_set.List() { + urlValues := &url.Values{} + urlValues.Add("computeId", d.Id()) + urlValues.Add("diskId", fmt.Sprintf("%d", diskId.(int))) + _, err := c.DecortAPICall(ctx, "POST", ComputeDiskDetachAPI, urlValues) + if err != nil { + // failed to detach disk - there will be partial resource update + log.Errorf("utilityComputeExtraDisksConfigure: failed to detach disk ID %d from Compute ID %s: %s", diskId.(int), d.Id(), err) + apiErrCount++ + lastSavedError = err + } + } + urlValues = &url.Values{} + urlValues.Add("computeId", d.Id()) + urlValues.Add("altBootId", "0") + _, err = c.DecortAPICall(ctx, "POST", ComputeStartAPI, urlValues) + if err != nil { + return err } } diff --git a/internal/service/cloudapi/lb/resource_lb.go b/internal/service/cloudapi/lb/resource_lb.go index b4b94dd..cb7f56c 100644 --- a/internal/service/cloudapi/lb/resource_lb.go +++ b/internal/service/cloudapi/lb/resource_lb.go @@ -269,11 +269,11 @@ func ResourceLB() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - 
Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: lbResourceSchemaMake(), diff --git a/internal/service/cloudapi/lb/resource_lb_backend.go b/internal/service/cloudapi/lb/resource_lb_backend.go index 2d9ee54..1fa94ed 100644 --- a/internal/service/cloudapi/lb/resource_lb_backend.go +++ b/internal/service/cloudapi/lb/resource_lb_backend.go @@ -214,11 +214,11 @@ func ResourceLBBackend() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: map[string]*schema.Schema{ diff --git a/internal/service/cloudapi/lb/resource_lb_backend_server.go b/internal/service/cloudapi/lb/resource_lb_backend_server.go index 089e711..ab54420 100644 --- a/internal/service/cloudapi/lb/resource_lb_backend_server.go +++ b/internal/service/cloudapi/lb/resource_lb_backend_server.go @@ -225,11 +225,11 @@ func ResourceLBBackendServer() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: map[string]*schema.Schema{ diff --git a/internal/service/cloudapi/lb/resource_lb_frontend.go b/internal/service/cloudapi/lb/resource_lb_frontend.go index 13a1161..97e92b3 100644 
--- a/internal/service/cloudapi/lb/resource_lb_frontend.go +++ b/internal/service/cloudapi/lb/resource_lb_frontend.go @@ -138,11 +138,11 @@ func ResourceLBFrontend() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: map[string]*schema.Schema{ diff --git a/internal/service/cloudapi/lb/resource_lb_frontend_bind.go b/internal/service/cloudapi/lb/resource_lb_frontend_bind.go index d4b3750..8261fb5 100644 --- a/internal/service/cloudapi/lb/resource_lb_frontend_bind.go +++ b/internal/service/cloudapi/lb/resource_lb_frontend_bind.go @@ -162,11 +162,11 @@ func ResourceLBFrontendBind() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: map[string]*schema.Schema{ diff --git a/internal/service/cloudapi/pfw/resource_pfw.go b/internal/service/cloudapi/pfw/resource_pfw.go index 478fba0..8cf9914 100644 --- a/internal/service/cloudapi/pfw/resource_pfw.go +++ b/internal/service/cloudapi/pfw/resource_pfw.go @@ -185,11 +185,11 @@ func ResourcePfw() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, 
+ Default: &constants.Timeout300s, }, Schema: resourcePfwSchemaMake(), diff --git a/internal/service/cloudapi/rg/resource_rg.go b/internal/service/cloudapi/rg/resource_rg.go index 3e1c799..0373465 100644 --- a/internal/service/cloudapi/rg/resource_rg.go +++ b/internal/service/cloudapi/rg/resource_rg.go @@ -311,11 +311,11 @@ func ResourceResgroup() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout180s, - Read: &constants.Timeout30s, - Update: &constants.Timeout180s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: map[string]*schema.Schema{ diff --git a/internal/service/cloudapi/snapshot/resource_snapshot.go b/internal/service/cloudapi/snapshot/resource_snapshot.go index 8090437..a56ebc8 100644 --- a/internal/service/cloudapi/snapshot/resource_snapshot.go +++ b/internal/service/cloudapi/snapshot/resource_snapshot.go @@ -182,11 +182,11 @@ func ResourceSnapshot() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: resourceSnapshotSchemaMake(), diff --git a/internal/service/cloudapi/vins/api.go b/internal/service/cloudapi/vins/api.go index 4a21e3e..049b8a4 100644 --- a/internal/service/cloudapi/vins/api.go +++ b/internal/service/cloudapi/vins/api.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/internal/service/cloudapi/vins/data_source_vins_list.go b/internal/service/cloudapi/vins/data_source_vins_list.go index 2178eb5..ba9d84a 100644 --- a/internal/service/cloudapi/vins/data_source_vins_list.go +++ b/internal/service/cloudapi/vins/data_source_vins_list.go @@ -3,6 +3,7 @@ Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Kasim Baybikov, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/internal/service/cloudapi/vins/resource_vins.go b/internal/service/cloudapi/vins/resource_vins.go index 6cb6aa8..c8aa317 100644 --- a/internal/service/cloudapi/vins/resource_vins.go +++ b/internal/service/cloudapi/vins/resource_vins.go @@ -296,11 +296,11 @@ func ResourceVins() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout180s, - Read: &constants.Timeout30s, - Update: &constants.Timeout180s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, + Create: &constants.Timeout600s, + Read: &constants.Timeout300s, + Update: &constants.Timeout300s, + Delete: &constants.Timeout300s, + Default: &constants.Timeout300s, }, Schema: resourceVinsSchemaMake(), diff --git a/internal/status/status.go b/internal/status/status.go new file mode 100644 index 0000000..f8d7124 --- /dev/null +++ b/internal/status/status.go @@ -0,0 +1,32 @@ +package status + +type Status = string + +var ( + //The disk is linked to any Compute + Assigned Status = "ASSIGNED" + + //An object model has been created in the database + Modeled Status = "MODELED" + + //In the process of creation + Creating Status = "CREATING" + + //Creation completed + Created Status = "CREATED" + + //Physical resources are allocated for 
the object + Allocated Status = "ALLOCATED" + + //The object has released (returned to the platform) the physical resources that it occupied + Unallocated Status = "UNALLOCATED" + + //Permanently deleted + Destroyed Status = "DESTROYED" + + //Deleted to Trash + Deleted Status = "DELETED" + + //Deleted from storage + Purged Status = "PURGED" +) diff --git a/provider.tf b/provider.tf new file mode 100644 index 0000000..c65df29 --- /dev/null +++ b/provider.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + decort = { + source = "digitalenergy.online/decort/decort" + version = "3.1.1" + } + } +} + diff --git a/samples/README.md b/samples/README.md index 98b59a3..4a3d2a2 100644 --- a/samples/README.md +++ b/samples/README.md @@ -43,6 +43,12 @@ - lb - lb_list - lb_list_deleted + - disk_list_deleted + - disk_list_unattached + - disk_list_types + - disk_list_types_detailed + - disk_snapshot_list + - disk_snapshot - resources: - image - virtual_image @@ -60,6 +66,7 @@ - lb_backend - lb_frontend_bind - lb_backend_server + - disk_snapshot - cloudbroker: - data: - grid diff --git a/samples/cloudapi/data_disk_list_deleted/main.tf b/samples/cloudapi/data_disk_list_deleted/main.tf new file mode 100644 index 0000000..5184b37 --- /dev/null +++ b/samples/cloudapi/data_disk_list_deleted/main.tf @@ -0,0 +1,54 @@ +/* +Пример использования +Получение списка дисков со статусом DELETED +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ + + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_disk_list_deleted" "dld" { + #id аккаунта для получения 
списка дисков + #опциональный параметр + #тип - число + #account_id = 11111 + + #тип диска + #опциональный параметр + #тип - строка + #возможные типы: "b" - boot_disk, "d" - data_disk + #type = "d" + + #кол-во страниц для вывода + #опциональный параметр + #тип - число + #page = 1 + + #размер страницы + #опциональный параметр + #тип - число + #size = 1 +} + +output "test" { + value = data.decort_disk_list_deleted.dld +} diff --git a/samples/cloudapi/data_disk_list_types/main.tf b/samples/cloudapi/data_disk_list_types/main.tf new file mode 100644 index 0000000..b984b31 --- /dev/null +++ b/samples/cloudapi/data_disk_list_types/main.tf @@ -0,0 +1,39 @@ +/* +Пример использования +Получение списка типов дисков +*/ +#Раскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ + + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +data "decort_disk_list_types" "dlt" { + #Нет входных параметров + + #Выходной параметр + #тип - лист строк + #types {} +} + +output "test" { + value = data.decort_disk_list_types.dlt +} diff --git a/samples/cloudapi/data_disk_list_types_detailed/main.tf b/samples/cloudapi/data_disk_list_types_detailed/main.tf new file mode 100644 index 0000000..8bcc09e --- /dev/null +++ b/samples/cloudapi/data_disk_list_types_detailed/main.tf @@ -0,0 +1,52 @@ +/* +Пример использования +Получение списка типов дисков, но детально +*/ +#Раскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + 
version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ + + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + + +data "decort_disk_list_types_detailed" "dltd" { + #Нет входных параметров + + #Выходной параметр + #тип - лист типов + # items {} + + #Выходной параметр + #Список пулов + # pools + + #Выходной параметр + #Имя + # name + + #Выходной параметр + #Список типов + #types +} + +output "test" { + value = data.decort_disk_list_types_detailed.dltd +} diff --git a/samples/cloudapi/data_disk_list_unattached/main.tf b/samples/cloudapi/data_disk_list_unattached/main.tf new file mode 100644 index 0000000..175a7e5 --- /dev/null +++ b/samples/cloudapi/data_disk_list_unattached/main.tf @@ -0,0 +1,39 @@ +/* +Пример использования +Получение списка доступных неприсоединенных дисков +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ + + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + + +data "decort_disk_list_unattached" "dlu" { + #Номер аккаунта + #опциональный параметр + #тип - число + account_id = 100 +} + +output "test" { + value = data.decort_disk_list_unattached.dlu +} diff --git a/samples/cloudapi/data_disk_snapshot/main.tf b/samples/cloudapi/data_disk_snapshot/main.tf new file mode 100644 index 0000000..1f63f6c --- /dev/null +++ b/samples/cloudapi/data_disk_snapshot/main.tf @@ -0,0 +1,44 @@ +/* +Пример использования +Получение конкретного снапшота +*/ 
+#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ + + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + + +data "decort_disk_snapshot" "ds" { + #Номер диска + #обязательный параметр + #тип - число + disk_id = 20100 + + #Ярлык диска + #обязательный параметр + #тип - строка + label = "label" +} + +output "test" { + value = data.decort_disk_snapshot.ds +} diff --git a/samples/cloudapi/data_disk_snapshot_list/main.tf b/samples/cloudapi/data_disk_snapshot_list/main.tf new file mode 100644 index 0000000..6f58cee --- /dev/null +++ b/samples/cloudapi/data_disk_snapshot_list/main.tf @@ -0,0 +1,39 @@ +/* +Пример использования +Получение списка снапшотов диска +*/ +#Расскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером +/* +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} +*/ + + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + + +data "decort_disk_snapshot_list" "ds" { + #Номер диска + #обязательный параметр + #тип - число + disk_id = 20100 +} + +output "test" { + value = data.decort_disk_snapshot_list.ds +} diff --git a/samples/cloudapi/resource_disk_snapshot/main.tf b/samples/cloudapi/resource_disk_snapshot/main.tf new file mode 100644 index 0000000..a5d9863 --- /dev/null +++ 
b/samples/cloudapi/resource_disk_snapshot/main.tf @@ -0,0 +1,47 @@ +/* +Пример использования +Ресурс снапшота диска +*/ +#Раскомментируйте этот код, +#и внесите необходимые правки в версию и путь, +#чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером + +terraform { + required_providers { + decort = { + version = "1.1" + source = "digitalenergy.online/decort/decort" + } + } +} + + +provider "decort" { + authenticator = "oauth2" + #controller_url = + controller_url = "https://ds1.digitalenergy.online" + #oauth2_url = + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} + +resource "decort_disk_snapshot" "ds" { + #Номер диска + #обязательный параметр + #тип - число + disk_id = 20100 + + #Ярлык диска + #обязательный параметр + #тип - строка + label = "label" + + #флаг rollback + #опциональный параметр + #тип - bool + rollback = false +} + +output "test" { + value = decort_disk_snapshot.ds +} diff --git a/samples/cloudapi/resource_kvmvm/main.tf b/samples/cloudapi/resource_kvmvm/main.tf index 8d591ad..301a7b0 100644 --- a/samples/cloudapi/resource_kvmvm/main.tf +++ b/samples/cloudapi/resource_kvmvm/main.tf @@ -72,6 +72,50 @@ resource "decort_kvmvm" "comp" { #тип - строка description = "test update description in tf words update" + #Создание и добавление диска для compute + #опциональный параметр + #тип - список дисков + disks { + #Имя диска + #Обязательный для диска параметр + #Тип - строка + disk_name = "disk_name" + + #Размер диска + #Обязательный для диска параметр + #Тип - число + size = 5 + + #Тип диска + #опциональный параметр + #тип - строка + disk_type = "D" + + #опциональный параметр + #тип - число + sep_id = 1 + + #Название пула + #опциональный параметр + #тип - строка + pool = "data01" + + #Описание диска + #опциональный параметр + #тип - строка + desc = "" + + #Айди образа + #опциональный параметр + #тип - число + image_id = 378 + + #Флаг для удаления диска + #опциональный параметр 
+ #тип - bool + permanently = false + } + }