diff --git a/CHANGELOG.md b/CHANGELOG.md
index e8116a5..b33cf06 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,75 @@
-## Version 1.0.1
+## Version 1.1.0
 
-### Feature
+### Added
 
-### Bugfix
-- Fixed dynamix_lb_list data source in cloudapi/lb for the optional field 'rg_id' to work correctly
-- Fixex reading configuration data from environment variables
-- Fixed configuration validation
\ No newline at end of file
+#### audit
+| Task ID | Description |
+| --- | --- |
+| BATF-618 | Datasource `dynamix_audit` in cloudapi/audit |
+
+#### account
+| Task ID | Description |
+| --- | --- |
+| BATF-603 | Optional field `sort_by` in datasource `dynamix_account_flipgroups_list` in cloudapi/account |
+
+#### dpdknet
+| Task ID | Description |
+| --- | --- |
+| BATF-617 | Datasources `dynamix_dpdknet` and `dynamix_dpdknet_list` in cloudapi/dpdknet |
+
+#### image
+| Task ID | Description |
+| --- | --- |
+| BATF-609 | Optional field `enabled` in datasource `dynamix_image_list` in cloudapi/image |
+
+#### k8s
+| Task ID | Description |
+| --- | --- |
+| BATF-610 | Optional fields `worker_chipset, chipset` in resource `dynamix_k8s_wg` in cloudapi/k8s |
+| BATF-610 | Optional fields `address_vip, chipset` in resource `dynamix_k8s_cp` in cloudapi/k8s |
+| BATF-610 | Computed fields `extnet_only, ha_mode, address_vip` in datasource `dynamix_k8s` in cloudapi/k8s |
+
+#### kvmvm
+| Task ID | Description |
+| --- | --- |
+| BATF-611 | Optional fields `pci_devices, chipset, network/weight` in resource `dynamix_kvmvm` in cloudapi/kvmvm |
+| BATF-611 | Computed fields `bus_number, libvirt_settings` in resource `dynamix_kvmvm` in cloudapi/kvmvm |
+| BATF-611 | Computed fields `pci_devices, chipset, bus_number, libvirt_settings` in datasources `dynamix_kvmvm, dynamix_kvmvm_list, dynamix_kvmvm_list_deleted` in cloudapi/kvmvm |
+| BATF-652 | Optional fields `pci_devices, chipset, network/weight` in resource `dynamix_kvmvm` in cloudapi/kvmvm |
+
+#### rg
+| Task ID | Description |
+| --- | --- |
+| BATF-613 | Optional field `uniq_pools` in resource `dynamix_resgroup` in cloudapi/rg |
+
+#### vins
+| Task ID | Description |
+| --- | --- |
+| BATF-616 | Computed fields `bus_number, mtu, libvirt_settings` in datasource `dynamix_vins` and resource `dynamix_vins` in cloudapi/vins |
+| BATF-616 | Optional field `vnf_dev_id` in datasource `dynamix_vins_list` in cloudapi/vins |
+
+### Fixed
+
+#### kvmvm
+| Task ID | Description |
+| --- | --- |
+| BATF-611 | The network IP can now be changed without detaching the network or stopping the VM in resource `dynamix_kvmvm` in cloudapi/kvmvm |
+| BATF-611 | Added support for DPDK networks in resource `dynamix_kvmvm` in cloudapi/kvmvm |
+
+### Removed
+
+#### all
+| Task ID | Description |
+| --- | --- |
+| BATF-620 | Field `reason` in all resources |
+| BATF-623 | Support for the `kvmppc` architecture in all resources |
+
+#### account
+| Task ID | Description |
+| --- | --- |
+| BATF-603 | Optional field `recursive_delete` in resource `dynamix_account` in cloudapi/account |
+
+#### image
+| Task ID | Description |
+| --- | --- |
+| BATF-609 | Optional field `permanently` in resources `dynamix_image, dynamix_image_virtual` in cloudapi/image |
\ No newline at end of file
diff --git a/Makefile b/Makefile
index fac9d61..cb4a19c 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ ZIPDIR = ./zip
 BINARY=${NAME}
 WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${SECONDNAMESPACE}/${VERSION}/${OS_ARCH}
 MAINPATH = ./cmd/dynamix/
-VERSION=1.0.1
+VERSION=1.1.0
 OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
 
 FILES = ${BINARY}_${VERSION}_darwin_amd64\
diff --git a/README.md b/README.md
index a84cea8..6f7799b 100644
--- a/README.md
+++ b/README.md
@@ -141,3 +141,41 @@
 | data source dynamix_lb_list_deleted | server_default_settings, server_settings, primary_node, secondary_node |
 | resource dynamix_lb | server_default_settings, server_settings, primary_node, secondary_node |
 | resource dynamix_lb_backend | server_settings |
+
+### Administrative API group
+
+#### Accounts (accounts)
+
+The following fields had the List type in terraform-provider-decort and have the Single type (a single nested structure) in terraform-provider-dynamix:
+
+| Resource name | Schema fields |
+|--------------------------|-------------------|
+| data source dynamix_cb_account_resource_consumption_list | consumed, reserved |
+| data source dynamix_cb_account_resource_consumption_get | consumed, reserved, resource_limits |
+| data source dynamix_cb_account_rg_list | computes, reserved, resource_limits, limits, reserved |
+| resource dynamix_cb_account | resource_limits, computes, machines |
+| data source dynamix_cb_disk_list_deleted | iotune |
+| data source dynamix_cb_disk_list_unattached | iotune |
+| data source dynamix_cb_disk_list | iotune |
+| data source dynamix_cb_disk_replication | iotune, replication |
+| data source dynamix_cb_disk | iotune |
+| resource dynamix_cb_disk_replication | iotune, replication |
+| resource dynamix_cb_disk | iotune |
+
+#### Disks (disks)
+
+| Resource name | Schema fields | Change compared to terraform-provider-decort | Comment |
+|---|---|---|---|
+| resource dynamix_cb_disk | iotune, shareable | If, while the disk resource is being created, the limit configuration operation (the iotune field) and/or the disk sharing operation (the shareable field) fails, the resource is now created with warnings (Warnings). Previously the resource was created with errors (Errors). | This change affects resource creation only. Resource update behaves as before: if the iotune and/or shareable update operation fails, errors (Errors) are returned. |
+| resource dynamix_cb_disk | - | The automatic disk restore operation (for a disk that is in the recycle bin) now happens when the resource is read. Previously it happened when the resource was updated. | |
+| data source dynamix_cb_disk_list_unattached | ckey, meta | The field names changed from "_ckey" to "ckey" and from "_meta" to "meta". | |
+
+The following fields had the List type in terraform-provider-decort and have the Single type (a single nested structure) in terraform-provider-dynamix:
+
+| Resource name | Schema fields |
+|------------------------------------------|------------|
+| data source dynamix_cb_disk_list_unattached | iotune |
+| data source dynamix_cb_disk | iotune |
+| data source dynamix_cb_disk_list | iotune |
+| data source dynamix_cb_disk_list_deleted | iotune |
+| resource dynamix_cb_disk | iotune |
\ No newline at end of file
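Two asides on the README additions above, since they describe behavior rather than show it.

First, the List-to-Single change: in terraform-plugin-framework terms this is the difference between a list-nested and a single-nested attribute. A minimal, hypothetical sketch (the `iotune`/`read_bytes_sec` names mirror the tables above; the provider's real schemas may differ):

```go
package schemas

import "github.com/hashicorp/terraform-plugin-framework/datasource/schema"

// Old shape (terraform-provider-decort): a one-element list,
// referenced in configs and outputs as iotune[0].read_bytes_sec.
func iotuneAsList() schema.Attribute {
	return schema.ListNestedAttribute{
		Computed: true,
		NestedObject: schema.NestedAttributeObject{
			Attributes: map[string]schema.Attribute{
				"read_bytes_sec": schema.Int64Attribute{Computed: true},
			},
		},
	}
}

// New shape (terraform-provider-dynamix): a single nested structure,
// referenced simply as iotune.read_bytes_sec.
func iotuneAsSingle() schema.Attribute {
	return schema.SingleNestedAttribute{
		Computed: true,
		Attributes: map[string]schema.Attribute{
			"read_bytes_sec": schema.Int64Attribute{Computed: true},
		},
	}
}
```

Second, the create-with-Warnings versus update-with-Errors rule for `iotune`/`shareable` on `resource dynamix_cb_disk` maps naturally onto the framework's diagnostics API. Again a hypothetical sketch, not the provider's actual code:

```go
package disks

import "github.com/hashicorp/terraform-plugin-framework/diag"

// reportIotuneFailure illustrates the documented behavior: a failed
// iotune/shareable call degrades to a warning during Create (the disk
// resource is still saved to state) but remains an error during Update.
func reportIotuneFailure(isCreate bool, err error, diags *diag.Diagnostics) {
	if err == nil {
		return
	}
	if isCreate {
		diags.AddWarning("disk created, but iotune/shareable was not applied", err.Error())
		return
	}
	diags.AddError("failed to update iotune/shareable", err.Error())
}
```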
diff --git a/docs/data-sources/account_flipgroups_list.md b/docs/data-sources/account_flipgroups_list.md
index 17543d9..ef0b5e2 100644
--- a/docs/data-sources/account_flipgroups_list.md
+++ b/docs/data-sources/account_flipgroups_list.md
@@ -27,6 +27,7 @@ description: |-
 - `name` (String) find by name
 - `page` (Number) page number
 - `size` (Number) page size
+- `sort_by` (String) sort by one of supported fields, format +|-(field)
 - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
 - `vins_id` (Number) find by vins ID
 - `vins_name` (String) find by vins name
diff --git a/docs/data-sources/audit.md b/docs/data-sources/audit.md
new file mode 100644
index 0000000..f400d39
--- /dev/null
+++ b/docs/data-sources/audit.md
@@ -0,0 +1,47 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "dynamix_audit Data Source - terraform-provider-dynamix"
+subcategory: ""
+description: |-
+
+---
+
+# dynamix_audit (Data Source)
+
+
+
+
+
+## Schema
+
+### Required
+
+- `audit_guid` (String)
+
+### Optional
+
+- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
+
+### Read-Only
+
+- `args` (String)
+- `call` (String)
+- `guid` (String)
+- `id` (String) The ID of this resource.
+- `kwargs` (String)
+- `remote_addr` (String)
+- `responsetime` (Number)
+- `result` (String)
+- `status_code` (Number)
+- `tags` (String)
+- `timestamp` (Number)
+- `timestamp_end` (Number)
+- `user` (String)
+
+
+### Nested Schema for `timeouts`
+
+Optional:
+
+- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
diff --git a/docs/data-sources/dpdknet.md b/docs/data-sources/dpdknet.md
new file mode 100644
index 0000000..7a89abf
--- /dev/null
+++ b/docs/data-sources/dpdknet.md
@@ -0,0 +1,47 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "dynamix_dpdknet Data Source - terraform-provider-dynamix"
+subcategory: ""
+description: |-
+
+---
+
+# dynamix_dpdknet (Data Source)
+
+
+
+
+
+## Schema
+
+### Required
+
+- `dpdk_id` (Number)
+
+### Optional
+
+- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
+
+### Read-Only
+
+- `account_access` (List of Number)
+- `compute_ids` (List of Number)
+- `created_time` (Number)
+- `desc` (String)
+- `gid` (Number)
+- `guid` (Number)
+- `id` (String) The ID of this resource.
+- `name` (String)
+- `ovs_bridge` (String)
+- `rg_access` (List of Number)
+- `status` (String)
+- `updated_time` (Number)
+- `vlan_id` (Number)
+
+
+### Nested Schema for `timeouts`
+
+Optional:
+
+- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
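One note on the generated `timeouts` schemas that recur in the docs above: the accepted strings are exactly Go `time.ParseDuration` syntax, so a value can be sanity-checked outside Terraform with the standard library alone. A minimal illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The example values the docs cite as valid `read` timeouts.
	for _, s := range []string{"30s", "2h45m"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", d) // e.g. "2h45m" -> 2h45m0s
	}
}
```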
diff --git a/docs/data-sources/dpdknet_list.md b/docs/data-sources/dpdknet_list.md new file mode 100644 index 0000000..e4949ff --- /dev/null +++ b/docs/data-sources/dpdknet_list.md @@ -0,0 +1,65 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_dpdknet_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_dpdknet_list (Data Source) + + + + + + +## Schema + +### Optional + +- `by_id` (Number) +- `compute_ids` (List of Number) +- `desc` (String) +- `gid` (Number) +- `name` (String) +- `page` (Number) +- `size` (Number) +- `sort_by` (String) +- `status` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Required: + +- `dpdk_id` (Number) + +Read-Only: + +- `account_access` (List of Number) +- `compute_ids` (List of Number) +- `created_time` (Number) +- `desc` (String) +- `gid` (Number) +- `guid` (Number) +- `name` (String) +- `ovs_bridge` (String) +- `rg_access` (List of Number) +- `status` (String) +- `updated_time` (Number) +- `vlan_id` (Number) diff --git a/docs/data-sources/image_list.md b/docs/data-sources/image_list.md index 3fd387f..0007d28 100644 --- a/docs/data-sources/image_list.md +++ b/docs/data-sources/image_list.md @@ -20,6 +20,7 @@ description: |- - `architecture` (String) - `bootable` (Boolean) - `by_id` (Number) +- `enabled` (Boolean) - `hot_resize` (Boolean) - `image_size` (Number) - `name` (String) diff --git a/docs/data-sources/k8s.md b/docs/data-sources/k8s.md index c8b6b52..09a2240 100644 --- a/docs/data-sources/k8s.md +++ b/docs/data-sources/k8s.md @@ -28,12 +28,15 @@ description: |- - `account_id` (Number) - `account_name` (String) - `acl` (Attributes) (see [below for nested schema](#nestedatt--acl)) +- `address_vip` (Attributes) (see [below for nested schema](#nestedatt--address_vip)) - `bservice_id` (Number) - `created_by` (String) - `created_time` (Number) - `deleted_by` (String) - `deleted_time` (Number) - `extnet_id` (Number) +- `extnet_only` (Boolean) +- `ha_mode` (Boolean) - `id` (String) The ID of this resource. 
- `k8sci_id` (Number) - `k8sci_name` (String) @@ -109,6 +112,15 @@ Read-Only: + +### Nested Schema for `address_vip` + +Read-Only: + +- `backend_ip` (String) +- `frontend_ip` (String) + + ### Nested Schema for `masters` diff --git a/docs/data-sources/kvmvm.md b/docs/data-sources/kvmvm.md index d58d4f6..78cc9af 100644 --- a/docs/data-sources/kvmvm.md +++ b/docs/data-sources/kvmvm.md @@ -36,6 +36,7 @@ description: |- - `boot_order` (List of String) - `bootdisk_size` (Number) - `cd_image_id` (Number) +- `chipset` (String) - `clone_reference` (Number) - `clones` (List of Number) - `computeci_id` (Number) @@ -72,6 +73,7 @@ description: |- - `numa_affinity` (String) - `numa_node_id` (Number) - `os_users` (Attributes List) (see [below for nested schema](#nestedatt--os_users)) +- `pci_devices` (List of Number) - `pinned` (Boolean) - `ram` (Number) - `reference_id` (String) @@ -185,6 +187,7 @@ Read-Only: - `account_id` (Number) - `acl` (String) - `boot_partition` (Number) +- `bus_number` (Number) - `ckey` (String) - `created_time` (Number) - `deleted_time` (Number) @@ -277,6 +280,7 @@ Read-Only: Read-Only: +- `bus_number` (Number) - `conn_id` (Number) - `conn_type` (String) - `def_gw` (String) @@ -284,8 +288,10 @@ Read-Only: - `flip_group_id` (Number) - `guid` (String) - `ip_address` (String) +- `libvirt_settings` (Attributes) (see [below for nested schema](#nestedatt--interfaces--libvirt_settings)) - `listen_ssh` (Boolean) - `mac` (String) +- `mtu` (Number) - `name` (String) - `net_id` (Number) - `net_type` (String) @@ -297,6 +303,20 @@ Read-Only: - `type` (String) - `vnfs` (List of Number) + +### Nested Schema for `interfaces.libvirt_settings` + +Read-Only: + +- `event_idx` (String) +- `guid` (String) +- `ioeventfd` (String) +- `queues` (Number) +- `rx_queue_size` (Number) +- `tx_queue_size` (Number) +- `txmode` (String) + + ### Nested Schema for `interfaces.qos` diff --git a/docs/data-sources/kvmvm_list.md b/docs/data-sources/kvmvm_list.md index 324672a..8a848ca 100644 --- a/docs/data-sources/kvmvm_list.md +++ b/docs/data-sources/kvmvm_list.md @@ -64,6 +64,7 @@ Read-Only: - `boot_order` (List of String) - `bootdisk_size` (Number) - `cd_image_id` (Number) +- `chipset` (String) - `clone_reference` (Number) - `clones` (List of Number) - `compute_id` (Number) @@ -159,6 +160,7 @@ Read-Only: Read-Only: +- `bus_number` (Number) - `disk_id` (Number) - `pci_slot` (Number) @@ -168,6 +170,7 @@ Read-Only: Read-Only: +- `bus_number` (Number) - `conn_id` (Number) - `conn_type` (String) - `def_gw` (String) @@ -175,6 +178,7 @@ Read-Only: - `flip_group_id` (Number) - `guid` (String) - `ip_address` (String) +- `libvirt_settings` (Attributes) (see [below for nested schema](#nestedatt--items--interfaces--libvirt_settings)) - `listen_ssh` (Boolean) - `mac` (String) - `name` (String) @@ -188,6 +192,20 @@ Read-Only: - `type` (String) - `vnfs` (List of Number) + +### Nested Schema for `items.interfaces.libvirt_settings` + +Read-Only: + +- `event_idx` (String) +- `guid` (String) +- `ioeventfd` (String) +- `queues` (Number) +- `rx_queue_size` (Number) +- `tx_queue_size` (Number) +- `txmode` (String) + + ### Nested Schema for `items.interfaces.qos` diff --git a/docs/data-sources/kvmvm_list_deleted.md b/docs/data-sources/kvmvm_list_deleted.md index cc09fbf..16edb39 100644 --- a/docs/data-sources/kvmvm_list_deleted.md +++ b/docs/data-sources/kvmvm_list_deleted.md @@ -62,6 +62,7 @@ Read-Only: - `boot_order` (List of String) - `bootdisk_size` (Number) - `cd_image_id` (Number) +- `chipset` (String) - `clone_reference` (Number) 
- `clones` (List of Number) - `compute_id` (Number) @@ -157,6 +158,7 @@ Read-Only: Read-Only: +- `bus_number` (Number) - `disk_id` (Number) - `pci_slot` (Number) @@ -166,6 +168,7 @@ Read-Only: Read-Only: +- `bus_number` (Number) - `conn_id` (Number) - `conn_type` (String) - `def_gw` (String) @@ -173,6 +176,7 @@ Read-Only: - `flip_group_id` (Number) - `guid` (String) - `ip_address` (String) +- `libvirt_settings` (Attributes) (see [below for nested schema](#nestedatt--items--interfaces--libvirt_settings)) - `listen_ssh` (Boolean) - `mac` (String) - `name` (String) @@ -186,6 +190,20 @@ Read-Only: - `type` (String) - `vnfs` (List of Number) + +### Nested Schema for `items.interfaces.libvirt_settings` + +Read-Only: + +- `event_idx` (String) +- `guid` (String) +- `ioeventfd` (String) +- `queues` (Number) +- `rx_queue_size` (Number) +- `tx_queue_size` (Number) +- `txmode` (String) + + ### Nested Schema for `items.interfaces.qos` diff --git a/docs/data-sources/resgroup.md b/docs/data-sources/resgroup.md index c67a7ab..061c27b 100644 --- a/docs/data-sources/resgroup.md +++ b/docs/data-sources/resgroup.md @@ -21,7 +21,6 @@ description: |- ### Optional -- `reason` (String) reason for request - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only diff --git a/docs/data-sources/rg_usage.md b/docs/data-sources/rg_usage.md index 0270fd7..5b59f38 100644 --- a/docs/data-sources/rg_usage.md +++ b/docs/data-sources/rg_usage.md @@ -21,7 +21,6 @@ description: |- ### Optional -- `reason` (String) reason for action - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only diff --git a/docs/data-sources/vins.md b/docs/data-sources/vins.md index 314fc53..cececca 100644 --- a/docs/data-sources/vins.md +++ b/docs/data-sources/vins.md @@ -146,6 +146,7 @@ Read-Only: Read-Only: +- `bus_number` (Number) - `conn_id` (Number) - `conn_type` (String) - `def_gw` (String) @@ -153,8 +154,10 @@ Read-Only: - `flipgroup_id` (Number) - `guid` (String) - `ip_address` (String) +- `libvirt_settings` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev--interfaces--libvirt_settings)) - `listen_ssh` (Boolean) - `mac` (String) +- `mtu` (Number) - `name` (String) - `net_id` (Number) - `net_mask` (Number) @@ -166,6 +169,20 @@ Read-Only: - `type` (String) - `vnfs` (List of Number) + +### Nested Schema for `vnf_dev.interfaces.libvirt_settings` + +Read-Only: + +- `event_idx` (String) +- `guid` (String) +- `ioeventfd` (String) +- `queues` (Number) +- `rx_queue_size` (Number) +- `tx_queue_size` (Number) +- `txmode` (String) + + ### Nested Schema for `vnf_dev.interfaces.qos` diff --git a/docs/data-sources/vins_list.md b/docs/data-sources/vins_list.md index 41c9dbf..e1b93fb 100644 --- a/docs/data-sources/vins_list.md +++ b/docs/data-sources/vins_list.md @@ -27,6 +27,7 @@ description: |- - `size` (Number) Page size - `sort_by` (String) sort by one of supported fields, format +|-(field) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vnf_dev_id` (Number) Filter by VNF Device id ### Read-Only diff --git a/docs/resources/account.md b/docs/resources/account.md index 0f19829..79e2287 100644 --- a/docs/resources/account.md +++ b/docs/resources/account.md @@ -91,10 +91,6 @@ Required: - `access_type` (String) - `user_id` (String) -Optional: - -- `recursive_delete` (Boolean) - ### Nested Schema for `acl` diff --git a/docs/resources/disk.md b/docs/resources/disk.md index bbb1e44..6970301 100644 --- 
a/docs/resources/disk.md +++ b/docs/resources/disk.md @@ -29,7 +29,6 @@ description: |- - `iotune` (Attributes) (see [below for nested schema](#nestedatt--iotune)) - `permanently` (Boolean) Whether to completely delete the disk, works only with non attached disks - `pool` (String) Pool for disk location -- `reason` (String) Reason for deletion - `sep_id` (Number) Storage endpoint provider ID to create disk - `shareable` (Boolean) share disk - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) diff --git a/docs/resources/disk_replication.md b/docs/resources/disk_replication.md index 7e49831..106ff0f 100644 --- a/docs/resources/disk_replication.md +++ b/docs/resources/disk_replication.md @@ -27,7 +27,6 @@ description: |- - `detach` (Boolean) - `pause` (Boolean) - `permanently` (Boolean) -- `reason` (String) - `reverse` (Boolean) - `start` (Boolean) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) diff --git a/docs/resources/image.md b/docs/resources/image.md index 3a2ff61..b30d624 100644 --- a/docs/resources/image.md +++ b/docs/resources/image.md @@ -26,11 +26,10 @@ description: |- ### Optional -- `architecture` (String) Binary architecture of this image, one of X86_64 of PPC64_LE +- `architecture` (String) Binary architecture of this image, one of X86_64 - `hot_resize` (Boolean) Does this machine supports hot resize - `password` (String) Optional password for the image - `password_dl` (String) Password for upload binary media -- `permanently` (Boolean) - `pool_name` (String) Pool for image create - `sep_id` (Number) Storage endpoint provider ID - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) diff --git a/docs/resources/image_virtual.md b/docs/resources/image_virtual.md index d4e4007..551b1fe 100644 --- a/docs/resources/image_virtual.md +++ b/docs/resources/image_virtual.md @@ -22,7 +22,6 @@ description: |- ### Optional -- `permanently` (Boolean) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only diff --git a/docs/resources/k8s_cp.md b/docs/resources/k8s_cp.md index 5548e57..7915fb9 100644 --- a/docs/resources/k8s_cp.md +++ b/docs/resources/k8s_cp.md @@ -25,6 +25,7 @@ description: |- ### Optional - `additional_sans` (List of String) is used to define settings and actions that should be performed before any other component in the cluster starts. It allows you to configure things like node registration, network setup, and other initialization tasks. insert a valid JSON string with all levels of nesting. +- `chipset` (String) Type of the emulated system - `cluster_config` (String) is used to configure the behavior and settings of the Kube-proxy, which is responsible for network proxying and load balancing within the cluster. It includes parameters such as proxy mode, cluster IP ranges, and other Kube-proxy specific configurations. insert a valid JSON string with all levels of nesting. - `cpu` (Number) Node CPU count. - `desc` (String) Text description of this instance. @@ -39,7 +40,7 @@ description: |- - `kubelet_config` (String) is used to configure the behavior and settings of the Kube-proxy, which is responsible for network proxying and load balancing within the cluster. It includes parameters such as proxy mode, cluster IP ranges, and other Kube-proxy specific configurations. insert a valid JSON string with all levels of nesting. - `lb_sysctl_params` (Attributes List) Custom sysctl values for Load Balancer instance. Applied on boot. 
(see [below for nested schema](#nestedatt--lb_sysctl_params)) - `num` (Number) Number of VMs to create. Can be either 1,3 or 5 -- `oidc_cert` (String) insert ssl certificate in x509 pem format +- `oidc_cert` (String) Insert ssl certificate in x509 pem format - `permanently` (Boolean) whether to completely delete the k8s cluster - `ram` (Number) Node RAM in MB. - `restore` (Boolean) if true, restore the k8s cluster from Recycle Bin @@ -55,6 +56,7 @@ description: |- - `account_id` (Number) - `account_name` (String) - `acl` (Attributes) (see [below for nested schema](#nestedatt--acl)) +- `address_vip` (Attributes) (see [below for nested schema](#nestedatt--address_vip)) - `bservice_id` (Number) - `created_by` (String) - `created_time` (Number) @@ -145,6 +147,15 @@ Read-Only: + +### Nested Schema for `address_vip` + +Read-Only: + +- `backend_ip` (String) +- `frontend_ip` (String) + + ### Nested Schema for `detailed_info` diff --git a/docs/resources/k8s_wg.md b/docs/resources/k8s_wg.md index 6bcb3f2..ab9bead 100644 --- a/docs/resources/k8s_wg.md +++ b/docs/resources/k8s_wg.md @@ -23,6 +23,7 @@ description: |- ### Optional - `annotations` (List of String) +- `chipset` (String) Type of the emulated system of work group - `cloud_init` (String) - `cpu` (Number) Worker node CPU count. - `disk` (Number) Worker node boot disk size. If unspecified or 0, size is defined by OS image size. @@ -31,6 +32,7 @@ description: |- - `ram` (Number) Worker node RAM in MB. - `taints` (List of String) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `worker_chipset` (String) Type of the emulated system of worker nodes - `worker_sep_id` (Number) - `worker_sep_pool` (String) diff --git a/docs/resources/kvmvm.md b/docs/resources/kvmvm.md index 3a3e084..470a676 100644 --- a/docs/resources/kvmvm.md +++ b/docs/resources/kvmvm.md @@ -31,6 +31,7 @@ description: |- - `auto_start` (Boolean) Flag for redeploy compute - `boot_disk_size` (Number) This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image. - `cd` (Attributes) (see [below for nested schema](#nestedatt--cd)) +- `chipset` (String) Type of the emulated system, Q35 or i440fx - `cloud_init` (String) Optional cloud_init parameters. Applied when creating new compute instance only, ignored in all other cases. - `cpu_pin` (Boolean) Run VM on dedicated CPUs. To use this feature, the system must be pre-configured by allocating CPUs on the physical node. - `custom_fields` (String) custom fields for Compute. Must be dict @@ -48,6 +49,7 @@ description: |- - `network` (Attributes Set) Optional network connection(s) for this compute. You may specify several network blocks, one for each connection. (see [below for nested schema](#nestedatt--network)) - `numa_affinity` (String) Rule for VM placement with NUMA affinity. - `pause` (Boolean) +- `pci_devices` (Set of Number) ID of the connected pci devices - `permanently` (Boolean) - `pin_to_stack` (Boolean) - `pool` (String) Pool to use if sepId is set, can be also empty if needed to be chosen by system. @@ -171,6 +173,8 @@ Required: Optional: - `ip_address` (String) Optional IP address to assign to this connection. This IP should belong to the selected network and free for use. +- `mtu` (Number) Maximum transmission unit, used only for DPDK type, must be 1-9216 +- `weight` (Number) Weight the network if you need to sort network list, the smallest attach first. 
zero or null weight attach last Read-Only: @@ -293,6 +297,7 @@ Read-Only: - `account_id` (Number) - `acl` (String) - `boot_partition` (Number) +- `bus_number` (Number) - `ckey` (String) - `created_time` (Number) - `deleted_time` (Number) @@ -388,6 +393,7 @@ Read-Only: - `account_id` (Number) - `acl` (String) - `boot_partition` (Number) +- `bus_number` (Number) - `ckey` (String) - `created_time` (Number) - `deleted_time` (Number) @@ -480,6 +486,7 @@ Read-Only: Read-Only: +- `bus_number` (Number) - `conn_id` (Number) - `conn_type` (String) - `enabled` (Boolean) @@ -487,8 +494,10 @@ Read-Only: - `get_gw` (String) - `guid` (String) - `ip_address` (String) +- `libvirt_settings` (Attributes) (see [below for nested schema](#nestedatt--interfaces--libvirt_settings)) - `listen_ssh` (Boolean) - `mac` (String) +- `mtu` (Number) - `name` (String) - `net_id` (Number) - `net_type` (String) @@ -500,6 +509,20 @@ Read-Only: - `type` (String) - `vnfs` (List of Number) + +### Nested Schema for `interfaces.libvirt_settings` + +Read-Only: + +- `event_idx` (String) +- `guid` (String) +- `ioeventfd` (String) +- `queues` (Number) +- `rx_queue_size` (Number) +- `tx_queue_size` (Number) +- `txmode` (String) + + ### Nested Schema for `interfaces.qos` diff --git a/docs/resources/resgroup.md b/docs/resources/resgroup.md index f1f0259..49228b8 100644 --- a/docs/resources/resgroup.md +++ b/docs/resources/resgroup.md @@ -35,10 +35,10 @@ description: |- - `owner` (String) username - owner of this RG. Leave blank to set current user as owner - `permanently` (Boolean) Set to True if you want force delete non-empty RG - `quota` (Attributes) Quota settings for this resource group. (see [below for nested schema](#nestedatt--quota)) -- `reason` (String) Set to True if you want force delete non-empty RG - `register_computes` (Boolean) Register computes in registration system - `restore` (Boolean) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `uniq_pools` (List of String) List of strings with pools. Applies only when updating ### Read-Only @@ -60,7 +60,6 @@ description: |- - `rg_id` (Number) - `secret` (String) - `status` (String) -- `uniq_pools` (List of String) - `updated_by` (String) - `updated_time` (Number) - `vins` (List of Number) @@ -74,10 +73,6 @@ Required: - `right` (String) Access rights to set, one of 'R', 'RCX' or 'ARCXDU' - `user` (String) User or group name to grant access -Optional: - -- `reason` (String) Reason for action - ### Nested Schema for `def_net` @@ -89,7 +84,6 @@ Required: Optional: - `net_id` (Number) Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS. 
-- `reason` (String) Reason for action diff --git a/docs/resources/vins.md b/docs/resources/vins.md index 24aa34c..25121c4 100644 --- a/docs/resources/vins.md +++ b/docs/resources/vins.md @@ -203,6 +203,7 @@ Read-Only: Read-Only: +- `bus_number` (Number) - `conn_id` (Number) - `conn_type` (String) - `def_gw` (String) @@ -210,8 +211,10 @@ Read-Only: - `flipgroup_id` (Number) - `guid` (String) - `ip_address` (String) +- `libvirt_settings` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev--interfaces--libvirt_settings)) - `listen_ssh` (Boolean) - `mac` (String) +- `mtu` (Number) - `name` (String) - `net_id` (Number) - `net_mask` (Number) @@ -223,6 +226,20 @@ Read-Only: - `type` (String) - `vnfs` (List of Number) + +### Nested Schema for `vnf_dev.interfaces.libvirt_settings` + +Read-Only: + +- `event_idx` (String) +- `guid` (String) +- `ioeventfd` (String) +- `queues` (Number) +- `rx_queue_size` (Number) +- `tx_queue_size` (Number) +- `txmode` (String) + + ### Nested Schema for `vnf_dev.interfaces.qos` diff --git a/go.mod b/go.mod index 6555683..7027d19 100644 --- a/go.mod +++ b/go.mod @@ -9,21 +9,10 @@ require ( github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/sirupsen/logrus v1.9.3 - repository.basistech.ru/BASIS/decort-golang-sdk v1.8.1 + repository.basistech.ru/BASIS/decort-golang-sdk v1.9.2 ) require ( - github.com/BurntSushi/toml v1.2.1 // indirect - github.com/Kunde21/markdownfmt/v3 v3.1.0 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.2.0 // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect - github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect - github.com/armon/go-radix v1.0.0 // indirect - github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect - github.com/cloudflare/circl v1.3.7 // indirect github.com/fatih/color v1.16.0 // indirect github.com/gabriel-vasile/mimetype v1.4.4 // indirect github.com/go-playground/locales v0.14.1 // indirect @@ -31,51 +20,26 @@ require ( github.com/go-playground/validator/v10 v10.21.0 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/hashicorp/cli v1.1.6 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-checkpoint v0.5.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/go-version v1.7.0 // indirect - github.com/hashicorp/hc-install v0.7.0 // indirect - github.com/hashicorp/terraform-exec v0.21.0 // indirect - github.com/hashicorp/terraform-json v0.22.1 // indirect - github.com/hashicorp/terraform-plugin-docs v0.19.4 // indirect github.com/hashicorp/terraform-plugin-go v0.23.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect - github.com/huandu/xstrings v1.3.3 // indirect - github.com/imdario/mergo v0.3.15 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.9 // 
indirect - github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/oklog/run v1.0.0 // indirect - github.com/posener/complete v1.2.3 // indirect - github.com/shopspring/decimal v1.3.1 // indirect - github.com/spf13/cast v1.5.0 // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - github.com/yuin/goldmark v1.7.1 // indirect - github.com/yuin/goldmark-meta v1.1.0 // indirect - github.com/zclconf/go-cty v1.14.4 // indirect - go.abhg.dev/goldmark/frontmatter v0.2.0 // indirect golang.org/x/crypto v0.24.0 // indirect - golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect - golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.26.0 // indirect golang.org/x/sys v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/grpc v1.63.2 // indirect google.golang.org/protobuf v1.34.0 // indirect - gopkg.in/yaml.v2 v2.3.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index abc5dfb..6932d25 100644 --- a/go.sum +++ b/go.sum @@ -1,30 +1,7 @@ -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/Kunde21/markdownfmt/v3 v3.1.0 h1:KiZu9LKs+wFFBQKhrZJrFZwtLnCCWJahL+S+E/3VnM0= -github.com/Kunde21/markdownfmt/v3 v3.1.0/go.mod h1:tPXN1RTyOzJwhfHoon9wUr4HGYmWgVxSQN6VBJDkrVc= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= -github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= -github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= -github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= -github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= -github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= @@ -43,39 +20,14 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/cli v1.1.6 h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8= -github.com/hashicorp/cli v1.1.6/go.mod h1:MPon5QYlgjjo0BSoAiN0ESeT5fRzDjVRp+uioJ0piz4= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= -github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk= -github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA= -github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= -github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= -github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= 
-github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= -github.com/hashicorp/terraform-plugin-docs v0.19.4 h1:G3Bgo7J22OMtegIgn8Cd/CaSeyEljqjH3G39w28JK4c= -github.com/hashicorp/terraform-plugin-docs v0.19.4/go.mod h1:4pLASsatTmRynVzsjEhbXZ6s7xBlUw/2Kt0zfrq8HxA= github.com/hashicorp/terraform-plugin-framework v1.9.0 h1:caLcDoxiRucNi2hk8+j3kJwkKfvHznubyFsJMWfZqKU= github.com/hashicorp/terraform-plugin-framework v1.9.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= @@ -92,53 +44,27 @@ github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= 
-github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= @@ -146,62 +72,22 @@ github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IU github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/goldmark v1.7.1 h1:3bajkSilaCbjdKVsKdZjZCLBNPL9pYzrCakKaf4U49U= -github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= -github.com/yuin/goldmark-meta v1.1.0 h1:pWw+JLHGZe8Rk0EGsMVssiNb/AaPMHfSRszZeUeiOUc= -github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBvXi1lBb2VP0= -github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= -github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -go.abhg.dev/goldmark/frontmatter v0.2.0 h1:P8kPG0YkL12+aYk2yU3xHv4tcXzeVnN+gU0tJ5JnxRw= -go.abhg.dev/goldmark/frontmatter v0.2.0/go.mod h1:XqrEkZuM57djk7zrlRUB02x8I5J0px76YjkOzhB4YlU= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod 
h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= -golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.16.0 
h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
@@ -211,11 +97,8 @@ google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH
 google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-repository.basistech.ru/BASIS/decort-golang-sdk v1.8.1 h1:wpxjnVO7hGaQVWbtFoYVbNoeFxk0QUnEfDsPuWjiNk0=
-repository.basistech.ru/BASIS/decort-golang-sdk v1.8.1/go.mod h1:OaUynHHuSjWMzpfyoL4au6oLcUogqUkPPBKA15pbHWo=
+repository.basistech.ru/BASIS/decort-golang-sdk v1.9.2 h1:MPH1tMQrDN1Gri4FrQP3cx60uR3uZioEDb707D88/7c=
+repository.basistech.ru/BASIS/decort-golang-sdk v1.9.2/go.mod h1:OaUynHHuSjWMzpfyoL4au6oLcUogqUkPPBKA15pbHWo=
diff --git a/internal/provider/data_sources_map.go b/internal/provider/data_sources_map.go
index 82925e7..81ae7d2 100644
--- a/internal/provider/data_sources_map.go
+++ b/internal/provider/data_sources_map.go
@@ -3,8 +3,10 @@ package provider
 import (
     "github.com/hashicorp/terraform-plugin-framework/datasource"
     "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit"
     "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice"
     "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet"
     "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/extnet"
     "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/flipgroup"
     "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/image"
@@ -17,6 +19,8 @@ import (
     "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vins"
     cbAccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account"
     cbAudit "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit"
+    cbDisks "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks"
+    cbK8ci "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci"
     cbLb "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb"
     cbNode "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node"
     cbRG "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg"
@@ -43,6 +47,8 @@ func newDataSourcesMap() []func() datasource.DataSource {
     account.NewDataSourceAccountReservedUnits,
     account.NewDataSourceAccountGetResourceConsumptionList,
 
+    audit.NewDataSourceAudit,
+
     bservice.NewDataSourceBService,
     bservice.NewDataSourceBServiceList,
     bservice.NewDataSourceBServiceGroup,
@@ -59,6 +65,9 @@ func newDataSourcesMap() []func() datasource.DataSource {
     disks.NewDataSourceDiskSnapshotList,
     disks.NewDataSourceDiskReplication,
 
+    dpdknet.NewDataSourceDPDK,
+    dpdknet.NewDataSourceDPDKList,
+
     extnet.NewDataSourceExtNet,
     extnet.NewDataSourceExtNetComputesList,
     extnet.NewDataSourceExtNetDefault,
@@ -147,9 +156,23 @@ func newDataSourcesMap() []func() datasource.DataSource {
     cbAudit.NewDataSourceAuditLinkedJobs,
     cbAudit.NewDataSourceAuditList,
 
+    cbDisks.NewDataSourceDiskListDeleted,
+    cbDisks.NewDataSourceDiskListTypesDetailed,
+    cbDisks.NewDataSourceDiskListTypes,
+    cbDisks.NewDataSourceDiskListUnattached,
+    cbDisks.NewdataSourceDiskList,
+    cbDisks.NewDataSourceDiskReplication,
+    cbDisks.NewDataSourceDiskSnapshotList,
+    cbDisks.NewDataSourceDiskSnapshot,
+    cbDisks.NewDataSourceDisk,
+
     cbNode.NewDataSourceNode,
     cbNode.NewDataSourceNodeList,
 
+    cbK8ci.NewDataSourceK8CI,
+    cbK8ci.NewDataSourceK8CIList,
+    cbK8ci.NewDataSourceK8CIListDeleted,
+
     cbLb.NewDataSourceLB,
     cbLb.NewDataSourceLBList,
     cbLb.NewDataSourceLBListDeleted,
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 6f3ec58..eab1729 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -142,7 +142,7 @@ func (p *DynamixProvider) Configure(ctx context.Context, req provider.ConfigureR
         return
     }
 
-    // Set up default values, values from env and save user provided values into decortConfig
+    // Set up default values, values from env and save user provided values into dynamixConfig
     dynamixConfig := dynamixProviderConfig{}
     dynamixConfig.new(config)
 
diff --git a/internal/provider/resources_map.go b/internal/provider/resources_map.go
index d2da980..ba5b4bf 100644
--- a/internal/provider/resources_map.go
+++ b/internal/provider/resources_map.go
@@ -12,8 +12,11 @@ import (
     "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/lb"
     "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg"
     "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vins"
-    cbLb "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb"
     cbAccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account"
+    cbDisks "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks"
+    cbK8CI "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/k8ci"
+    cbLb "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb"
+    cbRG "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg"
     cbVFpool "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool"
     cbVins "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins"
 )
@@ -43,6 +46,18 @@ func newResourcesMap() []func() resource.Resource {
     cbAccount.NewResourceAccount,
 
+    cbDisks.NewResourceDiskReplications,
+    cbDisks.NewResourceDiskSnapshot,
+    cbDisks.NewResourceDisk,
+
+    cbK8CI.NewResourceK8Ci,
+
     cbLb.NewResourceLB,
     cbLb.NewResourceLBBackend,
     cbLb.NewResourceLBBackendServer,
     cbLb.NewResourceLBFrontend,
     cbLb.NewResourceLBFrontendBind,
+    cbRG.NewResourceRG,
     cbVFpool.NewResourceVFPool,
     cbVins.NewResourceVINS,
     cbVins.NewResourceVINSStaticRoute,
diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_flipgroups_list.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_flipgroups_list.go
index 2c912c0..1ef2a44 100644
--- a/internal/service/cloudapi/account/flattens/flatten_data_source_account_flipgroups_list.go
+++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_flipgroups_list.go
@@ -37,6 +37,7 @@ func AccountFlipgroupsListDataSource(ctx context.Context, state *models.DataSour
         ExtNetID:    state.ExtNetID,
         ByIP:        state.ByIP,
         FLIPGroupID: state.FLIPGroupID,
+        SortBy:      state.SortBy,
         Page:        state.Page,
         Size:        state.Size,
         Timeouts:    state.Timeouts,
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_flipgroups_list.go b/internal/service/cloudapi/account/models/model_data_source_account_flipgroups_list.go
index b05e9c5..fc7020c 100644
--- a/internal/service/cloudapi/account/models/model_data_source_account_flipgroups_list.go
+++ b/internal/service/cloudapi/account/models/model_data_source_account_flipgroups_list.go
@@ -14,6 +14,7 @@ type DataSourceAccountFlipgroupsListModel struct {
     ExtNetID    types.Int64    `tfsdk:"extnet_id"`
     ByIP        types.String   `tfsdk:"by_ip"`
     FLIPGroupID types.Int64    `tfsdk:"flipgroup_id"`
+    SortBy      types.String   `tfsdk:"sort_by"`
     Page        types.Int64    `tfsdk:"page"`
     Size        types.Int64    `tfsdk:"size"`
     Timeouts    timeouts.Value `tfsdk:"timeouts"`
diff --git a/internal/service/cloudapi/account/models/model_resource_account.go b/internal/service/cloudapi/account/models/model_resource_account.go
index b36b7ef..19f3a01 100644
--- a/internal/service/cloudapi/account/models/model_resource_account.go
+++ b/internal/service/cloudapi/account/models/model_resource_account.go
@@ -50,9 +50,8 @@ type ResourceAccountModel struct {
 }
 
 type UsersModel struct {
-    UserID          types.String `tfsdk:"user_id"`
-    AccessType      types.String `tfsdk:"access_type"`
-    RecursiveDelete types.Bool   `tfsdk:"recursive_delete"`
+    UserID     types.String `tfsdk:"user_id"`
+    AccessType types.String `tfsdk:"access_type"`
 }
 
 type ResourceLimitsInAccountResourceModel struct {
diff --git a/internal/service/cloudapi/account/schemas/schema_data_source_account_flipgroups_list.go b/internal/service/cloudapi/account/schemas/schema_data_source_account_flipgroups_list.go
index df68540..5a0f7a6 100644
--- a/internal/service/cloudapi/account/schemas/schema_data_source_account_flipgroups_list.go
+++ b/internal/service/cloudapi/account/schemas/schema_data_source_account_flipgroups_list.go
@@ -37,6 +37,10 @@ func MakeSchemaDataSourceAccountFlipgroupsList() map[string]schema.Attribute {
             Optional:    true,
             Description: "find by flipgroup id",
         },
+        "sort_by": schema.StringAttribute{
+            Optional:    true,
+            Description: "sort by one of supported fields, format +|-(field)",
+        },
         "page": schema.Int64Attribute{
             Optional:    true,
             Description: "page number",
diff --git a/internal/service/cloudapi/account/schemas/schema_resource_account.go b/internal/service/cloudapi/account/schemas/schema_resource_account.go
index 6a5ec39..502f51e 100644
--- a/internal/service/cloudapi/account/schemas/schema_resource_account.go
+++ b/internal/service/cloudapi/account/schemas/schema_resource_account.go
@@ -38,10 +38,6 @@ func MakeSchemaResourceAccount() map[string]schema.Attribute {
                 "access_type": schema.StringAttribute{
                     Required: true,
                 },
-                "recursive_delete": schema.BoolAttribute{
-                    Optional: true,
-                    // default is false
-                },
             },
         },
     },
diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_flipgroups_list.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_flipgroups_list.go
index 78533c2..2b2f888 100644
--- a/internal/service/cloudapi/account/utilities/utility_data_source_account_flipgroups_list.go
+++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_flipgroups_list.go
@@ -33,6 +33,9 @@ func AccountFlipgroupsListCheckPresence(ctx context.Context, plan *models.DataSo
     if !plan.FLIPGroupID.IsNull() {
         flipgroupsListReq.FLIPGroupID = uint64(plan.FLIPGroupID.ValueInt64())
     }
+    if !plan.SortBy.IsNull() {
+        flipgroupsListReq.SortBy = plan.SortBy.ValueString()
+    }
     if !plan.Page.IsNull() {
         flipgroupsListReq.Page = uint64(plan.Page.ValueInt64())
     }
diff --git a/internal/service/cloudapi/account/utilities/utility_resource_account.go b/internal/service/cloudapi/account/utilities/utility_resource_account.go
index b38ea6e..ff41bc9 100644
--- a/internal/service/cloudapi/account/utilities/utility_resource_account.go
+++ b/internal/service/cloudapi/account/utilities/utility_resource_account.go
@@ -298,9 +298,8 @@ func AddDeleteUsersAccount(ctx context.Context, accountId uint64, plan, state *m
 
     for _, user := range deletedUsers {
         delUserReq := account.DeleteUserRequest{
-            AccountID:       accountId,
-            UserID:          user.UserID.ValueString(),
-            RecursiveDelete: user.RecursiveDelete.ValueBool(), // default false
+            AccountID: accountId,
+            UserID:    user.UserID.ValueString(),
         }
 
         tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudAPI().Account().DeleteUser", map[string]any{"account_id": accountId, "req": delUserReq})
diff --git a/internal/service/cloudapi/audit/data_source_audit.go b/internal/service/cloudapi/audit/data_source_audit.go
new file mode 100644
index 0000000..a71b0ff
--- /dev/null
+++ b/internal/service/cloudapi/audit/data_source_audit.go
@@ -0,0 +1,91 @@
+package audit
+
+import (
+    "context"
+
+    "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+    "github.com/hashicorp/terraform-plugin-framework/datasource"
+    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+    "github.com/hashicorp/terraform-plugin-log/tflog"
+    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit/flattens"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit/models"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit/schemas"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
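// The var block below gives a compile-time guarantee that *dataSourceAudit
// implements datasource.DataSource. For orientation, a hedged sketch of how
// this data source would be consumed from a Terraform configuration — the
// "dynamix_audit" type name and the "audit_guid"/"status_code" attributes
// come from Metadata and MakeSchemaDataSourceAudit below, while the GUID
// value itself is hypothetical:
//
//	data "dynamix_audit" "example" {
//	  audit_guid = "00000000-0000-0000-0000-000000000000"
//	}
//
//	output "audit_status_code" {
//	  value = data.dynamix_audit.example.status_code
//	}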
+var (
+    _ datasource.DataSource = &dataSourceAudit{}
+)
+
+func NewDataSourceAudit() datasource.DataSource {
+    return &dataSourceAudit{}
+}
+
+// dataSourceAudit is the data source implementation.
+type dataSourceAudit struct {
+    client *decort.DecortClient
+}
+
+func (d *dataSourceAudit) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+    // Read Terraform configuration data into the model
+    var state models.DataSourceAudit
+    resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceAudit: Error get state")
+        return
+    }
+    auditGuid := state.AuditGuid.ValueString()
+    tflog.Info(ctx, "Read dataSourceAudit: got state successfully", map[string]any{"audit_guid": auditGuid})
+
+    // Set timeouts
+    readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
+    resp.Diagnostics.Append(diags...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceAudit: Error set timeout")
+        return
+    }
+    tflog.Info(ctx, "Read dataSourceAudit: set timeouts successfully", map[string]any{
+        "audit_guid":  auditGuid,
+        "readTimeout": readTimeout})
+
+    ctx, cancel := context.WithTimeout(ctx, readTimeout)
+    defer cancel()
+
+    // Map response body to schema
+    resp.Diagnostics.Append(flattens.AuditDataSource(ctx, &state, d.client)...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceAudit: Error flatten")
+        return
+    }
+
+    // Set refreshed state
+    resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceAudit: Error set state")
+        return
+    }
+    tflog.Info(ctx, "End read dataSourceAudit", map[string]any{"audit_guid": auditGuid})
+}
+
+func (d *dataSourceAudit) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+    resp.Schema = schema.Schema{
+        Attributes: schemas.MakeSchemaDataSourceAudit(),
+        Blocks: map[string]schema.Block{
+            "timeouts": timeouts.Block(ctx),
+        },
+    }
+}
+
+func (d *dataSourceAudit) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+    resp.TypeName = req.ProviderTypeName + "_audit"
+}
+
+// Configure adds the provider configured client to the data source.
+func (d *dataSourceAudit) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+    tflog.Info(ctx, "Get Configure dataSourceAudit")
+    d.client = client.DataSource(ctx, &req, resp)
+    tflog.Info(ctx, "Getting Configure dataSourceAudit successfully")
+}
diff --git a/internal/service/cloudapi/audit/flattens/flatten_data_source_audit.go b/internal/service/cloudapi/audit/flattens/flatten_data_source_audit.go
new file mode 100644
index 0000000..54ec851
--- /dev/null
+++ b/internal/service/cloudapi/audit/flattens/flatten_data_source_audit.go
@@ -0,0 +1,49 @@
+package flattens
+
+import (
+    "context"
+
+    "github.com/hashicorp/terraform-plugin-framework/diag"
+    "github.com/hashicorp/terraform-plugin-framework/types"
+    "github.com/hashicorp/terraform-plugin-log/tflog"
+    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit/models"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/audit/utilities"
+)
+
+func AuditDataSource(ctx context.Context, state *models.DataSourceAudit, c *decort.DecortClient) diag.Diagnostics {
+    tflog.Info(ctx, "Start flattens.AuditDataSource")
+
+    diags := diag.Diagnostics{}
+
+    auditGuid := state.AuditGuid.ValueString()
+
+    recordAudit, diags := utilities.AuditDataSourceCheckPresence(ctx, auditGuid, c)
+    if diags.HasError() {
+        return diags
+    }
+
+    tflog.Info(ctx, "flattens.AuditDataSource: before flatten", map[string]any{"audit_guid": auditGuid})
+
+    *state = models.DataSourceAudit{
+        AuditGuid: state.AuditGuid,
+        Timeouts:  state.Timeouts,
+
+        Arguments:    types.StringValue(recordAudit.Arguments),
+        Call:         types.StringValue(recordAudit.Call),
+        GUID:         types.StringValue(recordAudit.GUID),
+        ID:           types.StringValue(recordAudit.GUID),
+        Kwargs:       types.StringValue(recordAudit.Kwargs),
+        RemoteAddr:   types.StringValue(recordAudit.RemoteAddr),
+        ResponseTime: types.Float64Value(recordAudit.ResponseTime),
+        Result:       types.StringValue(recordAudit.Result),
+        StatusCode:   types.Int64Value(int64(recordAudit.StatusCode)),
+        Tags:         types.StringValue(recordAudit.Tags),
+        Timestamp:    types.Float64Value(recordAudit.Timestamp),
+        TimestampEnd: types.Float64Value(recordAudit.TimestampEnd),
+        User:         types.StringValue(recordAudit.User),
+    }
+
+    tflog.Info(ctx, "End flattens.AuditDataSource", map[string]any{"audit_guid": auditGuid})
+    return nil
+}
diff --git a/internal/service/cloudapi/audit/models/model_data_source_audit.go b/internal/service/cloudapi/audit/models/model_data_source_audit.go
new file mode 100644
index 0000000..7466085
--- /dev/null
+++ b/internal/service/cloudapi/audit/models/model_data_source_audit.go
@@ -0,0 +1,27 @@
+package models
+
+import (
+    "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+    "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+type DataSourceAudit struct {
+    //required field
+    AuditGuid types.String   `tfsdk:"audit_guid"`
+    Timeouts  timeouts.Value `tfsdk:"timeouts"`
+
+    //response field
+    Arguments    types.String  `tfsdk:"args"`
+    Call         types.String  `tfsdk:"call"`
+    GUID         types.String  `tfsdk:"guid"`
+    ID           types.String  `tfsdk:"id"`
+    Kwargs       types.String  `tfsdk:"kwargs"`
+    RemoteAddr   types.String  `tfsdk:"remote_addr"`
+    ResponseTime types.Float64 `tfsdk:"responsetime"`
+    Result       types.String  `tfsdk:"result"`
+    StatusCode   types.Int64   `tfsdk:"status_code"`
+    Tags         types.String  `tfsdk:"tags"`
+    Timestamp    types.Float64 `tfsdk:"timestamp"`
+    TimestampEnd types.Float64 `tfsdk:"timestamp_end"`
+    User         types.String  `tfsdk:"user"`
+}
diff --git a/internal/service/cloudapi/audit/schemas/schema_data_source_audit.go b/internal/service/cloudapi/audit/schemas/schema_data_source_audit.go
new file mode 100644
index 0000000..72686ec
--- /dev/null
+++ b/internal/service/cloudapi/audit/schemas/schema_data_source_audit.go
@@ -0,0 +1,52 @@
+package schemas
+
+import (
+    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+)
+
+func MakeSchemaDataSourceAudit() map[string]schema.Attribute {
+    return map[string]schema.Attribute{
+        "audit_guid": schema.StringAttribute{
+            Required: true,
+        },
+        "args": schema.StringAttribute{
+            Computed: true,
+        },
+        "call": schema.StringAttribute{
+            Computed: true,
+        },
+        "guid": schema.StringAttribute{
+            Computed: true,
+        },
+        "kwargs": schema.StringAttribute{
+            Computed: true,
+        },
+        "remote_addr": schema.StringAttribute{
+            Computed: true,
+        },
+        "responsetime": schema.Float64Attribute{
+            Computed: true,
+        },
+        "result": schema.StringAttribute{
+            Computed: true,
+        },
+        "status_code": schema.Int64Attribute{
+            Computed: true,
+        },
+        "tags": schema.StringAttribute{
+            Computed: true,
+        },
+        "timestamp": schema.Float64Attribute{
+            Computed: true,
+        },
+        "timestamp_end": schema.Float64Attribute{
+            Computed: true,
+        },
+        "user": schema.StringAttribute{
+            Computed: true,
+        },
+        "id": schema.StringAttribute{
+            Computed: true,
+        },
+    }
+}
diff --git a/internal/service/cloudapi/audit/utilities/utility_data_source_audit.go b/internal/service/cloudapi/audit/utilities/utility_data_source_audit.go
new file mode 100644
index 0000000..4b788c8
--- /dev/null
+++ b/internal/service/cloudapi/audit/utilities/utility_data_source_audit.go
@@ -0,0 +1,27 @@
+package utilities
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/hashicorp/terraform-plugin-framework/diag"
+    "github.com/hashicorp/terraform-plugin-log/tflog"
+    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+    "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/audit"
+)
+
+func AuditDataSourceCheckPresence(ctx context.Context, auditGuid string, c *decort.DecortClient) (*audit.RecordAudit, diag.Diagnostics) {
+    tflog.Info(ctx, fmt.Sprintf("AuditDataSourceCheckPresence: Get info about audit with ID - %v", auditGuid))
+
+    diags := diag.Diagnostics{}
+
+    recordAudit, err := c.CloudAPI().Audit().Get(ctx, audit.GetRequest{AuditGuid: auditGuid})
+    if err != nil {
+        diags.AddError(fmt.Sprintf("Cannot get info about audit with ID %v", auditGuid), err.Error())
+        return nil, diags
+    }
+
+    tflog.Info(ctx, "AuditDataSourceCheckPresence: response from CloudAPI().Audit().Get", map[string]any{"audit_guid": auditGuid, "response": recordAudit})
+
+    return recordAudit, nil
+}
diff --git a/internal/service/cloudapi/disks/flattens/flatten_resource_disk.go b/internal/service/cloudapi/disks/flattens/flatten_resource_disk.go
index 791e448..4408f6e 100644
--- a/internal/service/cloudapi/disks/flattens/flatten_resource_disk.go
+++ b/internal/service/cloudapi/disks/flattens/flatten_resource_disk.go
@@ -52,7 +52,6 @@ func DiskResource(ctx context.Context, plan *models.ResourceDiskModel, c *decort
         Type:        plan.Type,
         Detach:      plan.Detach,
         Permanently: plan.Permanently,
-        Reason:      plan.Reason,
         Shareable:   plan.Shareable,
         Timeouts:    plan.Timeouts,
 
diff --git a/internal/service/cloudapi/disks/flattens/flatten_resource_disk_replication.go b/internal/service/cloudapi/disks/flattens/flatten_resource_disk_replication.go
index 5c6e42e..991bfcf 100644
--- a/internal/service/cloudapi/disks/flattens/flatten_resource_disk_replication.go
+++ b/internal/service/cloudapi/disks/flattens/flatten_resource_disk_replication.go
@@ -45,7 +45,6 @@ func DiskReplicationResource(ctx context.Context, state *models.ResourceRecordDi
         Start:       state.Start,
         Detach:      state.Detach,
         Permanently: state.Permanently,
-        Reason:      state.Reason,
         Id:          types.StringValue(strconv.Itoa(int(recordDisk.Replication.DiskID))),
         ACL:         types.StringValue(string(diskAcl)),
         AccountID:   types.Int64Value(int64(recordDisk.AccountID)),
diff --git a/internal/service/cloudapi/disks/models/model_resource_disk.go b/internal/service/cloudapi/disks/models/model_resource_disk.go
index 9a7f639..a80e4c4 100644
--- a/internal/service/cloudapi/disks/models/model_resource_disk.go
+++ b/internal/service/cloudapi/disks/models/model_resource_disk.go
@@ -20,7 +20,6 @@ type ResourceDiskModel struct {
     Type        types.String   `tfsdk:"type"`
     Detach      types.Bool     `tfsdk:"detach"`
     Permanently types.Bool     `tfsdk:"permanently"`
-    Reason      types.String   `tfsdk:"reason"`
     Shareable   types.Bool     `tfsdk:"shareable"`
     IOTune      types.Object   `tfsdk:"iotune"`
     Timeouts    timeouts.Value `tfsdk:"timeouts"`
diff --git a/internal/service/cloudapi/disks/models/model_resource_disk_replication.go b/internal/service/cloudapi/disks/models/model_resource_disk_replication.go
index 508ad84..53ef140 100644
--- a/internal/service/cloudapi/disks/models/model_resource_disk_replication.go
+++ b/internal/service/cloudapi/disks/models/model_resource_disk_replication.go
@@ -17,7 +17,6 @@ type ResourceRecordDiskReplicationModel struct {
     Start       types.Bool     `tfsdk:"start"`
     Detach      types.Bool     `tfsdk:"detach"`
     Permanently types.Bool     `tfsdk:"permanently"`
-    Reason      types.String   `tfsdk:"reason"`
     Timeouts    timeouts.Value `tfsdk:"timeouts"`
 
     // response fields
diff --git a/internal/service/cloudapi/disks/resource_disk.go b/internal/service/cloudapi/disks/resource_disk.go
index 7b0b5d0..9f150ac 100644
--- a/internal/service/cloudapi/disks/resource_disk.go
+++ b/internal/service/cloudapi/disks/resource_disk.go
@@ -312,10 +312,6 @@ func (r *resourceDisk) Delete(ctx context.Context, req resource.DeleteRequest, r
         Permanently: state.Permanently.ValueBool(), // default false
     }
 
-    if !state.Reason.IsNull() {
-        delReq.Reason = state.Reason.ValueString()
-    }
-
     tflog.Info(ctx, "Delete resourceDisk: before call CloudAPI().Disks().Delete", map[string]any{"req": delReq})
     _, err := r.client.CloudAPI().Disks().Delete(ctx, delReq)
     if err != nil {
diff --git a/internal/service/cloudapi/disks/resource_disk_replication.go b/internal/service/cloudapi/disks/resource_disk_replication.go
index 3aa5e40..e9028a5 100644
--- a/internal/service/cloudapi/disks/resource_disk_replication.go
+++ b/internal/service/cloudapi/disks/resource_disk_replication.go
@@ -284,10 +284,6 @@ func (r *resourceDiskReplication) Delete(ctx context.Context, req resource.Delet
         Permanently: state.Permanently.ValueBool(), // default false
     }
 
-    if !state.Reason.IsNull() {
-        delReq.Reason = state.Reason.ValueString()
-    }
-
     tflog.Info(ctx, "Delete resourceDiskReplication: before call CloudAPI().Disks().Delete", map[string]any{"req": delReq})
     _, err := r.client.CloudAPI().Disks().Delete(ctx, delReq)
     if err != nil {
diff --git a/internal/service/cloudapi/disks/schemas/schema_resource_disk.go b/internal/service/cloudapi/disks/schemas/schema_resource_disk.go
index b9dd7ea..187ce88 100644
--- a/internal/service/cloudapi/disks/schemas/schema_resource_disk.go
+++ b/internal/service/cloudapi/disks/schemas/schema_resource_disk.go
@@ -64,10 +64,6 @@ func MakeSchemaResourceDisk() map[string]schema.Attribute {
             Description: "Whether to completely delete the disk, works only with non attached disks",
             // default is false
         },
-        "reason": schema.StringAttribute{
-            Optional:    true,
-            Description: "Reason for deletion",
-        },
         "shareable": schema.BoolAttribute{
             Optional: true,
             Computed: true,
diff --git a/internal/service/cloudapi/disks/schemas/schema_resource_disk_replication.go b/internal/service/cloudapi/disks/schemas/schema_resource_disk_replication.go
index 96fc861..f5a22ba 100644
--- a/internal/service/cloudapi/disks/schemas/schema_resource_disk_replication.go
+++ b/internal/service/cloudapi/disks/schemas/schema_resource_disk_replication.go
@@ -39,9 +39,6 @@ func MakeSchemaResourceDiskReplication() map[string]schema.Attribute {
         "permanently": schema.BoolAttribute{
             Optional: true,
         },
-        "reason": schema.StringAttribute{
-            Optional: true,
-        },
         // computed attributes
         "id": schema.StringAttribute{
             Computed: true,
diff --git a/internal/service/cloudapi/disks/utilities/utility_resource_disk.go b/internal/service/cloudapi/disks/utilities/utility_resource_disk.go
index 555d52a..09622cb 100644
--- a/internal/service/cloudapi/disks/utilities/utility_resource_disk.go
+++ b/internal/service/cloudapi/disks/utilities/utility_resource_disk.go
@@ -193,7 +193,6 @@ func RestoreDisk(ctx context.Context, diskId uint64, c *decort.DecortClient) dia
 
     restoreReq := disks.RestoreRequest{
         DiskID: diskId,
-        Reason: "Terraform automatic restore",
     }
 
     tflog.Info(ctx, "RestoreDisk: before calling CloudAPI().Disks().Restore", map[string]any{"diskId": diskId, "req": restoreReq})
diff --git a/internal/service/cloudapi/dpdknet/data_source_dpdknet.go b/internal/service/cloudapi/dpdknet/data_source_dpdknet.go
new file mode 100644
index 0000000..d688e67
--- /dev/null
+++ b/internal/service/cloudapi/dpdknet/data_source_dpdknet.go
@@ -0,0 +1,91 @@
+package dpdknet
+
+import (
+    "context"
+
+    "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+    "github.com/hashicorp/terraform-plugin-framework/datasource"
+    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+    "github.com/hashicorp/terraform-plugin-log/tflog"
+    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/flattens"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/models"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/schemas"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+    _ datasource.DataSource = &dataSourceDPDK{}
+)
+
+func NewDataSourceDPDK() datasource.DataSource {
+    return &dataSourceDPDK{}
+}
+
+// dataSourceDPDK is the data source implementation.
+type dataSourceDPDK struct {
+    client *decort.DecortClient
+}
+
+func (d *dataSourceDPDK) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+    // Read Terraform configuration data into the model
+    var state models.DataSourceDPDKModel
+    resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
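    // Append accumulates both errors and warnings from the config
    // conversion; the HasError() check below reacts only to error-severity
    // diagnostics, so warnings alone do not abort the read.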
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceDPDK: Error get state")
+        return
+    }
+    dpdkId := uint64(state.DPDKID.ValueInt64())
+    tflog.Info(ctx, "Read dataSourceDPDK: got state successfully", map[string]any{"dpdk_id": dpdkId})
+
+    // Set timeouts
+    readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
+    resp.Diagnostics.Append(diags...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceDPDK: Error set timeout")
+        return
+    }
+    tflog.Info(ctx, "Read dataSourceDPDK: set timeouts successfully", map[string]any{
+        "dpdk_id":     dpdkId,
+        "readTimeout": readTimeout})
+
+    ctx, cancel := context.WithTimeout(ctx, readTimeout)
+    defer cancel()
+
+    // Map response body to schema
+    resp.Diagnostics.Append(flattens.DPDKDataSource(ctx, &state, d.client)...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceDPDK: Error flatten")
+        return
+    }
+
+    // Set refreshed state
+    resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceDPDK: Error set state")
+        return
+    }
+    tflog.Info(ctx, "End read dataSourceDPDK", map[string]any{"dpdk_id": dpdkId})
+}
+
+func (d *dataSourceDPDK) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+    resp.Schema = schema.Schema{
+        Attributes: schemas.MakeSchemaDataSourceDPDK(),
+        Blocks: map[string]schema.Block{
+            "timeouts": timeouts.Block(ctx),
+        },
+    }
+}
+
+func (d *dataSourceDPDK) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+    resp.TypeName = req.ProviderTypeName + "_dpdknet"
+}
+
+// Configure adds the provider configured client to the data source.
+func (d *dataSourceDPDK) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+    tflog.Info(ctx, "Get Configure dataSourceDPDK")
+    d.client = client.DataSource(ctx, &req, resp)
+    tflog.Info(ctx, "Getting Configure dataSourceDPDK successfully")
+}
diff --git a/internal/service/cloudapi/dpdknet/data_source_dpdknet_list.go b/internal/service/cloudapi/dpdknet/data_source_dpdknet_list.go
new file mode 100644
index 0000000..2a603be
--- /dev/null
+++ b/internal/service/cloudapi/dpdknet/data_source_dpdknet_list.go
@@ -0,0 +1,88 @@
+package dpdknet
+
+import (
+    "context"
+
+    "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+    "github.com/hashicorp/terraform-plugin-framework/datasource"
+    "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+    "github.com/hashicorp/terraform-plugin-log/tflog"
+    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/flattens"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/models"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/schemas"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+    _ datasource.DataSource = &dataSourceDPDKList{}
+)
+
+func NewDataSourceDPDKList() datasource.DataSource {
+    return &dataSourceDPDKList{}
+}
+
+// dataSourceDPDKList is the data source implementation.
+type dataSourceDPDKList struct {
+    client *decort.DecortClient
+}
+
+func (d *dataSourceDPDKList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+    // Read Terraform configuration data into the model
+    var state models.DataSourceDPDKListModel
+    resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceDPDKList: Error get state")
+        return
+    }
+
+    // Set timeouts
+    readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s)
+    resp.Diagnostics.Append(diags...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceDPDKList: Error set timeout")
+        return
+    }
+    tflog.Info(ctx, "Read dataSourceDPDKList: set timeouts successfully", map[string]any{
+        "readTimeout": readTimeout})
+
+    ctx, cancel := context.WithTimeout(ctx, readTimeout)
+    defer cancel()
+
+    // Map response body to schema
+    resp.Diagnostics.Append(flattens.DPDKListDataSource(ctx, &state, d.client)...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceDPDKList: Error flatten")
+        return
+    }
+
+    // Set refreshed state
+    resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+    if resp.Diagnostics.HasError() {
+        tflog.Error(ctx, "Read dataSourceDPDKList: Error set state")
+        return
+    }
+    tflog.Info(ctx, "End read dataSourceDPDKList")
+}
+
+func (d *dataSourceDPDKList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+    resp.Schema = schema.Schema{
+        Attributes: schemas.MakeSchemaDataSourceDPDKList(),
+        Blocks: map[string]schema.Block{
+            "timeouts": timeouts.Block(ctx),
+        },
+    }
+}
+
+func (d *dataSourceDPDKList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+    resp.TypeName = req.ProviderTypeName + "_dpdknet_list"
+}
+
+// Configure adds the provider configured client to the data source.
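// Configure runs once, before the first Read; client.DataSource extracts the
// *decort.DecortClient that the provider stored in the request's
// ProviderData. A hedged sketch of the equivalent manual wiring (simplified;
// the real helper also guards against nil ProviderData and reports a failed
// type assertion through resp.Diagnostics):
//
//	if req.ProviderData != nil {
//		d.client = req.ProviderData.(*decort.DecortClient)
//	}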
+func (d *dataSourceDPDKList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+    tflog.Info(ctx, "Get Configure dataSourceDPDKList")
+    d.client = client.DataSource(ctx, &req, resp)
+    tflog.Info(ctx, "Getting Configure dataSourceDPDKList successfully")
+}
diff --git a/internal/service/cloudapi/dpdknet/flattens/flatten_data_source_dpdknet.go b/internal/service/cloudapi/dpdknet/flattens/flatten_data_source_dpdknet.go
new file mode 100644
index 0000000..020185f
--- /dev/null
+++ b/internal/service/cloudapi/dpdknet/flattens/flatten_data_source_dpdknet.go
@@ -0,0 +1,52 @@
+package flattens
+
+import (
+    "context"
+
+    "github.com/google/uuid"
+    "github.com/hashicorp/terraform-plugin-framework/diag"
+    "github.com/hashicorp/terraform-plugin-framework/types"
+    "github.com/hashicorp/terraform-plugin-log/tflog"
+    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/models"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/utilities"
+)
+
+func DPDKDataSource(ctx context.Context, state *models.DataSourceDPDKModel, c *decort.DecortClient) diag.Diagnostics {
+    tflog.Info(ctx, "Start flattens.DPDKDataSource")
+
+    diags := diag.Diagnostics{}
+
+    dpdkId := uint64(state.DPDKID.ValueInt64())
+
+    recordDPDK, diags := utilities.DPDKDataSourceCheckPresence(ctx, dpdkId, c)
+    if diags.HasError() {
+        return diags
+    }
+
+    tflog.Info(ctx, "flattens.DPDKDataSource: before flatten", map[string]any{"dpdk_id": dpdkId})
+
+    id := uuid.New()
+    *state = models.DataSourceDPDKModel{
+        DPDKID:   state.DPDKID,
+        Timeouts: state.Timeouts,
+
+        Id:            types.StringValue(id.String()),
+        AccountAccess: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordDPDK.AccountAccess),
+        CreatedTime:   types.Int64Value(int64(recordDPDK.CreatedTime)),
+        Desc:          types.StringValue(recordDPDK.Description),
+        GID:           types.Int64Value(int64(recordDPDK.GID)),
+        GUID:          types.Int64Value(int64(recordDPDK.GUID)),
+        Name:          types.StringValue(recordDPDK.Name),
+        RGAccess:      flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordDPDK.RGAccess),
+        Status:        types.StringValue(recordDPDK.Status),
+        OVSBridge:     types.StringValue(recordDPDK.OVSBridge),
+        VlanID:        types.Int64Value(int64(recordDPDK.VlanID)),
+        ComputeIDs:    flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordDPDK.ComputeIDs),
+        UpdatedTime:   types.Int64Value(int64(recordDPDK.UpdatedTime)),
+    }
+
+    tflog.Info(ctx, "End flattens.DPDKDataSource", map[string]any{"dpdk_id": dpdkId})
+    return nil
+}
diff --git a/internal/service/cloudapi/dpdknet/flattens/flatten_data_source_dpdknet_list.go b/internal/service/cloudapi/dpdknet/flattens/flatten_data_source_dpdknet_list.go
new file mode 100644
index 0000000..7903ee2
--- /dev/null
+++ b/internal/service/cloudapi/dpdknet/flattens/flatten_data_source_dpdknet_list.go
@@ -0,0 +1,72 @@
+package flattens
+
+import (
+    "context"
+
+    "github.com/google/uuid"
+    "github.com/hashicorp/terraform-plugin-framework/diag"
+    "github.com/hashicorp/terraform-plugin-framework/types"
+    "github.com/hashicorp/terraform-plugin-log/tflog"
+    decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
+    "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/models"
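    // internal/flattens (imported above) supplies FlattenSimpleTypeToList,
    // which converts a plain Go slice such as []uint64 into a types.List of
    // the given element type; both flatten files in this package lean on it
    // for account_access, rg_access and compute_ids.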
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/utilities" +) + +func DPDKListDataSource(ctx context.Context, state *models.DataSourceDPDKListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DPDKListDataSource") + + diags := diag.Diagnostics{} + + dpdkList, err := utilities.DPDKListDataSourceCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about list DPDK", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.DPDKListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceDPDKListModel{ + ByID: state.ByID, + GID: state.GID, + Name: state.Name, + Desc: state.Desc, + Status: state.Status, + ComputeIDs: state.ComputeIDs, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemDPDKListModel, 0, len(dpdkList.Data)) + for _, dpdkItem := range dpdkList.Data { + + item := models.ItemDPDKListModel{ + AccountAccess: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, dpdkItem.AccountAccess), + CreatedTime: types.Int64Value(int64(dpdkItem.CreatedTime)), + Desc: types.StringValue(dpdkItem.Description), + DPDKID: types.Int64Value(int64(dpdkItem.ID)), + GID: types.Int64Value(int64(dpdkItem.GID)), + GUID: types.Int64Value(int64(dpdkItem.GUID)), + Name: types.StringValue(dpdkItem.Name), + RGAccess: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, dpdkItem.RGAccess), + Status: types.StringValue(dpdkItem.Status), + OVSBridge: types.StringValue(dpdkItem.OVSBridge), + VlanID: types.Int64Value(int64(dpdkItem.VlanID)), + ComputeIDs: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, dpdkItem.ComputeIDs), + UpdatedTime: types.Int64Value(int64(dpdkItem.UpdatedTime)), + } + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(dpdkList.EntryCount)) + + tflog.Info(ctx, "End flattens.DPDKListDataSource") + return nil +} diff --git a/internal/service/cloudapi/dpdknet/models/model_data_source_dpdknet.go b/internal/service/cloudapi/dpdknet/models/model_data_source_dpdknet.go new file mode 100644 index 0000000..8d1e9a5 --- /dev/null +++ b/internal/service/cloudapi/dpdknet/models/model_data_source_dpdknet.go @@ -0,0 +1,27 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceDPDKModel struct { + //required field + DPDKID types.Int64 `tfsdk:"dpdk_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //response field + AccountAccess types.List `tfsdk:"account_access"` + CreatedTime types.Int64 `tfsdk:"created_time"` + Desc types.String `tfsdk:"desc"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + RGAccess types.List `tfsdk:"rg_access"` + Status types.String `tfsdk:"status"` + OVSBridge types.String `tfsdk:"ovs_bridge"` + VlanID types.Int64 `tfsdk:"vlan_id"` + ComputeIDs types.List `tfsdk:"compute_ids"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` +} diff --git a/internal/service/cloudapi/dpdknet/models/model_data_source_dpdknet_list.go b/internal/service/cloudapi/dpdknet/models/model_data_source_dpdknet_list.go new file mode 100644 index 0000000..6ee640a --- /dev/null +++ b/internal/service/cloudapi/dpdknet/models/model_data_source_dpdknet_list.go @@ -0,0 +1,42 @@ +package models + +import ( 
+ "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceDPDKListModel struct { + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // optional attributes + ByID types.Int64 `tfsdk:"by_id"` + GID types.Int64 `tfsdk:"gid"` + Name types.String `tfsdk:"name"` + Desc types.String `tfsdk:"desc"` + Status types.String `tfsdk:"status"` + ComputeIDs types.List `tfsdk:"compute_ids"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + + // computed attributes + Id types.String `tfsdk:"id"` + Items []ItemDPDKListModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemDPDKListModel struct { + DPDKID types.Int64 `tfsdk:"dpdk_id"` + AccountAccess types.List `tfsdk:"account_access"` + CreatedTime types.Int64 `tfsdk:"created_time"` + Desc types.String `tfsdk:"desc"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + RGAccess types.List `tfsdk:"rg_access"` + Status types.String `tfsdk:"status"` + OVSBridge types.String `tfsdk:"ovs_bridge"` + VlanID types.Int64 `tfsdk:"vlan_id"` + ComputeIDs types.List `tfsdk:"compute_ids"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` +} diff --git a/internal/service/cloudapi/dpdknet/schemas/schema_data_source_dpdknet.go b/internal/service/cloudapi/dpdknet/schemas/schema_data_source_dpdknet.go new file mode 100644 index 0000000..8b7281a --- /dev/null +++ b/internal/service/cloudapi/dpdknet/schemas/schema_data_source_dpdknet.go @@ -0,0 +1,56 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDPDK() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "dpdk_id": schema.Int64Attribute{ + Required: true, + }, + "account_access": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "rg_access": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "ovs_bridge": schema.StringAttribute{ + Computed: true, + }, + "vlan_id": schema.Int64Attribute{ + Computed: true, + }, + "compute_ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/dpdknet/schemas/schema_data_source_dpdknet_list.go b/internal/service/cloudapi/dpdknet/schemas/schema_data_source_dpdknet_list.go new file mode 100644 index 0000000..b43ff34 --- /dev/null +++ b/internal/service/cloudapi/dpdknet/schemas/schema_data_source_dpdknet_list.go @@ -0,0 +1,97 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDPDKList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + }, + "gid": schema.Int64Attribute{ + Optional: true, + 
}, + "name": schema.StringAttribute{ + Optional: true, + }, + "desc": schema.StringAttribute{ + Optional: true, + }, + "status": schema.StringAttribute{ + Optional: true, + }, + "compute_ids": schema.ListAttribute{ + Optional: true, + ElementType: types.Int64Type, + }, + "sort_by": schema.StringAttribute{ + Optional: true, + }, + "page": schema.Int64Attribute{ + Optional: true, + }, + "size": schema.Int64Attribute{ + Optional: true, + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "dpdk_id": schema.Int64Attribute{ + Required: true, + }, + "account_access": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "rg_access": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "ovs_bridge": schema.StringAttribute{ + Computed: true, + }, + "vlan_id": schema.Int64Attribute{ + Computed: true, + }, + "compute_ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/dpdknet/utilities/utility_data_source_dpdknet.go b/internal/service/cloudapi/dpdknet/utilities/utility_data_source_dpdknet.go new file mode 100644 index 0000000..0f4ea6c --- /dev/null +++ b/internal/service/cloudapi/dpdknet/utilities/utility_data_source_dpdknet.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet" +) + +func DPDKDataSourceCheckPresence(ctx context.Context, dpdkId uint64, c *decort.DecortClient) (*dpdknet.RecordDPDKNet, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("DPDKDataSourceCheckPresence: Get info about DPDK net with ID - %d", dpdkId)) + + diags := diag.Diagnostics{} + + recordDPDK, err := c.CloudAPI().DPDKNet().Get(ctx, dpdknet.GetRequest{DPDKID: dpdkId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about DPDK net with ID %d", dpdkId), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "DPDKDataSourceCheckPresence: response from CloudBroker().DPDKNet().Get", map[string]any{"dpdk_id": dpdkId, "response": recordDPDK}) + + return recordDPDK, nil +} diff --git a/internal/service/cloudapi/dpdknet/utilities/utility_data_source_dpdknet_list.go b/internal/service/cloudapi/dpdknet/utilities/utility_data_source_dpdknet_list.go new file mode 100644 index 0000000..efe037d --- /dev/null +++ b/internal/service/cloudapi/dpdknet/utilities/utility_data_source_dpdknet_list.go @@ -0,0 +1,61 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/dpdknet/models" +) + +func DPDKListDataSourceCheckPresence(ctx context.Context, plan *models.DataSourceDPDKListModel, c *decort.DecortClient) (*dpdknet.ListDPDKNet, error) { + tflog.Info(ctx, fmt.Sprintf("DPDKListDataSourceCheckPresence: Get DPDK list info")) + + listDPDKReq := dpdknet.ListRequest{} + + if !plan.ByID.IsNull() { + listDPDKReq.ByID = uint64(plan.ByID.ValueInt64()) + } + if !plan.GID.IsNull() { + listDPDKReq.GID = uint64(plan.GID.ValueInt64()) + } + if !plan.Name.IsNull() { + listDPDKReq.Name = plan.Name.ValueString() + } + if !plan.Desc.IsNull() { + listDPDKReq.Description = plan.Desc.ValueString() + } + if !plan.Status.IsNull() { + listDPDKReq.Status = plan.Status.ValueString() + } + if !plan.ComputeIDs.IsNull() { + computeIDs := make([]uint64, 0, len(plan.ComputeIDs.Elements())) + diags := plan.ComputeIDs.ElementsAs(ctx, &computeIDs, false) + if diags.HasError() { + tflog.Error(ctx, "DPDKListDataSourceCheckPresence: cannot populate computeIDs with plan.ComputeIDs List elements") + return nil, fmt.Errorf("cannot populate computeIDs with plan.ComputeIDs List elements") + } + listDPDKReq.ComputeIDs = computeIDs + } + if !plan.SortBy.IsNull() { + listDPDKReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + listDPDKReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + listDPDKReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "DPDKListDataSourceCheckPresence: before call CloudAPI().DPDKNet().List", map[string]any{"response": listDPDKReq}) + dpdkList, err := c.CloudAPI().DPDKNet().List(ctx, listDPDKReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about data source list DPDK net with error: %w", err) + } + + tflog.Info(ctx, "DPDKListDataSourceCheckPresence: response from CloudAPI().DPDKNet().List", map[string]any{"response": dpdkList}) + + return dpdkList, err +} diff --git a/internal/service/cloudapi/flipgroup/utilities/utility_data_source_flipgroup_list.go b/internal/service/cloudapi/flipgroup/utilities/utility_data_source_flipgroup_list.go index 2d82ade..ce5c1b1 100644 --- a/internal/service/cloudapi/flipgroup/utilities/utility_data_source_flipgroup_list.go +++ b/internal/service/cloudapi/flipgroup/utilities/utility_data_source_flipgroup_list.go @@ -67,7 +67,8 @@ func FlipgroupListDataSourceCheckPresence(ctx context.Context, plan *models.Data for _, val := range plan.CliendIDs.Elements() { result = append(result, uint64(val.(types.Int64).ValueInt64())) } - listFlipgroupReq.ClientIDs = result + //TODO fix it + //listFlipgroupReq.ClientIDs = result } if !plan.Status.IsNull() { diff --git a/internal/service/cloudapi/ic/input_checks.go b/internal/service/cloudapi/ic/input_checks.go index 8da32fc..2d9d7c1 100644 --- a/internal/service/cloudapi/ic/input_checks.go +++ b/internal/service/cloudapi/ic/input_checks.go @@ -9,6 +9,7 @@ import ( account "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/dpdknet" extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/extnet" image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image" k8ci "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8ci" @@ -448,6 +449,30 @@ func ExistVFPool(ctx context.Context, vfpoolId uint64, c 
*decort.DecortClient) e return nil } +func ExistDPDK(ctx context.Context, dpdkId uint64, c *decort.DecortClient) error { + + req := dpdknet.ListRequest{ + ByID: dpdkId, + } + + dpdkList, err := c.CloudAPI().DPDKNet().List(ctx, req) + if err != nil { + return err + } + + if len(dpdkList.Data) == 0 { + return fmt.Errorf("DPDK net with ID %v not found", dpdkId) + } + + for _, item := range dpdkList.Data { + if item.Status != "ENABLED" { + return fmt.Errorf("DPDK net with ID %v must be enabled", dpdkId) + } + } + + return nil +} + func ExistSnapshotInCompute(ctx context.Context, computeID uint64, label string, c *decort.DecortClient) error { req := compute.SnapshotListRequest{ ComputeID: computeID, diff --git a/internal/service/cloudapi/image/flattens/flatten_data_source_image_list.go b/internal/service/cloudapi/image/flattens/flatten_data_source_image_list.go index a499fd8..510e899 100644 --- a/internal/service/cloudapi/image/flattens/flatten_data_source_image_list.go +++ b/internal/service/cloudapi/image/flattens/flatten_data_source_image_list.go @@ -42,6 +42,7 @@ func DataSourceImageList(ctx context.Context, state *models.ListImagesModel, c * Bootable: state.Bootable, SortBy: state.SortBy, Page: state.Page, + Enabled: state.Enabled, Size: state.Size, Timeouts: state.Timeouts, Id: types.StringValue(id.String()), diff --git a/internal/service/cloudapi/image/flattens/flatten_resource_image.go b/internal/service/cloudapi/image/flattens/flatten_resource_image.go index 5e6826c..e5c190e 100644 --- a/internal/service/cloudapi/image/flattens/flatten_resource_image.go +++ b/internal/service/cloudapi/image/flattens/flatten_resource_image.go @@ -48,7 +48,6 @@ func ResourceImage(ctx context.Context, plan *models.ImageResourceModel, c *deco SepID: types.Int64Value(int64(image.SepID)), PoolName: types.StringValue(image.Pool), Architecture: types.StringValue(image.Architecture), - Permanently: plan.Permanently, ImageId: types.Int64Value(int64(image.ID)), Timeouts: plan.Timeouts, Id: types.StringValue(strconv.Itoa(int(image.ID))), diff --git a/internal/service/cloudapi/image/flattens/flatten_resource_image_virtual.go b/internal/service/cloudapi/image/flattens/flatten_resource_image_virtual.go index 51b6d56..a9cdb88 100644 --- a/internal/service/cloudapi/image/flattens/flatten_resource_image_virtual.go +++ b/internal/service/cloudapi/image/flattens/flatten_resource_image_virtual.go @@ -33,7 +33,6 @@ func ResourceImageVirtual(ctx context.Context, plan *models.ImageVirtualResource *plan = models.ImageVirtualResourceModel{ ImageName: types.StringValue(image.Name), LinkTo: types.Int64Value(int64(image.LinkTo)), - Permanently: plan.Permanently, Timeouts: plan.Timeouts, Id: types.StringValue(strconv.Itoa(int(image.ID))), LastUpdated: plan.LastUpdated, diff --git a/internal/service/cloudapi/image/models/model_data_source_image_list.go b/internal/service/cloudapi/image/models/model_data_source_image_list.go index d1b6d5b..2141d43 100644 --- a/internal/service/cloudapi/image/models/model_data_source_image_list.go +++ b/internal/service/cloudapi/image/models/model_data_source_image_list.go @@ -22,6 +22,7 @@ type ListImagesModel struct { SortBy types.String `tfsdk:"sort_by"` Page types.Int64 `tfsdk:"page"` Size types.Int64 `tfsdk:"size"` + Enabled types.Bool `tfsdk:"enabled"` // responce fields Id types.String `tfsdk:"id"` diff --git a/internal/service/cloudapi/image/models/model_resource_image.go b/internal/service/cloudapi/image/models/model_resource_image.go index b2d6232..901e31b 100644 --- 
a/internal/service/cloudapi/image/models/model_resource_image.go +++ b/internal/service/cloudapi/image/models/model_resource_image.go @@ -23,7 +23,6 @@ type ImageResourceModel struct { SepID types.Int64 `tfsdk:"sep_id"` PoolName types.String `tfsdk:"pool_name"` Architecture types.String `tfsdk:"architecture"` - Permanently types.Bool `tfsdk:"permanently"` Timeouts timeouts.Value `tfsdk:"timeouts"` // responce fields Id types.String `tfsdk:"id"` diff --git a/internal/service/cloudapi/image/models/model_resource_image_virtual.go b/internal/service/cloudapi/image/models/model_resource_image_virtual.go index 18ca120..9d5f83d 100644 --- a/internal/service/cloudapi/image/models/model_resource_image_virtual.go +++ b/internal/service/cloudapi/image/models/model_resource_image_virtual.go @@ -7,10 +7,9 @@ import ( type ImageVirtualResourceModel struct { // request fields - ImageName types.String `tfsdk:"image_name"` - LinkTo types.Int64 `tfsdk:"link_to"` - Permanently types.Bool `tfsdk:"permanently"` - Timeouts timeouts.Value `tfsdk:"timeouts"` + ImageName types.String `tfsdk:"image_name"` + LinkTo types.Int64 `tfsdk:"link_to"` + Timeouts timeouts.Value `tfsdk:"timeouts"` // responce fields Id types.String `tfsdk:"id"` LastUpdated types.String `tfsdk:"last_updated"` diff --git a/internal/service/cloudapi/image/resource_image.go b/internal/service/cloudapi/image/resource_image.go index 5e7af5f..ac8bf12 100644 --- a/internal/service/cloudapi/image/resource_image.go +++ b/internal/service/cloudapi/image/resource_image.go @@ -234,15 +234,8 @@ func (r *resourceImage) Delete(ctx context.Context, req resource.DeleteRequest, ctx, cancel := context.WithTimeout(ctx, deleteTimeout) defer cancel() - var permanently bool - if state.Permanently.IsNull() { - permanently = true - } else { - permanently = state.Permanently.ValueBool() - } - // Delete image - _, err := r.client.CloudAPI().Image().Delete(ctx, image.DeleteRequest{ImageID: uint64(state.ImageId.ValueInt64()), Permanently: permanently}) + _, err := r.client.CloudAPI().Image().Delete(ctx, image.DeleteRequest{ImageID: uint64(state.ImageId.ValueInt64())}) if err != nil { resp.Diagnostics.AddError("Error deleting image with error: ", err.Error()) return diff --git a/internal/service/cloudapi/image/resource_image_virtual.go b/internal/service/cloudapi/image/resource_image_virtual.go index 53d6e59..1dbb9fb 100644 --- a/internal/service/cloudapi/image/resource_image_virtual.go +++ b/internal/service/cloudapi/image/resource_image_virtual.go @@ -229,15 +229,8 @@ func (r *resourceImageVirtual) Delete(ctx context.Context, req resource.DeleteRe ctx, cancel := context.WithTimeout(ctx, deleteTimeout) defer cancel() - var permanently bool - if state.Permanently.IsNull() { - permanently = true - } else { - permanently = state.Permanently.ValueBool() - } - // Delete image - _, err := r.client.CloudAPI().Image().Delete(ctx, image.DeleteRequest{ImageID: uint64(state.ImageId.ValueInt64()), Permanently: permanently}) + _, err := r.client.CloudAPI().Image().Delete(ctx, image.DeleteRequest{ImageID: uint64(state.ImageId.ValueInt64())}) if err != nil { resp.Diagnostics.AddError("Error deleting image virtual with error: ", err.Error()) return diff --git a/internal/service/cloudapi/image/schemas/schema_data_source_image_list.go b/internal/service/cloudapi/image/schemas/schema_data_source_image_list.go index 85c700b..46c1586 100644 --- a/internal/service/cloudapi/image/schemas/schema_data_source_image_list.go +++ 
b/internal/service/cloudapi/image/schemas/schema_data_source_image_list.go @@ -54,6 +54,9 @@ func MakeSchemaDataSourceImageList() map[string]schema.Attribute { "size": schema.Int64Attribute{ Optional: true, }, + "enabled": schema.BoolAttribute{ + Optional: true, + }, // computed attributes "id": schema.StringAttribute{ Computed: true, diff --git a/internal/service/cloudapi/image/schemas/schema_resource_image.go b/internal/service/cloudapi/image/schemas/schema_resource_image.go index b53e72c..84d1854 100644 --- a/internal/service/cloudapi/image/schemas/schema_resource_image.go +++ b/internal/service/cloudapi/image/schemas/schema_resource_image.go @@ -84,12 +84,9 @@ func MakeSchemaResourceImage() map[string]schema.Attribute { Computed: true, Optional: true, Validators: []validator.String{ - stringvalidator.OneOf("X86_64", "PPC64_LE"), + stringvalidator.OneOf("X86_64"), }, - Description: "Binary architecture of this image, one of X86_64 of PPC64_LE", - }, - "permanently": schema.BoolAttribute{ - Optional: true, + Description: "Binary architecture of this image, one of X86_64", }, // computed attributes "id": schema.StringAttribute{ diff --git a/internal/service/cloudapi/image/schemas/schema_resource_image_virtual.go b/internal/service/cloudapi/image/schemas/schema_resource_image_virtual.go index 1ec40d8..715ba79 100644 --- a/internal/service/cloudapi/image/schemas/schema_resource_image_virtual.go +++ b/internal/service/cloudapi/image/schemas/schema_resource_image_virtual.go @@ -18,10 +18,6 @@ func MakeSchemaResourceImageVirtual() map[string]schema.Attribute { Required: true, Description: "ID of real image to link this virtual image to upon creation", }, - // optional attributes - "permanently": schema.BoolAttribute{ - Optional: true, - }, // computed attributes "id": schema.StringAttribute{ Computed: true, diff --git a/internal/service/cloudapi/image/utilities/utility_data_source_image_list.go b/internal/service/cloudapi/image/utilities/utility_data_source_image_list.go index 3479040..3e6bf24 100644 --- a/internal/service/cloudapi/image/utilities/utility_data_source_image_list.go +++ b/internal/service/cloudapi/image/utilities/utility_data_source_image_list.go @@ -56,6 +56,9 @@ func ImageListCheckPresence(ctx context.Context, state *models.ListImagesModel, if !state.Size.IsNull() { req.Size = uint64(state.Size.ValueInt64()) } + if !state.Enabled.IsNull() { + req.Enabled = state.Enabled.ValueBool() + } if !state.Page.IsNull() { req.Page = uint64(state.Page.ValueInt64()) } diff --git a/internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s.go b/internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s.go index cab1069..9e1539e 100644 --- a/internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s.go +++ b/internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s.go @@ -60,31 +60,34 @@ func DataSourceK8s(ctx context.Context, state *models.RecordK8SDataSourceModel, id := uuid.New() *state = models.RecordK8SDataSourceModel{ - K8SID: state.K8SID, - Timeouts: state.Timeouts, - Id: types.StringValue(id.String()), - ACL: flattenACLDataSource(ctx, &cluster.ACL), - AccountID: types.Int64Value(int64(cluster.AccountID)), - AccountName: types.StringValue(cluster.AccountName), - BServiceID: types.Int64Value(int64(cluster.BServiceID)), - K8CI: types.Int64Value(int64(cluster.CIID)), - CreatedBy: types.StringValue(cluster.CreatedBy), - CreatedTime: types.Int64Value(int64(cluster.CreatedTime)), - DeletedBy: types.StringValue(cluster.DeletedBy), - DeletedTime: 
types.Int64Value(int64(cluster.DeletedTime)), - K8CIName: types.StringValue(cluster.K8CIName), - Masters: flattenMasterGroup(ctx, &cluster.K8SGroups.Masters, masterComputeList), - Workers: flattenK8sGroup(ctx, &cluster.K8SGroups.Workers, workersComputeList), - LBID: types.Int64Value(int64(cluster.LBID)), - Name: types.StringValue(cluster.Name), - NetworkPlugin: types.StringValue(cluster.NetworkPlugin), - RGID: types.Int64Value(int64(cluster.RGID)), - RGName: types.StringValue(cluster.RGName), - Status: types.StringValue(cluster.Status), - TechStatus: types.StringValue(cluster.TechStatus), - UpdatedBy: types.StringValue(cluster.UpdatedBy), - UpdatedTime: types.Int64Value(int64(cluster.UpdatedTime)), - VinsId: types.Int64Value(int64(k8sList.Data[0].VINSID)), + K8SID: state.K8SID, + Timeouts: state.Timeouts, + Id: types.StringValue(id.String()), + ACL: flattenACLDataSource(ctx, &cluster.ACL), + AccountID: types.Int64Value(int64(cluster.AccountID)), + AccountName: types.StringValue(cluster.AccountName), + K8SAddressVIP: flattenAddressVIP(ctx, cluster.AddressVIP), + BServiceID: types.Int64Value(int64(cluster.BServiceID)), + K8CI: types.Int64Value(int64(cluster.CIID)), + CreatedBy: types.StringValue(cluster.CreatedBy), + CreatedTime: types.Int64Value(int64(cluster.CreatedTime)), + DeletedBy: types.StringValue(cluster.DeletedBy), + DeletedTime: types.Int64Value(int64(cluster.DeletedTime)), + ExtnetOnly: types.BoolValue(cluster.ExtnetOnly), + HighlyAvailableLB: types.BoolValue(cluster.HighlyAvailableLB), + K8CIName: types.StringValue(cluster.K8CIName), + Masters: flattenMasterGroup(ctx, &cluster.K8SGroups.Masters, masterComputeList), + Workers: flattenK8sGroup(ctx, &cluster.K8SGroups.Workers, workersComputeList), + LBID: types.Int64Value(int64(cluster.LBID)), + Name: types.StringValue(cluster.Name), + NetworkPlugin: types.StringValue(cluster.NetworkPlugin), + RGID: types.Int64Value(int64(cluster.RGID)), + RGName: types.StringValue(cluster.RGName), + Status: types.StringValue(cluster.Status), + TechStatus: types.StringValue(cluster.TechStatus), + UpdatedBy: types.StringValue(cluster.UpdatedBy), + UpdatedTime: types.Int64Value(int64(cluster.UpdatedTime)), + VinsId: types.Int64Value(int64(k8sList.Data[0].VINSID)), } if cluster.LBID != 0 { @@ -106,6 +109,18 @@ func DataSourceK8s(ctx context.Context, state *models.RecordK8SDataSourceModel, return nil } +func flattenAddressVIP(ctx context.Context, addressedVip k8s.K8SAddressVIP) *models.K8SAddressVIP { + tflog.Info(ctx, "Start flattenAddressVIP") + + res := models.K8SAddressVIP{ + BackendIP: types.StringValue(addressedVip.BackendIP), + FrontendIP: types.StringValue(addressedVip.FrontendIP), + } + + tflog.Info(ctx, "End flattenAddressVIP") + return &res +} + func flattenMasterGroup(ctx context.Context, mastersGroup *k8s.MasterGroup, masters []*compute.RecordCompute) *models.MasterGroupDataSourceModel { tflog.Info(ctx, "Start flattenMasterGroup") diff --git a/internal/service/cloudapi/k8s/flattens/flatten_resource_k8s_cp.go b/internal/service/cloudapi/k8s/flattens/flatten_resource_k8s_cp.go index f2555a9..7f8c058 100644 --- a/internal/service/cloudapi/k8s/flattens/flatten_resource_k8s_cp.go +++ b/internal/service/cloudapi/k8s/flattens/flatten_resource_k8s_cp.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-log/tflog" decort 
"repository.basistech.ru/BASIS/decort-golang-sdk" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute" @@ -67,6 +68,7 @@ func K8SCPResource(ctx context.Context, plan *models.ResourceK8SCPModel, c *deco Description: plan.Description, ExtNetOnly: plan.ExtNetOnly, OidcCertificate: plan.OidcCertificate, + Chipset: plan.Chipset, Start: plan.Start, Enabled: plan.Enabled, Permanently: plan.Permanently, @@ -90,6 +92,7 @@ func K8SCPResource(ctx context.Context, plan *models.ResourceK8SCPModel, c *deco CreatedTime: types.Int64Value(int64(cluster.CreatedTime)), DeletedBy: types.StringValue(cluster.DeletedBy), DeletedTime: types.Int64Value(int64(cluster.DeletedTime)), + K8SAddressVIP: flattenK8SAddressVIP(ctx, cluster.AddressVIP), K8SID: types.Int64Value(int64(cluster.ID)), K8CIName: types.StringValue(cluster.K8CIName), LBID: types.Int64Value(int64(cluster.LBID)), @@ -126,6 +129,23 @@ func K8SCPResource(ctx context.Context, plan *models.ResourceK8SCPModel, c *deco return nil } +func flattenK8SAddressVIP(ctx context.Context, addressedVip k8s.K8SAddressVIP) basetypes.ObjectValue { + tflog.Info(ctx, "Start flattenAddressVIP") + + temp := models.AddressVIPModel{ + BackendIP: types.StringValue(addressedVip.BackendIP), + FrontendIP: types.StringValue(addressedVip.FrontendIP), + } + + res, err := types.ObjectValueFrom(ctx, models.AddressVIP, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenK8SAddressVIP struct to obj", err)) + } + + tflog.Info(ctx, "End flattenAddressVIP") + return res +} + func flattenDetailedInfo(ctx context.Context, di *k8s.ListDetailedInfo, computes []*compute.RecordCompute) types.List { tflog.Info(ctx, "Start flattenDetailedInfo") diff --git a/internal/service/cloudapi/k8s/flattens/flatten_resource_k8s_wg.go b/internal/service/cloudapi/k8s/flattens/flatten_resource_k8s_wg.go index 503b4c7..7bc09e9 100644 --- a/internal/service/cloudapi/k8s/flattens/flatten_resource_k8s_wg.go +++ b/internal/service/cloudapi/k8s/flattens/flatten_resource_k8s_wg.go @@ -48,6 +48,8 @@ func K8SWGResource(ctx context.Context, plan *models.ResourceK8SWGModel, c *deco WorkerSEPPool: plan.WorkerSEPPool, CloudInit: plan.CloudInit, Timeouts: plan.Timeouts, + Chipset: plan.Chipset, + WorkerChipset: plan.WorkerChipset, Id: types.StringValue(strconv.Itoa(int(wg.ID))), WorkerGroupId: types.Int64Value(int64(wg.ID)), LastUpdated: plan.LastUpdated, diff --git a/internal/service/cloudapi/k8s/models/model_data_source_k8s.go b/internal/service/cloudapi/k8s/models/model_data_source_k8s.go index 0f57f86..49e1b96 100644 --- a/internal/service/cloudapi/k8s/models/model_data_source_k8s.go +++ b/internal/service/cloudapi/k8s/models/model_data_source_k8s.go @@ -10,32 +10,40 @@ type RecordK8SDataSourceModel struct { K8SID types.Int64 `tfsdk:"k8s_id"` Timeouts timeouts.Value `tfsdk:"timeouts"` // response fields - Id types.String `tfsdk:"id"` - ACL *RecordACLDataSourceModel `tfsdk:"acl"` - AccountID types.Int64 `tfsdk:"account_id"` - AccountName types.String `tfsdk:"account_name"` - BServiceID types.Int64 `tfsdk:"bservice_id"` - K8CI types.Int64 `tfsdk:"k8sci_id"` - CreatedBy types.String `tfsdk:"created_by"` - CreatedTime types.Int64 `tfsdk:"created_time"` - DeletedBy types.String `tfsdk:"deleted_by"` - DeletedTime types.Int64 `tfsdk:"deleted_time"` - ExtNetID types.Int64 `tfsdk:"extnet_id"` - K8CIName types.String `tfsdk:"k8sci_name"` - Masters *MasterGroupDataSourceModel `tfsdk:"masters"` - Workers []ItemK8SGroupDataSourceModel `tfsdk:"workers"` - LBID types.Int64 `tfsdk:"lb_id"` - LBIP 
types.String `tfsdk:"lb_ip"` - Name types.String `tfsdk:"name"` - NetworkPlugin types.String `tfsdk:"network_plugin"` - RGID types.Int64 `tfsdk:"rg_id"` - RGName types.String `tfsdk:"rg_name"` - Status types.String `tfsdk:"status"` - TechStatus types.String `tfsdk:"tech_status"` - UpdatedBy types.String `tfsdk:"updated_by"` - UpdatedTime types.Int64 `tfsdk:"updated_time"` - Kubeconfig types.String `tfsdk:"kubeconfig"` - VinsId types.Int64 `tfsdk:"vins_id"` + Id types.String `tfsdk:"id"` + ACL *RecordACLDataSourceModel `tfsdk:"acl"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + BServiceID types.Int64 `tfsdk:"bservice_id"` + K8CI types.Int64 `tfsdk:"k8sci_id"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + ExtnetOnly types.Bool `tfsdk:"extnet_only"` + HighlyAvailableLB types.Bool `tfsdk:"ha_mode"` + K8SAddressVIP *K8SAddressVIP `tfsdk:"address_vip"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + ExtNetID types.Int64 `tfsdk:"extnet_id"` + K8CIName types.String `tfsdk:"k8sci_name"` + Masters *MasterGroupDataSourceModel `tfsdk:"masters"` + Workers []ItemK8SGroupDataSourceModel `tfsdk:"workers"` + LBID types.Int64 `tfsdk:"lb_id"` + LBIP types.String `tfsdk:"lb_ip"` + Name types.String `tfsdk:"name"` + NetworkPlugin types.String `tfsdk:"network_plugin"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + Kubeconfig types.String `tfsdk:"kubeconfig"` + VinsId types.Int64 `tfsdk:"vins_id"` +} + +type K8SAddressVIP struct { + BackendIP types.String `tfsdk:"backend_ip"` + FrontendIP types.String `tfsdk:"frontend_ip"` } type RecordACLDataSourceModel struct { diff --git a/internal/service/cloudapi/k8s/models/model_resource_k8s_cp.go b/internal/service/cloudapi/k8s/models/model_resource_k8s_cp.go index ea53981..da05f78 100644 --- a/internal/service/cloudapi/k8s/models/model_resource_k8s_cp.go +++ b/internal/service/cloudapi/k8s/models/model_resource_k8s_cp.go @@ -16,6 +16,7 @@ type ResourceK8SCPModel struct { SEPPool types.String `tfsdk:"sep_pool"` Num types.Int64 `tfsdk:"num"` CPU types.Int64 `tfsdk:"cpu"` + Chipset types.String `tfsdk:"chipset"` RAM types.Int64 `tfsdk:"ram"` Disk types.Int64 `tfsdk:"disk"` ExtNetID types.Int64 `tfsdk:"extnet_id"` @@ -51,6 +52,7 @@ type ResourceK8SCPModel struct { DeletedTime types.Int64 `tfsdk:"deleted_time"` K8SID types.Int64 `tfsdk:"k8s_id"` K8CIName types.String `tfsdk:"k8s_ci_name"` + K8SAddressVIP types.Object `tfsdk:"address_vip"` LBID types.Int64 `tfsdk:"lb_id"` LBIP types.String `tfsdk:"lb_ip"` MasterGroupId types.Int64 `tfsdk:"master_group_id"` @@ -63,6 +65,11 @@ type ResourceK8SCPModel struct { Kubeconfig types.String `tfsdk:"kubeconfig"` } +type AddressVIPModel struct { + BackendIP types.String `tfsdk:"backend_ip"` + FrontendIP types.String `tfsdk:"frontend_ip"` +} + type RecordACLModel struct { AccountACL types.List `tfsdk:"account_acl"` K8SACL types.List `tfsdk:"k8s_acl"` @@ -93,6 +100,11 @@ type ItemInterfacesModel struct { IpAddress types.String `tfsdk:"ip_address"` } +var AddressVIP map[string]attr.Type = map[string]attr.Type{ + "backend_ip": types.StringType, + "frontend_ip": types.StringType, +} + var ItemInterfaces map[string]attr.Type = map[string]attr.Type{ "def_gw": types.StringType, 
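
The AddressVIP plumbing above relies on types.ObjectValueFrom, which validates a tagged Go struct against an attr.Type map; the tfsdk tags must match the map keys exactly. A minimal, self-contained sketch, with names shortened from AddressVIPModel and the AddressVIP map above; note that ObjectValueFrom returns diagnostics, which flattenK8SAddressVIP only logs, so a caller that wants to surface them would check them as below.

package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// addressVIPModel mirrors AddressVIPModel from the hunk above.
type addressVIPModel struct {
	BackendIP  types.String `tfsdk:"backend_ip"`
	FrontendIP types.String `tfsdk:"frontend_ip"`
}

// addressVIPType mirrors the AddressVIP attr.Type map above.
var addressVIPType = map[string]attr.Type{
	"backend_ip":  types.StringType,
	"frontend_ip": types.StringType,
}

func main() {
	ctx := context.Background()
	temp := addressVIPModel{
		BackendIP:  types.StringValue("10.0.0.10"),
		FrontendIP: types.StringValue("192.168.1.10"),
	}
	// ObjectValueFrom checks the struct against the attribute types and
	// reports any mismatch through diagnostics rather than an error value.
	obj, diags := types.ObjectValueFrom(ctx, addressVIPType, temp)
	if diags.HasError() {
		fmt.Println(diags)
		return
	}
	fmt.Println(obj)
}
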
"ip_address": types.StringType, diff --git a/internal/service/cloudapi/k8s/models/model_resource_k8s_wg.go b/internal/service/cloudapi/k8s/models/model_resource_k8s_wg.go index 9b1bcda..78fae64 100644 --- a/internal/service/cloudapi/k8s/models/model_resource_k8s_wg.go +++ b/internal/service/cloudapi/k8s/models/model_resource_k8s_wg.go @@ -11,8 +11,10 @@ type ResourceK8SWGModel struct { K8SID types.Int64 `tfsdk:"k8s_id"` Name types.String `tfsdk:"name"` Num types.Int64 `tfsdk:"num"` + WorkerChipset types.String `tfsdk:"worker_chipset"` CPU types.Int64 `tfsdk:"cpu"` RAM types.Int64 `tfsdk:"ram"` + Chipset types.String `tfsdk:"chipset"` Disk types.Int64 `tfsdk:"disk"` Annotations types.List `tfsdk:"annotations"` Labels types.List `tfsdk:"labels"` diff --git a/internal/service/cloudapi/k8s/schemas/schema_data_source_k8s.go b/internal/service/cloudapi/k8s/schemas/schema_data_source_k8s.go index e13586d..664f790 100644 --- a/internal/service/cloudapi/k8s/schemas/schema_data_source_k8s.go +++ b/internal/service/cloudapi/k8s/schemas/schema_data_source_k8s.go @@ -122,6 +122,23 @@ func MakeSchemaDataSourceK8S() map[string]schema.Attribute { "extnet_id": schema.Int64Attribute{ Computed: true, }, + "extnet_only": schema.BoolAttribute{ + Computed: true, + }, + "ha_mode": schema.BoolAttribute{ + Computed: true, + }, + "address_vip": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + }, + }, "k8sci_name": schema.StringAttribute{ Computed: true, }, diff --git a/internal/service/cloudapi/k8s/schemas/schema_resource_k8s_cp.go b/internal/service/cloudapi/k8s/schemas/schema_resource_k8s_cp.go index 2e0fcb3..c6c5d54 100644 --- a/internal/service/cloudapi/k8s/schemas/schema_resource_k8s_cp.go +++ b/internal/service/cloudapi/k8s/schemas/schema_resource_k8s_cp.go @@ -126,7 +126,14 @@ func MakeSchemaResourceK8SCP() map[string]schema.Attribute { }, "oidc_cert": schema.StringAttribute{ Optional: true, - Description: "insert ssl certificate in x509 pem format", + Description: "Insert ssl certificate in x509 pem format", + }, + "chipset": schema.StringAttribute{ + Optional: true, + Description: "Type of the emulated system", + Validators: []validator.String{ + stringvalidator.OneOfCaseInsensitive("Q35", "i440fx"), + }, }, "lb_sysctl_params": schema.ListNestedAttribute{ Optional: true, @@ -302,6 +309,17 @@ func MakeSchemaResourceK8SCP() map[string]schema.Attribute { "account_name": schema.StringAttribute{ Computed: true, }, + "address_vip": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + }, + }, "bservice_id": schema.Int64Attribute{ Computed: true, }, diff --git a/internal/service/cloudapi/k8s/schemas/schema_resource_k8s_wg.go b/internal/service/cloudapi/k8s/schemas/schema_resource_k8s_wg.go index 1a84c98..332c9ba 100644 --- a/internal/service/cloudapi/k8s/schemas/schema_resource_k8s_wg.go +++ b/internal/service/cloudapi/k8s/schemas/schema_resource_k8s_wg.go @@ -1,6 +1,7 @@ package schemas import ( + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" @@ -28,11 +29,25 @@ func MakeSchemaResourceK8SWG() map[string]schema.Attribute { Computed: true, Description: "Number of worker nodes to create.", }, + "worker_chipset": schema.StringAttribute{ + Optional: true, + Description: "Type of the emulated system of worker nodes", + Validators: []validator.String{ + stringvalidator.OneOfCaseInsensitive("Q35", "i440fx"), + }, + }, "cpu": schema.Int64Attribute{ Optional: true, Computed: true, Description: "Worker node CPU count.", }, + "chipset": schema.StringAttribute{ + Optional: true, + Description: "Type of the emulated system of work group", + Validators: []validator.String{ + stringvalidator.OneOfCaseInsensitive("Q35", "i440fx"), + }, + }, "ram": schema.Int64Attribute{ Optional: true, Computed: true, diff --git a/internal/service/cloudapi/k8s/utilities/utility_resource_k8s_cp.go b/internal/service/cloudapi/k8s/utilities/utility_resource_k8s_cp.go index 8cf5660..0c918c4 100644 --- a/internal/service/cloudapi/k8s/utilities/utility_resource_k8s_cp.go +++ b/internal/service/cloudapi/k8s/utilities/utility_resource_k8s_cp.go @@ -100,6 +100,10 @@ func CreateRequestResourceK8CP(ctx context.Context, plan *models.ResourceK8SCPMo req.OidcCertificate = plan.OidcCertificate.ValueString() } + if !plan.Chipset.IsNull() { + req.Chipset = plan.Chipset.ValueString() + } + if !plan.Description.IsNull() { req.Description = plan.Description.ValueString() } @@ -427,7 +431,7 @@ func K8SCPDeleteMaster(ctx context.Context, plan *models.ResourceK8SCPModel, sta diags := diag.Diagnostics{} - deleteMasterComp := make([]string, 0) + deleteMasterComp := make([]uint64, 0) for i, val := range state.DetailedInfo.Elements() { if i == 2 { @@ -437,15 +441,16 @@ func K8SCPDeleteMaster(ctx context.Context, plan *models.ResourceK8SCPModel, sta if err != nil { tflog.Error(ctx, fmt.Sprint("Error flattenDetailedInfo struct to obj", err), map[string]any{"k8s_id": plan.Id.ValueString()}) } - id := obj.Attributes()["compute_id"] + id := obj.Attributes()["compute_id"].(types.Int64).ValueInt64() - deleteMasterComp = append(deleteMasterComp, id.String()) + deleteMasterComp = append(deleteMasterComp, uint64(id)) } req := k8s.DeleteMasterFromGroupRequest{ K8SID: uint64(state.K8SID.ValueInt64()), MasterGroupID: uint64(state.MasterGroupId.ValueInt64()), - MasterIDs: deleteMasterComp, + //TODO fix it + //MasterIDs: deleteMasterComp, } _, err := c.CloudAPI().K8S().DeleteMasterFromGroup(ctx, req) diff --git a/internal/service/cloudapi/k8s/utilities/utility_resource_k8s_wg.go b/internal/service/cloudapi/k8s/utilities/utility_resource_k8s_wg.go index df263ee..848c83d 100644 --- a/internal/service/cloudapi/k8s/utilities/utility_resource_k8s_wg.go +++ b/internal/service/cloudapi/k8s/utilities/utility_resource_k8s_wg.go @@ -13,6 +13,7 @@ import ( decort "repository.basistech.ru/BASIS/decort-golang-sdk" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/k8s" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks" "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/k8s/models" ) @@ -76,6 +77,9 @@ func CreateRequestResourceK8WG(ctx context.Context, plan *models.ResourceK8SWGMo if !plan.CloudInit.IsNull() { req.UserData = plan.CloudInit.ValueString() } + if !plan.Chipset.IsNull() { + req.Chipset = plan.Chipset.ValueString() + } tflog.Info(ctx, "End CreateRequestResourceK8WG", map[string]any{"name": plan.Name.ValueString()}) return req @@ -87,14 +91,44 @@ func 
ResourceK8SWGCreate(ctx context.Context, plan *models.ResourceK8SWGModel, c diags := diag.Diagnostics{} // Make request and get response - wgId, err := c.CloudAPI().K8S().WorkersGroupAdd(ctx, CreateRequestResourceK8WG(ctx, plan)) + resp, err := c.CloudAPI().K8S().WorkersGroupAdd(ctx, CreateRequestResourceK8WG(ctx, plan)) if err != nil { tflog.Error(ctx, "Error response for create k8s_wg") diags.AddError("Unable to Create K8SWG", err.Error()) return diags } - plan.Id = types.StringValue(strconv.Itoa(int(wgId))) + taskReq := tasks.GetRequest{ + AuditID: strings.Trim(resp, `"`), + } + for { + task, err := c.CloudAPI().Tasks().Get(ctx, taskReq) + if err != nil { + diags.AddError("The audit cannot be found", err.Error()) + return diags + } + + tflog.Info(ctx, fmt.Sprintf("ResourceK8SWGCreate instance creating - %s", task.Stage)) + + if task.Completed { + if task.Error != "" { + diags.AddError("Cannot create k8s wg instance:", task.Error) + return diags + } + + result, err := task.Result.ID() + if err != nil { + diags.AddError("Cannot get wg ID:", err.Error()) + return diags + } + plan.Id = types.StringValue(strconv.Itoa(result)) + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + break + } + + time.Sleep(time.Second * 20) + } + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) wg, k8sId, err := K8SWGResourceCheckPresence(ctx, plan, c) @@ -185,6 +219,7 @@ func K8SWGUpdateNumWorkers(ctx context.Context, plan, state *models.ResourceK8SW K8SID: k8sId, WorkersGroupID: wg.ID, Num: uint64(newNum) - wg.Num, + Chipset: plan.WorkerChipset.ValueString(), } tflog.Info(ctx, "Add workers in wg with id", map[string]any{"wg_id": state.WorkerGroupId.ValueInt64(), "k8s_id": state.K8SID.ValueInt64()}) diff --git a/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute.go b/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute.go index 9538c27..008f471 100644 --- a/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute.go +++ b/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-log/tflog" decort "repository.basistech.ru/BASIS/decort-golang-sdk" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models" "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/utilities" ) @@ -26,6 +27,8 @@ func DataSourceCompute(ctx context.Context, state *models.RecordComputeModel, c return diags } + pciDevicesList, err := utilities.ComputePCIDevicesListCheckPresence(ctx, state, c) + id := uuid.New() customFields, _ := json.Marshal(computeRecord.CustomFields) @@ -44,6 +47,7 @@ func DataSourceCompute(ctx context.Context, state *models.RecordComputeModel, c AffinityWeight: types.Int64Value(int64(computeRecord.AffinityWeight)), AntiAffinityRules: flattenAffinityRule(ctx, &computeRecord.AntiAffinityRules), Architecture: types.StringValue(computeRecord.Architecture), + Chipset: types.StringValue(computeRecord.Chipset), BootDiskSize: types.Int64Value(int64(computeRecord.BootDiskSize)), CdImageId: types.Int64Value(int64(computeRecord.CdImageId)), CloneReference: types.Int64Value(int64(computeRecord.CloneReference)), @@ -81,6 +85,7 @@ func DataSourceCompute(ctx context.Context, state *models.RecordComputeModel, c NumaNodeId: 
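
An aside on the create flow shown above: WorkersGroupAdd no longer returns the worker-group ID directly, it returns an audit ID, and the provider polls Tasks().Get until the task completes. Below is a sketch of that loop factored into a helper. taskRecord, getTask and waitForTask are illustrative names; the struct models only the task fields the loop above reads (Stage, Completed, Error and the result ID), and the select on ctx.Done() is an addition the loop above does not have, so cancellation is honoured instead of sleeping forever.

package example

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// taskRecord models only the task fields the loop above reads; the real
// type lives in decort-golang-sdk's cloudapi/tasks package.
type taskRecord struct {
	Stage     string
	Completed bool
	Error     string
	ResultID  int
}

// getTask abstracts c.CloudAPI().Tasks().Get(ctx, tasks.GetRequest{AuditID: id}).
type getTask func(ctx context.Context, auditID string) (taskRecord, error)

// waitForTask polls the task until it completes, honouring context
// cancellation; the 20-second interval matches the loop above.
func waitForTask(ctx context.Context, get getTask, auditID string) (int, error) {
	ticker := time.NewTicker(20 * time.Second)
	defer ticker.Stop()
	for {
		task, err := get(ctx, auditID)
		if err != nil {
			return 0, fmt.Errorf("audit %s cannot be found: %w", auditID, err)
		}
		if task.Completed {
			if task.Error != "" {
				return 0, errors.New(task.Error)
			}
			return task.ResultID, nil
		}
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		case <-ticker.C:
		}
	}
}
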
types.Int64Value(int64(computeRecord.NumaNodeId)), OSUsers: flattenOSUsers(ctx, &computeRecord.OSUsers), Pinned: types.BoolValue(computeRecord.Pinned), + PCIDevices: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, flattenPCI(ctx, pciDevicesList)), RAM: types.Int64Value(int64(computeRecord.RAM)), ReferenceID: types.StringValue(computeRecord.ReferenceID), Registered: types.BoolValue(computeRecord.Registered), @@ -189,6 +194,7 @@ func flattenDisks(ctx context.Context, disks *compute.ListComputeDisks) []models ACL: types.StringValue(string(acl)), AccountID: types.Int64Value(int64(item.AccountID)), BootPartition: types.Int64Value(int64(item.BootPartition)), + BusNumber: types.Int64Value(int64(item.BusNumber)), CreatedTime: types.Int64Value(int64(item.CreatedTime)), DeletedTime: types.Int64Value(int64(item.CreatedTime)), Description: types.StringValue(item.Description), @@ -290,6 +296,7 @@ func flattenInterfaces(ctx context.Context, interfaces *compute.ListInterfaces) res := make([]models.ItemInterfaceModel, 0, len(*interfaces)) for _, item := range *interfaces { temp := models.ItemInterfaceModel{ + BusNumber: types.Int64Value(int64(item.BusNumber)), ConnID: types.Int64Value(int64(item.ConnID)), ConnType: types.StringValue(item.ConnType), DefGW: types.StringValue(item.DefGW), @@ -297,14 +304,24 @@ func flattenInterfaces(ctx context.Context, interfaces *compute.ListInterfaces) FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)), GUID: types.StringValue(item.GUID), IPAddress: types.StringValue(item.IPAddress), - ListenSSH: types.BoolValue(item.ListenSSH), - MAC: types.StringValue(item.MAC), - Name: types.StringValue(item.Name), - NetID: types.Int64Value(int64(item.NetID)), - NetMask: types.Int64Value(int64(item.NetMask)), - NetType: types.StringValue(item.NetType), - NodeID: types.Int64Value(int64(item.NodeID)), - PCISlot: types.Int64Value(item.PCISlot), + LibvirtSettings: &models.LibvirtModel{ + GUID: types.StringValue(item.LibvirtSettings.GUID), + TXMode: types.StringValue(item.LibvirtSettings.TXMode), + IOEventFD: types.StringValue(item.LibvirtSettings.IOEventFD), + EventIDx: types.StringValue(item.LibvirtSettings.EventIDx), + Queues: types.Int64Value(int64(item.LibvirtSettings.Queues)), + RXQueueSize: types.Int64Value(int64(item.LibvirtSettings.RXQueueSize)), + TXQueueSize: types.Int64Value(int64(item.LibvirtSettings.TXQueueSize)), + }, + ListenSSH: types.BoolValue(item.ListenSSH), + MAC: types.StringValue(item.MAC), + MTU: types.Int64Value(int64(item.MTU)), + Name: types.StringValue(item.Name), + NetID: types.Int64Value(int64(item.NetID)), + NetMask: types.Int64Value(int64(item.NetMask)), + NetType: types.StringValue(item.NetType), + NodeID: types.Int64Value(int64(item.NodeID)), + PCISlot: types.Int64Value(item.PCISlot), QOS: &models.QOSModel{ ERate: types.Int64Value(int64(item.QOS.ERate)), GUID: types.StringValue(item.QOS.GUID), @@ -365,3 +382,15 @@ func flattenOSUsers(ctx context.Context, osUsers *compute.ListOSUser) []models.I tflog.Info(ctx, "End flattenOSUsers") return res } + +func flattenPCI(ctx context.Context, pciList *compute.ListPCIDevices) []uint64 { + tflog.Info(ctx, "Start flattenPCI") + res := make([]uint64, 0, len(pciList.Data)) + + for _, v := range pciList.Data { + res = append(res, v.ID) + } + + tflog.Info(ctx, "End flattenPCI") + return res +} diff --git a/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_list.go b/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_list.go index c094d11..a895034 100644 --- 
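
flattenPCI above collects raw uint64 device IDs and hands them to flattens.FlattenSimpleTypeToList. The framework primitive such a helper presumably wraps is types.ListValueFrom; a minimal sketch, noting that uint64 is not a framework-native element type, so the IDs are converted to int64 first:

package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	ctx := context.Background()
	// Device IDs as flattenPCI above collects them from pciDevicesList.Data.
	ids := []uint64{101, 102, 205}

	// Convert to int64 before building the framework list.
	elems := make([]int64, 0, len(ids))
	for _, id := range ids {
		elems = append(elems, int64(id))
	}
	list, diags := types.ListValueFrom(ctx, types.Int64Type, elems)
	if diags.HasError() {
		fmt.Println(diags)
		return
	}
	fmt.Println(list) // [101,102,205]
}
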
a/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_list.go +++ b/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_list.go @@ -75,6 +75,7 @@ func flattenItemsList(ctx context.Context, computes *compute.ListComputes) []mod BootDiskSize: types.Int64Value(int64(item.BootDiskSize)), CdImageId: types.Int64Value(int64(item.CdImageId)), CloneReference: types.Int64Value(int64(item.CloneReference)), + Chipset: types.StringValue(item.Chipset), ComputeCIID: types.Int64Value(int64(item.ComputeCIID)), CPU: types.Int64Value(int64(item.CPU)), CPUPin: types.BoolValue(item.CPUPin), @@ -195,8 +196,9 @@ func flattenDisksInList(ctx context.Context, disks *compute.ListInfoDisks) []mod res := make([]models.DiskInListModel, 0, len(*disks)) for _, item := range *disks { temp := models.DiskInListModel{ - DiskId: types.Int64Value(int64(item.ID)), - PCISlot: types.Int64Value(item.PCISlot), + BusNumber: types.Int64Value(int64(item.BusNumber)), + DiskId: types.Int64Value(int64(item.ID)), + PCISlot: types.Int64Value(item.PCISlot), } res = append(res, temp) } @@ -213,6 +215,7 @@ func flattenInterfaceInList(ctx context.Context, interfaces *compute.ListInterfa res := make([]models.ItemVNFInterfaceInListModel, 0, len(*interfaces)) for _, item := range *interfaces { temp := models.ItemVNFInterfaceInListModel{ + BusNumber: types.Int64Value(int64(item.BusNumber)), ConnID: types.Int64Value(int64(item.ConnID)), ConnType: types.StringValue(item.ConnType), DefGW: types.StringValue(item.DefGW), @@ -220,14 +223,23 @@ func flattenInterfaceInList(ctx context.Context, interfaces *compute.ListInterfa FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)), GUID: types.StringValue(item.GUID), IPAddress: types.StringValue(item.IPAddress), - ListenSSH: types.BoolValue(item.ListenSSH), - MAC: types.StringValue(item.MAC), - Name: types.StringValue(item.Name), - NetID: types.Int64Value(int64(item.NetID)), - NetMask: types.Int64Value(int64(item.NetMask)), - NetType: types.StringValue(item.NetType), - NodeID: types.Int64Value(int64(item.NodeID)), - PCISlot: types.Int64Value(item.PCISlot), + LibvirtSettings: &models.LibvirtModel{ + GUID: types.StringValue(item.LibvirtSettings.GUID), + TXMode: types.StringValue(item.LibvirtSettings.TXMode), + IOEventFD: types.StringValue(item.LibvirtSettings.IOEventFD), + EventIDx: types.StringValue(item.LibvirtSettings.EventIDx), + Queues: types.Int64Value(int64(item.LibvirtSettings.Queues)), + RXQueueSize: types.Int64Value(int64(item.LibvirtSettings.RXQueueSize)), + TXQueueSize: types.Int64Value(int64(item.LibvirtSettings.TXQueueSize)), + }, + ListenSSH: types.BoolValue(item.ListenSSH), + MAC: types.StringValue(item.MAC), + Name: types.StringValue(item.Name), + NetID: types.Int64Value(int64(item.NetID)), + NetMask: types.Int64Value(int64(item.NetMask)), + NetType: types.StringValue(item.NetType), + NodeID: types.Int64Value(int64(item.NodeID)), + PCISlot: types.Int64Value(item.PCISlot), QOS: &models.QOSInListModel{ ERate: types.Int64Value(int64(item.QOS.ERate)), GUID: types.StringValue(item.QOS.GUID), diff --git a/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_list_deleted.go b/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_list_deleted.go index 4a81719..fbcaaa4 100644 --- a/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_list_deleted.go +++ b/internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_list_deleted.go @@ -72,6 +72,7 @@ func flattenItemsListDeleted(ctx context.Context, computes 
*compute.ListComputes Architecture: types.StringValue(item.Architecture), BootDiskSize: types.Int64Value(int64(item.BootDiskSize)), CdImageId: types.Int64Value(int64(item.CdImageId)), + Chipset: types.StringValue(item.Chipset), CloneReference: types.Int64Value(int64(item.CloneReference)), ComputeCIID: types.Int64Value(int64(item.ComputeCIID)), CPU: types.Int64Value(int64(item.CPU)), @@ -193,8 +194,9 @@ func flattenDisksInListDeleted(ctx context.Context, disks *compute.ListInfoDisks res := make([]models.DiskInListDeletedModel, 0, len(*disks)) for _, item := range *disks { temp := models.DiskInListDeletedModel{ - DiskId: types.Int64Value(int64(item.ID)), - PCISlot: types.Int64Value(item.PCISlot), + BusNumber: types.Int64Value(int64(item.BusNumber)), + DiskId: types.Int64Value(int64(item.ID)), + PCISlot: types.Int64Value(item.PCISlot), } res = append(res, temp) } @@ -211,6 +213,7 @@ func flattenInterfaceInListDeleted(ctx context.Context, interfaces *compute.List res := make([]models.ItemVNFInterfaceInListDeletedModel, 0, len(*interfaces)) for _, item := range *interfaces { temp := models.ItemVNFInterfaceInListDeletedModel{ + BusNumber: types.Int64Value(int64(item.BusNumber)), ConnID: types.Int64Value(int64(item.ConnID)), ConnType: types.StringValue(item.ConnType), DefGW: types.StringValue(item.DefGW), @@ -218,14 +221,23 @@ func flattenInterfaceInListDeleted(ctx context.Context, interfaces *compute.List FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)), GUID: types.StringValue(item.GUID), IPAddress: types.StringValue(item.IPAddress), - ListenSSH: types.BoolValue(item.ListenSSH), - MAC: types.StringValue(item.MAC), - Name: types.StringValue(item.Name), - NetID: types.Int64Value(int64(item.NetID)), - NetMask: types.Int64Value(int64(item.NetMask)), - NetType: types.StringValue(item.NetType), - NodeID: types.Int64Value(int64(item.NodeID)), - PCISlot: types.Int64Value(item.PCISlot), + LibvirtSettings: &models.LibvirtModel{ + GUID: types.StringValue(item.LibvirtSettings.GUID), + TXMode: types.StringValue(item.LibvirtSettings.TXMode), + IOEventFD: types.StringValue(item.LibvirtSettings.IOEventFD), + EventIDx: types.StringValue(item.LibvirtSettings.EventIDx), + Queues: types.Int64Value(int64(item.LibvirtSettings.Queues)), + RXQueueSize: types.Int64Value(int64(item.LibvirtSettings.RXQueueSize)), + TXQueueSize: types.Int64Value(int64(item.LibvirtSettings.TXQueueSize)), + }, + ListenSSH: types.BoolValue(item.ListenSSH), + MAC: types.StringValue(item.MAC), + Name: types.StringValue(item.Name), + NetID: types.Int64Value(int64(item.NetID)), + NetMask: types.Int64Value(int64(item.NetMask)), + NetType: types.StringValue(item.NetType), + NodeID: types.Int64Value(int64(item.NodeID)), + PCISlot: types.Int64Value(item.PCISlot), QOS: &models.QOSInListModel{ ERate: types.Int64Value(int64(item.QOS.ERate)), GUID: types.StringValue(item.QOS.GUID), diff --git a/internal/service/cloudapi/kvmvm/flattens/flatten_resource_compute.go b/internal/service/cloudapi/kvmvm/flattens/flatten_resource_compute.go index 4a92fbc..743f303 100644 --- a/internal/service/cloudapi/kvmvm/flattens/flatten_resource_compute.go +++ b/internal/service/cloudapi/kvmvm/flattens/flatten_resource_compute.go @@ -49,15 +49,17 @@ func ComputeResource(ctx context.Context, plan *models.ResourceComputeModel, c * AffinityRules: plan.AffinityRules, AntiAffinityRules: plan.AntiAffinityRules, CustomFields: types.StringValue(string(customFields)), + Chipset: types.StringValue(recordItemCompute.Chipset), Stateless: plan.Stateless, SepId: 
types.Int64Value(int64(bootdisk.SepID)), Pool: types.StringValue(bootdisk.Pool), ExtraDisks: plan.ExtraDisks, - Network: flattenNetwork(ctx, &recordItemCompute.Interfaces), + Network: flattenNetwork(ctx, plan.Network, &recordItemCompute.Interfaces), Tags: plan.Tags, PortForwarding: plan.PortForwarding, UserAccess: plan.UserAccess, Snapshot: plan.Snapshot, + PCIDevices: plan.PCIDevices, Rollback: plan.Rollback, CD: plan.CD, PinToStack: plan.PinToStack, @@ -205,6 +207,7 @@ func flattenDisk(ctx context.Context, disk *compute.ItemComputeDisk) types.Objec ACL: types.StringValue(string(acl)), AccountID: types.Int64Value(int64(disk.AccountID)), BootPartition: types.Int64Value(int64(disk.BootPartition)), + BusNumber: types.Int64Value(int64(disk.BusNumber)), CreatedTime: types.Int64Value(int64(disk.CreatedTime)), DeletedTime: types.Int64Value(int64(disk.DeletedTime)), Description: types.StringValue(disk.Description), @@ -385,25 +388,28 @@ func flattenResourceInterfaces(ctx context.Context, interfaces *compute.ListInte for _, item := range *interfaces { temp := models.ItemResourceInterfacesModel{ - ConnID: types.Int64Value(int64(item.ConnID)), - ConnType: types.StringValue(item.ConnType), - GetGW: types.StringValue(item.DefGW), - Enabled: types.BoolValue(item.Enabled), - FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)), - GUID: types.StringValue(item.GUID), - IPAddress: types.StringValue(item.IPAddress), - ListenSSH: types.BoolValue(item.ListenSSH), - MAC: types.StringValue(item.MAC), - Name: types.StringValue(item.Name), - NetID: types.Int64Value(int64(item.NetID)), - NetMask: types.Int64Value(int64(item.NetMask)), - NetType: types.StringValue(item.NetType), - NodeID: types.Int64Value(int64(item.NodeID)), - PCISlot: types.Int64Value(item.PCISlot), - QOS: flattenQOS(ctx, &item.QOS), - Target: types.StringValue(item.Target), - Type: types.StringValue(item.Type), - VNFs: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, item.VNFs), + BusNumber: types.Int64Value(int64(item.BusNumber)), + ConnID: types.Int64Value(int64(item.ConnID)), + ConnType: types.StringValue(item.ConnType), + GetGW: types.StringValue(item.DefGW), + Enabled: types.BoolValue(item.Enabled), + FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)), + GUID: types.StringValue(item.GUID), + IPAddress: types.StringValue(item.IPAddress), + ListenSSH: types.BoolValue(item.ListenSSH), + MAC: types.StringValue(item.MAC), + MTU: types.Int64Value(int64(item.MTU)), + Name: types.StringValue(item.Name), + NetID: types.Int64Value(int64(item.NetID)), + NetMask: types.Int64Value(int64(item.NetMask)), + NetType: types.StringValue(item.NetType), + NodeID: types.Int64Value(int64(item.NodeID)), + PCISlot: types.Int64Value(item.PCISlot), + QOS: flattenQOS(ctx, &item.QOS), + LibvirtSettings: flattenLibvirtSetttings(ctx, &item.LibvirtSettings), + Target: types.StringValue(item.Target), + Type: types.StringValue(item.Type), + VNFs: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, item.VNFs), } obj, err := types.ObjectValueFrom(ctx, models.ItemInterfaces, temp) if err != nil { @@ -439,6 +445,27 @@ func flattenQOS(ctx context.Context, QOS *compute.QOS) types.Object { return res } +func flattenLibvirtSetttings(ctx context.Context, settings *compute.LibvirtSettings) types.Object { + tflog.Info(ctx, "Start flattenLibvirtSetttings") + temp := models.LibvirtModel{ + GUID: types.StringValue(settings.GUID), + TXMode: types.StringValue(settings.TXMode), + IOEventFD: types.StringValue(settings.IOEventFD), + EventIDx: 
types.StringValue(settings.EventIDx), + Queues: types.Int64Value(int64(settings.Queues)), + RXQueueSize: types.Int64Value(int64(settings.RXQueueSize)), + TXQueueSize: types.Int64Value(int64(settings.TXQueueSize)), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemLibvirtSettings, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenLibvirtSetttings struct to obj", err)) + } + + tflog.Info(ctx, "End flattenLibvirtSetttings") + return res +} + func flattenSnapSets(ctx context.Context, snapSets *compute.ListSnapSets) types.List { tflog.Info(ctx, "Start flattenSnapSets") tempSlice := make([]types.Object, 0, len(*snapSets)) @@ -465,7 +492,7 @@ func flattenSnapSets(ctx context.Context, snapSets *compute.ListSnapSets) types. return res } -func flattenNetwork(ctx context.Context, interfaces *compute.ListInterfaces) types.Set { +func flattenNetwork(ctx context.Context, networks types.Set, interfaces *compute.ListInterfaces) types.Set { tflog.Info(ctx, "Start flattenNetwork") tempSlice := make([]types.Object, 0, len(*interfaces)) @@ -476,6 +503,8 @@ func flattenNetwork(ctx context.Context, interfaces *compute.ListInterfaces) typ NetId: types.Int64Value(int64(item.NetID)), IpAddress: types.StringValue(item.IPAddress), Mac: types.StringValue(item.MAC), + Weight: flattenNetworkWeight(ctx, networks, item), + MTU: types.Int64Value(int64(item.MTU)), } obj, err := types.ObjectValueFrom(ctx, models.ItemNetwork, temp) if err != nil { @@ -492,3 +521,16 @@ func flattenNetwork(ctx context.Context, interfaces *compute.ListInterfaces) typ tflog.Info(ctx, "End flattenNetwork") return res } + +func flattenNetworkWeight(ctx context.Context, networks types.Set, item compute.ItemVNFInterface) types.Int64 { + tflog.Info(ctx, "Start flattenNetworkWeight") + networkList := networks.Elements() + for _, network := range networkList { + networkMap := network.(types.Object).Attributes() + if uint64(networkMap["net_id"].(types.Int64).ValueInt64()) == item.NetID && networkMap["net_type"].(types.String).ValueString() == item.NetType { + return types.Int64Value(networkMap["weight"].(types.Int64).ValueInt64()) + } + } + tflog.Info(ctx, "End flattenNetworkWeight") + return types.Int64Value(0) +} diff --git a/internal/service/cloudapi/kvmvm/input_check.go b/internal/service/cloudapi/kvmvm/input_check.go index 45b6d84..cf847c3 100644 --- a/internal/service/cloudapi/kvmvm/input_check.go +++ b/internal/service/cloudapi/kvmvm/input_check.go @@ -59,14 +59,17 @@ func resourceComputeInputChecks(ctx context.Context, plan *models.ResourceComput diags.AddError(fmt.Sprintf("Cannot create compute because extnet ID %d is not allowed or does not exist", extNetId), err.Error()) } case "VFNIC": - if strings.EqualFold(plan.Driver.ValueString(), "KVM_PPC") { - diags.AddError("can't create compute because 'VFNIC' net_type is not allowed for driver 'KVM_PPC'", "") - } vfpoolId := uint64(elemMap["net_id"].(types.Int64).ValueInt64()) err = ic.ExistVFPool(ctx, vfpoolId, c) if err != nil { diags.AddError(fmt.Sprintf("Cannot create compute because vfpool ID %d is not allowed or does not exist", vfpoolId), err.Error()) } + case "DPDK": + dpdkId := uint64(elemMap["net_id"].(types.Int64).ValueInt64()) + err = ic.ExistDPDK(ctx, dpdkId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot create compute because DPDK net ID %d is not allowed or does not exist", dpdkId), err.Error()) + } } } } diff --git a/internal/service/cloudapi/kvmvm/models/model_data_source_compute.go 
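
flattenNetworkWeight above exists because weight is a client-side ordering hint that the API never returns: the flatten has to read it back out of the plan, matching each API interface against the configured networks on (net_id, net_type), or the attribute would drift on every refresh. The same logic on plain structs, with illustrative type names in place of the framework's attribute machinery:

package main

import "fmt"

// planNetwork and apiInterface model just the fields flattenNetworkWeight
// above compares; weight exists only in the plan, never in the API reply.
type planNetwork struct {
	NetType string
	NetID   uint64
	Weight  int64
}

type apiInterface struct {
	NetType string
	NetID   uint64
}

// weightFor carries the user-configured weight over into flattened state by
// matching on (net_type, net_id); unmatched interfaces fall back to 0, as in
// the code above.
func weightFor(plan []planNetwork, iface apiInterface) int64 {
	for _, n := range plan {
		if n.NetID == iface.NetID && n.NetType == iface.NetType {
			return n.Weight
		}
	}
	return 0
}

func main() {
	plan := []planNetwork{
		{NetType: "VINS", NetID: 7, Weight: 10},
		{NetType: "EXTNET", NetID: 3},
	}
	fmt.Println(weightFor(plan, apiInterface{NetType: "VINS", NetID: 7}))  // 10
	fmt.Println(weightFor(plan, apiInterface{NetType: "DPDK", NetID: 42})) // 0
}
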
b/internal/service/cloudapi/kvmvm/models/model_data_source_compute.go index 031b9b5..89ccee6 100644 --- a/internal/service/cloudapi/kvmvm/models/model_data_source_compute.go +++ b/internal/service/cloudapi/kvmvm/models/model_data_source_compute.go @@ -19,6 +19,7 @@ type RecordComputeModel struct { AffinityWeight types.Int64 `tfsdk:"affinity_weight"` AntiAffinityRules []ItemRuleModel `tfsdk:"anti_affinity_rules"` Architecture types.String `tfsdk:"arch"` + Chipset types.String `tfsdk:"chipset"` BootOrder types.List `tfsdk:"boot_order"` BootDiskSize types.Int64 `tfsdk:"bootdisk_size"` CdImageId types.Int64 `tfsdk:"cd_image_id"` @@ -57,6 +58,7 @@ type RecordComputeModel struct { NumaAffinity types.String `tfsdk:"numa_affinity"` NumaNodeId types.Int64 `tfsdk:"numa_node_id"` OSUsers []ItemOSUserModel `tfsdk:"os_users"` + PCIDevices types.List `tfsdk:"pci_devices"` Pinned types.Bool `tfsdk:"pinned"` RAM types.Int64 `tfsdk:"ram"` ReferenceID types.String `tfsdk:"reference_id"` @@ -109,6 +111,7 @@ type ItemDiskModel struct { ACL types.String `tfsdk:"acl"` AccountID types.Int64 `tfsdk:"account_id"` BootPartition types.Int64 `tfsdk:"boot_partition"` + BusNumber types.Int64 `tfsdk:"bus_number"` CreatedTime types.Int64 `tfsdk:"created_time"` DeletedTime types.Int64 `tfsdk:"deleted_time"` Description types.String `tfsdk:"desc"` @@ -149,25 +152,28 @@ type ItemDiskModel struct { } type ItemInterfaceModel struct { - ConnID types.Int64 `tfsdk:"conn_id"` - ConnType types.String `tfsdk:"conn_type"` - DefGW types.String `tfsdk:"def_gw"` - Enabled types.Bool `tfsdk:"enabled"` - FLIPGroupID types.Int64 `tfsdk:"flip_group_id"` - GUID types.String `tfsdk:"guid"` - IPAddress types.String `tfsdk:"ip_address"` - ListenSSH types.Bool `tfsdk:"listen_ssh"` - MAC types.String `tfsdk:"mac"` - Name types.String `tfsdk:"name"` - NetID types.Int64 `tfsdk:"net_id"` - NetMask types.Int64 `tfsdk:"netmask"` - NetType types.String `tfsdk:"net_type"` - NodeID types.Int64 `tfsdk:"node_id"` - PCISlot types.Int64 `tfsdk:"pci_slot"` - QOS *QOSModel `tfsdk:"qos"` - Target types.String `tfsdk:"target"` - Type types.String `tfsdk:"type"` - VNFs types.List `tfsdk:"vnfs"` + BusNumber types.Int64 `tfsdk:"bus_number"` + ConnID types.Int64 `tfsdk:"conn_id"` + ConnType types.String `tfsdk:"conn_type"` + DefGW types.String `tfsdk:"def_gw"` + Enabled types.Bool `tfsdk:"enabled"` + FLIPGroupID types.Int64 `tfsdk:"flip_group_id"` + GUID types.String `tfsdk:"guid"` + IPAddress types.String `tfsdk:"ip_address"` + LibvirtSettings *LibvirtModel `tfsdk:"libvirt_settings"` + ListenSSH types.Bool `tfsdk:"listen_ssh"` + MAC types.String `tfsdk:"mac"` + MTU types.Int64 `tfsdk:"mtu"` + Name types.String `tfsdk:"name"` + NetID types.Int64 `tfsdk:"net_id"` + NetMask types.Int64 `tfsdk:"netmask"` + NetType types.String `tfsdk:"net_type"` + NodeID types.Int64 `tfsdk:"node_id"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + QOS *QOSModel `tfsdk:"qos"` + Target types.String `tfsdk:"target"` + Type types.String `tfsdk:"type"` + VNFs types.List `tfsdk:"vnfs"` } type QOSModel struct { @@ -177,6 +183,16 @@ type QOSModel struct { InRate types.Int64 `tfsdk:"in_rate"` } +type LibvirtModel struct { + GUID types.String `tfsdk:"guid"` + TXMode types.String `tfsdk:"txmode"` + IOEventFD types.String `tfsdk:"ioeventfd"` + EventIDx types.String `tfsdk:"event_idx"` + Queues types.Int64 `tfsdk:"queues"` + RXQueueSize types.Int64 `tfsdk:"rx_queue_size"` + TXQueueSize types.Int64 `tfsdk:"tx_queue_size"` +} + type ItemSnapSetModel struct { Disks types.List `tfsdk:"disks"` GUID 
types.String `tfsdk:"guid"` diff --git a/internal/service/cloudapi/kvmvm/models/model_data_source_compute_list.go b/internal/service/cloudapi/kvmvm/models/model_data_source_compute_list.go index e9ff9fe..6aa8138 100644 --- a/internal/service/cloudapi/kvmvm/models/model_data_source_compute_list.go +++ b/internal/service/cloudapi/kvmvm/models/model_data_source_compute_list.go @@ -43,6 +43,7 @@ type ItemComputeModel struct { CdImageId types.Int64 `tfsdk:"cd_image_id"` CloneReference types.Int64 `tfsdk:"clone_reference"` Clones types.List `tfsdk:"clones"` + Chipset types.String `tfsdk:"chipset"` ComputeCIID types.Int64 `tfsdk:"computeci_id"` CPU types.Int64 `tfsdk:"cpus"` CPUPin types.Bool `tfsdk:"cpu_pin"` @@ -112,30 +113,33 @@ type ItemRuleInListModel struct { } type DiskInListModel struct { - DiskId types.Int64 `tfsdk:"disk_id"` - PCISlot types.Int64 `tfsdk:"pci_slot"` + BusNumber types.Int64 `tfsdk:"bus_number"` + DiskId types.Int64 `tfsdk:"disk_id"` + PCISlot types.Int64 `tfsdk:"pci_slot"` } type ItemVNFInterfaceInListModel struct { - ConnID types.Int64 `tfsdk:"conn_id"` - ConnType types.String `tfsdk:"conn_type"` - DefGW types.String `tfsdk:"def_gw"` - Enabled types.Bool `tfsdk:"enabled"` - FLIPGroupID types.Int64 `tfsdk:"flip_group_id"` - GUID types.String `tfsdk:"guid"` - IPAddress types.String `tfsdk:"ip_address"` - ListenSSH types.Bool `tfsdk:"listen_ssh"` - MAC types.String `tfsdk:"mac"` - Name types.String `tfsdk:"name"` - NetID types.Int64 `tfsdk:"net_id"` - NetMask types.Int64 `tfsdk:"netmask"` - NetType types.String `tfsdk:"net_type"` - NodeID types.Int64 `tfsdk:"node_id"` - PCISlot types.Int64 `tfsdk:"pci_slot"` - QOS *QOSInListModel `tfsdk:"qos"` - Target types.String `tfsdk:"target"` - Type types.String `tfsdk:"type"` - VNFs types.List `tfsdk:"vnfs"` + BusNumber types.Int64 `tfsdk:"bus_number"` + ConnID types.Int64 `tfsdk:"conn_id"` + ConnType types.String `tfsdk:"conn_type"` + DefGW types.String `tfsdk:"def_gw"` + Enabled types.Bool `tfsdk:"enabled"` + FLIPGroupID types.Int64 `tfsdk:"flip_group_id"` + GUID types.String `tfsdk:"guid"` + IPAddress types.String `tfsdk:"ip_address"` + LibvirtSettings *LibvirtModel `tfsdk:"libvirt_settings"` + ListenSSH types.Bool `tfsdk:"listen_ssh"` + MAC types.String `tfsdk:"mac"` + Name types.String `tfsdk:"name"` + NetID types.Int64 `tfsdk:"net_id"` + NetMask types.Int64 `tfsdk:"netmask"` + NetType types.String `tfsdk:"net_type"` + NodeID types.Int64 `tfsdk:"node_id"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + QOS *QOSInListModel `tfsdk:"qos"` + Target types.String `tfsdk:"target"` + Type types.String `tfsdk:"type"` + VNFs types.List `tfsdk:"vnfs"` } type QOSInListModel struct { diff --git a/internal/service/cloudapi/kvmvm/models/model_data_source_compute_list_deleted.go b/internal/service/cloudapi/kvmvm/models/model_data_source_compute_list_deleted.go index 8263cb3..55fcc96 100644 --- a/internal/service/cloudapi/kvmvm/models/model_data_source_compute_list_deleted.go +++ b/internal/service/cloudapi/kvmvm/models/model_data_source_compute_list_deleted.go @@ -39,6 +39,7 @@ type ItemListDeletedComputeModel struct { BootOrder types.List `tfsdk:"boot_order"` BootDiskSize types.Int64 `tfsdk:"bootdisk_size"` CdImageId types.Int64 `tfsdk:"cd_image_id"` + Chipset types.String `tfsdk:"chipset"` CloneReference types.Int64 `tfsdk:"clone_reference"` Clones types.List `tfsdk:"clones"` ComputeCIID types.Int64 `tfsdk:"computeci_id"` @@ -110,30 +111,33 @@ type ItemRuleInListDeletedModel struct { } type DiskInListDeletedModel struct { - DiskId types.Int64 
`tfsdk:"disk_id"` - PCISlot types.Int64 `tfsdk:"pci_slot"` + BusNumber types.Int64 `tfsdk:"bus_number"` + DiskId types.Int64 `tfsdk:"disk_id"` + PCISlot types.Int64 `tfsdk:"pci_slot"` } type ItemVNFInterfaceInListDeletedModel struct { - ConnID types.Int64 `tfsdk:"conn_id"` - ConnType types.String `tfsdk:"conn_type"` - DefGW types.String `tfsdk:"def_gw"` - Enabled types.Bool `tfsdk:"enabled"` - FLIPGroupID types.Int64 `tfsdk:"flip_group_id"` - GUID types.String `tfsdk:"guid"` - IPAddress types.String `tfsdk:"ip_address"` - ListenSSH types.Bool `tfsdk:"listen_ssh"` - MAC types.String `tfsdk:"mac"` - Name types.String `tfsdk:"name"` - NetID types.Int64 `tfsdk:"net_id"` - NetMask types.Int64 `tfsdk:"netmask"` - NetType types.String `tfsdk:"net_type"` - NodeID types.Int64 `tfsdk:"node_id"` - PCISlot types.Int64 `tfsdk:"pci_slot"` - QOS *QOSInListModel `tfsdk:"qos"` - Target types.String `tfsdk:"target"` - Type types.String `tfsdk:"type"` - VNFs types.List `tfsdk:"vnfs"` + BusNumber types.Int64 `tfsdk:"bus_number"` + ConnID types.Int64 `tfsdk:"conn_id"` + ConnType types.String `tfsdk:"conn_type"` + DefGW types.String `tfsdk:"def_gw"` + Enabled types.Bool `tfsdk:"enabled"` + FLIPGroupID types.Int64 `tfsdk:"flip_group_id"` + GUID types.String `tfsdk:"guid"` + IPAddress types.String `tfsdk:"ip_address"` + LibvirtSettings *LibvirtModel `tfsdk:"libvirt_settings"` + ListenSSH types.Bool `tfsdk:"listen_ssh"` + MAC types.String `tfsdk:"mac"` + Name types.String `tfsdk:"name"` + NetID types.Int64 `tfsdk:"net_id"` + NetMask types.Int64 `tfsdk:"netmask"` + NetType types.String `tfsdk:"net_type"` + NodeID types.Int64 `tfsdk:"node_id"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + QOS *QOSInListModel `tfsdk:"qos"` + Target types.String `tfsdk:"target"` + Type types.String `tfsdk:"type"` + VNFs types.List `tfsdk:"vnfs"` } type QOSInListDeletedModel struct { diff --git a/internal/service/cloudapi/kvmvm/models/model_resource_compute.go b/internal/service/cloudapi/kvmvm/models/model_resource_compute.go index 454093a..75bfbaa 100644 --- a/internal/service/cloudapi/kvmvm/models/model_resource_compute.go +++ b/internal/service/cloudapi/kvmvm/models/model_resource_compute.go @@ -23,6 +23,7 @@ type ResourceComputeModel struct { AffinityRules types.Set `tfsdk:"affinity_rules"` AntiAffinityRules types.Set `tfsdk:"anti_affinity_rules"` CustomFields types.String `tfsdk:"custom_fields"` + Chipset types.String `tfsdk:"chipset"` Stateless types.Bool `tfsdk:"stateless"` SepId types.Int64 `tfsdk:"sep_id"` Pool types.String `tfsdk:"pool"` @@ -32,6 +33,7 @@ type ResourceComputeModel struct { PortForwarding types.Set `tfsdk:"port_forwarding"` UserAccess types.Set `tfsdk:"user_access"` Snapshot types.Set `tfsdk:"snapshot"` + PCIDevices types.Set `tfsdk:"pci_devices"` Rollback types.Object `tfsdk:"rollback"` CD types.Object `tfsdk:"cd"` PinToStack types.Bool `tfsdk:"pin_to_stack"` @@ -124,6 +126,7 @@ type ItemResourceDiskModel struct { ACL types.String `tfsdk:"acl"` AccountID types.Int64 `tfsdk:"account_id"` BootPartition types.Int64 `tfsdk:"boot_partition"` + BusNumber types.Int64 `tfsdk:"bus_number"` CreatedTime types.Int64 `tfsdk:"created_time"` DeletedTime types.Int64 `tfsdk:"deleted_time"` Description types.String `tfsdk:"desc"` @@ -175,28 +178,33 @@ type ItemNetworkModel struct { NetId types.Int64 `tfsdk:"net_id"` IpAddress types.String `tfsdk:"ip_address"` Mac types.String `tfsdk:"mac"` + Weight types.Int64 `tfsdk:"weight"` + MTU types.Int64 `tfsdk:"mtu"` } type ItemResourceInterfacesModel struct { - ConnID types.Int64 
`tfsdk:"conn_id"` - ConnType types.String `tfsdk:"conn_type"` - GetGW types.String `tfsdk:"get_gw"` - Enabled types.Bool `tfsdk:"enabled"` - FLIPGroupID types.Int64 `tfsdk:"flip_group_id"` - GUID types.String `tfsdk:"guid"` - IPAddress types.String `tfsdk:"ip_address"` - ListenSSH types.Bool `tfsdk:"listen_ssh"` - MAC types.String `tfsdk:"mac"` - Name types.String `tfsdk:"name"` - NetID types.Int64 `tfsdk:"net_id"` - NetMask types.Int64 `tfsdk:"netmask"` - NetType types.String `tfsdk:"net_type"` - NodeID types.Int64 `tfsdk:"node_id"` - PCISlot types.Int64 `tfsdk:"pci_slot"` - QOS types.Object `tfsdk:"qos"` - Target types.String `tfsdk:"target"` - Type types.String `tfsdk:"type"` - VNFs types.List `tfsdk:"vnfs"` + BusNumber types.Int64 `tfsdk:"bus_number"` + ConnID types.Int64 `tfsdk:"conn_id"` + ConnType types.String `tfsdk:"conn_type"` + GetGW types.String `tfsdk:"get_gw"` + Enabled types.Bool `tfsdk:"enabled"` + FLIPGroupID types.Int64 `tfsdk:"flip_group_id"` + GUID types.String `tfsdk:"guid"` + IPAddress types.String `tfsdk:"ip_address"` + ListenSSH types.Bool `tfsdk:"listen_ssh"` + MAC types.String `tfsdk:"mac"` + MTU types.Int64 `tfsdk:"mtu"` + Name types.String `tfsdk:"name"` + NetID types.Int64 `tfsdk:"net_id"` + NetMask types.Int64 `tfsdk:"netmask"` + NetType types.String `tfsdk:"net_type"` + NodeID types.Int64 `tfsdk:"node_id"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + QOS types.Object `tfsdk:"qos"` + LibvirtSettings types.Object `tfsdk:"libvirt_settings"` + Target types.String `tfsdk:"target"` + Type types.String `tfsdk:"type"` + VNFs types.List `tfsdk:"vnfs"` } type ItemResourceRulesModel struct { @@ -212,6 +220,8 @@ var ItemNetwork = map[string]attr.Type{ "net_id": types.Int64Type, "ip_address": types.StringType, "mac": types.StringType, + "weight": types.Int64Type, + "mtu": types.Int64Type, } var ItemDisk = map[string]attr.Type{ @@ -219,6 +229,7 @@ var ItemDisk = map[string]attr.Type{ "acl": types.StringType, "account_id": types.Int64Type, "boot_partition": types.Int64Type, + "bus_number": types.Int64Type, "created_time": types.Int64Type, "deleted_time": types.Int64Type, "desc": types.StringType, @@ -293,25 +304,28 @@ var ItemACL = map[string]attr.Type{ } var ItemInterfaces = map[string]attr.Type{ - "conn_id": types.Int64Type, - "conn_type": types.StringType, - "get_gw": types.StringType, - "enabled": types.BoolType, - "flip_group_id": types.Int64Type, - "guid": types.StringType, - "ip_address": types.StringType, - "listen_ssh": types.BoolType, - "mac": types.StringType, - "name": types.StringType, - "net_id": types.Int64Type, - "netmask": types.Int64Type, - "net_type": types.StringType, - "node_id": types.Int64Type, - "pci_slot": types.Int64Type, - "qos": types.ObjectType{AttrTypes: ItemQos}, - "target": types.StringType, - "type": types.StringType, - "vnfs": types.ListType{ElemType: types.Int64Type}, + "bus_number": types.Int64Type, + "conn_id": types.Int64Type, + "conn_type": types.StringType, + "get_gw": types.StringType, + "enabled": types.BoolType, + "flip_group_id": types.Int64Type, + "guid": types.StringType, + "ip_address": types.StringType, + "listen_ssh": types.BoolType, + "mac": types.StringType, + "mtu": types.Int64Type, + "name": types.StringType, + "net_id": types.Int64Type, + "netmask": types.Int64Type, + "net_type": types.StringType, + "node_id": types.Int64Type, + "pci_slot": types.Int64Type, + "qos": types.ObjectType{AttrTypes: ItemQos}, + "libvirt_settings": types.ObjectType{AttrTypes: ItemLibvirtSettings}, + "target": types.StringType, + "type": 
types.StringType, + "vnfs": types.ListType{ElemType: types.Int64Type}, } var ItemQos = map[string]attr.Type{ @@ -321,6 +335,16 @@ var ItemQos = map[string]attr.Type{ "in_rate": types.Int64Type, } +var ItemLibvirtSettings = map[string]attr.Type{ + "guid": types.StringType, + "txmode": types.StringType, + "ioeventfd": types.StringType, + "event_idx": types.StringType, + "queues": types.Int64Type, + "rx_queue_size": types.Int64Type, + "tx_queue_size": types.Int64Type, +} + var ItemOSUsers = map[string]attr.Type{ "guid": types.StringType, "login": types.StringType, diff --git a/internal/service/cloudapi/kvmvm/resource_compute.go b/internal/service/cloudapi/kvmvm/resource_compute.go index f68c49e..7ef118c 100644 --- a/internal/service/cloudapi/kvmvm/resource_compute.go +++ b/internal/service/cloudapi/kvmvm/resource_compute.go @@ -142,6 +142,11 @@ func (r *resourceCompute) Create(ctx context.Context, req resource.CreateRequest resp.Diagnostics.Append(utilities.ComputeResourceCDInsert(ctx, &plan, r.client)...) } + // attach PCI devices to compute if needed, warnings added to resp.Diagnostics in case of failure. + if !plan.PCIDevices.IsNull() { + resp.Diagnostics.Append(utilities.ComputeResourcePCIDevice(ctx, &plan, r.client)...) + } + // pin to stack if needed, warnings added to resp.Diagnostics in case of failure. if !plan.PinToStack.IsNull() && plan.PinToStack.ValueBool() { resp.Diagnostics.Append(utilities.ComputeResourcePinToStack(ctx, &plan, r.client)...) @@ -319,9 +324,18 @@ func (r *resourceCompute) Update(ctx context.Context, req resource.UpdateRequest } } + // PCI device(s) update if needed + if !plan.PCIDevices.Equal(state.PCIDevices) { + resp.Diagnostics.Append(utilities.ComputeResourcePCIDeviceUpdate(ctx, &state, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceCompute: Error update PCI device(s) list") + return + } + } + // Compute parameters update if needed if (!plan.Description.IsUnknown() && !plan.Description.Equal(state.Description)) || !plan.Name.Equal(state.Name) || - !plan.NumaAffinity.Equal(state.NumaAffinity) || !plan.CPUPin.Equal(state.CPUPin) || !plan.HPBacked.Equal(state.HPBacked) { + !plan.NumaAffinity.Equal(state.NumaAffinity) || !plan.CPUPin.Equal(state.CPUPin) || !plan.HPBacked.Equal(state.HPBacked) || (!plan.Chipset.IsUnknown() && !plan.Chipset.Equal(state.Chipset)) { resp.Diagnostics.Append(utilities.ComputeResourceComputeUpdate(ctx, &state, &plan, r.client)...) 
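
ComputeResourcePCIDeviceUpdate above reconciles the pci_devices sets between state and plan. One plausible shape of that reconciliation, a sketch only (the provider's actual helper and the SDK attach/detach calls are not shown in this hunk):

package main

import "fmt"

// diffPCIDevices computes which device IDs to attach and which to detach
// when plan.PCIDevices differs from state.PCIDevices, the condition under
// which ComputeResourcePCIDeviceUpdate above is invoked. The set semantics
// mirror the schema's SetAttribute: order is irrelevant, IDs are unique.
func diffPCIDevices(state, plan []int64) (attach, detach []int64) {
	inState := make(map[int64]bool, len(state))
	for _, id := range state {
		inState[id] = true
	}
	inPlan := make(map[int64]bool, len(plan))
	for _, id := range plan {
		inPlan[id] = true
		if !inState[id] {
			attach = append(attach, id)
		}
	}
	for _, id := range state {
		if !inPlan[id] {
			detach = append(detach, id)
		}
	}
	return attach, detach
}

func main() {
	attach, detach := diffPCIDevices([]int64{1, 2}, []int64{2, 3})
	fmt.Println(attach, detach) // [3] [1]
}
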
if resp.Diagnostics.HasError() { tflog.Error(ctx, "Update resourceCompute: Error update compute parameters") @@ -494,25 +508,11 @@ func (r *resourceCompute) Delete(ctx context.Context, req resource.DeleteRequest ctx, cancel := context.WithTimeout(ctx, readTimeout) defer cancel() - var permanently bool - if state.Permanently.IsNull() { - permanently = true - } else { - permanently = state.Permanently.ValueBool() - } - - var detach bool - if state.DetachDisks.IsNull() { - detach = true - } else { - detach = state.DetachDisks.ValueBool() - } - // Delete existing Compute delReq := compute.DeleteRequest{ ComputeID: uint64(state.ComputeId.ValueInt64()), - Permanently: permanently, - DetachDisks: detach, + Permanently: state.Permanently.ValueBool(), + DetachDisks: state.DetachDisks.ValueBool(), } tflog.Info(ctx, "Delete resourceCompute: calling CloudAPI().Compute().Delete", map[string]any{ diff --git a/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute.go b/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute.go index f6dd0b4..4791bfa 100644 --- a/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute.go +++ b/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute.go @@ -160,6 +160,9 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute { "arch": schema.StringAttribute{ Computed: true, }, + "chipset": schema.StringAttribute{ + Computed: true, + }, "boot_order": schema.ListAttribute{ Computed: true, ElementType: types.StringType, @@ -223,6 +226,9 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute { "boot_partition": schema.Int64Attribute{ Computed: true, }, + "bus_number": schema.Int64Attribute{ + Computed: true, + }, "created_time": schema.Int64Attribute{ Computed: true, }, @@ -447,6 +453,9 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute { Computed: true, NestedObject: schema.NestedAttributeObject{ Attributes: map[string]schema.Attribute{ + "bus_number": schema.Int64Attribute{ + Computed: true, + }, "conn_id": schema.Int64Attribute{ Computed: true, }, @@ -468,12 +477,41 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute { "ip_address": schema.StringAttribute{ Computed: true, }, + "libvirt_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "txmode": schema.StringAttribute{ + Computed: true, + }, + "ioeventfd": schema.StringAttribute{ + Computed: true, + }, + "event_idx": schema.StringAttribute{ + Computed: true, + }, + "queues": schema.Int64Attribute{ + Computed: true, + }, + "rx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + "tx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + }, + }, "listen_ssh": schema.BoolAttribute{ Computed: true, }, "mac": schema.StringAttribute{ Computed: true, }, + "mtu": schema.Int64Attribute{ + Computed: true, + }, "name": schema.StringAttribute{ Computed: true, }, @@ -583,6 +621,10 @@ func MakeSchemaDataSourceCompute() map[string]schema.Attribute { }, }, }, + "pci_devices": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, "pinned": schema.BoolAttribute{ Computed: true, }, diff --git a/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_list.go b/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_list.go index 9813f26..a7d29e5 100644 --- a/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_list.go +++ 
b/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_list.go @@ -166,6 +166,9 @@ func MakeSchemaDataSourceComputeList() map[string]schema.Attribute { "arch": schema.StringAttribute{ Computed: true, }, + "chipset": schema.StringAttribute{ + Computed: true, + }, "boot_order": schema.ListAttribute{ Computed: true, ElementType: types.StringType, @@ -217,6 +220,9 @@ func MakeSchemaDataSourceComputeList() map[string]schema.Attribute { Computed: true, NestedObject: schema.NestedAttributeObject{ Attributes: map[string]schema.Attribute{ + "bus_number": schema.Int64Attribute{ + Computed: true, + }, "disk_id": schema.Int64Attribute{ Computed: true, }, @@ -248,6 +254,9 @@ func MakeSchemaDataSourceComputeList() map[string]schema.Attribute { Computed: true, NestedObject: schema.NestedAttributeObject{ Attributes: map[string]schema.Attribute{ + "bus_number": schema.Int64Attribute{ + Computed: true, + }, "conn_id": schema.Int64Attribute{ Computed: true, }, @@ -310,6 +319,32 @@ func MakeSchemaDataSourceComputeList() map[string]schema.Attribute { }, }, }, + "libvirt_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "txmode": schema.StringAttribute{ + Computed: true, + }, + "ioeventfd": schema.StringAttribute{ + Computed: true, + }, + "event_idx": schema.StringAttribute{ + Computed: true, + }, + "queues": schema.Int64Attribute{ + Computed: true, + }, + "rx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + "tx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + }, + }, "target": schema.StringAttribute{ Computed: true, }, diff --git a/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_list_deleted.go b/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_list_deleted.go index f5ecb1f..623a1f2 100644 --- a/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_list_deleted.go +++ b/internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_list_deleted.go @@ -158,6 +158,9 @@ func MakeSchemaDataSourceComputeListDeleted() map[string]schema.Attribute { "arch": schema.StringAttribute{ Computed: true, }, + "chipset": schema.StringAttribute{ + Computed: true, + }, "boot_order": schema.ListAttribute{ Computed: true, ElementType: types.StringType, @@ -209,6 +212,9 @@ func MakeSchemaDataSourceComputeListDeleted() map[string]schema.Attribute { Computed: true, NestedObject: schema.NestedAttributeObject{ Attributes: map[string]schema.Attribute{ + "bus_number": schema.Int64Attribute{ + Computed: true, + }, "disk_id": schema.Int64Attribute{ Computed: true, }, @@ -240,6 +246,9 @@ func MakeSchemaDataSourceComputeListDeleted() map[string]schema.Attribute { Computed: true, NestedObject: schema.NestedAttributeObject{ Attributes: map[string]schema.Attribute{ + "bus_number": schema.Int64Attribute{ + Computed: true, + }, "conn_id": schema.Int64Attribute{ Computed: true, }, @@ -261,6 +270,32 @@ func MakeSchemaDataSourceComputeListDeleted() map[string]schema.Attribute { "ip_address": schema.StringAttribute{ Computed: true, }, + "libvirt_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "txmode": schema.StringAttribute{ + Computed: true, + }, + "ioeventfd": schema.StringAttribute{ + Computed: true, + }, + "event_idx": schema.StringAttribute{ + Computed: true, + }, + "queues": schema.Int64Attribute{ + Computed: true, + }, + 
"rx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + "tx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + }, + }, "listen_ssh": schema.BoolAttribute{ Computed: true, }, diff --git a/internal/service/cloudapi/kvmvm/schemas/schema_resource_compute.go b/internal/service/cloudapi/kvmvm/schemas/schema_resource_compute.go index 38fdf3f..0fe7092 100644 --- a/internal/service/cloudapi/kvmvm/schemas/schema_resource_compute.go +++ b/internal/service/cloudapi/kvmvm/schemas/schema_resource_compute.go @@ -5,6 +5,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" @@ -30,7 +31,7 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute { "driver": schema.StringAttribute{ Required: true, Validators: []validator.String{ - stringvalidator.OneOf("SVA_KVM_X86", "KVM_X86", "KVM_PPC"), + stringvalidator.OneOf("SVA_KVM_X86", "KVM_X86"), }, Description: "Hardware architecture of this compute instance.", }, @@ -180,7 +181,7 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute { "net_type": schema.StringAttribute{ Required: true, Validators: []validator.String{ - stringvalidator.OneOf("EXTNET", "VINS", "VFNIC"), + stringvalidator.OneOf("EXTNET", "VINS", "VFNIC", "DPDK"), }, Description: "Type of the network for this connection, either EXTNET or VINS.", }, @@ -197,6 +198,19 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute { Computed: true, Description: "MAC address associated with this connection. MAC address is assigned automatically.", }, + "weight": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Weight the network if you need to sort network list, the smallest attach first. 
zero or null weight attach last", + }, + "mtu": schema.Int64Attribute{ + Optional: true, + Computed: true, + Validators: []validator.Int64{ + int64validator.Between(1, 9216), + }, + Description: "Maximum transmission unit, used only for DPDK type, must be 1-9216", + }, }, }, }, @@ -289,6 +303,8 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute { }, "enabled": schema.BoolAttribute{ Optional: true, + Computed: true, + Default: booldefault.StaticBool(true), Description: "If true - enable compute, else - disable", }, "pause": schema.BoolAttribute{ @@ -301,6 +317,8 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute { }, "restore": schema.BoolAttribute{ Optional: true, + Computed: true, + Default: booldefault.StaticBool(true), //Default: true, }, "auto_start": schema.BoolAttribute{ @@ -328,15 +346,21 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute { }, "started": schema.BoolAttribute{ Optional: true, + Computed: true, + Default: booldefault.StaticBool(true), //Default: true, Description: "Is compute started.", }, "detach_disks": schema.BoolAttribute{ Optional: true, + Computed: true, + Default: booldefault.StaticBool(true), //Default: true, }, "permanently": schema.BoolAttribute{ Optional: true, + Computed: true, + Default: booldefault.StaticBool(true), //Default: true, }, "is": schema.StringAttribute{ @@ -365,6 +389,19 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute { //Default: false, Description: "Use Huge Pages to allocate RAM of the virtual machine. The system must be pre-configured by allocating Huge Pages on the physical node.", }, + "pci_devices": schema.SetAttribute{ + Optional: true, + ElementType: types.Int64Type, + Description: "ID of the connected pci devices", + }, + "chipset": schema.StringAttribute{ + Optional: true, + Computed: true, + Validators: []validator.String{ + stringvalidator.OneOf("i440fx", "Q35"), + }, + Description: "Type of the emulated system, Q35 or i440fx", + }, // computed attributes "compute_id": schema.Int64Attribute{ @@ -528,6 +565,9 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute { Computed: true, NestedObject: schema.NestedAttributeObject{ Attributes: map[string]schema.Attribute{ + "bus_number": schema.Int64Attribute{ + Computed: true, + }, "conn_id": schema.Int64Attribute{ Computed: true, }, @@ -555,6 +595,9 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute { "mac": schema.StringAttribute{ Computed: true, }, + "mtu": schema.Int64Attribute{ + Computed: true, + }, "name": schema.StringAttribute{ Computed: true, }, @@ -590,6 +633,32 @@ func MakeSchemaResourceCompute() map[string]schema.Attribute { }, }, }, + "libvirt_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "txmode": schema.StringAttribute{ + Computed: true, + }, + "ioeventfd": schema.StringAttribute{ + Computed: true, + }, + "event_idx": schema.StringAttribute{ + Computed: true, + }, + "queues": schema.Int64Attribute{ + Computed: true, + }, + "rx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + "tx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + }, + }, "target": schema.StringAttribute{ Computed: true, }, @@ -749,6 +818,9 @@ func MakeSchemaResourceComputeDisks() map[string]schema.Attribute { "boot_partition": schema.Int64Attribute{ Computed: true, }, + "bus_number": schema.Int64Attribute{ + Computed: true, + }, "created_time": schema.Int64Attribute{ Computed: true, }, diff --git 
a/internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute.go b/internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute.go index d132368..55dab3a 100644 --- a/internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute.go +++ b/internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute.go @@ -26,3 +26,21 @@ func ComputeCheckPresence(ctx context.Context, state *models.RecordComputeModel, tflog.Info(ctx, "Getting compute info, successfully") return computeRecord, nil } + +func ComputePCIDevicesListCheckPresence(ctx context.Context, state *models.RecordComputeModel, c *decort.DecortClient) (*compute.ListPCIDevices, error) { + tflog.Info(ctx, "Get PCI devices info") + + req := compute.ListPCIDeviceRequest{ + ComputeID: uint64(state.ComputeId.ValueInt64()), + } + + tflog.Info(ctx, "Check req", map[string]any{"req": req}) + + pciDevicesList, err := c.CloudAPI().Compute().ListPCIDevice(ctx, req) + if err != nil { + return nil, err + } + + tflog.Info(ctx, "Getting PCI devices info, successfully") + return pciDevicesList, nil +} diff --git a/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_create.go b/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_create.go index 92375eb..c21b577 100644 --- a/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_create.go +++ b/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_create.go @@ -3,6 +3,7 @@ package utilities import ( "context" "fmt" + "sort" "strconv" "strings" @@ -11,7 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-log/tflog" decort "repository.basistech.ru/BASIS/decort-golang-sdk" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmppc" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/kvmx86" "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm/models" "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status" @@ -81,42 +81,50 @@ func CreateResourceCompute(ctx context.Context, plan *models.ResourceComputeMode diags := diag.Diagnostics{} createReqX86 := kvmx86.CreateRequest{Start: false} - createReqPPC := kvmppc.CreateRequest{Start: false} if !plan.Description.IsUnknown() { - createReqPPC.Description = plan.Description.ValueString() createReqX86.Description = plan.Description.ValueString() } if !plan.SepId.IsUnknown() { - createReqPPC.SEPID = uint64(plan.SepId.ValueInt64()) createReqX86.SepID = uint64(plan.SepId.ValueInt64()) } if !plan.Pool.IsUnknown() { - createReqPPC.Pool = plan.Pool.ValueString() createReqX86.Pool = plan.Pool.ValueString() } if !plan.IpaType.IsNull() { - createReqPPC.IPAType = plan.IpaType.ValueString() createReqX86.IPAType = plan.IpaType.ValueString() } if !plan.BootDiskSize.IsNull() { - createReqPPC.BootDisk = uint64(plan.BootDiskSize.ValueInt64()) createReqX86.BootDisk = uint64(plan.BootDiskSize.ValueInt64()) } if !plan.IS.IsNull() { - createReqPPC.IS = plan.IS.ValueString() createReqX86.IS = plan.IS.ValueString() } + if !plan.Chipset.IsUnknown() { + createReqX86.Chipset = plan.Chipset.ValueString() + } + createReqX86.Interfaces = make([]kvmx86.Interface, 0) if !plan.Network.IsNull() { networkList := plan.Network.Elements() + sort.Slice(networkList, func(i, j int) bool { + weightI := networkList[i].(types.Object).Attributes()["weight"].(types.Int64).ValueInt64() + weightJ := networkList[j].(types.Object).Attributes()["weight"].(types.Int64).ValueInt64() + 
if weightI == 0 { + return false + } + if weightJ == 0 { + return true + } + return weightI < weightJ + }) interfaces := make([]kvmx86.Interface, 0) for _, elem := range networkList { objVal := elem.(types.Object) @@ -125,108 +133,72 @@ func CreateResourceCompute(ctx context.Context, plan *models.ResourceComputeMode NetType: strings.ToUpper(elemMap["net_type"].(types.String).ValueString()), NetID: uint64(elemMap["net_id"].(types.Int64).ValueInt64()), } - ipaddr, ipSet := elemMap["ip_address"] - if ipSet { - reqInterface.IPAddr = ipaddr.(types.String).ValueString() - } - - interfaces = append(interfaces, reqInterface) - } - - createReqX86.Interfaces = interfaces - } - - createReqPPC.Interfaces = make([]kvmppc.Interface, 0) - - if !plan.Network.IsNull() { - networkList := plan.Network.Elements() - interfaces := make([]kvmppc.Interface, 0) - for _, elem := range networkList { - objVal := elem.(types.Object) - elemMap := objVal.Attributes() - reqInterface := kvmppc.Interface{ - NetType: strings.ToUpper(elemMap["net_type"].(types.String).ValueString()), - NetID: uint64(elemMap["net_id"].(types.Int64).ValueInt64()), + if reqInterface.NetType == "DPDK" { + reqInterface.MTU = uint64(elemMap["mtu"].(types.Int64).ValueInt64()) } ipaddr, ipSet := elemMap["ip_address"] if ipSet { reqInterface.IPAddr = ipaddr.(types.String).ValueString() } - interfaces = append(interfaces, reqInterface) } - createReqPPC.Interfaces = interfaces + createReqX86.Interfaces = interfaces } if !plan.CloudInit.IsNull() { userData := plan.CloudInit.ValueString() if userData != "" && userData != "applied" { - createReqPPC.Userdata = strings.TrimSpace(userData) createReqX86.Userdata = strings.TrimSpace(userData) } } driver := strings.ToUpper(plan.Driver.ValueString()) - if driver == "KVM_PPC" { - createReqPPC.RGID = uint64(plan.RGID.ValueInt64()) - createReqPPC.Name = plan.Name.ValueString() - createReqPPC.CPU = uint64(plan.CPU.ValueInt64()) - createReqPPC.RAM = uint64(plan.RAM.ValueInt64()) - createReqPPC.ImageID = uint64(plan.ImageID.ValueInt64()) - - tflog.Info(ctx, fmt.Sprintf("CreateResourceCompute: creating Compute of type KVM VM PowerPC")) - id, err := c.CloudAPI().KVMPPC().Create(ctx, createReqPPC) - if err != nil { - diags.AddError("CreateResourceCompute: unable to create KVM VP PowerPC", err.Error()) - return 0, diags - } - return id, diags - } else { - createReqX86.RGID = uint64(plan.RGID.ValueInt64()) - createReqX86.Name = plan.Name.ValueString() - createReqX86.CPU = uint64(plan.CPU.ValueInt64()) - createReqX86.RAM = uint64(plan.RAM.ValueInt64()) - createReqX86.Driver = driver - - if !plan.ImageID.IsNull() { - createReqX86.ImageID = uint64(plan.ImageID.ValueInt64()) - } - if !plan.WithoutBootDisk.IsNull() { - createReqX86.WithoutBootDisk = plan.WithoutBootDisk.ValueBool() - } + createReqX86.RGID = uint64(plan.RGID.ValueInt64()) + createReqX86.Name = plan.Name.ValueString() + createReqX86.CPU = uint64(plan.CPU.ValueInt64()) + createReqX86.RAM = uint64(plan.RAM.ValueInt64()) + createReqX86.Driver = driver - if !plan.CustomFields.IsUnknown() { //CustomFields optional && computed - val := plan.CustomFields.ValueString() - val = strings.ReplaceAll(val, "\\", "") - val = strings.ReplaceAll(val, "\n", "") - val = strings.ReplaceAll(val, "\t", "") - val = strings.TrimSpace(val) + if !plan.ImageID.IsNull() { + createReqX86.ImageID = uint64(plan.ImageID.ValueInt64()) + } - createReqX86.CustomFields = val - } + if !plan.WithoutBootDisk.IsNull() { + createReqX86.WithoutBootDisk = plan.WithoutBootDisk.ValueBool() + } - if 
!plan.NumaAffinity.IsNull() { - createReqX86.NumaAffinity = strings.ToLower(plan.NumaAffinity.ValueString()) - } + if !plan.CustomFields.IsUnknown() { //CustomFields optional && computed + val := plan.CustomFields.ValueString() + val = strings.ReplaceAll(val, "\\", "") + val = strings.ReplaceAll(val, "\n", "") + val = strings.ReplaceAll(val, "\t", "") + val = strings.TrimSpace(val) - if !plan.CPUPin.IsNull() && plan.CPUPin.ValueBool() { - createReqX86.CPUPin = true - } + createReqX86.CustomFields = val + } - if !plan.HPBacked.IsNull() && plan.HPBacked.ValueBool() { - createReqX86.HPBacked = true - } + if !plan.NumaAffinity.IsNull() { + createReqX86.NumaAffinity = strings.ToLower(plan.NumaAffinity.ValueString()) + } - tflog.Info(ctx, fmt.Sprintf("CreateResourceCompute: creating Compute of type KVM VM x86")) - id, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86) - if err != nil { - diags.AddError("CreateResourceCompute: unable to create KVM VP x86", err.Error()) - return 0, diags - } - return id, diags + if !plan.CPUPin.IsNull() && plan.CPUPin.ValueBool() { + createReqX86.CPUPin = true + } + + if !plan.HPBacked.IsNull() && plan.HPBacked.ValueBool() { + createReqX86.HPBacked = true + } + + tflog.Info(ctx, fmt.Sprintf("CreateResourceCompute: creating Compute of type KVM VM x86")) + id, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86) + if err != nil { + diags.AddError("CreateResourceCompute: unable to create KVM VM x86", err.Error()) + return 0, diags } + return id, diags + } func CleanupResourceCompute(ctx context.Context, computeId uint64, c *decort.DecortClient) { @@ -281,16 +253,11 @@ func ComputeResourceEnableDisable(ctx context.Context, plan *models.ResourceComp return diags } - var enable bool - if plan.Enabled.IsNull() { - enable = true // default value - } else { - enable = plan.Enabled.ValueBool() - } + enable := plan.Enabled.ValueBool() tflog.Info(ctx, "ComputeEnableDisable: compute to be enabled/disabled", map[string]any{ "compute_id": computeId, - "enable": enable}) + "enable": enable}) if enable { _, err = c.CloudAPI().Compute().Enable(ctx, compute.EnableRequest{ComputeID: computeId}) @@ -326,12 +293,7 @@ func ComputeResourceStartStop(ctx context.Context, plan *models.ResourceComputeM return diags } - var started bool - if plan.Started.IsNull() { - started = true // default value - } else { - started = plan.Started.ValueBool() - } + started := plan.Started.ValueBool() tflog.Info(ctx, "ComputeStartStop: compute to be started/stopped", map[string]any{ "compute_id": computeId, @@ -642,6 +604,41 @@ func ComputeResourceSnapshot(ctx context.Context, plan *models.ResourceComputeMo return nil } +func ComputeResourcePCIDevice(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "ComputeResourcePCIDevice: Start adding PCI device(s) to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()}) + diags := diag.Diagnostics{} + + computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("ComputeResourcePCIDevice: cannot parse compute ID from plan", err.Error()) + return diags + } + + pciDeviceList := plan.PCIDevices.Elements() + for _, pciDevice := range pciDeviceList { + pciDeviceId := pciDevice.(types.Int64).ValueInt64() + req := compute.AttachPCIDeviceRequest{ + ComputeID: computeId, + DeviceID: uint64(pciDeviceId), + } + tflog.Info(ctx, "ComputeResourcePCIDevice: Start attaching PCI device to compute with ID", map[string]any{"compute_id": 
plan.ID.ValueString(), "pci_device_id": pciDeviceId}) + res, err := c.CloudAPI().Compute().AttachPCIDevice(ctx, req) + tflog.Info(ctx, "ComputeResourcePCIDevice: response from CloudAPI().Compute().AttachPCIDevice", map[string]any{"compute_id": computeId, "response": res}) + if err != nil { + diags.AddWarning( + "ComputeResourcePCIDevice: Unable to add PCI device for Compute", + err.Error(), + ) + } + } + if diags.WarningsCount() != 0 { + return diags + } + + tflog.Info(ctx, "ComputeResourcePCIDevice: PCI device(s) successfully added", map[string]any{"compute_id": computeId}) + return nil +} + func ComputeResourceCDInsert(ctx context.Context, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics { tflog.Info(ctx, "ComputeResourceCDInsert: Start added cd to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()}) diags := diag.Diagnostics{} @@ -745,7 +742,7 @@ func ComputeReadStatus(ctx context.Context, state *models.ResourceComputeModel, "compute_id": computeId, "status": recordCompute.Status}) // restore and enable compute in case it is required - if state.Restore.IsNull() || state.Restore.ValueBool() { // default true or user set-up true + if state.Restore.ValueBool() { // default true or user set-up true diags.Append(RestoreCompute(ctx, computeId, c)...) if diags.HasError() { tflog.Error(ctx, "ComputeReadStatus: cannot restore compute") @@ -759,14 +756,14 @@ func ComputeReadStatus(ctx context.Context, state *models.ResourceComputeModel, return diags } } - if state.Enabled.IsNull() || state.Enabled.ValueBool() { // default true or user set-up true + if state.Enabled.ValueBool() { // default true or user set-up true diags.Append(ComputeResourceEnableDisable(ctx, state, c)...) if diags.HasError() { tflog.Error(ctx, "ComputeReadStatus: Unable to enable compute") return diags } tflog.Info(ctx, "ComputeReadStatus: compute enabled successfully", map[string]any{"compute_id": computeId}) - if state.Started.IsNull() || state.Started.ValueBool() { + if state.Started.ValueBool() { diags.Append(ComputeResourceStartStop(ctx, state, c)...) 
if diags.HasError() { tflog.Error(ctx, "ComputeReadStatus: Unable to start compute") diff --git a/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_difference.go b/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_difference.go index 6dd3595..48d3c4e 100644 --- a/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_difference.go +++ b/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_difference.go @@ -1,6 +1,9 @@ package utilities -import "github.com/hashicorp/terraform-plugin-framework/types" +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) // differenceSimpleType returns lists added and removed values func differenceSimpleType(oldSet, newSet types.Set) (added, removed []any) { @@ -30,36 +33,52 @@ func differenceSimpleType(oldSet, newSet types.Set) (added, removed []any) { return } -func differenceNetwork(oldSet, newSet types.Set) (added, removed []any) { +func differenceNetwork(oldSet, newSet types.Set) (added, changeIp, removed []map[string]attr.Value) { oldSlice := oldSet.Elements() newSlice := newSet.Elements() - foundIdx := make([]bool, len(oldSlice)) - for _, newElem := range newSlice { - newObj := newElem.(types.Object) - newElemMap := newObj.Attributes() + added = make([]map[string]attr.Value, 0) + changeIp = make([]map[string]attr.Value, 0) + removed = make([]map[string]attr.Value, 0) + + for _, oldNetwork := range oldSlice { + oldMap := oldNetwork.(types.Object).Attributes() found := false - for i, oldElem := range oldSlice { - oldObj := oldElem.(types.Object) - oldElemMap := oldObj.Attributes() - if oldElemMap["net_type"] == newElemMap["net_type"] && oldElemMap["net_id"] == newElemMap["net_id"] { - ipaddr, ipSet := newElemMap["ip_address"] - if !ipSet || ipaddr.(types.String).ValueString() == "" || ipaddr == oldElemMap["ip_address"] { + for _, newNetwork := range newSlice { + newMap := newNetwork.(types.Object).Attributes() + if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] && (newMap["mtu"] == oldMap["mtu"] || newMap["mtu"].(types.Int64).ValueInt64() == 0) { + if (newMap["net_type"].(types.String).ValueString() == "EXTNET" || newMap["net_type"].(types.String).ValueString() == "VINS") && (newMap["ip_address"] != oldMap["ip_address"] && newMap["ip_address"].(types.String).ValueString() != "") { + changeIp = append(changeIp, newMap) + found = true + break + } else if newMap["ip_address"] == oldMap["ip_address"] || newMap["ip_address"].(types.String).ValueString() != "" { found = true - foundIdx[i] = true break } } } - if !found { - added = append(added, newElem) + if found { + continue } + removed = append(removed, oldMap) } - for i, found := range foundIdx { - if !found { - removed = append(removed, oldSlice[i]) + for _, newNetwork := range newSlice { + newMap := newNetwork.(types.Object).Attributes() + found := false + for _, oldNetwork := range oldSlice { + oldMap := oldNetwork.(types.Object).Attributes() + if newMap["net_type"] == oldMap["net_type"] && newMap["net_id"] == oldMap["net_id"] && newMap["weight"] == oldMap["weight"] && (newMap["mtu"] == oldMap["mtu"] || newMap["mtu"].(types.Int64).ValueInt64() == 0) { + if newMap["ip_address"] == oldMap["ip_address"] || newMap["ip_address"].(types.String).ValueString() != "" || ((newMap["net_type"].(types.String).ValueString() == "EXTNET" || newMap["net_type"].(types.String).ValueString() == "VINS") && 
newMap["ip_address"] != oldMap["ip_address"]) { + found = true + break + } + } + } + if found { + continue } + added = append(added, newMap) } return diff --git a/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_update.go b/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_update.go index ee2f79e..33c46fe 100644 --- a/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_update.go +++ b/internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_update.go @@ -3,6 +3,7 @@ package utilities import ( "context" "fmt" + "sort" "strconv" "strings" @@ -169,7 +170,7 @@ func ComputeResourceExtraDiskUpdate(ctx context.Context, state *models.ResourceC } } - if len(detachSet) > 0 && (plan.Started.ValueBool() || plan.Started.IsNull()) { + if len(detachSet) > 0 && plan.Started.ValueBool() { diags = ComputeResourceStartStop(ctx, plan, c) } @@ -182,6 +183,55 @@ func ComputeResourceExtraDiskUpdate(ctx context.Context, state *models.ResourceC return nil } +func ComputeResourcePCIDeviceUpdate(ctx context.Context, state *models.ResourceComputeModel, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "ComputeResourcePCIDeviceUpdate: start update PCI device(s) list to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()}) + diags := diag.Diagnostics{} + + computeId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("ComputeResourcePCIDeviceUpdate: cannot parsed ID compute from state", err.Error()) + return diags + } + + attachSet, detachSet := differenceSimpleType(state.ExtraDisks, plan.ExtraDisks) + + for _, pciDevice := range detachSet { + pciDeviceId := pciDevice.(types.Int64).ValueInt64() + tflog.Info(ctx, fmt.Sprintf("ComputeResourcePCIDeviceUpdate: Start detach PCI device with ID - %d from compute with ID - %d", pciDeviceId, computeId)) + req := compute.DetachPCIDeviceRequest{ + ComputeID: computeId, + DeviceID: uint64(pciDeviceId), + } + res, err := c.CloudAPI().Compute().DetachPCIDevice(ctx, req) + tflog.Info(ctx, "ComputeResourcePCIDeviceUpdate: response from CloudAPI().Compute().DetachPCIDevice", map[string]any{"compute_id": plan.ID.ValueString(), "response": res}) + if err != nil { + diags.AddError(fmt.Sprintf("ComputeResourcePCIDeviceUpdate: Cannot detach PCI device with ID - %d", pciDeviceId), err.Error()) + } + } + + for _, pciDevice := range attachSet { + pciDeviceId := pciDevice.(types.Int64).ValueInt64() + tflog.Info(ctx, fmt.Sprintf("ComputeResourcePCIDeviceUpdate: Start attach PCI device with ID - %d to compute with ID - %d", pciDeviceId, computeId)) + req := compute.AttachPCIDeviceRequest{ + ComputeID: computeId, + DeviceID: uint64(pciDeviceId), + } + res, err := c.CloudAPI().Compute().AttachPCIDevice(ctx, req) + tflog.Info(ctx, "ComputeResourcePCIDeviceUpdate: response from CloudAPI().Compute().AttachPCIDevice", map[string]any{"compute_id": plan.ID.ValueString(), "response": res}) + if err != nil { + diags.AddError(fmt.Sprintf("ComputeResourcePCIDeviceUpdate: Cannot attach PCI device with ID - %d", pciDeviceId), err.Error()) + } + } + + if diags.HasError() { + tflog.Error(ctx, "ComputeResourcePCIDeviceUpdate: Errors occurred while managing PCI device(s)") + return diags + } + + tflog.Info(ctx, "ComputeResourcePCIDeviceUpdate: PCI device(s) is successfully update", map[string]any{"compute_id": computeId}) + return nil +} + func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceComputeModel, plan 
*models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics { tflog.Info(ctx, "ComputeResourceNetworkUpdate: start update network rules to compute with ID", map[string]any{"compute_id": plan.ID.ValueString()}) diags := diag.Diagnostics{} @@ -192,22 +242,43 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom return diags } - attachSet, detachSet := differenceNetwork(state.Network, plan.Network) + attachMap, changeIpMap, detachMap := differenceNetwork(state.Network, plan.Network) - for _, network := range detachSet { - objVal := network.(types.Object) - elemMap := objVal.Attributes() + tflog.Info(ctx, "ComputeResourceNetworkUpdate: start detaching network(s) from compute with ID", map[string]any{"compute_id": plan.ID.ValueString()}) + for _, network := range detachMap { req := compute.NetDetachRequest{ ComputeID: computeId, - IPAddr: elemMap["ip_address"].(types.String).ValueString(), - MAC: elemMap["mac"].(types.String).ValueString(), + IPAddr: network["ip_address"].(types.String).ValueString(), + MAC: network["mac"].(types.String).ValueString(), } tflog.Info(ctx, "ComputeResourceNetworkUpdate: before calling CloudAPI().Compute().NetDetach", map[string]any{"compute_id": computeId, "req": req}) res, err := c.CloudAPI().Compute().NetDetach(ctx, req) tflog.Info(ctx, "ComputeResourceNetworkUpdate: response from CloudAPI().Compute().NetDetach", map[string]any{"compute_id": plan.ID.ValueString(), "response": res}) if err != nil { diags.AddError(fmt.Sprintf("ComputeResourceNetworkUpdate: failed to detach net ID %d from Compute ID %d", - elemMap["net_id"].(types.Int64).ValueInt64(), computeId), err.Error()) + network["net_id"].(types.Int64).ValueInt64(), computeId), err.Error()) + } + } + + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "ComputeResourceNetworkUpdate: start changing IP of network(s) on compute with ID", map[string]any{"compute_id": plan.ID.ValueString()}) + + for _, network := range changeIpMap { + req := compute.ChangeIPRequest{ + ComputeID: computeId, + NetType: network["net_type"].(types.String).ValueString(), + NetID: uint64(network["net_id"].(types.Int64).ValueInt64()), + IPAddr: network["ip_address"].(types.String).ValueString(), + } + tflog.Info(ctx, "ComputeResourceNetworkUpdate: before calling CloudAPI().Compute().ChangeIP", map[string]any{"compute_id": computeId, "req": req}) + res, err := c.CloudAPI().Compute().ChangeIP(ctx, req) + tflog.Info(ctx, "ComputeResourceNetworkUpdate: response from CloudAPI().Compute().ChangeIP", map[string]any{"compute_id": plan.ID.ValueString(), "response": res}) + if err != nil { + diags.AddError(fmt.Sprintf("ComputeResourceNetworkUpdate: failed to change IP for net ID %d on Compute ID %d", + network["net_id"].(types.Int64).ValueInt64(), computeId), err.Error()) } } @@ -217,7 +288,7 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom needStart := false // need stop to attach first network - if (len(detachSet) == len(state.Network.Elements()) || len(state.Network.Elements()) < 1) && len(attachSet) > 0 { + if (len(detachMap) == len(state.Network.Elements()) || len(state.Network.Elements()) < 1) && len(attachMap) > 0 || hasDPDKnetwork(attachMap) { tflog.Info(ctx, "ComputeResourceNetworkUpdate: stop compute", map[string]any{"compute_id": computeId}) _, err = c.CloudAPI().Compute().Stop(ctx, compute.StopRequest{ComputeID: computeId}) if err != nil { @@ -226,18 +297,33 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom ) return 
diags } - needStart = true + if plan.Started.ValueBool() { + needStart = true + } } - for _, network := range attachSet { - objVal := network.(types.Object) - elemMap := objVal.Attributes() + sort.Slice(attachMap, func(i, j int) bool { + weightI := attachMap[i]["weight"].(types.Int64).ValueInt64() + weightJ := attachMap[j]["weight"].(types.Int64).ValueInt64() + if weightI == 0 { + return false + } + if weightJ == 0 { + return true + } + return weightI < weightJ + }) + + for _, network := range attachMap { req := compute.NetAttachRequest{ ComputeID: computeId, - NetType: strings.ToUpper(elemMap["net_type"].(types.String).ValueString()), - NetID: uint64(elemMap["net_id"].(types.Int64).ValueInt64()), + NetType: strings.ToUpper(network["net_type"].(types.String).ValueString()), + NetID: uint64(network["net_id"].(types.Int64).ValueInt64()), + } + if req.NetType == "DPDK" { + req.MTU = uint64(network["mtu"].(types.Int64).ValueInt64()) } - ipaddr, ipSet := elemMap["ip_address"] + ipaddr, ipSet := network["ip_address"] if ipSet { req.IPAddr = ipaddr.(types.String).ValueString() } @@ -246,11 +332,11 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom tflog.Info(ctx, "ComputeResourceNetworkUpdate: response from CloudAPI().Compute().NetAttach", map[string]any{"compute_id": plan.ID.ValueString(), "response": res}) if err != nil { diags.AddError(fmt.Sprintf("ComputeResourceNetworkUpdate: failed to attach net ID %d from Compute ID %d", - elemMap["net_id"].(types.Int64).ValueInt64(), computeId), err.Error()) + network["net_id"].(types.Int64).ValueInt64(), computeId), err.Error()) } } - if needStart && (plan.Started.ValueBool() || plan.Started.IsNull()) { + if needStart { diags = ComputeResourceStartStop(ctx, plan, c) } @@ -263,6 +349,15 @@ func ComputeResourceNetworkUpdate(ctx context.Context, state *models.ResourceCom return nil } +func hasDPDKnetwork(networkAttachMap []map[string]attr.Value) bool { + for _, elem := range networkAttachMap { + if elem["net_type"].(types.String).ValueString() == "DPDK" { + return true + } + } + return false +} + func ComputeResourceComputeUpdate(ctx context.Context, state *models.ResourceComputeModel, plan *models.ResourceComputeModel, c *decort.DecortClient) diag.Diagnostics { tflog.Info(ctx, "ComputeResourceComputeUpdate: start update compute parameters", map[string]any{"compute_id": plan.ID.ValueString()}) diags := diag.Diagnostics{} @@ -301,6 +396,10 @@ func ComputeResourceComputeUpdate(ctx context.Context, state *models.ResourceCom req.HPBacked = state.HPBacked.ValueBool() } + if !plan.Chipset.IsUnknown() && !plan.Chipset.Equal(state.Chipset) { + req.Chipset = plan.Chipset.ValueString() + } + // Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status. 
// If STARTED, we need to stop it before update @@ -328,7 +427,7 @@ func ComputeResourceComputeUpdate(ctx context.Context, state *models.ResourceCom return diags } - if isStopRequred && (plan.Started.ValueBool() || plan.Started.IsNull()) { + if isStopRequred && plan.Started.ValueBool() { diags = ComputeResourceStartStop(ctx, plan, c) } @@ -889,7 +988,7 @@ func ComputeResourceRollback(ctx context.Context, plan *models.ResourceComputeMo return diags } - if plan.Started.ValueBool() || plan.Started.IsNull() { + if plan.Started.ValueBool() { diags = ComputeResourceStartStop(ctx, plan, c) if diags.HasError() { tflog.Error(ctx, "ComputeResourceRollback: cannot start compute") @@ -1092,7 +1191,7 @@ func ComputeResourceRedeploy(ctx context.Context, plan *models.ResourceComputeMo return diags } - if plan.Started.ValueBool() || plan.Started.IsNull() { + if plan.Started.ValueBool() { diags = ComputeResourceStartStop(ctx, plan, c) if diags.HasError() { tflog.Error(ctx, "ComputeResourceRedeploy: cannot start compute") diff --git a/internal/service/cloudapi/lb/flattens/flatten_resource_lb.go b/internal/service/cloudapi/lb/flattens/flatten_resource_lb.go index 0efc3dd..c7301d0 100644 --- a/internal/service/cloudapi/lb/flattens/flatten_resource_lb.go +++ b/internal/service/cloudapi/lb/flattens/flatten_resource_lb.go @@ -39,6 +39,7 @@ func LBResource(ctx context.Context, plan *models.ResourceLBModel, c *decort.Dec Timeouts: plan.Timeouts, SysctlParams: plan.SysctlParams, Permanently: plan.Permanently, + Restore: plan.Restore, Restart: plan.Restart, Enable: plan.Enable, ConfigReset: plan.ConfigReset, diff --git a/internal/service/cloudapi/lb/resource_lb.go b/internal/service/cloudapi/lb/resource_lb.go index 8e72c0f..76e5d4a 100644 --- a/internal/service/cloudapi/lb/resource_lb.go +++ b/internal/service/cloudapi/lb/resource_lb.go @@ -83,7 +83,7 @@ func (r *resourceLB) Create(ctx context.Context, req resource.CreateRequest, res // framework would mark resource as tainted and delete it, which would be unwanted behaviour. // enable or disable lb, warnings added to resp.Diagnostics in case of failure. 
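Each of the `plan.Started.ValueBool() || plan.Started.IsNull()` checks above collapses to a plain `ValueBool()` because the schema now supplies the default: in terraform-plugin-framework an attribute may only carry a `Default` when it is also marked `Computed`, which is why every affected flag gains `Computed: true` alongside `booldefault.StaticBool(true)`. A minimal, hedged sketch of the pattern (helper name illustrative):

```go
import (
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
)

// defaultTrueAttr shows the Optional+Computed+Default combination used for
// enabled, started, detach_disks, permanently, enable and restore: when the
// practitioner omits the attribute, Terraform plans the default value, so
// later reads like state.Permanently.ValueBool() need no IsNull branch.
func defaultTrueAttr(description string) schema.BoolAttribute {
	return schema.BoolAttribute{
		Optional:    true,
		Computed:    true, // the framework requires Computed when Default is set
		Default:     booldefault.StaticBool(true),
		Description: description,
	}
}
```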
- if !plan.Enable.IsNull() { // Enable is optional + if !plan.Enable.ValueBool() { // Enable is optional diags := utilities.LBEnableDisable(ctx, &plan, r.client) for _, d := range diags { if d.Severity() == diag.SeverityError { @@ -307,17 +307,10 @@ func (r *resourceLB) Delete(ctx context.Context, req resource.DeleteRequest, res ctx, cancel := context.WithTimeout(ctx, readTimeout) defer cancel() - var permanently bool - if state.Permanently.IsNull() { - permanently = true - } else { - permanently = state.Permanently.ValueBool() - } - // Delete existing lb delReq := lb.DeleteRequest{ LBID: uint64(state.LBID.ValueInt64()), - Permanently: permanently, + Permanently: state.Permanently.ValueBool(), } tflog.Info(ctx, "Delete resourceLB: calling CloudAPI().LB().Delete", map[string]any{ diff --git a/internal/service/cloudapi/lb/schemas/schema_resource_lb.go b/internal/service/cloudapi/lb/schemas/schema_resource_lb.go index 0dfc6a9..64f5bfb 100644 --- a/internal/service/cloudapi/lb/schemas/schema_resource_lb.go +++ b/internal/service/cloudapi/lb/schemas/schema_resource_lb.go @@ -2,6 +2,7 @@ package schemas import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" ) @@ -36,6 +37,8 @@ func MakeSchemaResourceLB() map[string]schema.Attribute { }, "enable": schema.BoolAttribute{ Optional: true, + Computed: true, + Default: booldefault.StaticBool(true), }, "restart": schema.BoolAttribute{ Optional: true, @@ -45,9 +48,13 @@ func MakeSchemaResourceLB() map[string]schema.Attribute { }, "permanently": schema.BoolAttribute{ Optional: true, + Computed: true, + Default: booldefault.StaticBool(true), }, "restore": schema.BoolAttribute{ Optional: true, + Computed: true, + Default: booldefault.StaticBool(true), }, "safe": schema.BoolAttribute{ Optional: true, diff --git a/internal/service/cloudapi/lb/utilities/utility_resource_lb.go b/internal/service/cloudapi/lb/utilities/utility_resource_lb.go index c4d22a7..ab88f17 100644 --- a/internal/service/cloudapi/lb/utilities/utility_resource_lb.go +++ b/internal/service/cloudapi/lb/utilities/utility_resource_lb.go @@ -91,7 +91,7 @@ func LBEnableDisable(ctx context.Context, plan *models.ResourceLBModel, c *decor diags.AddError("Cannot parsed ID lb from state", err.Error()) return diags } - if plan.Enable.IsNull() || plan.Enable.ValueBool() { + if plan.Enable.ValueBool() { tflog.Info(ctx, "Enable lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) _, err := c.CloudAPI().LB().Enable(ctx, lb.DisableEnableRequest{LBID: lbId}) if err != nil { @@ -131,7 +131,7 @@ func LBReadStatus(ctx context.Context, plan *models.ResourceLBModel, c *decort.D diags.AddError("Error:", fmt.Sprintf("The lb is in status: %s, please, contact support for more information", lbItem.Status)) return diags case status.Deleted: - if plan.Restore.ValueBool() || plan.Restore.IsNull() { + if plan.Restore.ValueBool() { diags = LBRestore(ctx, plan, c) if diags.HasError() { tflog.Error(ctx, "Error restore lb", map[string]any{"lb_id": plan.ID.ValueString()}) @@ -141,7 +141,7 @@ func LBReadStatus(ctx context.Context, plan *models.ResourceLBModel, c *decort.D diags.AddError("LB in status Deleted:", "please clean state, or restore lb") return diags } - if plan.Enable.ValueBool() || plan.Enable.IsNull() { + if plan.Enable.ValueBool() { diags = 
LBEnableDisable(ctx, plan, c) if diags.HasError() { tflog.Error(ctx, "Error enable/disable lb", map[string]any{"lb_id": plan.ID.ValueString()}) @@ -196,7 +196,7 @@ func LBStartStop(ctx context.Context, plan *models.ResourceLBModel, c *decort.De diags.AddError("Cannot parsed ID lb from state", err.Error()) return diags } - if plan.Enable.IsNull() || plan.Enable.ValueBool() { + if plan.Enable.ValueBool() { if plan.Start.ValueBool() || plan.Start.IsNull() { tflog.Info(ctx, "Start lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) _, err := c.CloudAPI().LB().Start(ctx, lb.StartRequest{LBID: lbId}) @@ -206,7 +206,7 @@ func LBStartStop(ctx context.Context, plan *models.ResourceLBModel, c *decort.De } } } - if plan.Enable.ValueBool() || plan.Enable.IsNull() { + if plan.Enable.ValueBool() { tflog.Info(ctx, "Stop lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) if !plan.Start.ValueBool() && !plan.Start.IsNull() { _, err := c.CloudAPI().LB().Stop(ctx, lb.StopRequest{LBID: lbId}) diff --git a/internal/service/cloudapi/rg/flattens/flatten_data_source_rg.go b/internal/service/cloudapi/rg/flattens/flatten_data_source_rg.go index 4746b15..1ba149c 100644 --- a/internal/service/cloudapi/rg/flattens/flatten_data_source_rg.go +++ b/internal/service/cloudapi/rg/flattens/flatten_data_source_rg.go @@ -33,8 +33,8 @@ func RGDataSource(ctx context.Context, state *models.DataSourceRGModel, c *decor id := uuid.New() *state = models.DataSourceRGModel{ - RGID: state.RGID, - Reason: state.Reason, + RGID: state.RGID, + Timeouts: state.Timeouts, Id: types.StringValue(id.String()), diff --git a/internal/service/cloudapi/rg/flattens/flatten_data_source_rg_usage.go b/internal/service/cloudapi/rg/flattens/flatten_data_source_rg_usage.go index b6bf623..6e92d1b 100644 --- a/internal/service/cloudapi/rg/flattens/flatten_data_source_rg_usage.go +++ b/internal/service/cloudapi/rg/flattens/flatten_data_source_rg_usage.go @@ -31,7 +31,6 @@ func RGUsageDataSource(ctx context.Context, state *models.DataSourceRGUsageModel id := uuid.New() *state = models.DataSourceRGUsageModel{ RGID: state.RGID, - Reason: state.Reason, Timeouts: state.Timeouts, Id: types.StringValue(id.String()), diff --git a/internal/service/cloudapi/rg/flattens/flatten_resource_rg.go b/internal/service/cloudapi/rg/flattens/flatten_resource_rg.go index eb3d467..b74f21e 100644 --- a/internal/service/cloudapi/rg/flattens/flatten_resource_rg.go +++ b/internal/service/cloudapi/rg/flattens/flatten_resource_rg.go @@ -52,7 +52,6 @@ func RGResource(ctx context.Context, plan *models.ResourceRGModel, c *decort.Dec Description: plan.Description, Force: plan.Force, Permanently: plan.Permanently, - Reason: plan.Reason, RegisterComputes: plan.RegisterComputes, Restore: plan.Restore, Enable: plan.Enable, diff --git a/internal/service/cloudapi/rg/models/model_data_source_rg.go b/internal/service/cloudapi/rg/models/model_data_source_rg.go index 28b8a27..db9a81f 100644 --- a/internal/service/cloudapi/rg/models/model_data_source_rg.go +++ b/internal/service/cloudapi/rg/models/model_data_source_rg.go @@ -8,7 +8,6 @@ import ( type DataSourceRGModel struct { // request fields RGID types.Int64 `tfsdk:"rg_id"` - Reason types.String `tfsdk:"reason"` Timeouts timeouts.Value `tfsdk:"timeouts"` // response fields diff --git a/internal/service/cloudapi/rg/models/model_data_source_rg_usage.go b/internal/service/cloudapi/rg/models/model_data_source_rg_usage.go index 08d0aba..4273c76 100644 --- a/internal/service/cloudapi/rg/models/model_data_source_rg_usage.go +++ 
b/internal/service/cloudapi/rg/models/model_data_source_rg_usage.go @@ -8,7 +8,6 @@ import ( type DataSourceRGUsageModel struct { // request fields RGID types.Int64 `tfsdk:"rg_id"` - Reason types.String `tfsdk:"reason"` Timeouts timeouts.Value `tfsdk:"timeouts"` // response fields diff --git a/internal/service/cloudapi/rg/models/model_resource_rg.go b/internal/service/cloudapi/rg/models/model_resource_rg.go index b20ff63..8819a3d 100644 --- a/internal/service/cloudapi/rg/models/model_resource_rg.go +++ b/internal/service/cloudapi/rg/models/model_resource_rg.go @@ -24,7 +24,6 @@ type ResourceRGModel struct { Description types.String `tfsdk:"description"` Force types.Bool `tfsdk:"force"` Permanently types.Bool `tfsdk:"permanently"` - Reason types.String `tfsdk:"reason"` RegisterComputes types.Bool `tfsdk:"register_computes"` Restore types.Bool `tfsdk:"restore"` Enable types.Bool `tfsdk:"enable"` @@ -76,27 +75,23 @@ type QuotaModel struct { } type AccessModel struct { - User types.String `tfsdk:"user"` - Right types.String `tfsdk:"right"` - Reason types.String `tfsdk:"reason"` + User types.String `tfsdk:"user"` + Right types.String `tfsdk:"right"` } type DefNetModel struct { NetType types.String `tfsdk:"net_type"` NetId types.Int64 `tfsdk:"net_id"` - Reason types.String `tfsdk:"reason"` } var ItemAccess = map[string]attr.Type{ - "user": types.StringType, - "right": types.StringType, - "reason": types.StringType, + "user": types.StringType, + "right": types.StringType, } var ItemDefNet = map[string]attr.Type{ "net_type": types.StringType, "net_id": types.Int64Type, - "reason": types.StringType, } var ItemACL = map[string]attr.Type{ diff --git a/internal/service/cloudapi/rg/resource_rg.go b/internal/service/cloudapi/rg/resource_rg.go index d68ba5c..0b20b3f 100644 --- a/internal/service/cloudapi/rg/resource_rg.go +++ b/internal/service/cloudapi/rg/resource_rg.go @@ -237,13 +237,6 @@ func (r *resourceRG) Update(ctx context.Context, req resource.UpdateRequest, res // Validate if changes in plan are allowed tflog.Info(ctx, "Update resourceRG: checking def_net is not empty in case of change", map[string]any{ "rg_id": state.Id.ValueString()}) - if !state.DefNet.IsNull() && plan.DefNet.IsNull() { - resp.Diagnostics.AddError( - "Update resourceRG: Invalid input provided", - fmt.Sprintf("block def_net must not be empty for resource with rg_id %d", recordRG.ID), - ) - return - } tflog.Info(ctx, "Update resourceRG: checking def_net_type, ipcidr, ext_ip are not changed", map[string]any{ "rg_id": state.Id.ValueString(), @@ -370,9 +363,6 @@ func (r *resourceRG) Delete(ctx context.Context, req resource.DeleteRequest, res } else { delReq.Permanently = state.Permanently.ValueBool() } - if !state.Reason.IsNull() { - delReq.Reason = state.Reason.ValueString() - } _, err := r.client.CloudAPI().RG().Delete(ctx, delReq) if err != nil { resp.Diagnostics.AddError("Delete resourceRG: Error deleting resource group with error: ", err.Error()) diff --git a/internal/service/cloudapi/rg/schemas/schema_data_source_rg.go b/internal/service/cloudapi/rg/schemas/schema_data_source_rg.go index 69c7976..488fb79 100644 --- a/internal/service/cloudapi/rg/schemas/schema_data_source_rg.go +++ b/internal/service/cloudapi/rg/schemas/schema_data_source_rg.go @@ -13,12 +13,6 @@ func MakeSchemaDataSourceRG() map[string]schema.Attribute { Description: "resource group id", }, - // optional attributes - "reason": schema.StringAttribute{ - Optional: true, - Description: "reason for request", - }, - //computed attributes "account_id": 
schema.Int64Attribute{ Computed: true, diff --git a/internal/service/cloudapi/rg/schemas/schema_data_source_rg_usage.go b/internal/service/cloudapi/rg/schemas/schema_data_source_rg_usage.go index aee92b6..8dcdccb 100644 --- a/internal/service/cloudapi/rg/schemas/schema_data_source_rg_usage.go +++ b/internal/service/cloudapi/rg/schemas/schema_data_source_rg_usage.go @@ -12,12 +12,6 @@ func MakeSchemaDataSourceRGUsage() map[string]schema.Attribute { Description: "find by rg id", }, - // optional attributes - "reason": schema.StringAttribute{ - Optional: true, - Description: "reason for action", - }, - //computed attributes "id": schema.StringAttribute{ Computed: true, diff --git a/internal/service/cloudapi/rg/schemas/schema_resource_rg.go b/internal/service/cloudapi/rg/schemas/schema_resource_rg.go index fa3fb9e..ac73629 100644 --- a/internal/service/cloudapi/rg/schemas/schema_resource_rg.go +++ b/internal/service/cloudapi/rg/schemas/schema_resource_rg.go @@ -109,10 +109,6 @@ func MakeSchemaResourceRG() map[string]schema.Attribute { Required: true, Description: "Access rights to set, one of 'R', 'RCX' or 'ARCXDU'", }, - "reason": schema.StringAttribute{ - Optional: true, - Description: "Reason for action", - }, }, }, }, @@ -132,10 +128,6 @@ func MakeSchemaResourceRG() map[string]schema.Attribute { Description: "Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS.", // default value is 0 }, - "reason": schema.StringAttribute{ - Optional: true, - Description: "Reason for action", - }, }, }, "description": schema.StringAttribute{ @@ -152,10 +144,6 @@ func MakeSchemaResourceRG() map[string]schema.Attribute { Description: "Set to True if you want force delete non-empty RG", // default value is true }, - "reason": schema.StringAttribute{ - Optional: true, - Description: "Set to True if you want force delete non-empty RG", - }, "register_computes": schema.BoolAttribute{ Optional: true, Description: "Register computes in registration system", @@ -171,6 +159,13 @@ func MakeSchemaResourceRG() map[string]schema.Attribute { // default value is true }, + "uniq_pools": schema.ListAttribute{ + Optional: true, + Computed: true, + ElementType: types.StringType, + Description: "List of strings with pools. 
Applies only when updating", + }, + // computed attributes "rg_id": schema.Int64Attribute{ Computed: true, @@ -267,9 +262,5 @@ func MakeSchemaResourceRG() map[string]schema.Attribute { Computed: true, ElementType: types.StringType, }, - "uniq_pools": schema.ListAttribute{ - Computed: true, - ElementType: types.StringType, - }, } } diff --git a/internal/service/cloudapi/rg/utilities/utility_data_source_rg_usage.go b/internal/service/cloudapi/rg/utilities/utility_data_source_rg_usage.go index 6a2aef7..5ce5619 100644 --- a/internal/service/cloudapi/rg/utilities/utility_data_source_rg_usage.go +++ b/internal/service/cloudapi/rg/utilities/utility_data_source_rg_usage.go @@ -13,10 +13,6 @@ import ( func RGUsageCheckPresence(ctx context.Context, plan *models.DataSourceRGUsageModel, c *decort.DecortClient) (*rg.RecordResourceUsage, error) { usageReq := rg.UsageRequest{RGID: uint64(plan.RGID.ValueInt64())} - if !plan.Reason.IsNull() { - usageReq.Reason = plan.Reason.ValueString() - } - tflog.Info(ctx, "RGUsageCheckPresence: before call CloudAPI().RG().Usage", map[string]any{"response": usageReq}) usage, err := c.CloudAPI().RG().Usage(ctx, usageReq) if err != nil { diff --git a/internal/service/cloudapi/rg/utilities/utility_resource_rg.go b/internal/service/cloudapi/rg/utilities/utility_resource_rg.go index ef7f64f..91e2d5d 100644 --- a/internal/service/cloudapi/rg/utilities/utility_resource_rg.go +++ b/internal/service/cloudapi/rg/utilities/utility_resource_rg.go @@ -71,10 +71,8 @@ func CreateRequestResourceRG(ctx context.Context, plan *models.ResourceRGModel) createReq.MaxNumPublicIP = -1 } - // set up defNet, owner, ipcidr, description, reason, extNetId, extIp, registerComputes optional parameters - if plan.DefNetType.IsNull() { - createReq.DefNet = "PRIVATE" // default value - } else { + // set up defNet, owner, ipcidr, description, extNetId, extIp, registerComputes optional parameters + if !plan.DefNetType.IsNull() { createReq.DefNet = plan.DefNetType.ValueString() } if !plan.Owner.IsNull() { @@ -86,9 +84,6 @@ func CreateRequestResourceRG(ctx context.Context, plan *models.ResourceRGModel) if !plan.Description.IsNull() { createReq.Description = plan.Description.ValueString() } - if !plan.Reason.IsNull() { - createReq.Reason = plan.Reason.ValueString() - } if plan.ExtNetID.IsNull() { createReq.ExtNetID = 0 // default value 0 } else { @@ -143,9 +138,6 @@ func RestoreRG(ctx context.Context, rgId uint64, c *decort.DecortClient) diag.Di // EnableRG performs resource group Enable request func EnableRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c *decort.DecortClient) error { enableReq := rg.EnableRequest{RGID: rgId} - if !plan.Reason.IsNull() { - enableReq.Reason = plan.Reason.ValueString() - } tflog.Info(ctx, "utilityEnableRG: before calling CloudAPI().RG().Enable", map[string]any{"rg_id": rgId, "req": enableReq}) @@ -159,10 +151,6 @@ func EnableRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c // DisableRG performs resource group Disable request func DisableRG(ctx context.Context, rgId uint64, plan *models.ResourceRGModel, c *decort.DecortClient) error { disableReq := rg.DisableRequest{RGID: rgId} - if !plan.Reason.IsNull() { - disableReq.Reason = plan.Reason.ValueString() - } - tflog.Info(ctx, "utilityDisableRG: before calling CloudAPI().RG().Disable", map[string]any{"rg_id": rgId, "req": disableReq}) res, err := c.CloudAPI().RG().Disable(ctx, disableReq) @@ -181,9 +169,6 @@ func UpdateRG(ctx context.Context, rgId uint64, plan, state *models.ResourceRGMo 
updateReq := rg.UpdateRequest{ RGID: rgId, } - if !state.Reason.IsNull() { - updateReq.Reason = state.Reason.ValueString() - } if !plan.Name.Equal(state.Name) { updateReq.Name = plan.Name.ValueString() @@ -216,17 +201,37 @@ func UpdateRG(ctx context.Context, rgId uint64, plan, state *models.ResourceRGMo updateNeeded = true } + if !plan.UniqPools.Equal(state.UniqPools) { + if plan.UniqPools.IsNull() || len(plan.UniqPools.Elements()) == 0 { + updateReq.ClearUniqPools = true + } else { + uPoolsList := make([]string, 0, len(plan.UniqPools.Elements())) + + diags.Append(plan.UniqPools.ElementsAs(ctx, &uPoolsList, true)...) + if diags.HasError() { + tflog.Error(ctx, "utilityUpdateRG: cannot populate result with plan.UniqPools object element") + return diags + } + updateReq.UniqPools = uPoolsList + } + tflog.Info(ctx, "utilityUpdateRG: new uniq_pools specified", map[string]any{ + "rg_id": plan.Id.ValueString(), + "uniq_pools_plan": plan.UniqPools, + "uniq_pools_state": state.UniqPools}) + updateNeeded = true + } + var updQuotaNeeded bool var quotaPlan, quotaState models.QuotaModel if !plan.Quota.IsNull() { - diags = plan.Quota.As(ctx, &quotaPlan, basetypes.ObjectAsOptions{}) + diags = plan.Quota.As(ctx, &quotaPlan, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true, UnhandledUnknownAsEmpty: true}) if diags.HasError() { return diags } } if !state.Quota.IsNull() { - diags = state.Quota.As(ctx, &quotaState, basetypes.ObjectAsOptions{}) + diags = state.Quota.As(ctx, &quotaState, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true, UnhandledUnknownAsEmpty: true}) if diags.HasError() { return diags } @@ -424,9 +429,6 @@ func AccessUpdateRG(ctx context.Context, rgId uint64, plan, state *models.Resour RGID: rgId, User: deletedAccessItem.User.ValueString(), } - if !deletedAccessItem.Reason.IsNull() { - revokeReq.Reason = deletedAccessItem.Reason.ValueString() - } tflog.Info(ctx, "AccessUpdateRG: before calling CloudAPI().RG().AccessRevoke", map[string]any{"rg_id": plan.Id.ValueString(), "req": revokeReq}) res, err := c.CloudAPI().RG().AccessRevoke(ctx, revokeReq) @@ -462,9 +464,6 @@ func AccessUpdateRG(ctx context.Context, rgId uint64, plan, state *models.Resour User: addedAccessItem.User.ValueString(), Right: addedAccessItem.Right.ValueString(), } - if !addedAccessItem.Reason.IsNull() { - grantReq.Reason = addedAccessItem.Reason.ValueString() - } tflog.Info(ctx, "AccessUpdateRG: before calling CloudAPI().RG().AccessGrant", map[string]any{"rg_id": plan.Id.ValueString(), "req": grantReq}) res, err := c.CloudAPI().RG().AccessGrant(ctx, grantReq) @@ -506,9 +505,6 @@ func AccessCreateRG(ctx context.Context, rgId uint64, plan *models.ResourceRGMod User: addedAccessItem.User.ValueString(), Right: addedAccessItem.Right.ValueString(), } - if !addedAccessItem.Reason.IsNull() { - grantReq.Reason = addedAccessItem.Reason.ValueString() - } tflog.Info(ctx, "AccessCreateRG: before calling CloudAPI().RG().AccessGrant", map[string]any{ "rg_id": rgId, @@ -568,7 +564,7 @@ func SetDefNetUpdateRG(ctx context.Context, rgId uint64, plan, state *models.Res setDefNetNeeded = true } - } else if !plan.DefNet.IsNull() { + } else { setDefNetNeeded = true } @@ -577,25 +573,36 @@ func SetDefNetUpdateRG(ctx context.Context, rgId uint64, plan, state *models.Res "rg_id": plan.Id.ValueString(), "def_net_plan": plan.DefNet, "def_net_state": state.DefNet}) - setDefNetReq.NetType = itemDefNetPlan.NetType.ValueString() - if itemDefNetPlan.NetId.IsNull() { - setDefNetReq.NetID = 0 // default value + 
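The uniq_pools branch above encodes a clear-versus-replace rule: a null or empty plan list sends ClearUniqPools, anything else replaces the pool list wholesale. A hedged sketch of just that rule, with the framework list already flattened to a plain []string (helper name illustrative):

```go
import "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"

// applyUniqPools mirrors the update rule above: a null or empty plan list
// clears the pools on the platform side, a non-empty list replaces them.
func applyUniqPools(req *rg.UpdateRequest, planPools []string) {
	if len(planPools) == 0 {
		req.ClearUniqPools = true
		return
	}
	req.UniqPools = planPools
}
```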
+ if plan.DefNet.IsNull() { + removeReq := rg.RemoveDefNetRequest{RGID: uint64(state.RGID.ValueInt64())} + res, err := c.CloudAPI().RG().RemoveDefNet(ctx, removeReq) + if err != nil { + diags.AddError( + "SetDefNetUpdateRG: can not remove defNet for rg", + err.Error()) + return diags + } + tflog.Info(ctx, "utilitySetDefNetUpdateRG: response from CloudAPI().RG().RemoveDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "response": res}) } else { - setDefNetReq.NetID = uint64(itemDefNetPlan.NetId.ValueInt64()) - } - if !itemDefNetPlan.Reason.IsNull() { - setDefNetReq.Reason = itemDefNetPlan.Reason.ValueString() - } + setDefNetReq.NetType = itemDefNetPlan.NetType.ValueString() + if itemDefNetPlan.NetId.IsNull() { + setDefNetReq.NetID = 0 // default value + } else { + setDefNetReq.NetID = uint64(itemDefNetPlan.NetId.ValueInt64()) + } + + tflog.Info(ctx, "utilitySetDefNetUpdateRG: before calling CloudAPI().RG().SetDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "req": setDefNetReq}) + res, err := c.CloudAPI().RG().SetDefNet(ctx, setDefNetReq) + if err != nil { + diags.AddError( + "SetDefNetUpdateRG: can not set defNet for rg", + err.Error()) + return diags + } + tflog.Info(ctx, "utilitySetDefNetUpdateRG: response from CloudAPI().RG().SetDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "response": res}) - tflog.Info(ctx, "utilitySetDefNetUpdateRG: before calling CloudAPI().RG().SetDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "req": setDefNetReq}) - res, err := c.CloudAPI().RG().SetDefNet(ctx, setDefNetReq) - if err != nil { - diags.AddError( - "SetDefNetUpdateRG: can not set defNet for rg", - err.Error()) - return diags } - tflog.Info(ctx, "utilitySetDefNetUpdateRG: response from CloudAPI().RG().SetDefNet", map[string]any{"rg_id": plan.Id.ValueString(), "response": res}) } if !setDefNetNeeded { @@ -637,9 +644,6 @@ func SetDefNetCreateRG(ctx context.Context, rgId uint64, plan *models.ResourceRG } else { setDefNetReq.NetID = uint64(itemDefNetPlan.NetId.ValueInt64()) } - if !itemDefNetPlan.Reason.IsNull() { - setDefNetReq.Reason = itemDefNetPlan.Reason.ValueString() - } tflog.Info(ctx, "SetDefNetCreateRG: before calling CloudAPI().RG().SetDefNet", map[string]any{"rg_id": rgId, "req": setDefNetReq}) res, err := c.CloudAPI().RG().SetDefNet(ctx, setDefNetReq) diff --git a/internal/service/cloudapi/vins/flattens/flatten_data_source_vins.go b/internal/service/cloudapi/vins/flattens/flatten_data_source_vins.go index 3ab6676..6ba07f8 100644 --- a/internal/service/cloudapi/vins/flattens/flatten_data_source_vins.go +++ b/internal/service/cloudapi/vins/flattens/flatten_data_source_vins.go @@ -189,24 +189,27 @@ func flattenInterfaces(ctx context.Context, items *vins.ListVNFInterfaces) types tempSlice := make([]types.Object, 0, len(*items)) for _, item := range *items { temp := models.VNFInterfaceModel{ - ConnID: types.Int64Value(int64(item.ConnID)), - ConnType: types.StringValue(item.ConnType), - DefGW: types.StringValue(item.DefGW), - Enabled: types.BoolValue(item.Enabled), - FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)), - GUID: types.StringValue(item.GUID), - IPAddress: types.StringValue(item.IPAddress), - ListenSSH: types.BoolValue(item.ListenSSH), - MAC: types.StringValue(item.MAC), - Name: types.StringValue(item.Name), - NetID: types.Int64Value(int64(item.NetID)), - NetMask: types.Int64Value(int64(item.NetMask)), - NetType: types.StringValue(item.NetType), - NodeID: types.Int64Value(int64(item.NodeID)), - PCISlot: types.Int64Value(int64(item.PCISlot)), - QOS: 
flattenQOS(ctx, &item.QOS), - Target: types.StringValue(item.Target), - Type: types.StringValue(item.Type), + BusNumber: types.Int64Value(int64(item.BusNumber)), + ConnID: types.Int64Value(int64(item.ConnID)), + ConnType: types.StringValue(item.ConnType), + DefGW: types.StringValue(item.DefGW), + Enabled: types.BoolValue(item.Enabled), + FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)), + GUID: types.StringValue(item.GUID), + IPAddress: types.StringValue(item.IPAddress), + ListenSSH: types.BoolValue(item.ListenSSH), + MAC: types.StringValue(item.MAC), + MTU: types.Int64Value(int64(item.MTU)), + Libvirtsettings: flattenLibvirtSettings(ctx, &item.LibvirtSettings), + Name: types.StringValue(item.Name), + NetID: types.Int64Value(int64(item.NetID)), + NetMask: types.Int64Value(int64(item.NetMask)), + NetType: types.StringValue(item.NetType), + NodeID: types.Int64Value(int64(item.NodeID)), + PCISlot: types.Int64Value(int64(item.PCISlot)), + QOS: flattenQOS(ctx, &item.QOS), + Target: types.StringValue(item.Target), + Type: types.StringValue(item.Type), } temp.VNFs, diags = types.ListValueFrom(ctx, types.Int64Type, item.VNFs) @@ -230,6 +233,28 @@ func flattenInterfaces(ctx context.Context, items *vins.ListVNFInterfaces) types return res } +func flattenLibvirtSettings(ctx context.Context, settings *vins.LibvirtSettings) types.Object { + tflog.Info(ctx, "Start flattenLibvirtSettings") + + temp := models.LibvirtSettingsModel{ + TXMode: types.StringValue(settings.TXMode), + IOEventFD: types.StringValue(settings.IOEventFD), + EventIDx: types.StringValue(settings.EventIDx), + Queues: types.Int64Value(int64(settings.Queues)), + RXQueueSize: types.Int64Value(int64(settings.RXQueueSize)), + TXQueueSize: types.Int64Value(int64(settings.TXQueueSize)), + GUID: types.StringValue(settings.GUID), + } + + res, err := types.ObjectValueFrom(ctx, models.LibvirtSettings, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenLibvirtSettings struct to obj", err)) + } + + tflog.Info(ctx, "End flattenLibvirtSettings") + return res +} + // flattenQOS flattens QOS. // Flatten errors are added to tflog. 
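The flatten helpers above all follow one pattern: copy the SDK struct into a tfsdk-tagged model, then convert it to a types.Object with types.ObjectValueFrom against a matching attr.Type map (LibvirtSettings here). The tag names and the map keys must stay in sync, otherwise the conversion returns diagnostics. A self-contained sketch of the pattern with an abridged field set (not the provider's actual code):

	package main

	import (
		"context"
		"fmt"

		"github.com/hashicorp/terraform-plugin-framework/attr"
		"github.com/hashicorp/terraform-plugin-framework/types"
	)

	type libvirtModel struct {
		TXMode types.String `tfsdk:"txmode"`
		Queues types.Int64  `tfsdk:"queues"`
	}

	var libvirtAttrTypes = map[string]attr.Type{
		"txmode": types.StringType,
		"queues": types.Int64Type,
	}

	func main() {
		ctx := context.Background()
		// Conversion succeeds only while the tfsdk tags mirror the attr.Type keys.
		obj, diags := types.ObjectValueFrom(ctx, libvirtAttrTypes, libvirtModel{
			TXMode: types.StringValue("iothread"),
			Queues: types.Int64Value(4),
		})
		fmt.Println(obj, diags.HasError())
	}

The flattenQOS helper that follows applies the same conversion to the QOS block.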
func flattenQOS(ctx context.Context, qos *vins.QOS) types.Object { diff --git a/internal/service/cloudapi/vins/models/model_data_source_vins.go b/internal/service/cloudapi/vins/models/model_data_source_vins.go index 23aacad..2daed16 100644 --- a/internal/service/cloudapi/vins/models/model_data_source_vins.go +++ b/internal/service/cloudapi/vins/models/model_data_source_vins.go @@ -89,25 +89,48 @@ type RecordResourcesModel struct { } type VNFInterfaceModel struct { - ConnID types.Int64 `tfsdk:"conn_id"` - ConnType types.String `tfsdk:"conn_type"` - DefGW types.String `tfsdk:"def_gw"` - Enabled types.Bool `tfsdk:"enabled"` - FLIPGroupID types.Int64 `tfsdk:"flipgroup_id"` + BusNumber types.Int64 `tfsdk:"bus_number"` + ConnID types.Int64 `tfsdk:"conn_id"` + ConnType types.String `tfsdk:"conn_type"` + DefGW types.String `tfsdk:"def_gw"` + Enabled types.Bool `tfsdk:"enabled"` + FLIPGroupID types.Int64 `tfsdk:"flipgroup_id"` + GUID types.String `tfsdk:"guid"` + IPAddress types.String `tfsdk:"ip_address"` + Libvirtsettings types.Object `tfsdk:"libvirt_settings"` + ListenSSH types.Bool `tfsdk:"listen_ssh"` + MAC types.String `tfsdk:"mac"` + MTU types.Int64 `tfsdk:"mtu"` + Name types.String `tfsdk:"name"` + NetID types.Int64 `tfsdk:"net_id"` + NetMask types.Int64 `tfsdk:"net_mask"` + NetType types.String `tfsdk:"net_type"` + NodeID types.Int64 `tfsdk:"node_id"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + QOS types.Object `tfsdk:"qos"` + Target types.String `tfsdk:"target"` + Type types.String `tfsdk:"type"` + VNFs types.List `tfsdk:"vnfs"` +} + +type LibvirtSettingsModel struct { + TXMode types.String `tfsdk:"txmode"` + IOEventFD types.String `tfsdk:"ioeventfd"` + EventIDx types.String `tfsdk:"event_idx"` + Queues types.Int64 `tfsdk:"queues"` + RXQueueSize types.Int64 `tfsdk:"rx_queue_size"` + TXQueueSize types.Int64 `tfsdk:"tx_queue_size"` GUID types.String `tfsdk:"guid"` - IPAddress types.String `tfsdk:"ip_address"` - ListenSSH types.Bool `tfsdk:"listen_ssh"` - MAC types.String `tfsdk:"mac"` - Name types.String `tfsdk:"name"` - NetID types.Int64 `tfsdk:"net_id"` - NetMask types.Int64 `tfsdk:"net_mask"` - NetType types.String `tfsdk:"net_type"` - NodeID types.Int64 `tfsdk:"node_id"` - PCISlot types.Int64 `tfsdk:"pci_slot"` - QOS types.Object `tfsdk:"qos"` - Target types.String `tfsdk:"target"` - Type types.String `tfsdk:"type"` - VNFs types.List `tfsdk:"vnfs"` +} + +var LibvirtSettings = map[string]attr.Type{ + "txmode": types.StringType, + "ioeventfd": types.StringType, + "event_idx": types.StringType, + "queues": types.Int64Type, + "rx_queue_size": types.Int64Type, + "tx_queue_size": types.Int64Type, + "guid": types.StringType, } type QOSModel struct { @@ -278,25 +301,28 @@ var ItemVNFDev = map[string]attr.Type{ } var ItemInterface = map[string]attr.Type{ - "conn_id": types.Int64Type, - "conn_type": types.StringType, - "def_gw": types.StringType, - "enabled": types.BoolType, - "flipgroup_id": types.Int64Type, - "guid": types.StringType, - "ip_address": types.StringType, - "listen_ssh": types.BoolType, - "mac": types.StringType, - "name": types.StringType, - "net_id": types.Int64Type, - "net_mask": types.Int64Type, - "net_type": types.StringType, - "node_id": types.Int64Type, - "pci_slot": types.Int64Type, - "qos": types.ObjectType{AttrTypes: ItemQOS}, - "target": types.StringType, - "type": types.StringType, - "vnfs": types.ListType{ElemType: types.Int64Type}, + "bus_number": types.Int64Type, + "conn_id": types.Int64Type, + "conn_type": types.StringType, + "def_gw": types.StringType, + "enabled": 
types.BoolType, + "flipgroup_id": types.Int64Type, + "guid": types.StringType, + "ip_address": types.StringType, + "libvirt_settings": types.ObjectType{AttrTypes: LibvirtSettings}, + "listen_ssh": types.BoolType, + "mac": types.StringType, + "mtu": types.Int64Type, + "name": types.StringType, + "net_id": types.Int64Type, + "net_mask": types.Int64Type, + "net_type": types.StringType, + "node_id": types.Int64Type, + "pci_slot": types.Int64Type, + "qos": types.ObjectType{AttrTypes: ItemQOS}, + "target": types.StringType, + "type": types.StringType, + "vnfs": types.ListType{ElemType: types.Int64Type}, } var ItemQOS = map[string]attr.Type{ diff --git a/internal/service/cloudapi/vins/models/model_data_source_vins_list.go b/internal/service/cloudapi/vins/models/model_data_source_vins_list.go index c5e4789..7daab86 100644 --- a/internal/service/cloudapi/vins/models/model_data_source_vins_list.go +++ b/internal/service/cloudapi/vins/models/model_data_source_vins_list.go @@ -12,6 +12,7 @@ type DataSourceVINSListModel struct { AccountID types.Int64 `tfsdk:"account_id"` RGID types.Int64 `tfsdk:"rg_id"` ExtIP types.String `tfsdk:"ext_ip"` + VNFDevID types.Int64 `tfsdk:"vnf_dev_id"` IncludeDeleted types.Bool `tfsdk:"include_deleted"` Page types.Int64 `tfsdk:"page"` SortBy types.String `tfsdk:"sort_by"` diff --git a/internal/service/cloudapi/vins/schemas/schema_data_source_vins.go b/internal/service/cloudapi/vins/schemas/schema_data_source_vins.go index f55f836..e0cb6d0 100644 --- a/internal/service/cloudapi/vins/schemas/schema_data_source_vins.go +++ b/internal/service/cloudapi/vins/schemas/schema_data_source_vins.go @@ -91,6 +91,9 @@ func MakeSchemaDataSourceVINS() map[string]schema.Attribute { Computed: true, NestedObject: schema.NestedAttributeObject{ Attributes: map[string]schema.Attribute{ + "bus_number": schema.Int64Attribute{ + Computed: true, + }, "conn_id": schema.Int64Attribute{ Computed: true, }, @@ -112,12 +115,41 @@ func MakeSchemaDataSourceVINS() map[string]schema.Attribute { "ip_address": schema.StringAttribute{ Computed: true, }, + "libvirt_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "txmode": schema.StringAttribute{ + Computed: true, + }, + "ioeventfd": schema.StringAttribute{ + Computed: true, + }, + "event_idx": schema.StringAttribute{ + Computed: true, + }, + "queues": schema.Int64Attribute{ + Computed: true, + }, + "rx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + "tx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + }, + }, "listen_ssh": schema.BoolAttribute{ Computed: true, }, "mac": schema.StringAttribute{ Computed: true, }, + "mtu": schema.Int64Attribute{ + Computed: true, + }, "name": schema.StringAttribute{ Computed: true, }, diff --git a/internal/service/cloudapi/vins/schemas/schema_data_source_vins_list.go b/internal/service/cloudapi/vins/schemas/schema_data_source_vins_list.go index 98c3d61..fb693bd 100644 --- a/internal/service/cloudapi/vins/schemas/schema_data_source_vins_list.go +++ b/internal/service/cloudapi/vins/schemas/schema_data_source_vins_list.go @@ -27,6 +27,10 @@ func MakeSchemaDataSourceVINSList() map[string]schema.Attribute { Optional: true, Description: "Filter by external IP address", }, + "vnf_dev_id": schema.Int64Attribute{ + Optional: true, + Description: "Filter by VNF Device id", + }, "include_deleted": schema.BoolAttribute{ Optional: true, Description: "Include deleted computes", diff --git 
a/internal/service/cloudapi/vins/schemas/schema_resource_vins.go b/internal/service/cloudapi/vins/schemas/schema_resource_vins.go index 2c3965d..eee3d66 100644 --- a/internal/service/cloudapi/vins/schemas/schema_resource_vins.go +++ b/internal/service/cloudapi/vins/schemas/schema_resource_vins.go @@ -258,6 +258,9 @@ func MakeSchemaResourceVINS() map[string]schema.Attribute { Computed: true, NestedObject: schema.NestedAttributeObject{ Attributes: map[string]schema.Attribute{ + "bus_number": schema.Int64Attribute{ + Computed: true, + }, "conn_id": schema.Int64Attribute{ Computed: true, }, @@ -279,12 +282,41 @@ func MakeSchemaResourceVINS() map[string]schema.Attribute { "ip_address": schema.StringAttribute{ Computed: true, }, + "libvirt_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "txmode": schema.StringAttribute{ + Computed: true, + }, + "ioeventfd": schema.StringAttribute{ + Computed: true, + }, + "event_idx": schema.StringAttribute{ + Computed: true, + }, + "queues": schema.Int64Attribute{ + Computed: true, + }, + "rx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + "tx_queue_size": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + }, + }, "listen_ssh": schema.BoolAttribute{ Computed: true, }, "mac": schema.StringAttribute{ Computed: true, }, + "mtu": schema.Int64Attribute{ + Computed: true, + }, "name": schema.StringAttribute{ Computed: true, }, diff --git a/internal/service/cloudapi/vins/utilities/utility_data_source_vins_list.go b/internal/service/cloudapi/vins/utilities/utility_data_source_vins_list.go index d8a94f4..86dfe66 100644 --- a/internal/service/cloudapi/vins/utilities/utility_data_source_vins_list.go +++ b/internal/service/cloudapi/vins/utilities/utility_data_source_vins_list.go @@ -31,6 +31,9 @@ func VINSListDataSourceCheckPresence(ctx context.Context, plan *models.DataSourc if !plan.ExtIP.IsNull() { listReq.ExtIP = plan.ExtIP.ValueString() } + if !plan.VNFDevID.IsNull() { + listReq.VNFDevId = uint64(plan.VNFDevID.ValueInt64()) + } if !plan.IncludeDeleted.IsNull() { listReq.IncludeDeleted = plan.IncludeDeleted.ValueBool() } diff --git a/internal/service/cloudbroker/account/models/model_resource_cb_account.go b/internal/service/cloudbroker/account/models/model_resource_cb_account.go index f2e0812..e6622cc 100644 --- a/internal/service/cloudbroker/account/models/model_resource_cb_account.go +++ b/internal/service/cloudbroker/account/models/model_resource_cb_account.go @@ -49,9 +49,8 @@ type ResourceAccountModel struct { } type UsersModel struct { - UserID types.String `tfsdk:"user_id"` - AccessType types.String `tfsdk:"access_type"` - RecursiveDelete types.Bool `tfsdk:"recursive_delete"` + UserID types.String `tfsdk:"user_id"` + AccessType types.String `tfsdk:"access_type"` } type ResourceLimitsInAccountResourceModel struct { diff --git a/internal/service/cloudbroker/account/schemas/schema_resource_cb_account.go b/internal/service/cloudbroker/account/schemas/schema_resource_cb_account.go index 793a60f..1588bc5 100644 --- a/internal/service/cloudbroker/account/schemas/schema_resource_cb_account.go +++ b/internal/service/cloudbroker/account/schemas/schema_resource_cb_account.go @@ -46,10 +46,6 @@ func MakeSchemaResourceAccount() map[string]schema.Attribute { "access_type": schema.StringAttribute{ Required: true, }, - "recursive_delete": schema.BoolAttribute{ - Optional: true, - // default is false - }, }, }, }, diff --git 
a/internal/service/cloudbroker/account/utilities/utility_resource_cb_account.go b/internal/service/cloudbroker/account/utilities/utility_resource_cb_account.go index 883be06..7b27694 100644 --- a/internal/service/cloudbroker/account/utilities/utility_resource_cb_account.go +++ b/internal/service/cloudbroker/account/utilities/utility_resource_cb_account.go @@ -451,9 +451,8 @@ func AddDeleteUsersAccount(ctx context.Context, accountId uint64, plan, state *m for _, user := range deletedUsers { delUserReq := account.DeleteUserRequest{ - AccountID: accountId, - UserName: user.UserID.ValueString(), - RecursiveDelete: user.RecursiveDelete.ValueBool(), // default false + AccountID: accountId, + UserName: user.UserID.ValueString(), } tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudBroker().Account().DeleteUser", map[string]any{"account_id": accountId, "req": delUserReq}) diff --git a/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit.go b/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit.go index c886dd1..8f55921 100644 --- a/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit.go +++ b/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit.go @@ -29,7 +29,6 @@ func AuditDataSource(ctx context.Context, state *models.DataSourceAudit, c *deco AuditGuid: state.AuditGuid, Timeouts: state.Timeouts, - Apitask: types.StringValue(recordAudit.Apitask), Arguments: types.StringValue(recordAudit.Arguments), Call: types.StringValue(recordAudit.Call), GUID: types.StringValue(recordAudit.GUID), diff --git a/internal/service/cloudbroker/audit/models/model_data_source_cb_audit.go b/internal/service/cloudbroker/audit/models/model_data_source_cb_audit.go index 769e979..7466085 100644 --- a/internal/service/cloudbroker/audit/models/model_data_source_cb_audit.go +++ b/internal/service/cloudbroker/audit/models/model_data_source_cb_audit.go @@ -11,7 +11,6 @@ type DataSourceAudit struct { Timeouts timeouts.Value `tfsdk:"timeouts"` //response field - Apitask types.String `tfsdk:"apitask"` Arguments types.String `tfsdk:"args"` Call types.String `tfsdk:"call"` GUID types.String `tfsdk:"guid"` diff --git a/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit.go b/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit.go index 2a400ec..72686ec 100644 --- a/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit.go +++ b/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit.go @@ -9,9 +9,6 @@ func MakeSchemaDataSourceAudit() map[string]schema.Attribute { "audit_guid": schema.StringAttribute{ Required: true, }, - "apitask": schema.StringAttribute{ - Computed: true, - }, "args": schema.StringAttribute{ Computed: true, }, diff --git a/internal/service/cloudbroker/disks/data_source_cb_disk.go b/internal/service/cloudbroker/disks/data_source_cb_disk.go new file mode 100644 index 0000000..d65de39 --- /dev/null +++ b/internal/service/cloudbroker/disks/data_source_cb_disk.go @@ -0,0 +1,91 @@ +package cbDisks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDisk{} +) + +func NewDataSourceDisk() datasource.DataSource { + return &dataSourceDisk{} +} + +// dataSourceDisk is the data source implementation. +type dataSourceDisk struct { + client *decort.DecortClient +} + +func (d *dataSourceDisk) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DiskModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDisk: Error get state") + return + } + diskId := uint64(state.DiskID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceDisk: got state successfully", map[string]any{"disk_id": diskId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDisk: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDisk: set timeouts successfully", map[string]any{ + "disk_id": diskId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDisk: Error flatten data source disk") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDisk: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDisk", map[string]any{"disk_id": diskId}) +} + +func (d *dataSourceDisk) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDisk(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDisk) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_disk" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceDisk) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	tflog.Info(ctx, "Get Configure dataSourceDisk")
+	d.client = client.DataSource(ctx, &req, resp)
+	tflog.Info(ctx, "Getting Configure dataSourceDisk successfully")
+}
diff --git a/internal/service/cloudbroker/disks/data_source_cb_disk_list.go b/internal/service/cloudbroker/disks/data_source_cb_disk_list.go
new file mode 100644
index 0000000..e7d16a8
--- /dev/null
+++ b/internal/service/cloudbroker/disks/data_source_cb_disk_list.go
@@ -0,0 +1,88 @@
+package cbDisks
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ datasource.DataSource = &dataSourceDiskList{}
+)
+
+func NewDataSourceDiskList() datasource.DataSource {
+	return &dataSourceDiskList{}
+}
+
+// dataSourceDiskList is the data source implementation.
+type dataSourceDiskList struct {
+	client *decort.DecortClient
+}
+
+func (d *dataSourceDiskList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+	// Read Terraform configuration data into the model
+	var state models.DisksModel
+	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskList: Error get state")
+		return
+	}
+	tflog.Info(ctx, "Read dataSourceDiskList: got state successfully")
+
+	// Set timeouts
+	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskList: Error set timeout")
+		return
+	}
+	tflog.Info(ctx, "Read dataSourceDiskList: set timeouts successfully", map[string]any{"readTimeout": readTimeout})
+
+	ctx, cancel := context.WithTimeout(ctx, readTimeout)
+	defer cancel()
+
+	// Map response body to schema
+	resp.Diagnostics.Append(flattens.DiskListDataSource(ctx, &state, d.client)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskList: Error flatten")
+		return
+	}
+
+	// Set refreshed state
+	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskList: Error set state")
+		return
+	}
+	tflog.Info(ctx, "End read dataSourceDiskList")
+}
+
+func (d *dataSourceDiskList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: schemas.MakeSchemaDataSourceDiskList(),
+		Blocks: map[string]schema.Block{
+			"timeouts": timeouts.Block(ctx),
+		},
+	}
+}
+
+func (d *dataSourceDiskList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_cb_disk_list"
+}
+
+// Configure adds the provider configured client to the data source.
+func (d *dataSourceDiskList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	tflog.Info(ctx, "Get Configure dataSourceDiskList")
+	d.client = client.DataSource(ctx, &req, resp)
+	tflog.Info(ctx, "Getting Configure dataSourceDiskList successfully")
+}
diff --git a/internal/service/cloudbroker/disks/data_source_cb_disk_list_deleted.go b/internal/service/cloudbroker/disks/data_source_cb_disk_list_deleted.go
new file mode 100644
index 0000000..7e9c3b7
--- /dev/null
+++ b/internal/service/cloudbroker/disks/data_source_cb_disk_list_deleted.go
@@ -0,0 +1,88 @@
+package cbDisks
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ datasource.DataSource = &dataSourceDiskListDeleted{}
+)
+
+func NewDataSourceDiskListDeleted() datasource.DataSource {
+	return &dataSourceDiskListDeleted{}
+}
+
+// dataSourceDiskListDeleted is the data source implementation.
+type dataSourceDiskListDeleted struct {
+	client *decort.DecortClient
+}
+
+func (d *dataSourceDiskListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+	// Read Terraform configuration data into the model
+	var state models.ListDisksModel
+	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error get state")
+		return
+	}
+	tflog.Info(ctx, "Read dataSourceDiskListDeleted: got state successfully")
+
+	// Set timeouts
+	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
+	resp.Diagnostics.Append(diags...)
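The timeout handling that recurs in every Read method here is one idiom: Timeouts.Read returns the value from the data source's `timeouts` block if the user set one, falling back to the supplied default (constants.Timeout30s), and the result bounds all SDK calls through context.WithTimeout. A compact sketch of the idiom, mirroring the code in this diff:

	// 30*time.Second stands in for constants.Timeout30s, whose value is defined elsewhere in the repo.
	readTimeout, diags := state.Timeouts.Read(ctx, 30*time.Second)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	ctx, cancel := context.WithTimeout(ctx, readTimeout) // every API call below inherits this deadline
	defer cancel()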
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskListDeleted: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskListDeletedDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskListDeleted") +} + +func (d *dataSourceDiskListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskListDeleted(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_disk_list_deleted" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceDiskListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskListDeleted") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskListDeleted successfully") +} diff --git a/internal/service/cloudbroker/disks/data_source_cb_disk_list_types.go b/internal/service/cloudbroker/disks/data_source_cb_disk_list_types.go new file mode 100644 index 0000000..ae30f4b --- /dev/null +++ b/internal/service/cloudbroker/disks/data_source_cb_disk_list_types.go @@ -0,0 +1,88 @@ +package cbDisks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskListTypes{} +) + +func NewDataSourceDiskListTypes() datasource.DataSource { + return &dataSourceDiskListTypes{} +} + +// dataSourceDiskListTypes is the data source implementation. +type dataSourceDiskListTypes struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskListTypes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskListTypesModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypes: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceDiskListTypes: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypes: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskListTypes: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskListTypesDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypes: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypes: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskListTypes") +} + +func (d *dataSourceDiskListTypes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskListTypes(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskListTypes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_disk_list_types" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceDiskListTypes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskListTypes") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskListTypes successfully") +} diff --git a/internal/service/cloudbroker/disks/data_source_cb_disk_list_types_detailed.go b/internal/service/cloudbroker/disks/data_source_cb_disk_list_types_detailed.go new file mode 100644 index 0000000..46df7cb --- /dev/null +++ b/internal/service/cloudbroker/disks/data_source_cb_disk_list_types_detailed.go @@ -0,0 +1,88 @@ +package cbDisks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskListTypesDetailed{} +) + +func NewDataSourceDiskListTypesDetailed() datasource.DataSource { + return &dataSourceDiskListTypesDetailed{} +} + +// dataSourceDiskListTypesDetailed is the data source implementation. 
+type dataSourceDiskListTypesDetailed struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskListTypesDetailed) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskListTypesDetailedModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceDiskListTypesDetailed: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskListTypesDetailed: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskListTypesDetailedDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskListTypesDetailed") +} + +func (d *dataSourceDiskListTypesDetailed) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskListTypesDetailed(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskListTypesDetailed) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_disk_list_types_detailed" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceDiskListTypesDetailed) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskListTypesDetailed") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskListTypesDetailed successfully") +} diff --git a/internal/service/cloudbroker/disks/data_source_cb_disk_list_unattached.go b/internal/service/cloudbroker/disks/data_source_cb_disk_list_unattached.go new file mode 100644 index 0000000..510c12e --- /dev/null +++ b/internal/service/cloudbroker/disks/data_source_cb_disk_list_unattached.go @@ -0,0 +1,88 @@ +package cbDisks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskListUnattached{} +) + +func NewDataSourceDiskListUnattached() datasource.DataSource { + return &dataSourceDiskListUnattached{} +} + +// dataSourceDiskListUnattached is the data source implementation. +type dataSourceDiskListUnattached struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskListUnattached) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskListUnattachedModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceDiskListUnattached: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskListUnattached: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskListUnattachedDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error set state")
+		return
+	}
+	tflog.Info(ctx, "End read dataSourceDiskListUnattached")
+}
+
+func (d *dataSourceDiskListUnattached) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: schemas.MakeSchemaDataSourceDiskListUnattached(),
+		Blocks: map[string]schema.Block{
+			"timeouts": timeouts.Block(ctx),
+		},
+	}
+}
+
+func (d *dataSourceDiskListUnattached) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_cb_disk_list_unattached"
+}
+
+// Configure adds the provider configured client to the data source.
+func (d *dataSourceDiskListUnattached) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	tflog.Info(ctx, "Get Configure dataSourceDiskListUnattached")
+	d.client = client.DataSource(ctx, &req, resp)
+	tflog.Info(ctx, "Getting Configure dataSourceDiskListUnattached successfully")
+}
diff --git a/internal/service/cloudbroker/disks/data_source_cb_disk_replication.go b/internal/service/cloudbroker/disks/data_source_cb_disk_replication.go
new file mode 100644
index 0000000..624fb07
--- /dev/null
+++ b/internal/service/cloudbroker/disks/data_source_cb_disk_replication.go
@@ -0,0 +1,91 @@
+package cbDisks
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ datasource.DataSource = &dataSourceDiskReplication{}
+)
+
+func NewDataSourceDiskReplication() datasource.DataSource {
+	return &dataSourceDiskReplication{}
+}
+
+// dataSourceDiskReplication is the data source implementation.
+type dataSourceDiskReplication struct {
+	client *decort.DecortClient
+}
+
+func (d *dataSourceDiskReplication) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+	// Read Terraform configuration data into the model
+	var state models.RecordDiskModel
+	resp.Diagnostics.Append(req.Config.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskReplication: Error get state")
+		return
+	}
+	diskId := uint64(state.DiskId.ValueInt64())
+	tflog.Info(ctx, "Read dataSourceDiskReplication: got state successfully", map[string]any{"disk_id": diskId})
+
+	// Set timeouts
+	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskReplication: Error set timeout")
+		return
+	}
+	tflog.Info(ctx, "Read dataSourceDiskReplication: set timeouts successfully", map[string]any{
+		"disk_id":     diskId,
+		"readTimeout": readTimeout})
+
+	ctx, cancel := context.WithTimeout(ctx, readTimeout)
+	defer cancel()
+
+	// Map response body to schema
+	resp.Diagnostics.Append(flattens.DiskReplicationDataSource(ctx, &state, d.client)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskReplication: Error flatten data source disk")
+		return
+	}
+
+	// Set refreshed state
+	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskReplication: Error set state")
+		return
+	}
+	tflog.Info(ctx, "End read dataSourceDiskReplication", map[string]any{"disk_id": diskId})
+}
+
+func (d *dataSourceDiskReplication) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: schemas.MakeSchemaDataSourceDiskReplication(),
+		Blocks: map[string]schema.Block{
+			"timeouts": timeouts.Block(ctx),
+		},
+	}
+}
+
+func (d *dataSourceDiskReplication) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_cb_disk_replication"
+}
+
+// Configure adds the provider configured client to the data source.
+func (d *dataSourceDiskReplication) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	tflog.Info(ctx, "Get Configure dataSourceDiskReplication")
+	d.client = client.DataSource(ctx, &req, resp)
+	tflog.Info(ctx, "Getting Configure dataSourceDiskReplication successfully")
+}
diff --git a/internal/service/cloudbroker/disks/data_source_cb_disk_snapshot.go b/internal/service/cloudbroker/disks/data_source_cb_disk_snapshot.go
new file mode 100644
index 0000000..0d0ffff
--- /dev/null
+++ b/internal/service/cloudbroker/disks/data_source_cb_disk_snapshot.go
@@ -0,0 +1,96 @@
+package cbDisks
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ datasource.DataSource = &dataSourceDiskSnapshot{}
+)
+
+func NewDataSourceDiskSnapshot() datasource.DataSource {
+	return &dataSourceDiskSnapshot{}
+}
+
+// dataSourceDiskSnapshot is the data source implementation.
+type dataSourceDiskSnapshot struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskSnapshot) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskSnapshotModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error get state") + return + } + + ctxSnpsht := map[string]any{ + "disk_id": uint64(state.DiskID.ValueInt64()), + "label": state.Label.ValueString(), + } + tflog.Info(ctx, "Read dataSourceDiskSnapshot: got state successfully", ctxSnpsht) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskSnapshot: set timeouts successfully", map[string]any{ + "disk_id": uint64(state.DiskID.ValueInt64()), + "label": state.Label.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskSnapshotDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskSnapshot", ctxSnpsht) +} + +func (d *dataSourceDiskSnapshot) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskSnapshot(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskSnapshot) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_disk_snapshot" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceDiskSnapshot) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskSnapshot") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskSnapshot successfully") +} diff --git a/internal/service/cloudbroker/disks/data_source_cb_disk_snapshot_list.go b/internal/service/cloudbroker/disks/data_source_cb_disk_snapshot_list.go new file mode 100644 index 0000000..65031a5 --- /dev/null +++ b/internal/service/cloudbroker/disks/data_source_cb_disk_snapshot_list.go @@ -0,0 +1,91 @@ +package cbDisks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskSnapshotList{} +) + +func NewDataSourceDiskSnapshotList() datasource.DataSource { + return &dataSourceDiskSnapshotList{} +} + +// dataSourceDiskSnapshotList is the data source implementation. +type dataSourceDiskSnapshotList struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskSnapshotList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskSnapshotListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error get state") + return + } + diskId := uint64(state.DiskID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceDiskSnapshotList: got state successfully", map[string]any{"disk_id": diskId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskSnapshotList: set timeouts successfully", map[string]any{ + "disk_id": diskId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskSnapshotListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
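All nine cb_disk data sources added in this change share the same constructor shape, func() datasource.DataSource, which is what the plugin framework expects at registration time. The provider registration itself is not part of this diff; a hypothetical sketch of what it could look like (the provider type and method receiver are assumptions, only the constructor names come from these files):

	// provider.go — illustrative only, not part of this change set.
	func (p *dynamixProvider) DataSources(_ context.Context) []func() datasource.DataSource {
		return []func() datasource.DataSource{
			cbDisks.NewDataSourceDisk,
			cbDisks.NewDataSourceDiskList,
			cbDisks.NewDataSourceDiskListDeleted,
			cbDisks.NewDataSourceDiskListTypes,
			cbDisks.NewDataSourceDiskListTypesDetailed,
			cbDisks.NewDataSourceDiskListUnattached,
			cbDisks.NewDataSourceDiskReplication,
			cbDisks.NewDataSourceDiskSnapshot,
			cbDisks.NewDataSourceDiskSnapshotList,
		}
	}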
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error set state")
+		return
+	}
+	tflog.Info(ctx, "End read dataSourceDiskSnapshotList", map[string]any{"disk_id": diskId})
+}
+
+func (d *dataSourceDiskSnapshotList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: schemas.MakeSchemaDataSourceDiskSnapshotList(),
+		Blocks: map[string]schema.Block{
+			"timeouts": timeouts.Block(ctx),
+		},
+	}
+}
+
+func (d *dataSourceDiskSnapshotList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_cb_disk_snapshot_list"
+}
+
+// Configure adds the provider configured client to the data source.
+func (d *dataSourceDiskSnapshotList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	tflog.Info(ctx, "Get Configure dataSourceDiskSnapshotList")
+	d.client = client.DataSource(ctx, &req, resp)
+	tflog.Info(ctx, "Getting Configure dataSourceDiskSnapshotList successfully")
+}
diff --git a/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk.go b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk.go
new file mode 100644
index 0000000..5faa22e
--- /dev/null
+++ b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk.go
@@ -0,0 +1,119 @@
+package flattens
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
+)
+
+// DiskDataSource flattens data source for disk.
+// Return error in case data source is not found on the platform.
+// Flatten errors are added to tflog.
+func DiskDataSource(ctx context.Context, state *models.DiskModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskDataSource") + + diags := diag.Diagnostics{} + + recordDisk, diags := utilities.DataSourceDiskCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskDataSource: before flatten") + + diskAcl, _ := json.Marshal(recordDisk.ACL) + + *state = models.DiskModel{ + DiskID: state.DiskID, + Timeouts: state.Timeouts, + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + SEPType: types.StringValue(recordDisk.SEPType), + ACL: types.StringValue(string(diskAcl)), + BootPartition: types.Int64Value(int64(recordDisk.BootPartition)), + Computes: flattenComputeList(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + DiskPath: types.StringValue(recordDisk.DiskPath), + DeviceName: types.StringValue(recordDisk.DeviceName), + GID: types.Int64Value(int64(recordDisk.GID)), + GUID: types.Int64Value(int64(recordDisk.GUID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + IQN: types.StringValue(recordDisk.IQN), + Login: types.StringValue(recordDisk.Login), + Milestones: types.Int64Value(int64(recordDisk.Milestones)), + Name: types.StringValue(recordDisk.Name), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + Password: types.StringValue(recordDisk.Password), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)), + RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)), + ReferenceID: types.StringValue(recordDisk.ReferenceID), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SEPID: types.Int64Value(int64(recordDisk.SEPID)), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenSnapshotList(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + state.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskDataSource: cannot flatten recordDisk.Images to state.Images", diags)) + } + state.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskDataSource: cannot flatten recordDisk.PresentTo to state.PresentTo", diags)) + } + + iotune := models.IOModel{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + 
SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + + state.IOTune = &iotune + + repl := models.ItemReplModel{ + DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)), + PoolID: types.StringValue(recordDisk.Replication.PoolID), + Role: types.StringValue(recordDisk.Replication.Role), + SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID), + StorageID: types.StringValue(recordDisk.Replication.StorageID), + VolumeID: types.StringValue(recordDisk.Replication.VolumeID), + } + + state.Replication = &repl + + tflog.Info(ctx, "flattens.DiskDataSource: end flatten") + return nil +} diff --git a/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list.go b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list.go new file mode 100644 index 0000000..6466247 --- /dev/null +++ b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list.go @@ -0,0 +1,180 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities" +) + +// DiskListDataSource flattens data source for disk list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func DiskListDataSource(ctx context.Context, state *models.DisksModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskListDataSource") + + diags := diag.Diagnostics{} + + diskList, diags := utilities.DataSourceDiskListCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskListDataSource: before flatten") + + *state = models.DisksModel{ + ByID: state.ByID, + Name: state.Name, + AccountName: state.AccountName, + DiskMaxSize: state.DiskMaxSize, + Shared: state.Shared, + AccountID: state.AccountID, + Type: state.Type, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + // computed fields + EntryCount: types.Int64Value(int64(diskList.EntryCount)), + } + + items := make([]models.DiskItemModel, 0, diskList.EntryCount) + for _, recordDisk := range diskList.Data { + diskAcl, _ := json.Marshal(recordDisk.ACL) + d := models.DiskItemModel{ + MachineID: types.Int64Value(int64(recordDisk.MachineID)), + MachineName: types.StringValue(recordDisk.MachineName), + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + SEPType: types.StringValue(recordDisk.SEPType), + ACL: types.StringValue(string(diskAcl)), + BootPartition: types.Int64Value(int64(recordDisk.BootPartition)), + Computes: flattenComputeList(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + DiskPath: types.StringValue(recordDisk.DiskPath), + DeviceName: types.StringValue(recordDisk.DeviceName), + GID: types.Int64Value(int64(recordDisk.GID)), + GUID: types.Int64Value(int64(recordDisk.GUID)), + ID: types.Int64Value(int64(recordDisk.ID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + IQN: types.StringValue(recordDisk.IQN), + Login: types.StringValue(recordDisk.Login), + Milestones: types.Int64Value(int64(recordDisk.Milestones)), + Name: types.StringValue(recordDisk.Name), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + Password: types.StringValue(recordDisk.Password), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)), + RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)), + ReferenceID: types.StringValue(recordDisk.ReferenceID), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SEPID: types.Int64Value(int64(recordDisk.SEPID)), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenSnapshotList(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListDataSource: cannot 
flatten recordDisk.Images to d.Images", diags)) + } + d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags)) + } + + iotune := models.IOModel{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + + d.IOTune = &iotune + + repl := models.ItemReplModel{ + DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)), + PoolID: types.StringValue(recordDisk.Replication.PoolID), + Role: types.StringValue(recordDisk.Replication.Role), + SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID), + StorageID: types.StringValue(recordDisk.Replication.StorageID), + VolumeID: types.StringValue(recordDisk.Replication.VolumeID), + } + + d.Replication = &repl + + items = append(items, d) + } + state.Items = items + + tflog.Info(ctx, "flattens.DiskListDataSource: end flatten") + return nil +} + +func flattenComputeList(ctx context.Context, computes map[string]string) []models.ComputeModel { + tflog.Info(ctx, "Start flattenComputeList") + + res := make([]models.ComputeModel, 0, len(computes)) + + for k, v := range computes { + temp := models.ComputeModel{ + ComputeID: types.StringValue(k), + ComputeName: types.StringValue(v), + } + + res = append(res, temp) + } + + tflog.Info(ctx, "End flattenComputeList") + return res +} + +func flattenSnapshotList(ctx context.Context, snapshots disks.ListSnapshots) []models.SnapshotModel { + tflog.Info(ctx, "Start flattenSnapshotList") + res := make([]models.SnapshotModel, 0, len(snapshots)) + for _, item := range snapshots { + temp := models.SnapshotModel{ + GUID: types.StringValue(item.GUID), + Label: types.StringValue(item.Label), + ResID: types.StringValue(item.ResID), + SnapSetGUID: types.StringValue(item.SnapSetGUID), + SnapSetTime: types.Int64Value(int64(item.SnapSetTime)), + Timestamp: types.Int64Value(int64(item.Timestamp)), + } + res = append(res, temp) + } + + tflog.Info(ctx, "End flattenSnapshotList") + return res +} diff --git a/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_deleted.go b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_deleted.go new file mode 100644 index 0000000..bd4497d --- /dev/null +++ b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_deleted.go @@ -0,0 +1,180 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + 
"github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities" +) + +// DiskListDeletedDataSource flattens data source for disk list deleted. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func DiskListDeletedDataSource(ctx context.Context, state *models.ListDisksModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskListDeletedDataSource") + + diags := diag.Diagnostics{} + + diskList, diags := utilities.DataSourceDiskListDeletedCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskListDeletedDataSource: before flatten") + + *state = models.ListDisksModel{ + ByID: state.ByID, + Name: state.Name, + AccountName: state.AccountName, + DiskMaxSize: state.DiskMaxSize, + Shared: state.Shared, + AccountID: state.AccountID, + Type: state.Type, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + // computed fields + EntryCount: types.Int64Value(int64(diskList.EntryCount)), + } + + items := make([]models.ItemDiskModel, 0, diskList.EntryCount) + for _, recordDisk := range diskList.Data { + diskAcl, _ := json.Marshal(recordDisk.ACL) + d := models.ItemDiskModel{ + MachineID: types.Int64Value(int64(recordDisk.MachineID)), + MachineName: types.StringValue(recordDisk.MachineName), + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + SEPType: types.StringValue(recordDisk.SEPType), + ACL: types.StringValue(string(diskAcl)), + BootPartition: types.Int64Value(int64(recordDisk.BootPartition)), + Computes: flattenComputes(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + DiskPath: types.StringValue(recordDisk.DiskPath), + DeviceName: types.StringValue(recordDisk.DeviceName), + GID: types.Int64Value(int64(recordDisk.GID)), + GUID: types.Int64Value(int64(recordDisk.GUID)), + ID: types.Int64Value(int64(recordDisk.ID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + IQN: types.StringValue(recordDisk.IQN), + Login: types.StringValue(recordDisk.Login), + Milestones: types.Int64Value(int64(recordDisk.Milestones)), + Name: types.StringValue(recordDisk.Name), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + Password: types.StringValue(recordDisk.Password), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)), + RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)), + ReferenceID: types.StringValue(recordDisk.ReferenceID), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SEPID: 
types.Int64Value(int64(recordDisk.SEPID)), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListDeletedDataSource: cannot flatten recordDisk.Images to d.Images", diags)) + } + d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListDeletedDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags)) + } + + iotune := models.IOTuneModel{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + + d.IOTune = &iotune + + repl := models.ItemReplicationModel{ + DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)), + PoolID: types.StringValue(recordDisk.Replication.PoolID), + Role: types.StringValue(recordDisk.Replication.Role), + SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID), + StorageID: types.StringValue(recordDisk.Replication.StorageID), + VolumeID: types.StringValue(recordDisk.Replication.VolumeID), + } + + d.Replication = &repl + + items = append(items, d) + } + state.Items = items + + tflog.Info(ctx, "flattens.DiskListDeletedDataSource: end flatten") + return nil +} + +func flattenComputes(ctx context.Context, computes map[string]string) []models.ComputesModel { + tflog.Info(ctx, "Start flattenComputes") + + res := make([]models.ComputesModel, 0, len(computes)) + + for k, v := range computes { + temp := models.ComputesModel{ + ComputeID: types.StringValue(k), + ComputeName: types.StringValue(v), + } + + res = append(res, temp) + } + + tflog.Info(ctx, "End flattenComputes") + return res +} + +func flattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) []models.ItemSnapshotModel { + tflog.Info(ctx, "Start flattenSnapshots") + res := make([]models.ItemSnapshotModel, 0, len(snapshots)) + for _, item := range snapshots { + temp := models.ItemSnapshotModel{ + GUID: types.StringValue(item.GUID), + Label: types.StringValue(item.Label), + ResID: types.StringValue(item.ResID), + SnapSetGUID: types.StringValue(item.SnapSetGUID), + SnapSetTime: types.Int64Value(int64(item.SnapSetTime)), + Timestamp: 
types.Int64Value(int64(item.Timestamp)), + } + res = append(res, temp) + } + + tflog.Info(ctx, "End flattenSnapshots") + return res +} diff --git a/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_types.go b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_types.go new file mode 100644 index 0000000..24aaecc --- /dev/null +++ b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_types.go @@ -0,0 +1,50 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities" +) + +// DiskListTypesDataSource flattens data source for disk list types. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func DiskListTypesDataSource(ctx context.Context, state *models.DataSourceDiskListTypesModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskListTypesDataSource") + + diags := diag.Diagnostics{} + + listTypes, diags := utilities.DataSourceDiskListTypesCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskListTypesDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceDiskListTypesModel{ + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(listTypes.EntryCount)), + } + + state.Items, diags = types.ListValueFrom(ctx, types.StringType, listTypes.Data) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListTypesDataSource: cannot flatten listTypes.Data to state.Items", diags)) + } + + tflog.Info(ctx, "flattens.DiskListTypesDataSource: end flatten") + return nil +} diff --git a/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_types_detailed.go b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_types_detailed.go new file mode 100644 index 0000000..a1b8cbf --- /dev/null +++ b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_types_detailed.go @@ -0,0 +1,82 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities" +) + +// DiskListTypesDetailedDataSource flattens data source for disk list types detailed. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
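+// Note: entries in listTypes.Data arrive as untyped interface{} values and are
+// asserted below to the expected {sepId, sepName, pools} shape; a response that
+// deviates from this shape will panic, so the SDK payload format is assumed stable.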
+func DiskListTypesDetailedDataSource(ctx context.Context, state *models.DataSourceDiskListTypesDetailedModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskListTypesDetailedDataSource") + + diags := diag.Diagnostics{} + + listTypes, diags := utilities.DataSourceDiskListTypesDetailedCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskListTypesDetailedDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceDiskListTypesDetailedModel{ + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(listTypes.EntryCount)), + } + + items := make([]models.ItemDiskTypeDetailedModel, 0, len(listTypes.Data)) + for _, typeListDetailed := range listTypes.Data { + typeMap := typeListDetailed.(map[string]interface{}) + + t := models.ItemDiskTypeDetailedModel{ + SepID: types.Int64Value(int64(typeMap["sepId"].(float64))), + SepName: types.StringValue(typeMap["sepName"].(string)), + } + + var pools []models.ItemPoolModel + poolsTemp := typeMap["pools"].([]interface{}) + for _, pool := range poolsTemp { + poolsMap := pool.(map[string]interface{}) + p := models.ItemPoolModel{ + Name: types.StringValue(poolsMap["name"].(string)), + System: types.StringValue(poolsMap["system"].(string)), + } + + p.Types, diags = types.ListValueFrom(ctx, types.StringType, flattenTypes(poolsMap["types"].([]interface{}))) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListTypesDetailedDataSource: cannot flatten poolsMap[\"types\"] to p.Types", diags)) + } + pools = append(pools, p) + } + t.Pools = pools + items = append(items, t) + } + state.Items = items + + tflog.Info(ctx, "flattens.DiskListTypesDetailedDataSource: end flatten") + return diags +} + +func flattenTypes(typesInterface []interface{}) []string { + var typesList []string + for _, typ := range typesInterface { + typesList = append(typesList, typ.(string)) + } + return typesList +} diff --git a/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_unattached.go b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_unattached.go new file mode 100644 index 0000000..5ed177a --- /dev/null +++ b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_list_unattached.go @@ -0,0 +1,166 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities" +) + +// DiskListUnattachedDataSource flattens data source for disk list unattached. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
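+// Note: unlike the other disk list flatteners above, iotune and snapshots are
+// flattened here into types.Object/types.List values via ObjectValueFrom and
+// ListValueFrom instead of typed model slices.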
+func DiskListUnattachedDataSource(ctx context.Context, state *models.DataSourceDiskListUnattachedModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskListUnattachedDataSource") + + diags := diag.Diagnostics{} + + diskList, diags := utilities.DataSourceDiskListUnattachedCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskListUnattachedDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceDiskListUnattachedModel{ + ByID: state.ByID, + AccountName: state.AccountName, + DiskMaxSize: state.DiskMaxSize, + Status: state.Status, + AccountID: state.AccountID, + SepID: state.SepID, + PoolName: state.PoolName, + Type: state.Type, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(diskList.EntryCount)), + } + + items := make([]models.ItemDiskUnattachedModel, 0, diskList.EntryCount) + for _, recordDisk := range diskList.Data { + diskAcl, _ := json.Marshal(recordDisk.ACL) + d := models.ItemDiskUnattachedModel{ + CKey: types.StringValue(recordDisk.CKey), + Meta: flattens.Meta(ctx, recordDisk.Meta), + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + ACL: types.StringValue(string(diskAcl)), + BootPartition: types.Int64Value(int64(recordDisk.BootPartition)), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + DiskPath: types.StringValue(recordDisk.DiskPath), + GID: types.Int64Value(int64(recordDisk.GID)), + GUID: types.Int64Value(int64(recordDisk.GUID)), + DiskId: types.Int64Value(int64(recordDisk.ID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + Iqn: types.StringValue(recordDisk.IQN), + Login: types.StringValue(recordDisk.Login), + Milestones: types.Int64Value(int64(recordDisk.Milestones)), + DiskName: types.StringValue(recordDisk.Name), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + Passwd: types.StringValue(recordDisk.Password), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)), + ReferenceID: types.StringValue(recordDisk.ReferenceID), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SepID: types.Int64Value(int64(recordDisk.SEPID)), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenUnattachedDiskSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images) + if diags != nil { + tflog.Error(ctx, 
fmt.Sprint("flattens.DiskListUnattachedDataSource: cannot flatten recordDisk.Images to d.Images", diags)) + } + + d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListUnattachedDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags)) + } + + iotune := models.IOTuneModel{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattens.DiskListUnattachedDataSource iotune struct to obj", diags)) + } + d.IOTune = obj + + items = append(items, d) + } + state.Items = items + + tflog.Info(ctx, "flattens.DiskListUnattachedDataSource: end flatten") + return diags +} + +func flattenUnattachedDiskSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List { + tflog.Info(ctx, "Start flattenSnapshots") + tempSlice := make([]types.Object, 0, len(snapshots)) + for _, item := range snapshots { + temp := models.DiskUnattachedItemSnapshotModel{ + GUID: types.StringValue(item.GUID), + Label: types.StringValue(item.Label), + ResID: types.StringValue(item.ResID), + SnapSetGUID: types.StringValue(item.SnapSetGUID), + SnapSetTime: types.Int64Value(int64(item.SnapSetTime)), + Timestamp: types.Int64Value(int64(item.Timestamp)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemSnapshot, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenSnapshots struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapshot}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenSnapshots", diags)) + } + + tflog.Info(ctx, "End flattenSnapshots") + return res +} diff --git a/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_replication.go b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_replication.go new file mode 100644 index 0000000..c6a94c3 --- /dev/null +++ b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_replication.go @@ -0,0 +1,167 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities" +) + +// DiskReplicationDataSource flattens data source for disk. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func DiskReplicationDataSource(ctx context.Context, state *models.RecordDiskModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskReplicationDataSource") + + diags := diag.Diagnostics{} + + recordDisk, status, err := utilities.DataSourceDiskReplicationCheckPresence(ctx, state, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about disk"), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.DiskReplicationDataSource: before flatten") + + diskAcl, _ := json.Marshal(recordDisk.ACL) + + *state = models.RecordDiskModel{ + DiskId: state.DiskId, + Timeouts: state.Timeouts, + ACL: types.StringValue(string(diskAcl)), + BootPartition: types.Int64Value(int64(recordDisk.BootPartition)), + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + Computes: flattenDRComputes(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + DeviceName: types.StringValue(recordDisk.DeviceName), + DiskPath: types.StringValue(recordDisk.DiskPath), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + GID: types.Int64Value(int64(recordDisk.GID)), + GUID: types.Int64Value(int64(recordDisk.GUID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + Images: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.Images), + Name: types.StringValue(recordDisk.Name), + IQN: types.StringValue(recordDisk.IQN), + Login: types.StringValue(recordDisk.Login), + Milestones: types.Int64Value(int64(recordDisk.Milestones)), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + Passwd: types.StringValue(recordDisk.Password), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PresentTo: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.PresentTo), + PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)), + ReferenceID: types.StringValue(recordDisk.ReferenceID), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SepType: types.StringValue(recordDisk.SEPType), + SepID: types.Int64Value(int64(recordDisk.SEPID)), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: diskListFlattenSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + StatusReplication: types.StringValue(*status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: 
types.Int64Value(int64(recordDisk.VMID)), + } + + iotune := models.DiskReplicationIOTune{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + state.IOTune = &iotune + + itemReplication := models.ItemReplicationModel{ + DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)), + PoolID: types.StringValue(recordDisk.Replication.PoolID), + Role: types.StringValue(recordDisk.Replication.Role), + SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID), + StorageID: types.StringValue(recordDisk.Replication.StorageID), + VolumeID: types.StringValue(recordDisk.Replication.VolumeID), + } + + state.Replication = &itemReplication + + tflog.Info(ctx, "flattens.ReplicationDiskDataSource: end flatten") + return nil +} + +func flattenDRComputes(ctx context.Context, items map[string]string) types.List { + tflog.Info(ctx, "Start flattenDRComputes") + tempSlice := make([]types.Object, 0, len(items)) + for id, name := range items { + temp := models.ItemComputeModel{ + ComputeId: types.StringValue(id), + ComputeName: types.StringValue(name), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenDRComputes struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenDRComputes", diags)) + } + + tflog.Info(ctx, "End flattenDRComputes") + return res +} + +func diskListFlattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List { + tflog.Info(ctx, "Start flattenSnapshots") + tempSlice := make([]types.Object, 0, len(snapshots)) + for _, item := range snapshots { + temp := models.DiskReplicationItemSnapshot{ + GUID: types.StringValue(item.GUID), + Label: types.StringValue(item.Label), + ResID: types.StringValue(item.ResID), + SnapSetGUID: types.StringValue(item.SnapSetGUID), + SnapSetTime: types.Int64Value(int64(item.SnapSetTime)), + TimeStamp: types.Int64Value(int64(item.Timestamp)), + } + obj, diags := types.ObjectValueFrom(ctx, models.DiskReplicationSnapshot, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error diskListFlattenSnapshots struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapshot}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error diskListFlattenSnapshots", diags)) + } + + tflog.Info(ctx, "End diskListFlattenSnapshots") + return res +} diff --git 
a/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_snapshot.go b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_snapshot.go new file mode 100644 index 0000000..af97e28 --- /dev/null +++ b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_snapshot.go @@ -0,0 +1,50 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities" +) + +// DiskSnapshotDataSource flattens data source for disk snapshot. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func DiskSnapshotDataSource(ctx context.Context, state *models.DataSourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskSnapshotDataSource") + + diskId := uint64(state.DiskID.ValueInt64()) + + item, diags := utilities.DataSourceDiskSnapshotCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskSnapshotDataSource: before flatten", map[string]any{"disk_id": diskId, "snapshot": item}) + + id := uuid.New() + *state = models.DataSourceDiskSnapshotModel{ + DiskID: state.DiskID, + Label: state.Label, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + GUID: types.StringValue(item.GUID), + ResID: types.StringValue(item.ResID), + SnapSetGUID: types.StringValue(item.SnapSetGUID), + SnapSetTime: types.Int64Value(int64(item.SnapSetTime)), + TimeStamp: types.Int64Value(int64(item.Timestamp)), + } + + tflog.Info(ctx, "flattens.DiskSnapshotDataSource: end flatten", map[string]any{ + "disk_id": state.DiskID.ValueInt64(), + "label": state.Label.ValueString(), + }) + return nil +} diff --git a/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_snapshot_list.go b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_snapshot_list.go new file mode 100644 index 0000000..bc06d84 --- /dev/null +++ b/internal/service/cloudbroker/disks/flattens/flatten_data_source_cb_disk_snapshot_list.go @@ -0,0 +1,61 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities" +) + +// DiskSnapshotListDataSource flattens data source for disk snapshot list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
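+// Note: this data source has no natural platform identifier, so a fresh UUID is
+// generated on every read and stored in the computed id field.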
+func DiskSnapshotListDataSource(ctx context.Context, state *models.DataSourceDiskSnapshotListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskSnapshotListDataSource") + + diags := diag.Diagnostics{} + + diskId := uint64(state.DiskID.ValueInt64()) + + snapshots, err := utilities.DiskSnapshotListCheckPresence(ctx, diskId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about disk snapshot list with disk ID %v", diskId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.DiskSnapshotListDataSource: before flatten", map[string]any{"disk_id": diskId, "snapshots": snapshots}) + + id := uuid.New() + *state = models.DataSourceDiskSnapshotListModel{ + DiskID: state.DiskID, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + } + + res := make([]models.DiskItemSnapshot, 0, len(*snapshots)) + for _, item := range *snapshots { + temp := models.DiskItemSnapshot{ + GUID: types.StringValue(item.GUID), + Label: types.StringValue(item.Label), + ResID: types.StringValue(item.ResID), + ReferenceID: types.StringValue(item.ReferenceID), + SnapSetGUID: types.StringValue(item.SnapSetGUID), + SnapSetTime: types.Int64Value(int64(item.SnapSetTime)), + TimeStamp: types.Int64Value(int64(item.Timestamp)), + } + res = append(res, temp) + } + + state.Items = res + + tflog.Info(ctx, "flattens.DiskSnapshotListDataSource: end flatten", map[string]any{"disk_id": state.DiskID.ValueInt64()}) + return nil +} diff --git a/internal/service/cloudbroker/disks/flattens/flatten_resource_cb_disk.go b/internal/service/cloudbroker/disks/flattens/flatten_resource_cb_disk.go new file mode 100644 index 0000000..d16bbd3 --- /dev/null +++ b/internal/service/cloudbroker/disks/flattens/flatten_resource_cb_disk.go @@ -0,0 +1,191 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities" +) + +// DiskResource flattens resource for disk. +// Return error in case resource is not found on the platform. +// Flatten errors are added to tflog. 
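+// Note: optional fields are carried over from the plan and are only backfilled
+// from the platform record when the planned value is unknown (see the IsUnknown
+// checks below); computed fields are always overwritten.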
+func DiskResource(ctx context.Context, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskResource") + + diags := diag.Diagnostics{} + + diskId, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64) + if err != nil { + diags.AddError("flattens.DiskResource: Cannot parse disk ID from state", err.Error()) + return diags + } + + recordDisk, err := utilities.DiskCheckPresence(ctx, diskId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.DiskResource: before flatten", map[string]any{"disk_id": diskId, "recordDisk": recordDisk}) + + diskAcl, _ := json.Marshal(recordDisk.ACL) + + *plan = models.ResourceDiskModel{ + // required fields + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + DiskName: types.StringValue(recordDisk.Name), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + GID: types.Int64Value(int64(recordDisk.GID)), + + // optional fields + SSDSize: plan.SSDSize, + NodeIDs: plan.NodeIDs, + Description: plan.Description, + Pool: plan.Pool, + SEPID: plan.SEPID, + Type: plan.Type, + Detach: plan.Detach, + Permanently: plan.Permanently, + Shareable: plan.Shareable, + Timeouts: plan.Timeouts, + + // computed fields + LastUpdated: plan.LastUpdated, + Id: types.StringValue(strconv.Itoa(int(recordDisk.ID))), + DiskId: types.Int64Value(int64(recordDisk.ID)), + AccountName: types.StringValue(recordDisk.AccountName), + ACL: types.StringValue(string(diskAcl)), + Computes: flattenResourceComputes(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + DeviceName: types.StringValue(recordDisk.DeviceName), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SepType: types.StringValue(recordDisk.SEPType), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenResourceDiskSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + TechStatus: types.StringValue(recordDisk.TechStatus), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + plan.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskResource: cannot flatten recordDisk.Images to plan.Images", diags)) + } + plan.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskResource: cannot flatten recordDisk.PresentTo to plan.PresentTo", diags)) + } + + if plan.Description.IsUnknown() { + plan.Description = types.StringValue(recordDisk.Description) + } + if plan.Pool.IsUnknown() { + plan.Pool = types.StringValue(recordDisk.Pool) + } + if plan.SEPID.IsUnknown() { + plan.SEPID = types.Int64Value(int64(recordDisk.SEPID)) + } + if plan.Shareable.IsUnknown() { + plan.Shareable = types.BoolValue(recordDisk.Shareable) + } + if plan.Type.IsUnknown() { + 
plan.Type = types.StringValue(recordDisk.Type)
+	}
+
+	iotune := models.ResourceIOTuneModel{
+		ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
+		ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
+		ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
+		ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
+		SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
+		TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
+		TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
+		TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
+		TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
+		WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
+		WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
+		WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
+		WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
+	}
+	obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune)
+	if diags != nil {
+		tflog.Error(ctx, fmt.Sprint("Error flattens.DiskResource iotune struct to obj", diags))
+	}
+	plan.IOTune = obj
+
+	tflog.Info(ctx, "flattens.DiskResource: after flatten", map[string]any{"disk_id": plan.Id.ValueString()})
+
+	tflog.Info(ctx, "End flattens.DiskResource")
+	return nil
+}
+
+func flattenResourceComputes(ctx context.Context, items map[string]string) types.List {
+	tflog.Info(ctx, "Start flattenResourceComputes")
+	tempSlice := make([]types.Object, 0, len(items))
+	for id, name := range items {
+		temp := models.ItemComputeModel{
+			ComputeId:   types.StringValue(id),
+			ComputeName: types.StringValue(name),
+		}
+		obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp)
+		if diags != nil {
+			tflog.Error(ctx, fmt.Sprint("Error flattenResourceComputes struct to obj", diags))
+		}
+		tempSlice = append(tempSlice, obj)
+	}
+
+	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice)
+	if diags != nil {
+		tflog.Error(ctx, fmt.Sprint("Error flattenResourceComputes", diags))
+	}
+
+	tflog.Info(ctx, "End flattenResourceComputes")
+	return res
+}
+
+func flattenResourceDiskSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
+	tflog.Info(ctx, "Start flattenResourceDiskSnapshots")
+	tempSlice := make([]types.Object, 0, len(snapshots))
+	for _, item := range snapshots {
+		temp := models.ItemSnapshotsModel{
+			GUID:        types.StringValue(item.GUID),
+			Label:       types.StringValue(item.Label),
+			ResID:       types.StringValue(item.ResID),
+			SnapSetGUID: types.StringValue(item.SnapSetGUID),
+			SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
+			TimeStamp:   types.Int64Value(int64(item.Timestamp)),
+		}
+		obj, diags := types.ObjectValueFrom(ctx, models.ItemSnapshots, temp)
+		if diags != nil {
+			tflog.Error(ctx, fmt.Sprint("Error flattenResourceDiskSnapshots struct to obj", diags))
+		}
+		tempSlice = append(tempSlice, obj)
+	}
+
+	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapshots}, tempSlice)
+	if diags != nil {
+		tflog.Error(ctx, fmt.Sprint("Error flattenResourceDiskSnapshots", diags))
+	}
+
+	tflog.Info(ctx, "End flattenResourceDiskSnapshots")
+	return res
+}
diff --git a/internal/service/cloudbroker/disks/flattens/flatten_resource_cb_disk_replication.go b/internal/service/cloudbroker/disks/flattens/flatten_resource_cb_disk_replication.go
new file mode
100644
index 0000000..61b8f49
--- /dev/null
+++ b/internal/service/cloudbroker/disks/flattens/flatten_resource_cb_disk_replication.go
@@ -0,0 +1,184 @@
+package flattens
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
+)
+
+// DiskReplicationResource flattens resource for disk replication.
+// Return error in case resource is not found on the platform.
+// Flatten errors are added to tflog.
+func DiskReplicationResource(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "Start flattens.DiskReplicationResource")
+
+	diags := diag.Diagnostics{}
+
+	recordDisk, status, err := utilities.ResourceDiskReplicationCheckPresence(ctx, state, c)
+	if err != nil {
+		diags.AddError("Cannot get info about disk", err.Error())
+		return diags
+	}
+
+	tflog.Info(ctx, "flattens.DiskReplicationResource: before flatten")
+
+	diskAcl, _ := json.Marshal(recordDisk.ACL)
+
+	*state = models.ResourceRecordDiskReplicationModel{
+		DiskId:              state.DiskId,
+		Name:                state.Name,
+		SepID:               state.SepID,
+		ReplicationId:       state.ReplicationId,
+		Timeouts:            state.Timeouts,
+		PoolName:            state.PoolName,
+		Pause:               state.Pause,
+		Reverse:             state.Reverse,
+		Start:               state.Start,
+		Detach:              state.Detach,
+		Permanently:         state.Permanently,
+		Id:                  types.StringValue(strconv.Itoa(int(recordDisk.Replication.DiskID))),
+		ACL:                 types.StringValue(string(diskAcl)),
+		BootPartition:       types.Int64Value(int64(recordDisk.BootPartition)),
+		AccountID:           types.Int64Value(int64(recordDisk.AccountID)),
+		AccountName:         types.StringValue(recordDisk.AccountName),
+		Computes:            flattenRComputes(ctx, recordDisk.Computes),
+		CreatedTime:         types.Int64Value(int64(recordDisk.CreatedTime)),
+		DeletedTime:         types.Int64Value(int64(recordDisk.DeletedTime)),
+		DeviceName:          types.StringValue(recordDisk.DeviceName),
+		DiskPath:            types.StringValue(recordDisk.DiskPath),
+		Description:         types.StringValue(recordDisk.Description),
+		DestructionTime:     types.Int64Value(int64(recordDisk.DestructionTime)),
+		GUID:                types.Int64Value(int64(recordDisk.GUID)),
+		GID:                 types.Int64Value(int64(recordDisk.GID)),
+		ImageID:             types.Int64Value(int64(recordDisk.ImageID)),
+		Images:              flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.Images),
+		IQN:                 types.StringValue(recordDisk.IQN),
+		Login:               types.StringValue(recordDisk.Login),
+		Milestones:          types.Int64Value(int64(recordDisk.Milestones)),
+		Order:               types.Int64Value(int64(recordDisk.Order)),
+		Params:              types.StringValue(recordDisk.Params),
+		ParentID:            types.Int64Value(int64(recordDisk.ParentID)),
+		Passwd:              types.StringValue(recordDisk.Password),
+		PCISlot:             types.Int64Value(int64(recordDisk.PCISlot)),
+		Pool:                types.StringValue(recordDisk.Pool),
+		PresentTo:           flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.PresentTo),
+		PurgeAttempts:       types.Int64Value(int64(recordDisk.PurgeAttempts)),
+		PurgeTime:           types.Int64Value(int64(recordDisk.PurgeTime)),
+		RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)),
+		ReferenceID:         types.StringValue(recordDisk.ReferenceID),
+		ResID:               types.StringValue(recordDisk.ResID),
+		ResName:             types.StringValue(recordDisk.ResName),
+		Role:                types.StringValue(recordDisk.Role),
+		SepType:             types.StringValue(recordDisk.SEPType),
+		Shareable:           types.BoolValue(recordDisk.Shareable),
+		SizeMax:             types.Int64Value(int64(recordDisk.SizeMax)),
+		SizeUsed:            types.Float64Value(recordDisk.SizeUsed),
+		Snapshots:           replicationFlattenSnapshots(ctx, recordDisk.Snapshots),
+		Status:              types.StringValue(recordDisk.Status),
+		StatusReplication:   types.StringValue(*status),
+		TechStatus:          types.StringValue(recordDisk.TechStatus),
+		Type:                types.StringValue(recordDisk.Type),
+		VMID:                types.Int64Value(int64(recordDisk.VMID)),
+	}
+
+	iotune := models.ResourceDiskReplicationIOTuneModel{
+		ReadBytesSec:     types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)),
+		ReadBytesSecMax:  types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)),
+		ReadIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)),
+		ReadIOPSSecMax:   types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)),
+		SizeIOPSSec:      types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)),
+		TotalBytesSec:    types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)),
+		TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)),
+		TotalIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)),
+		TotalIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)),
+		WriteBytesSec:    types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)),
+		WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)),
+		WriteIOPSSec:     types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)),
+		WriteIOPSSecMax:  types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)),
+	}
+	obj, diags := types.ObjectValueFrom(ctx, models.ResourceDiskReplicationIOTune, iotune)
+	if diags != nil {
+		tflog.Error(ctx, fmt.Sprint("Error flattens.DiskReplicationResource iotune struct to obj", diags))
+	}
+	state.IOTune = obj
+
+	itemReplication := models.ResourceItemReplicationModel{
+		DiskID:       types.Int64Value(int64(recordDisk.Replication.DiskID)),
+		PoolID:       types.StringValue(recordDisk.Replication.PoolID),
+		Role:         types.StringValue(recordDisk.Replication.Role),
+		SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID),
+		StorageID:    types.StringValue(recordDisk.Replication.StorageID),
+		VolumeID:     types.StringValue(recordDisk.Replication.VolumeID),
+	}
+
+	obj, diags = types.ObjectValueFrom(ctx, models.ResourceItemReplication, itemReplication)
+	if diags != nil {
+		tflog.Error(ctx, fmt.Sprint("Error flattens.DiskReplicationResource Replication struct to obj", diags))
+	}
+	state.Replication = obj
+
+	tflog.Info(ctx, "flattens.DiskReplicationResource: end flatten")
+	return nil
+}
+
+func flattenRComputes(ctx context.Context, items map[string]string) types.List {
+	tflog.Info(ctx, "Start flattenRComputes")
+	tempSlice := make([]types.Object, 0, len(items))
+	for id, name := range items {
+		temp := models.ResourceReplicationItemComputeModel{
+			ComputeId:   types.StringValue(id),
+			ComputeName: types.StringValue(name),
+		}
+		obj, diags := types.ObjectValueFrom(ctx, models.ResourceReplicationItemCompute, temp)
+		if diags != nil {
+			tflog.Error(ctx, fmt.Sprint("Error flattenRComputes struct to obj", diags))
+		}
+		tempSlice = append(tempSlice, obj)
+	}
+
+	// Build the list with the same attribute map the objects above were created with;
+	// using models.ItemCompute here would make ListValueFrom fail with a type mismatch.
+	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceReplicationItemCompute}, tempSlice)
+	if diags != nil {
+		tflog.Error(ctx, fmt.Sprint("Error flattenRComputes", diags))
+	}
+
+	tflog.Info(ctx, "End flattenRComputes")
+	return res
+}
+
+func replicationFlattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List {
+	tflog.Info(ctx, "Start replicationFlattenSnapshots")
+	tempSlice := make([]types.Object, 0, len(snapshots))
+	for _, item := range snapshots {
+		temp := models.ResourceReplicationItemSnapshotModel{
+			GUID:        types.StringValue(item.GUID),
+			Label:       types.StringValue(item.Label),
+			ResID:       types.StringValue(item.ResID),
+			SnapSetGUID: types.StringValue(item.SnapSetGUID),
+			SnapSetTime: types.Int64Value(int64(item.SnapSetTime)),
+			TimeStamp:   types.Int64Value(int64(item.Timestamp)),
+		}
+		obj, diags := types.ObjectValueFrom(ctx, models.ResourceReplicationItemSnapshot, temp)
+		if diags != nil {
+			tflog.Error(ctx, fmt.Sprint("Error replicationFlattenSnapshots struct to obj", diags))
+		}
+		tempSlice = append(tempSlice, obj)
+	}
+
+	res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceReplicationItemSnapshot}, tempSlice)
+	if diags != nil {
+		tflog.Error(ctx, fmt.Sprint("Error replicationFlattenSnapshots", diags))
+	}
+
+	tflog.Info(ctx, "End replicationFlattenSnapshots")
+	return res
+}
diff --git a/internal/service/cloudbroker/disks/flattens/flatten_resource_cb_disk_snapshot.go b/internal/service/cloudbroker/disks/flattens/flatten_resource_cb_disk_snapshot.go
new file mode 100644
index 0000000..dd0da6c
--- /dev/null
+++ b/internal/service/cloudbroker/disks/flattens/flatten_resource_cb_disk_snapshot.go
@@ -0,0 +1,65 @@
+package flattens
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
+)
+
+// DiskSnapshotResource flattens resource for disk snapshot.
+// Return error in case resource is not found on the platform.
+// Flatten errors are added to tflog.
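+// Note: the resource ID is the composite string "<disk_id>#<label>"; it is
+// rebuilt below whenever the current ID does not already contain the "#" separator.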
+func DiskSnapshotResource(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "Start flattens.DiskSnapshotResource", map[string]any{
+		"disk_id": plan.DiskID.ValueInt64(),
+		"label":   plan.Label.ValueString()})
+
+	recordSnapshot, diags := utilities.DiskSnapshotCheckPresence(ctx, plan, c)
+	if diags.HasError() {
+		return diags
+	}
+
+	tflog.Info(ctx, "flattens.DiskSnapshotResource: before flatten", map[string]any{
+		"disk_id":        plan.DiskID.ValueInt64(),
+		"label":          plan.Label.ValueString(),
+		"recordSnapshot": recordSnapshot})
+
+	id := plan.Id
+	if !strings.Contains(id.ValueString(), "#") {
+		id = types.StringValue(fmt.Sprintf("%d#%s", plan.DiskID.ValueInt64(), plan.Label.ValueString()))
+	}
+	*plan = models.ResourceDiskSnapshotModel{
+		// required fields
+		DiskID: plan.DiskID,
+		Label:  types.StringValue(recordSnapshot.Label),
+
+		// optional fields
+		Rollback:  plan.Rollback,
+		TimeStamp: plan.TimeStamp,
+		Timeouts:  plan.Timeouts,
+
+		// computed fields
+		Id:          id,
+		GUID:        types.StringValue(recordSnapshot.GUID),
+		ResID:       types.StringValue(recordSnapshot.ResID),
+		SnapSetGUID: types.StringValue(recordSnapshot.SnapSetGUID),
+		SnapSetTime: types.Int64Value(int64(recordSnapshot.SnapSetTime)),
+	}
+
+	if plan.TimeStamp.IsUnknown() {
+		plan.TimeStamp = types.Int64Value(int64(recordSnapshot.Timestamp))
+	}
+
+	tflog.Info(ctx, "flattens.DiskSnapshotResource: after flatten", map[string]any{
+		"disk_id": plan.DiskID.ValueInt64(),
+		"label":   plan.Label.ValueString()})
+
+	return nil
+}
diff --git a/internal/service/cloudbroker/disks/input_checks.go b/internal/service/cloudbroker/disks/input_checks.go
new file mode 100644
index 0000000..df7f223
--- /dev/null
+++ b/internal/service/cloudbroker/disks/input_checks.go
@@ -0,0 +1,122 @@
+package cbDisks
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic"
+)
+
+// resourceDiskReplicationInputChecks checks that the user-provided disk_id exists on
+// the platform and is eligible for replication.
+func resourceDiskReplicationInputChecks(ctx context.Context, plan *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+
+	diskId := uint64(plan.DiskId.ValueInt64())
+	tflog.Info(ctx, "resourceDiskReplicationInputChecks: exist disk check", map[string]any{"disk_id": diskId})
+	err := CheckTatlinDiskID(ctx, diskId, c)
+	if err != nil {
+		diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error())
+	}
+
+	return diags
+}
+
+// resourceDiskUpdateInputChecks checks that the user-provided account_id and gid exist
+// on the platform, and that description, pool, sep_id and type are not changed on update.
+func resourceDiskUpdateInputChecks(ctx context.Context, plan, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+
+	// check accountId and gid
+	diags.Append(resourceDiskCreateInputChecks(ctx, plan, c)...)
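+	// description, pool, sep_id and type are immutable for an existing disk;
+	// the checks below reject any attempt to change them in place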
+ + // check description + if !plan.Description.Equal(state.Description) && !plan.Description.IsUnknown() { + diags.AddError( + "resourceDiskUpdateInputChecks: description change is not allowed", + fmt.Sprintf("cannot change description from %s to %s for disk id %s", + state.Description.ValueString(), + plan.Description.ValueString(), + plan.Id.ValueString())) + } + + // check pool + if !plan.Pool.Equal(state.Pool) && !plan.Pool.IsUnknown() { + diags.AddError( + "resourceDiskUpdateInputChecks: pool change is not allowed", + fmt.Sprintf("cannot change pool from %s to %s for disk id %s", + state.Pool.ValueString(), + plan.Pool.ValueString(), + plan.Id.ValueString())) + } + + // check sep_id + if !plan.SEPID.Equal(state.SEPID) && !plan.SEPID.IsUnknown() { + diags.AddError( + "resourceDiskUpdateInputChecks: sep_id change is not allowed", + fmt.Sprintf("cannot change sep_id from %d to %d for disk id %s", + state.SEPID.ValueInt64(), + plan.SEPID.ValueInt64(), + plan.Id.ValueString())) + } + + // check type + if !plan.Type.Equal(state.Type) && !plan.Type.IsUnknown() { + diags.AddError( + "resourceDiskUpdateInputChecks: type change is not allowed", + fmt.Sprintf("cannot change type from %s to %s for disk id %s", + state.Type.ValueString(), + plan.Type.ValueString(), + plan.Id.ValueString())) + } + + return diags +} + +// resourceDiskCreateInputChecks checks if user provided account_id and gid exist on the platform during disk creation. +func resourceDiskCreateInputChecks(ctx context.Context, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + accountId := uint64(plan.AccountID.ValueInt64()) + tflog.Info(ctx, "resourceDiskCreateInputChecks: exist account check", map[string]any{"account_id": accountId}) + err := ic.ExistAccount(ctx, accountId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error()) + } + + gid := uint64(plan.GID.ValueInt64()) + tflog.Info(ctx, "resourceDiskCreateInputChecks: exist gid check", map[string]any{"gid": gid}) + err = ic.ExistGID(ctx, gid, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about GID %v", gid), err.Error()) + } + return diags +} + +func CheckTatlinDiskID(ctx context.Context, diskId uint64, c *decort.DecortClient) error { + + req := disks.ListRequest{ + ByID: diskId, + } + + diskList, err := c.CloudBroker().Disks().List(ctx, req) + if err != nil { + return err + } + + if len(diskList.Data) == 0 { + return fmt.Errorf("DiskID %d is not allowed or does not exist", diskId) + } + + if diskList.Data[0].SEPType != "TATLIN" { + return fmt.Errorf("DiskID %d is not allowed or does not exist", diskId) + } + + return nil +} diff --git a/internal/service/cloudbroker/disks/models/model_data_source_cb_disk.go b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk.go new file mode 100644 index 0000000..6e0e647 --- /dev/null +++ b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk.go @@ -0,0 +1,61 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DiskModel struct { + // request fields + DiskID types.Int64 `tfsdk:"disk_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // computed fields + MachineID types.Int64 `tfsdk:"machine_id"` + MachineName types.String `tfsdk:"machine_name"` + DeviceName types.String `tfsdk:"devicename"` + SEPType types.String `tfsdk:"sep_type"` + AccountID 
types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + ACL types.String `tfsdk:"acl"` + BootPartition types.Int64 `tfsdk:"boot_partition"` + Computes []ComputeModel `tfsdk:"computes"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + DestructionTime types.Int64 `tfsdk:"destruction_time"` + DiskPath types.String `tfsdk:"disk_path"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ImageID types.Int64 `tfsdk:"image_id"` + Images types.List `tfsdk:"images"` + IOTune *IOModel `tfsdk:"iotune"` + IQN types.String `tfsdk:"iqn"` + Login types.String `tfsdk:"login"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"disk_name"` + Order types.Int64 `tfsdk:"order"` + Params types.String `tfsdk:"params"` + ParentID types.Int64 `tfsdk:"parent_id"` + Password types.String `tfsdk:"passwd"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + Pool types.String `tfsdk:"pool"` + PresentTo types.List `tfsdk:"present_to"` + PurgeAttempts types.Int64 `tfsdk:"purge_attempts"` + PurgeTime types.Int64 `tfsdk:"purge_time"` + RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"` + ReferenceID types.String `tfsdk:"reference_id"` + Replication *ItemReplModel `tfsdk:"replication"` + ResID types.String `tfsdk:"res_id"` + ResName types.String `tfsdk:"res_name"` + Role types.String `tfsdk:"role"` + SEPID types.Int64 `tfsdk:"sep_id"` + Shareable types.Bool `tfsdk:"shareable"` + SizeMax types.Int64 `tfsdk:"size_max"` + SizeUsed types.Float64 `tfsdk:"size_used"` + Snapshots []SnapshotModel `tfsdk:"snapshots"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` + VMID types.Int64 `tfsdk:"vmid"` +} diff --git a/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list.go b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list.go new file mode 100644 index 0000000..7a61f58 --- /dev/null +++ b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list.go @@ -0,0 +1,116 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DisksModel struct { + // request fields + ByID types.Int64 `tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + AccountName types.String `tfsdk:"account_name"` + DiskMaxSize types.Int64 `tfsdk:"disk_max_size"` + Shared types.Bool `tfsdk:"shared"` + AccountID types.Int64 `tfsdk:"account_id"` + Type types.String `tfsdk:"type"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //computed fields + Items []DiskItemModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type DiskItemModel struct { + MachineID types.Int64 `tfsdk:"machine_id"` + MachineName types.String `tfsdk:"machine_name"` + DeviceName types.String `tfsdk:"devicename"` + SEPType types.String `tfsdk:"sep_type"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + ACL types.String `tfsdk:"acl"` + BootPartition types.Int64 `tfsdk:"boot_partition"` + Computes []ComputeModel `tfsdk:"computes"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + DestructionTime types.Int64 `tfsdk:"destruction_time"` 
+ DiskPath types.String `tfsdk:"disk_path"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ID types.Int64 `tfsdk:"disk_id"` + ImageID types.Int64 `tfsdk:"image_id"` + Images types.List `tfsdk:"images"` + IOTune *IOModel `tfsdk:"iotune"` + IQN types.String `tfsdk:"iqn"` + Login types.String `tfsdk:"login"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"disk_name"` + Order types.Int64 `tfsdk:"order"` + Params types.String `tfsdk:"params"` + ParentID types.Int64 `tfsdk:"parent_id"` + Password types.String `tfsdk:"passwd"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + Pool types.String `tfsdk:"pool"` + PresentTo types.List `tfsdk:"present_to"` + PurgeAttempts types.Int64 `tfsdk:"purge_attempts"` + PurgeTime types.Int64 `tfsdk:"purge_time"` + RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"` + ReferenceID types.String `tfsdk:"reference_id"` + Replication *ItemReplModel `tfsdk:"replication"` + ResID types.String `tfsdk:"res_id"` + ResName types.String `tfsdk:"res_name"` + Role types.String `tfsdk:"role"` + SEPID types.Int64 `tfsdk:"sep_id"` + Shareable types.Bool `tfsdk:"shareable"` + SizeMax types.Int64 `tfsdk:"size_max"` + SizeUsed types.Float64 `tfsdk:"size_used"` + Snapshots []SnapshotModel `tfsdk:"snapshots"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` + VMID types.Int64 `tfsdk:"vmid"` +} + +type ComputeModel struct { + ComputeID types.String `tfsdk:"compute_id"` + ComputeName types.String `tfsdk:"compute_name"` +} + +type ItemReplModel struct { + DiskID types.Int64 `tfsdk:"disk_id"` + PoolID types.String `tfsdk:"pool_id"` + Role types.String `tfsdk:"role"` + SelfVolumeID types.String `tfsdk:"self_volume_id"` + StorageID types.String `tfsdk:"storage_id"` + VolumeID types.String `tfsdk:"volume_id"` +} + +type IOModel struct { + ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"` + ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"` + ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"` + ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"` + SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"` + TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"` + TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"` + TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"` + TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"` + WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"` + WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"` + WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"` + WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"` +} + +type SnapshotModel struct { + GUID types.String `tfsdk:"guid"` + Label types.String `tfsdk:"label"` + ReferenceID types.String `tfsdk:"reference_id"` + ResID types.String `tfsdk:"res_id"` + SnapSetGUID types.String `tfsdk:"snap_set_guid"` + SnapSetTime types.Int64 `tfsdk:"snap_set_time"` + Timestamp types.Int64 `tfsdk:"timestamp"` +} diff --git a/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_deleted.go b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_deleted.go new file mode 100644 index 0000000..9b32cf3 --- /dev/null +++ b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_deleted.go @@ -0,0 +1,116 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ListDisksModel struct { + // request fields + ByID types.Int64 
`tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + AccountName types.String `tfsdk:"account_name"` + DiskMaxSize types.Int64 `tfsdk:"disk_max_size"` + Shared types.Bool `tfsdk:"shared"` + AccountID types.Int64 `tfsdk:"account_id"` + Type types.String `tfsdk:"type"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //computed fields + Items []ItemDiskModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemDiskModel struct { + MachineID types.Int64 `tfsdk:"machine_id"` + MachineName types.String `tfsdk:"machine_name"` + DeviceName types.String `tfsdk:"devicename"` + SEPType types.String `tfsdk:"sep_type"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + ACL types.String `tfsdk:"acl"` + BootPartition types.Int64 `tfsdk:"boot_partition"` + Computes []ComputesModel `tfsdk:"computes"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + DestructionTime types.Int64 `tfsdk:"destruction_time"` + DiskPath types.String `tfsdk:"disk_path"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ID types.Int64 `tfsdk:"disk_id"` + ImageID types.Int64 `tfsdk:"image_id"` + Images types.List `tfsdk:"images"` + IOTune *IOTuneModel `tfsdk:"iotune"` + IQN types.String `tfsdk:"iqn"` + Login types.String `tfsdk:"login"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"disk_name"` + Order types.Int64 `tfsdk:"order"` + Params types.String `tfsdk:"params"` + ParentID types.Int64 `tfsdk:"parent_id"` + Password types.String `tfsdk:"passwd"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + Pool types.String `tfsdk:"pool"` + PresentTo types.List `tfsdk:"present_to"` + PurgeAttempts types.Int64 `tfsdk:"purge_attempts"` + PurgeTime types.Int64 `tfsdk:"purge_time"` + RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"` + ReferenceID types.String `tfsdk:"reference_id"` + Replication *ItemReplicationModel `tfsdk:"replication"` + ResID types.String `tfsdk:"res_id"` + ResName types.String `tfsdk:"res_name"` + Role types.String `tfsdk:"role"` + SEPID types.Int64 `tfsdk:"sep_id"` + Shareable types.Bool `tfsdk:"shareable"` + SizeMax types.Int64 `tfsdk:"size_max"` + SizeUsed types.Float64 `tfsdk:"size_used"` + Snapshots []ItemSnapshotModel `tfsdk:"snapshots"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` + VMID types.Int64 `tfsdk:"vmid"` +} + +type ComputesModel struct { + ComputeID types.String `tfsdk:"compute_id"` + ComputeName types.String `tfsdk:"compute_name"` +} + +type ItemReplicationModel struct { + DiskID types.Int64 `tfsdk:"disk_id"` + PoolID types.String `tfsdk:"pool_id"` + Role types.String `tfsdk:"role"` + SelfVolumeID types.String `tfsdk:"self_volume_id"` + StorageID types.String `tfsdk:"storage_id"` + VolumeID types.String `tfsdk:"volume_id"` +} + +type IOTuneModel struct { + ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"` + ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"` + ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"` + ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"` + SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"` + TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"` + TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"` + TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"` + TotalIOPSSecMax types.Int64 
`tfsdk:"total_iops_sec_max"` + WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"` + WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"` + WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"` + WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"` +} + +type ItemSnapshotModel struct { + GUID types.String `tfsdk:"guid"` + Label types.String `tfsdk:"label"` + ReferenceID types.String `tfsdk:"reference_id"` + ResID types.String `tfsdk:"res_id"` + SnapSetGUID types.String `tfsdk:"snap_set_guid"` + SnapSetTime types.Int64 `tfsdk:"snap_set_time"` + Timestamp types.Int64 `tfsdk:"timestamp"` +} diff --git a/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_types.go b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_types.go new file mode 100644 index 0000000..551dd87 --- /dev/null +++ b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_types.go @@ -0,0 +1,19 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceDiskListTypesModel struct { + // request fields - optional + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items types.List `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} diff --git a/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_types_detailed.go b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_types_detailed.go new file mode 100644 index 0000000..cd30e40 --- /dev/null +++ b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_types_detailed.go @@ -0,0 +1,31 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceDiskListTypesDetailedModel struct { + // request fields - optional + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemDiskTypeDetailedModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemDiskTypeDetailedModel struct { + Pools []ItemPoolModel `tfsdk:"pools"` + SepID types.Int64 `tfsdk:"sep_id"` + SepName types.String `tfsdk:"sep_name"` +} + +type ItemPoolModel struct { + Name types.String `tfsdk:"name"` + System types.String `tfsdk:"system"` + Types types.List `tfsdk:"types"` +} diff --git a/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_unattached.go b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_unattached.go new file mode 100644 index 0000000..e622bc4 --- /dev/null +++ b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_list_unattached.go @@ -0,0 +1,109 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceDiskListUnattachedModel struct { + // request fields + ByID types.Int64 `tfsdk:"by_id"` + AccountName types.String `tfsdk:"account_name"` + DiskMaxSize types.Int64 `tfsdk:"disk_max_size"` + Status types.String `tfsdk:"status"` + 
AccountID types.Int64 `tfsdk:"account_id"` + SepID types.Int64 `tfsdk:"sep_id"` + PoolName types.String `tfsdk:"pool_name"` + Type types.String `tfsdk:"type"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemDiskUnattachedModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemDiskUnattachedModel struct { + CKey types.String `tfsdk:"ckey"` + Meta types.List `tfsdk:"meta"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + ACL types.String `tfsdk:"acl"` + BootPartition types.Int64 `tfsdk:"boot_partition"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + DestructionTime types.Int64 `tfsdk:"destruction_time"` + DiskPath types.String `tfsdk:"disk_path"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + DiskId types.Int64 `tfsdk:"disk_id"` + ImageID types.Int64 `tfsdk:"image_id"` + Images types.List `tfsdk:"images"` + IOTune types.Object `tfsdk:"iotune"` + Iqn types.String `tfsdk:"iqn"` + Login types.String `tfsdk:"login"` + Milestones types.Int64 `tfsdk:"milestones"` + DiskName types.String `tfsdk:"disk_name"` + Order types.Int64 `tfsdk:"order"` + Params types.String `tfsdk:"params"` + ParentID types.Int64 `tfsdk:"parent_id"` + Passwd types.String `tfsdk:"passwd"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + Pool types.String `tfsdk:"pool"` + PresentTo types.List `tfsdk:"present_to"` + PurgeAttempts types.Int64 `tfsdk:"purge_attempts"` + PurgeTime types.Int64 `tfsdk:"purge_time"` + RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"` + ReferenceID types.String `tfsdk:"reference_id"` + ResID types.String `tfsdk:"res_id"` + ResName types.String `tfsdk:"res_name"` + Role types.String `tfsdk:"role"` + SepID types.Int64 `tfsdk:"sep_id"` + Shareable types.Bool `tfsdk:"shareable"` + SizeMax types.Int64 `tfsdk:"size_max"` + SizeUsed types.Float64 `tfsdk:"size_used"` + Snapshots types.List `tfsdk:"snapshots"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` + VMID types.Int64 `tfsdk:"vmid"` +} + +var ItemIOTune = map[string]attr.Type{ + "read_bytes_sec": types.Int64Type, + "read_bytes_sec_max": types.Int64Type, + "read_iops_sec": types.Int64Type, + "read_iops_sec_max": types.Int64Type, + "size_iops_sec": types.Int64Type, + "total_bytes_sec": types.Int64Type, + "total_bytes_sec_max": types.Int64Type, + "total_iops_sec": types.Int64Type, + "total_iops_sec_max": types.Int64Type, + "write_bytes_sec": types.Int64Type, + "write_bytes_sec_max": types.Int64Type, + "write_iops_sec": types.Int64Type, + "write_iops_sec_max": types.Int64Type, +} + +type DiskUnattachedItemSnapshotModel struct { + GUID types.String `tfsdk:"guid"` + Label types.String `tfsdk:"label"` + ResID types.String `tfsdk:"res_id"` + SnapSetGUID types.String `tfsdk:"snap_set_guid"` + SnapSetTime types.Int64 `tfsdk:"snap_set_time"` + Timestamp types.Int64 `tfsdk:"timestamp"` +} + +var ItemSnapshot = map[string]attr.Type{ + "guid": types.StringType, + "label": types.StringType, + "res_id": types.StringType, + "snap_set_guid": types.StringType, + "snap_set_time": types.Int64Type, + "timestamp": types.Int64Type, +} diff --git a/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_replication.go 
b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_replication.go new file mode 100644 index 0000000..a975403 --- /dev/null +++ b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_replication.go @@ -0,0 +1,115 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type RecordDiskModel struct { + // request fields - required + DiskId types.Int64 `tfsdk:"disk_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + ACL types.String `tfsdk:"acl"` + BootPartition types.Int64 `tfsdk:"boot_partition"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + Computes types.List `tfsdk:"computes"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + DeviceName types.String `tfsdk:"devicename"` + DiskPath types.String `tfsdk:"disk_path"` + Description types.String `tfsdk:"desc"` + DestructionTime types.Int64 `tfsdk:"destruction_time"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ImageID types.Int64 `tfsdk:"image_id"` + Images types.List `tfsdk:"images"` + IOTune *DiskReplicationIOTune `tfsdk:"iotune"` + IQN types.String `tfsdk:"iqn"` + Login types.String `tfsdk:"login"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"disk_name"` + Order types.Int64 `tfsdk:"order"` + Params types.String `tfsdk:"params"` + ParentID types.Int64 `tfsdk:"parent_id"` + Passwd types.String `tfsdk:"passwd"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + Pool types.String `tfsdk:"pool"` + PresentTo types.List `tfsdk:"present_to"` + PurgeAttempts types.Int64 `tfsdk:"purge_attempts"` + PurgeTime types.Int64 `tfsdk:"purge_time"` + Replication *ItemReplicationModel `tfsdk:"replication"` + RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"` + ReferenceID types.String `tfsdk:"reference_id"` + ResID types.String `tfsdk:"res_id"` + ResName types.String `tfsdk:"res_name"` + Role types.String `tfsdk:"role"` + SepType types.String `tfsdk:"sep_type"` + SepID types.Int64 `tfsdk:"sep_id"` + Shareable types.Bool `tfsdk:"shareable"` + SizeMax types.Int64 `tfsdk:"size_max"` + SizeUsed types.Float64 `tfsdk:"size_used"` + Snapshots types.List `tfsdk:"snapshots"` + Status types.String `tfsdk:"status"` + StatusReplication types.String `tfsdk:"status_replication"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` + VMID types.Int64 `tfsdk:"vmid"` +} + +type DiskReplicationIOTune struct { + ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"` + ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"` + ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"` + ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"` + SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"` + TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"` + TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"` + TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"` + TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"` + WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"` + WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"` + WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"` + WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"` +} + +type ReplicationModel struct { + DiskID types.Int64 `tfsdk:"disk_id"` + PoolID types.String `tfsdk:"pool_id"` + Role types.String `tfsdk:"role"` + 
SelfVolumeID types.String `tfsdk:"self_volume_id"`
+	StorageID    types.String `tfsdk:"storage_id"`
+	VolumeID     types.String `tfsdk:"volume_id"`
+}
+
+type DiskReplicationItemSnapshot struct {
+	GUID        types.String `tfsdk:"guid"`
+	Label       types.String `tfsdk:"label"`
+	ReferenceID types.String `tfsdk:"reference_id"`
+	ResID       types.String `tfsdk:"res_id"`
+	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
+	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
+	TimeStamp   types.Int64  `tfsdk:"timestamp"`
+}
+
+// keys must match the tfsdk tags of DiskReplicationItemSnapshot above
+var DiskReplicationSnapshot = map[string]attr.Type{
+	"guid":          types.StringType,
+	"label":         types.StringType,
+	"reference_id":  types.StringType,
+	"res_id":        types.StringType,
+	"snap_set_guid": types.StringType,
+	"snap_set_time": types.Int64Type,
+	"timestamp":     types.Int64Type,
+}
+
+type ItemComputeModel struct {
+	ComputeId   types.String `tfsdk:"compute_id"`
+	ComputeName types.String `tfsdk:"compute_name"`
+}
+
+var ItemCompute = map[string]attr.Type{
+	"compute_id":   types.StringType,
+	"compute_name": types.StringType,
+}
diff --git a/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_snapshot.go b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_snapshot.go
new file mode 100644
index 0000000..1fbc6c6
--- /dev/null
+++ b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_snapshot.go
@@ -0,0 +1,21 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+type DataSourceDiskSnapshotModel struct {
+	// request fields
+	DiskID   types.Int64    `tfsdk:"disk_id"`
+	Label    types.String   `tfsdk:"label"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Id          types.String `tfsdk:"id"`
+	GUID        types.String `tfsdk:"guid"`
+	ResID       types.String `tfsdk:"res_id"`
+	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
+	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
+	TimeStamp   types.Int64  `tfsdk:"timestamp"`
+}
diff --git a/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_snapshot_list.go b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_snapshot_list.go
new file mode 100644
index 0000000..0727450
--- /dev/null
+++ b/internal/service/cloudbroker/disks/models/model_data_source_cb_disk_snapshot_list.go
@@ -0,0 +1,26 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+type DataSourceDiskSnapshotListModel struct {
+	// request fields
+	DiskID   types.Int64    `tfsdk:"disk_id"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Id    types.String       `tfsdk:"id"`
+	Items []DiskItemSnapshot `tfsdk:"items"`
+}
+
+type DiskItemSnapshot struct {
+	GUID        types.String `tfsdk:"guid"`
+	Label       types.String `tfsdk:"label"`
+	ReferenceID types.String `tfsdk:"reference_id"`
+	ResID       types.String `tfsdk:"res_id"`
+	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
+	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
+	TimeStamp   types.Int64  `tfsdk:"timestamp"`
+}
diff --git a/internal/service/cloudbroker/disks/models/model_resource_cb_disk.go b/internal/service/cloudbroker/disks/models/model_resource_cb_disk.go
new file mode 100644
index 0000000..10b7120
--- /dev/null
+++ b/internal/service/cloudbroker/disks/models/model_resource_cb_disk.go
@@ -0,0 +1,127 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	
"github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceDiskModel struct { + // request fields - required + AccountID types.Int64 `tfsdk:"account_id"` + DiskName types.String `tfsdk:"disk_name"` + SizeMax types.Int64 `tfsdk:"size_max"` + GID types.Int64 `tfsdk:"gid"` + + // request fields - optional + SSDSize types.Int64 `tfsdk:"ssd_size"` + NodeIDs types.List `tfsdk:"node_ids"` + Description types.String `tfsdk:"desc"` + Pool types.String `tfsdk:"pool"` + SEPID types.Int64 `tfsdk:"sep_id"` + Type types.String `tfsdk:"type"` + Detach types.Bool `tfsdk:"detach"` + Permanently types.Bool `tfsdk:"permanently"` + Shareable types.Bool `tfsdk:"shareable"` + IOTune types.Object `tfsdk:"iotune"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + LastUpdated types.String `tfsdk:"last_updated"` + ACL types.String `tfsdk:"acl"` + AccountName types.String `tfsdk:"account_name"` + BootPartition types.Int64 `tfsdk:"boot_partition"` + Computes types.List `tfsdk:"computes"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + DeviceName types.String `tfsdk:"devicename"` + DestructionTime types.Int64 `tfsdk:"destruction_time"` + DiskId types.Int64 `tfsdk:"disk_id"` + DiskPath types.String `tfsdk:"disk_path"` + Guid types.Int64 `tfsdk:"guid"` + ImageID types.Int64 `tfsdk:"image_id"` + Images types.List `tfsdk:"images"` + IQN types.String `tfsdk:"iqn"` + Login types.String `tfsdk:"login"` + Milestones types.Int64 `tfsdk:"milestones"` + Order types.Int64 `tfsdk:"order"` + Params types.String `tfsdk:"params"` + ParentID types.Int64 `tfsdk:"parent_id"` + Passwd types.String `tfsdk:"passwd"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + PresentTo types.List `tfsdk:"present_to"` + PurgeAttempts types.Int64 `tfsdk:"purge_attempts"` + PurgeTime types.Int64 `tfsdk:"purge_time"` + RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"` + ReferenceID types.String `tfsdk:"reference_id"` + ResID types.String `tfsdk:"res_id"` + ResName types.String `tfsdk:"res_name"` + Role types.String `tfsdk:"role"` + SepType types.String `tfsdk:"sep_type"` + SizeUsed types.Float64 `tfsdk:"size_used"` + Snapshots types.List `tfsdk:"snapshots"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + VMID types.Int64 `tfsdk:"vmid"` +} + +type ResourceIOTuneModel struct { + ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"` + ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"` + ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"` + ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"` + SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"` + TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"` + TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"` + TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"` + TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"` + WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"` + WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"` + WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"` + WriteIOPSSecMax types.Int64 `tfsdk:"write_iops_sec_max"` +} + +type ItemSnapshotsModel struct { + GUID types.String `tfsdk:"guid"` + Label types.String `tfsdk:"label"` + ResID types.String `tfsdk:"res_id"` + SnapSetGUID types.String `tfsdk:"snap_set_guid"` + SnapSetTime types.Int64 `tfsdk:"snap_set_time"` + TimeStamp types.Int64 `tfsdk:"timestamp"` +} + +type ItemComputesModel struct { + ComputeId types.String `tfsdk:"compute_id"` + ComputeName types.String 
`tfsdk:"compute_name"` +} + +var ItemComputes = map[string]attr.Type{ + "compute_id": types.StringType, + "compute_name": types.StringType, +} + +var ItemSnapshots = map[string]attr.Type{ + "guid": types.StringType, + "label": types.StringType, + "res_id": types.StringType, + "snap_set_guid": types.StringType, + "snap_set_time": types.Int64Type, + "timestamp": types.Int64Type, +} + +var ResourceItemIOTune = map[string]attr.Type{ + "read_bytes_sec": types.Int64Type, + "read_bytes_sec_max": types.Int64Type, + "read_iops_sec": types.Int64Type, + "read_iops_sec_max": types.Int64Type, + "size_iops_sec": types.Int64Type, + "total_bytes_sec": types.Int64Type, + "total_bytes_sec_max": types.Int64Type, + "total_iops_sec": types.Int64Type, + "total_iops_sec_max": types.Int64Type, + "write_bytes_sec": types.Int64Type, + "write_bytes_sec_max": types.Int64Type, + "write_iops_sec": types.Int64Type, + "write_iops_sec_max": types.Int64Type, +} diff --git a/internal/service/cloudbroker/disks/models/model_resource_cb_disk_replication.go b/internal/service/cloudbroker/disks/models/model_resource_cb_disk_replication.go new file mode 100644 index 0000000..b439d04 --- /dev/null +++ b/internal/service/cloudbroker/disks/models/model_resource_cb_disk_replication.go @@ -0,0 +1,147 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceRecordDiskReplicationModel struct { + // request fields + DiskId types.Int64 `tfsdk:"disk_id"` + Name types.String `tfsdk:"disk_name"` + SepID types.Int64 `tfsdk:"sep_id"` + PoolName types.String `tfsdk:"pool_name"` + Pause types.Bool `tfsdk:"pause"` + Reverse types.Bool `tfsdk:"reverse"` + Start types.Bool `tfsdk:"start"` + Detach types.Bool `tfsdk:"detach"` + Permanently types.Bool `tfsdk:"permanently"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + ACL types.String `tfsdk:"acl"` + BootPartition types.Int64 `tfsdk:"boot_partition"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + Computes types.List `tfsdk:"computes"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + DeviceName types.String `tfsdk:"devicename"` + DiskPath types.String `tfsdk:"disk_path"` + Description types.String `tfsdk:"desc"` + DestructionTime types.Int64 `tfsdk:"destruction_time"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ImageID types.Int64 `tfsdk:"image_id"` + ReplicationId types.Int64 `tfsdk:"replica_disk_id"` + Images types.List `tfsdk:"images"` + IOTune types.Object `tfsdk:"iotune"` + IQN types.String `tfsdk:"iqn"` + Login types.String `tfsdk:"login"` + Milestones types.Int64 `tfsdk:"milestones"` + Order types.Int64 `tfsdk:"order"` + Params types.String `tfsdk:"params"` + ParentID types.Int64 `tfsdk:"parent_id"` + Passwd types.String `tfsdk:"passwd"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + Pool types.String `tfsdk:"pool"` + PresentTo types.List `tfsdk:"present_to"` + PurgeAttempts types.Int64 `tfsdk:"purge_attempts"` + PurgeTime types.Int64 `tfsdk:"purge_time"` + Replication types.Object `tfsdk:"replication"` + RealityDeviceNumber types.Int64 `tfsdk:"reality_device_number"` + ReferenceID types.String `tfsdk:"reference_id"` + ResID types.String `tfsdk:"res_id"` + ResName types.String `tfsdk:"res_name"` + Role types.String `tfsdk:"role"` + 
SepType              types.String   `tfsdk:"sep_type"`
+	Shareable            types.Bool     `tfsdk:"shareable"`
+	SizeMax              types.Int64    `tfsdk:"size_max"`
+	SizeUsed             types.Float64  `tfsdk:"size_used"`
+	Snapshots            types.List     `tfsdk:"snapshots"`
+	Status               types.String   `tfsdk:"status"`
+	StatusReplication    types.String   `tfsdk:"status_replication"`
+	TechStatus           types.String   `tfsdk:"tech_status"`
+	Type                 types.String   `tfsdk:"type"`
+	VMID                 types.Int64    `tfsdk:"vmid"`
+}
+
+type ResourceDiskReplicationIOTuneModel struct {
+	ReadBytesSec     types.Int64 `tfsdk:"read_bytes_sec"`
+	ReadBytesSecMax  types.Int64 `tfsdk:"read_bytes_sec_max"`
+	ReadIOPSSec      types.Int64 `tfsdk:"read_iops_sec"`
+	ReadIOPSSecMax   types.Int64 `tfsdk:"read_iops_sec_max"`
+	SizeIOPSSec      types.Int64 `tfsdk:"size_iops_sec"`
+	TotalBytesSec    types.Int64 `tfsdk:"total_bytes_sec"`
+	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
+	TotalIOPSSec     types.Int64 `tfsdk:"total_iops_sec"`
+	TotalIOPSSecMax  types.Int64 `tfsdk:"total_iops_sec_max"`
+	WriteBytesSec    types.Int64 `tfsdk:"write_bytes_sec"`
+	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
+	WriteIOPSSec     types.Int64 `tfsdk:"write_iops_sec"`
+	WriteIOPSSecMax  types.Int64 `tfsdk:"write_iops_sec_max"`
+}
+
+var ResourceDiskReplicationIOTune = map[string]attr.Type{
+	"read_bytes_sec":      types.Int64Type,
+	"read_bytes_sec_max":  types.Int64Type,
+	"read_iops_sec":       types.Int64Type,
+	"read_iops_sec_max":   types.Int64Type,
+	"size_iops_sec":       types.Int64Type,
+	"total_bytes_sec":     types.Int64Type,
+	"total_bytes_sec_max": types.Int64Type,
+	"total_iops_sec":      types.Int64Type,
+	"total_iops_sec_max":  types.Int64Type,
+	"write_bytes_sec":     types.Int64Type,
+	"write_bytes_sec_max": types.Int64Type,
+	"write_iops_sec":      types.Int64Type,
+	"write_iops_sec_max":  types.Int64Type,
+}
+
+type ResourceItemReplicationModel struct {
+	DiskID       types.Int64  `tfsdk:"disk_id"`
+	PoolID       types.String `tfsdk:"pool_id"`
+	Role         types.String `tfsdk:"role"`
+	SelfVolumeID types.String `tfsdk:"self_volume_id"`
+	StorageID    types.String `tfsdk:"storage_id"`
+	VolumeID     types.String `tfsdk:"volume_id"`
+}
+
+var ResourceItemReplication = map[string]attr.Type{
+	"disk_id":        types.Int64Type,
+	"pool_id":        types.StringType,
+	"role":           types.StringType,
+	"self_volume_id": types.StringType,
+	"storage_id":     types.StringType,
+	"volume_id":      types.StringType,
+}
+
+type ResourceReplicationItemSnapshotModel struct {
+	GUID        types.String `tfsdk:"guid"`
+	Label       types.String `tfsdk:"label"`
+	ResID       types.String `tfsdk:"res_id"`
+	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
+	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
+	TimeStamp   types.Int64  `tfsdk:"timestamp"`
+}
+
+var ResourceReplicationItemSnapshot = map[string]attr.Type{
+	"guid":          types.StringType,
+	"label":         types.StringType,
+	"res_id":        types.StringType,
+	"snap_set_guid": types.StringType,
+	"snap_set_time": types.Int64Type,
+	"timestamp":     types.Int64Type,
+}
+
+type ResourceReplicationItemComputeModel struct {
+	ComputeId   types.String `tfsdk:"compute_id"`
+	ComputeName types.String `tfsdk:"compute_name"`
+}
+
+var ResourceReplicationItemCompute = map[string]attr.Type{
+	"compute_id":   types.StringType,
+	"compute_name": types.StringType,
+}
diff --git a/internal/service/cloudbroker/disks/models/model_resource_cb_disk_snapshot.go b/internal/service/cloudbroker/disks/models/model_resource_cb_disk_snapshot.go
new file mode 100644
index 0000000..13a33ce
--- /dev/null
+++ b/internal/service/cloudbroker/disks/models/model_resource_cb_disk_snapshot.go
@@ -0,0 +1,25 @@
+package models
+
+import (
+	
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceDiskSnapshotModel struct { + // request fields - required + DiskID types.Int64 `tfsdk:"disk_id"` + Label types.String `tfsdk:"label"` + + // request fields - optional + Rollback types.Bool `tfsdk:"rollback"` + TimeStamp types.Int64 `tfsdk:"timestamp"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + LastUpdated types.String `tfsdk:"last_updated"` + GUID types.String `tfsdk:"guid"` + ResID types.String `tfsdk:"res_id"` + SnapSetGUID types.String `tfsdk:"snap_set_guid"` + SnapSetTime types.Int64 `tfsdk:"snap_set_time"` +} diff --git a/internal/service/cloudbroker/disks/resource_cb_disk.go b/internal/service/cloudbroker/disks/resource_cb_disk.go new file mode 100644 index 0000000..2642dcc --- /dev/null +++ b/internal/service/cloudbroker/disks/resource_cb_disk.go @@ -0,0 +1,370 @@ +package cbDisks + +import ( + "context" + "reflect" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &resourceDisk{} + _ resource.ResourceWithImportState = &resourceDisk{} +) + +// NewResourceDisk is a helper function to simplify the provider implementation. +func NewResourceDisk() resource.Resource { + return &resourceDisk{} +} + +// resourceDisk is the resource implementation. +type resourceDisk struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. +func (r *resourceDisk) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create resource group + var plan models.ResourceDiskModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDisk: Error receiving the plan") + return + } + + contextCreateMap := map[string]any{ + "account_id": plan.AccountID.ValueInt64(), + "disk_name": plan.DiskName.ValueString(), + "size_max": plan.SizeMax.ValueInt64(), + "gid": plan.GID.ValueInt64(), + } + tflog.Info(ctx, "Create resourceDisk: start creating", contextCreateMap) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDisk: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceDisk: set timeouts successfully", map[string]any{ + "account_id": plan.AccountID.ValueInt64(), + "disk_name": plan.DiskName.ValueString(), + "size_max": plan.SizeMax.ValueInt64(), + "gid": plan.GID.ValueInt64(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + tflog.Info(ctx, "Create resourceDisk: starting input checks", contextCreateMap) + resp.Diagnostics.Append(resourceDiskCreateInputChecks(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDisk: Error input checks") + return + } + tflog.Info(ctx, "Create resourceDisk: input checks successful", contextCreateMap) + + // Make create request and get response + createReq := utilities.CreateRequestResourceDisk(ctx, &plan) + tflog.Info(ctx, "Create resourceDisk: before call CloudBroker().Disks().Create", map[string]any{"req": createReq}) + diskId, err := r.client.CloudBroker().Disks().Create(ctx, createReq) + if err != nil { + resp.Diagnostics.AddError( + "Create resourceDisk: unable to Create Disk", + err.Error(), + ) + return + } + plan.Id = types.StringValue(strconv.Itoa(int(diskId))) + tflog.Info(ctx, "Create resourceDisk: disk created", map[string]any{"diskId": diskId, "disk_name": plan.DiskName.ValueString()}) + + // additional settings after disk creation: in case of failures, warnings are added to resp.Diagnostics, + // because additional settings failure is not critical. If errors were added instead of warnings, terraform + // framework would mark resource as tainted and delete it, which would be unwanted behaviour. + + if !plan.NodeIDs.IsUnknown() { + resp.Diagnostics.Append(utilities.ResourceDiskChangeNodes(ctx, diskId, nil, &plan, true, r.client)...) + } + + if !plan.IOTune.IsUnknown() { + resp.Diagnostics.Append(utilities.LimitIOCreateDisk(ctx, diskId, &plan, r.client)...) + } + + if !plan.Shareable.IsUnknown() && plan.Shareable.ValueBool() { // if shareable = true + resp.Diagnostics.Append(utilities.ShareableCreateDisk(ctx, diskId, r.client)...) + } + + tflog.Info(ctx, "Create resourceDisk: resource creation is completed", map[string]any{"disk_id": diskId}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set data last update + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *resourceDisk) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceDiskModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDisk: Error get state") + return + } + tflog.Info(ctx, "Read resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDisk: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceDisk: set timeouts successfully", map[string]any{ + "disk_id": state.Id.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // read status + resp.Diagnostics.Append(utilities.DiskReadStatus(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDisk: Error reading disk status") + return + } + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.DiskResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDisk: Error flatten disk") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDisk: Error set state") + return + } + tflog.Info(ctx, "End read resourceDisk") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *resourceDisk) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceDiskModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error receiving the plan") + return + } + tflog.Info(ctx, "Update resourceDisk: got plan successfully", map[string]any{"disk_id": plan.Id.ValueString()}) + + // Retrieve values from state + var state models.ResourceDiskModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()}) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error set timeout") + return + } + tflog.Info(ctx, "Update resourceDisk: set timeouts successfully", map[string]any{ + "disk_id": state.Id.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + // Checking if inputs are valid + tflog.Info(ctx, "Update resourceDisk: starting input checks", map[string]any{"disk_id": plan.Id.ValueString()}) + resp.Diagnostics.Append(resourceDiskUpdateInputChecks(ctx, &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error input checks") + return + } + tflog.Info(ctx, "Update resourceDisk: input checks successful", map[string]any{"disk_id": state.Id.ValueString()}) + + diskId, err := strconv.Atoi(state.Id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Update resourceDisk: Cannot parse disk ID from state", err.Error()) + return + } + + // resize disk + if !plan.SizeMax.Equal(state.SizeMax) { + resp.Diagnostics.Append(utilities.SizeMaxUpdateDisk(ctx, uint64(diskId), &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error resizing disk") + return + } + } + + // rename disk + if !plan.DiskName.Equal(state.DiskName) { + resp.Diagnostics.Append(utilities.NameUpdateDisk(ctx, uint64(diskId), &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error renaming disk") + return + } + } + + // change io limits + if !reflect.DeepEqual(plan.IOTune, state.IOTune) && !plan.IOTune.IsUnknown() { + resp.Diagnostics.Append(utilities.LimitIOUpdateDisk(ctx, uint64(diskId), &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error setting new io limits to disk") + return + } + } + + // share/unshare disk + if !plan.Shareable.Equal(state.Shareable) && !plan.Shareable.IsUnknown() { + resp.Diagnostics.Append(utilities.ShareableUpdateDisk(ctx, uint64(diskId), plan.Shareable.ValueBool(), r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error with disk share/unshare") + return + } + } + + // update nodeIDs + if !plan.NodeIDs.Equal(state.NodeIDs) && !plan.NodeIDs.IsUnknown() { + resp.Diagnostics.Append(utilities.ResourceDiskChangeNodes(ctx, uint64(diskId), &state, &plan, false, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskUpdate: Error with update nodeIDs") + return + } + } + + tflog.Info(ctx, "Update resourceDisk: disk update is completed", map[string]any{"disk_id": plan.Id.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set data last update + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *resourceDisk) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceDiskModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceDisk: Error get state") + return + } + tflog.Info(ctx, "Delete resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Delete resourceDisk: Error set timeout")
+		return
+	}
+	tflog.Info(ctx, "Delete resourceDisk: set timeouts successfully", map[string]any{
+		"disk_id":       state.Id.ValueString(),
+		"deleteTimeout": deleteTimeout})
+
+	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
+	defer cancel()
+
+	// detach and permanently default to true; honor an explicit false from state
+	detach := true
+	permanently := true
+
+	if !state.Detach.IsNull() && !state.Detach.IsUnknown() && !state.Detach.ValueBool() {
+		detach = false
+	}
+
+	if !state.Permanently.IsNull() && !state.Permanently.IsUnknown() && !state.Permanently.ValueBool() {
+		permanently = false
+	}
+
+	// Delete existing disk
+	delReq := disks.DeleteRequest{
+		DiskID:      uint64(state.DiskId.ValueInt64()),
+		Detach:      detach,      // default true
+		Permanently: permanently, // default true
+	}
+
+	tflog.Info(ctx, "Delete resourceDisk: before call CloudBroker().Disks().Delete", map[string]any{"req": delReq})
+	_, err := r.client.CloudBroker().Disks().Delete(ctx, delReq)
+	if err != nil {
+		resp.Diagnostics.AddError("Delete resourceDisk: Error deleting disk", err.Error())
+		return
+	}
+
+	tflog.Info(ctx, "End delete resourceDisk", map[string]any{"disk_id": state.Id.ValueString()})
+}
+
+// Schema defines the schema for the resource.
+func (r *resourceDisk) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: schemas.MakeSchemaResourceDisk(),
+		Blocks: map[string]schema.Block{
+			"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
+		},
+	}
+}
+
+// Metadata returns the resource type name.
+func (r *resourceDisk) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_cb_disk"
+}
+
+// Configure adds the provider configured client to the resource.
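+// The framework supplies here the DECORT client prepared during provider configuration.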
+func (r *resourceDisk) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+	tflog.Info(ctx, "Start Configure resourceDisk")
+	r.client = client.Resource(ctx, &req, resp)
+	tflog.Info(ctx, "Configure resourceDisk successful")
+}
+
+func (r *resourceDisk) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	// Retrieve import ID and save to id attribute
+	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
diff --git a/internal/service/cloudbroker/disks/resource_cb_disk_replication.go b/internal/service/cloudbroker/disks/resource_cb_disk_replication.go
new file mode 100644
index 0000000..818a0d1
--- /dev/null
+++ b/internal/service/cloudbroker/disks/resource_cb_disk_replication.go
@@ -0,0 +1,333 @@
+package cbDisks
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ resource.Resource                = &resourceDiskReplication{}
+	_ resource.ResourceWithImportState = &resourceDiskReplication{}
+)
+
+// NewResourceDiskReplications is a helper function to simplify the provider implementation.
+func NewResourceDiskReplications() resource.Resource {
+	return &resourceDiskReplication{}
+}
+
+// resourceDiskReplication is the resource implementation.
+type resourceDiskReplication struct {
+	client *decort.DecortClient
+}
+
+// Create creates the resource and sets the initial Terraform state.
+func (r *resourceDiskReplication) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+	// Get plan to create disk replica
+	var plan models.ResourceRecordDiskReplicationModel
+	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Create resourceDiskReplication: Error receiving the plan")
+		return
+	}
+
+	tflog.Info(ctx, "Create resourceDiskReplication: got plan successfully")
+	tflog.Info(ctx, "Create resourceDiskReplication: start creating")
+
+	// Set timeouts
+	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
+	resp.Diagnostics.Append(diags...)
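+	// Note: the default create timeout above is 600s, longer than the 300s
+	// used for read/update/delete, since replicate calls can run for a while.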
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Create resourceDiskReplication: Error setting timeout")
+		return
+	}
+	tflog.Info(ctx, "Create resourceDiskReplication: set timeouts successfully")
+
+	ctx, cancel := context.WithTimeout(ctx, createTimeout)
+	defer cancel()
+
+	// Check if input values are valid in the platform
+	tflog.Info(ctx, "Create resourceDiskReplication: starting input checks")
+	resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Create resourceDiskReplication: Error during input checks")
+		return
+	}
+	tflog.Info(ctx, "Create resourceDiskReplication: input checks successful")
+
+	reqCreate := disks.ReplicateRequest{
+		DiskID:   uint64(plan.DiskId.ValueInt64()),
+		Name:     plan.Name.ValueString(),
+		SepID:    uint64(plan.SepID.ValueInt64()),
+		PoolName: plan.PoolName.ValueString(),
+	}
+
+	diskReplicaId, err := r.client.CloudBroker().Disks().Replicate(ctx, reqCreate)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Create resourceDiskReplication: unable to replicate disk",
+			err.Error(),
+		)
+		return
+	}
+
+	tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: create replica complete for disk with ID: %d", plan.DiskId.ValueInt64()))
+
+	start := plan.Start.ValueBool()
+	ok := !(plan.Start.IsNull() || plan.Start.IsUnknown())
+
+	if ok && !start {
+		tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d, trying to stop", uint64(plan.DiskId.ValueInt64()), diskReplicaId))
+		reqStop := disks.ReplicationStopRequest{
+			DiskID: uint64(plan.DiskId.ValueInt64()),
+		}
+		_, err = r.client.CloudBroker().Disks().ReplicationStop(ctx, reqStop)
+		if err != nil {
+			resp.Diagnostics.AddError(
+				fmt.Sprintf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d could not be stopped", uint64(plan.DiskId.ValueInt64()), diskReplicaId),
+				err.Error(),
+			)
+			return
+		}
+	}
+
+	// Map response body to schema and populate Computed attribute values
+	resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &plan, r.client)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Set state to fully populated data
+	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *resourceDiskReplication) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+	// Get current state
+	var state models.ResourceRecordDiskReplicationModel
+	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read resourceDiskReplication: Error getting state")
+		return
+	}
+	tflog.Info(ctx, "Read resourceDiskReplication: got state successfully")
+
+	// Set timeouts
+	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read resourceDiskReplication: Error setting timeout")
+		return
+	}
+	tflog.Info(ctx, "Read resourceDiskReplication: set timeouts successfully", map[string]any{
+		"disk_id":     state.DiskId.ValueInt64(),
+		"readTimeout": readTimeout})
+
+	ctx, cancel := context.WithTimeout(ctx, readTimeout)
+	defer cancel()
+
+	// read status
+	resp.Diagnostics.Append(utilities.ReplicationDiskReadStatus(ctx, &state, r.client)...)
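+	// If the replication status cannot be resolved, abort the refresh and
+	// surface the diagnostics instead of writing a partial state.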
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read resourceDiskReplication: Error reading disk status")
+		return
+	}
+
+	// Overwrite items with refreshed state
+	resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read resourceDiskReplication: Error flattening disk")
+		return
+	}
+
+	// Set refreshed state
+	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read resourceDiskReplication: Error setting state")
+		return
+	}
+	tflog.Info(ctx, "End read resourceDiskReplication")
+}
+
+// Update updates the resource and sets the updated Terraform state on success.
+func (r *resourceDiskReplication) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+	// Retrieve values from plan
+	var plan models.ResourceRecordDiskReplicationModel
+	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the plan")
+		return
+	}
+	tflog.Info(ctx, "Update resourceDiskReplication: got plan successfully", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
+
+	// Retrieve values from state
+	var state models.ResourceRecordDiskReplicationModel
+	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the state")
+		return
+	}
+	tflog.Info(ctx, "Update resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()})
+
+	// Set timeouts
+	updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Update resourceDiskReplication: Error setting timeout")
+		return
+	}
+	tflog.Info(ctx, "Update resourceDiskReplication: set timeouts successfully", map[string]any{
+		"disk_id":       state.DiskId.ValueInt64(),
+		"updateTimeout": updateTimeout})
+
+	ctx, cancel := context.WithTimeout(ctx, updateTimeout)
+	defer cancel()
+
+	// Checking if inputs are valid
+	tflog.Info(ctx, "Update resourceDiskReplication: starting input checks", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
+	resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Update resourceDiskReplication: Error during input checks")
+		return
+	}
+	tflog.Info(ctx, "Update resourceDiskReplication: input checks successful", map[string]any{"disk_id": state.DiskId.ValueInt64()})
+
+	if !plan.Start.Equal(state.Start) {
+		resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateStartStop(ctx, &state, r.client)...)
+		if resp.Diagnostics.HasError() {
+			tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateStartStop")
+			return
+		}
+	}
+
+	if !plan.Pause.Equal(state.Pause) {
+		resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdatePause(ctx, &state, r.client)...)
+		if resp.Diagnostics.HasError() {
+			tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdatePause")
+			return
+		}
+	}
+
+	if !plan.Reverse.Equal(state.Reverse) {
+		resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateReverse(ctx, &state, r.client)...)
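+		// A failed reverse operation aborts the update; no new state is written.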
+		if resp.Diagnostics.HasError() {
+			tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateReverse")
+			return
+		}
+	}
+
+	tflog.Info(ctx, "Update resourceDiskReplication: disk update is completed", map[string]any{"disk_id": plan.DiskId.ValueInt64()})
+
+	// Overwrite items with refreshed state
+	resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Update resourceDiskReplication: Error flattening disk")
+		return
+	}
+
+	// Set refreshed state
+	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Update resourceDiskReplication: Error setting state")
+		return
+	}
+	tflog.Info(ctx, "End update resourceDiskReplication")
+}
+
+// Delete deletes the resource and removes the Terraform state on success.
+func (r *resourceDiskReplication) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	// Get current state
+	var state models.ResourceRecordDiskReplicationModel
+	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Delete resourceDiskReplication: Error getting state")
+		return
+	}
+	tflog.Info(ctx, "Delete resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()})
+
+	// Set timeouts
+	deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Delete resourceDiskReplication: Error setting timeout")
+		return
+	}
+	tflog.Info(ctx, "Delete resourceDiskReplication: set timeouts successfully", map[string]any{
+		"disk_id":       state.DiskId.ValueInt64(),
+		"deleteTimeout": deleteTimeout})
+
+	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
+	defer cancel()
+
+	detach := true
+	permanently := true
+
+	if !state.Detach.IsNull() && !state.Detach.IsUnknown() && !state.Detach.ValueBool() {
+		detach = false
+	}
+
+	if !state.Permanently.IsNull() && !state.Permanently.IsUnknown() && !state.Permanently.ValueBool() {
+		permanently = false
+	}
+
+	// Delete existing disk
+	delReq := disks.DeleteRequest{
+		DiskID:      uint64(state.DiskId.ValueInt64()),
+		Detach:      detach,      // default true
+		Permanently: permanently, // default true
+	}
+
+	tflog.Info(ctx, "Delete resourceDiskReplication: before call CloudBroker().Disks().Delete", map[string]any{"req": delReq})
+	_, err := r.client.CloudBroker().Disks().Delete(ctx, delReq)
+	if err != nil {
+		resp.Diagnostics.AddError("Delete resourceDiskReplication: Error deleting disk", err.Error())
+		return
+	}
+
+	tflog.Info(ctx, "End delete resourceDiskReplication", map[string]any{"disk_id": state.DiskId.ValueInt64()})
+}
+
+// Schema defines the schema for the resource.
+func (r *resourceDiskReplication) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: schemas.MakeSchemaResourceDiskReplication(),
+		Blocks: map[string]schema.Block{
+			"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
+		},
+	}
+}
+
+// Metadata returns the resource type name.
+func (r *resourceDiskReplication) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_cb_disk_replication"
+}
+
+// Configure adds the provider configured client to the resource.
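+// The stored client is the same one used by the Create, Read, Update, and
+// Delete implementations above.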
+func (r *resourceDiskReplication) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+	tflog.Info(ctx, "Start Configure resourceDiskReplication")
+	r.client = client.Resource(ctx, &req, resp)
+	tflog.Info(ctx, "Configure resourceDiskReplication successful")
+}
+
+func (r *resourceDiskReplication) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	// Retrieve import ID and save to id attribute
+	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
diff --git a/internal/service/cloudbroker/disks/resource_cb_disk_snapshot.go b/internal/service/cloudbroker/disks/resource_cb_disk_snapshot.go
new file mode 100644
index 0000000..6c58d2a
--- /dev/null
+++ b/internal/service/cloudbroker/disks/resource_cb_disk_snapshot.go
@@ -0,0 +1,295 @@
+package cbDisks
+
+import (
+	"context"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/disks"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/flattens"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/schemas"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/disks/utilities"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ resource.Resource                = &resourceDiskSnapshot{}
+	_ resource.ResourceWithImportState = &resourceDiskSnapshot{}
+)
+
+// NewResourceDiskSnapshot is a helper function to simplify the provider implementation.
+func NewResourceDiskSnapshot() resource.Resource {
+	return &resourceDiskSnapshot{}
+}
+
+// resourceDiskSnapshot is the resource implementation.
+type resourceDiskSnapshot struct {
+	client *decort.DecortClient
+}
+
+// Create creates the resource and sets the initial Terraform state.
+func (r *resourceDiskSnapshot) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+	// Get plan to create disk snapshot
+	var plan models.ResourceDiskSnapshotModel
+	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Create resourceDiskSnapshot: Error receiving the plan")
+		return
+	}
+
+	ctxCreateSnpsht := map[string]any{
+		"disk_id": plan.DiskID.ValueInt64(),
+		"label":   plan.Label.ValueString(),
+	}
+	tflog.Info(ctx, "Create resourceDiskSnapshot: got plan successfully", ctxCreateSnpsht)
+	tflog.Info(ctx, "Create resourceDiskSnapshot: start creating", ctxCreateSnpsht)
+
+	// Set timeouts
+	createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s)
+	resp.Diagnostics.Append(diags...)
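+	// Note: snapshot creation also uses the longer 600s default create timeout.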
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Create resourceDiskSnapshot: Error setting timeout")
+		return
+	}
+	tflog.Info(ctx, "Create resourceDiskSnapshot: set timeouts successfully", map[string]any{
+		"disk_id":       plan.DiskID.ValueInt64(),
+		"label":         plan.Label.ValueString(),
+		"createTimeout": createTimeout})
+
+	ctx, cancel := context.WithTimeout(ctx, createTimeout)
+	defer cancel()
+
+	// Check that the disk snapshot exists on the platform
+	_, diags = utilities.DiskSnapshotCheckPresence(ctx, &plan, r.client)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Create resourceDiskSnapshot: disk snapshot does not exist")
+		return
+	}
+	tflog.Info(ctx, "Create resourceDiskSnapshot: snapshot successfully loaded", ctxCreateSnpsht)
+
+	if plan.Rollback.ValueBool() { // default is false
+		resp.Diagnostics.Append(utilities.RollbackDiskSnapshot(ctx, &plan, r.client)...)
+		if resp.Diagnostics.HasError() {
+			tflog.Error(ctx, "Create resourceDiskSnapshot: Error rolling back snapshot")
+			return
+		}
+	}
+
+	tflog.Info(ctx, "Create resourceDiskSnapshot: resource creation is completed", ctxCreateSnpsht)
+
+	// Map response body to schema and populate Computed attribute values
+	resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &plan, r.client)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Set data last update
+	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
+
+	// Set state to fully populated data
+	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *resourceDiskSnapshot) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+	// Get current state
+	var state models.ResourceDiskSnapshotModel
+	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read resourceDiskSnapshot: Error getting state")
+		return
+	}
+	tflog.Info(ctx, "Read resourceDiskSnapshot: got state successfully", map[string]any{
+		"disk_id": state.DiskID.ValueInt64(),
+		"label":   state.Label.ValueString(),
+	})
+
+	// Set timeouts
+	readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read resourceDiskSnapshot: Error setting timeout")
+		return
+	}
+	tflog.Info(ctx, "Read resourceDiskSnapshot: set timeouts successfully", map[string]any{
+		"disk_id":     state.DiskID.ValueInt64(),
+		"label":       state.Label.ValueString(),
+		"readTimeout": readTimeout})
+
+	ctx, cancel := context.WithTimeout(ctx, readTimeout)
+	defer cancel()
+
+	// Overwrite items with refreshed state
+	resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &state, r.client)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read resourceDiskSnapshot: Error flattening disk snapshot")
+		return
+	}
+
+	// Set refreshed state
+	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read resourceDiskSnapshot: Error setting state")
+		return
+	}
+	tflog.Info(ctx, "End read resourceDiskSnapshot")
+}
+
+// Update updates the resource and sets the updated Terraform state on success.
+func (r *resourceDiskSnapshot) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+	// Retrieve values from plan
+	var plan models.ResourceDiskSnapshotModel
+	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
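+	// Rollback is the only attribute whose change triggers an API call during
+	// update (see below); everything else is refreshed via the flatten step.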
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Update resourceDiskSnapshot: Error receiving the plan")
+		return
+	}
+
+	ctxSnpsht := map[string]any{
+		"disk_id": plan.DiskID.ValueInt64(),
+		"label":   plan.Label.ValueString(),
+	}
+	tflog.Info(ctx, "Update resourceDiskSnapshot: got plan successfully", ctxSnpsht)
+
+	// Retrieve values from state
+	var state models.ResourceDiskSnapshotModel
+	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Update resourceDiskSnapshot: Error receiving the state")
+		return
+	}
+	tflog.Info(ctx, "Update resourceDiskSnapshot: got state successfully", ctxSnpsht)
+
+	// Set timeouts
+	updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Update resourceDiskSnapshot: Error setting timeout")
+		return
+	}
+	tflog.Info(ctx, "Update resourceDiskSnapshot: set timeouts successfully", map[string]any{
+		"disk_id":       plan.DiskID.ValueInt64(),
+		"label":         plan.Label.ValueString(),
+		"updateTimeout": updateTimeout})
+
+	ctx, cancel := context.WithTimeout(ctx, updateTimeout)
+	defer cancel()
+
+	if !plan.Rollback.Equal(state.Rollback) && plan.Rollback.ValueBool() {
+		resp.Diagnostics.Append(utilities.RollbackDiskSnapshot(ctx, &plan, r.client)...)
+		if resp.Diagnostics.HasError() {
+			tflog.Error(ctx, "Update resourceDiskSnapshot: Error rolling back snapshot")
+			return
+		}
+	}
+
+	tflog.Info(ctx, "Update resourceDiskSnapshot: disk snapshot update is completed", ctxSnpsht)
+
+	// Map response body to schema and populate Computed attribute values
+	resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &plan, r.client)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Set data last update
+	plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
+
+	// Set state to fully populated data
+	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Delete deletes the resource and removes the Terraform state on success.
+func (r *resourceDiskSnapshot) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	// Get current state
+	var state models.ResourceDiskSnapshotModel
+	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Delete resourceDiskSnapshot: Error getting state")
+		return
+	}
+	tflog.Info(ctx, "Delete resourceDiskSnapshot: got state successfully", map[string]any{
+		"disk_id": state.DiskID.ValueInt64(),
+		"label":   state.Label.ValueString()})
+
+	// Set timeouts
+	deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Delete resourceDiskSnapshot: Error setting timeout")
+		return
+	}
+	tflog.Info(ctx, "Delete resourceDiskSnapshot: set timeouts successfully", map[string]any{
+		"disk_id":       state.DiskID.ValueInt64(),
+		"label":         state.Label.ValueString(),
+		"deleteTimeout": deleteTimeout})
+
+	ctx, cancel := context.WithTimeout(ctx, deleteTimeout)
+	defer cancel()
+
+	// Check that the disk snapshot still exists on the platform
+	_, diags = utilities.DiskSnapshotCheckPresence(ctx, &state, r.client)
+	resp.Diagnostics.Append(diags...)
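+	// A snapshot that no longer exists is reported as an error here rather
+	// than being treated as already deleted.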
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Delete resourceDiskSnapshot: disk snapshot does not exist")
+		return
+	}
+
+	delReq := disks.SnapshotDeleteRequest{
+		DiskID: uint64(state.DiskID.ValueInt64()),
+		Label:  state.Label.ValueString(),
+	}
+
+	tflog.Info(ctx, "Delete resourceDiskSnapshot: before call CloudBroker().Disks().SnapshotDelete", map[string]any{"req": delReq})
+	_, err := r.client.CloudBroker().Disks().SnapshotDelete(ctx, delReq)
+	if err != nil {
+		resp.Diagnostics.AddError("Delete resourceDiskSnapshot: Error deleting disk snapshot", err.Error())
+		return
+	}
+
+	tflog.Info(ctx, "End delete resourceDiskSnapshot", map[string]any{
+		"disk_id": state.DiskID.ValueInt64(),
+		"label":   state.Label.ValueString()})
+}
+
+// Schema defines the schema for the resource.
+func (r *resourceDiskSnapshot) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: schemas.MakeSchemaResourceDiskSnapshot(),
+		Blocks: map[string]schema.Block{
+			"timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}),
+		},
+	}
+}
+
+// Metadata returns the resource type name.
+func (r *resourceDiskSnapshot) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_cb_disk_snapshot"
+}
+
+// Configure adds the provider configured client to the resource.
+func (r *resourceDiskSnapshot) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+	tflog.Info(ctx, "Start Configure resourceDiskSnapshot")
+	r.client = client.Resource(ctx, &req, resp)
+	tflog.Info(ctx, "Configure resourceDiskSnapshot successful")
+}
+
+func (r *resourceDiskSnapshot) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	// Retrieve import ID and save to id attribute
+	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
diff --git a/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk.go b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk.go
new file mode 100644
index 0000000..b08177d
--- /dev/null
+++ b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk.go
@@ -0,0 +1,253 @@
+package schemas
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+func MakeSchemaDataSourceDisk() map[string]schema.Attribute {
+	return map[string]schema.Attribute{
+		"disk_id": schema.Int64Attribute{
+			Required: true,
+		},
+		"account_id": schema.Int64Attribute{
+			Computed: true,
+		},
+		"account_name": schema.StringAttribute{
+			Computed: true,
+		},
+		"acl": schema.StringAttribute{
+			Computed: true,
+		},
+		"boot_partition": schema.Int64Attribute{
+			Computed: true,
+		},
+		"computes": schema.ListNestedAttribute{
+			Computed: true,
+			NestedObject: schema.NestedAttributeObject{
+				Attributes: map[string]schema.Attribute{
+					"compute_id": schema.StringAttribute{
+						Computed: true,
+					},
+					"compute_name": schema.StringAttribute{
+						Computed: true,
+					},
+				},
+			},
+		},
+		"created_time": schema.Int64Attribute{
+			Computed: true,
+		},
+		"deleted_time": schema.Int64Attribute{
+			Computed: true,
+		},
+		"desc": schema.StringAttribute{
+			Computed: true,
+		},
+		"destruction_time": schema.Int64Attribute{
+			Computed: true,
+		},
+		"devicename": schema.StringAttribute{
+			Computed: true,
+		},
+		"disk_path":
schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "iqn": schema.StringAttribute{ + Computed: true, + }, + "login": schema.StringAttribute{ + Computed: true, + }, + "machine_id": schema.Int64Attribute{ + Computed: true, + }, + "machine_name": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "passwd": schema.StringAttribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_attempts": schema.Int64Attribute{ + Computed: true, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "replication": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "pool_id": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "self_volume_id": schema.StringAttribute{ + Computed: true, + }, + "storage_id": schema.StringAttribute{ + Computed: true, + }, + "volume_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "reality_device_number": schema.Int64Attribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + 
"label": schema.StringAttribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list.go b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list.go new file mode 100644 index 0000000..9416e38 --- /dev/null +++ b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list.go @@ -0,0 +1,293 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDiskList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "by_id": schema.Int64Attribute{ + Optional: true, + }, + "name": schema.StringAttribute{ + Optional: true, + }, + "account_name": schema.StringAttribute{ + Optional: true, + }, + "disk_max_size": schema.Int64Attribute{ + Optional: true, + }, + "shared": schema.BoolAttribute{ + Optional: true, + }, + "account_id": schema.Int64Attribute{ + Optional: true, + }, + "type": schema.StringAttribute{ + Optional: true, + }, + "sort_by": schema.StringAttribute{ + Optional: true, + }, + "page": schema.Int64Attribute{ + Optional: true, + }, + "size": schema.Int64Attribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "boot_partition": schema.Int64Attribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_id": schema.StringAttribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ + Computed: true, + }, + "devicename": schema.StringAttribute{ + Computed: true, + }, + "disk_path": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ 
+ Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "iqn": schema.StringAttribute{ + Computed: true, + }, + "login": schema.StringAttribute{ + Computed: true, + }, + "machine_id": schema.Int64Attribute{ + Computed: true, + }, + "machine_name": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "passwd": schema.StringAttribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_attempts": schema.Int64Attribute{ + Computed: true, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "replication": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "pool_id": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "self_volume_id": schema.StringAttribute{ + Computed: true, + }, + "storage_id": schema.StringAttribute{ + Computed: true, + }, + "volume_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "reality_device_number": schema.Int64Attribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": 
schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_deleted.go b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_deleted.go new file mode 100644 index 0000000..dd707fa --- /dev/null +++ b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_deleted.go @@ -0,0 +1,293 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDiskListDeleted() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "by_id": schema.Int64Attribute{ + Optional: true, + }, + "name": schema.StringAttribute{ + Optional: true, + }, + "account_name": schema.StringAttribute{ + Optional: true, + }, + "disk_max_size": schema.Int64Attribute{ + Optional: true, + }, + "shared": schema.BoolAttribute{ + Optional: true, + }, + "account_id": schema.Int64Attribute{ + Optional: true, + }, + "type": schema.StringAttribute{ + Optional: true, + }, + "sort_by": schema.StringAttribute{ + Optional: true, + }, + "page": schema.Int64Attribute{ + Optional: true, + }, + "size": schema.Int64Attribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "boot_partition": schema.Int64Attribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_id": schema.StringAttribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ + Computed: true, + }, + "devicename": schema.StringAttribute{ + Computed: true, + }, + "disk_path": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + 
Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "iqn": schema.StringAttribute{ + Computed: true, + }, + "login": schema.StringAttribute{ + Computed: true, + }, + "machine_id": schema.Int64Attribute{ + Computed: true, + }, + "machine_name": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "passwd": schema.StringAttribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_attempts": schema.Int64Attribute{ + Computed: true, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "replication": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "pool_id": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "self_volume_id": schema.StringAttribute{ + Computed: true, + }, + "storage_id": schema.StringAttribute{ + Computed: true, + }, + "volume_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "reality_device_number": schema.Int64Attribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_types.go b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_types.go new file mode 100644 index 0000000..dfddf36 --- /dev/null +++ b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_types.go @@ -0,0 +1,36 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + 
"github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDiskListTypes() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "Page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "Page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_types_detailed.go b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_types_detailed.go new file mode 100644 index 0000000..82be58a --- /dev/null +++ b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_types_detailed.go @@ -0,0 +1,62 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDiskListTypesDetailed() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "Page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "Page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "pools": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "system": schema.StringAttribute{ + Computed: true, + }, + "types": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + }, + }, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "sep_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_unattached.go b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_unattached.go new file mode 100644 index 0000000..c06af46 --- /dev/null +++ b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_list_unattached.go @@ -0,0 +1,269 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDiskListUnattached() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "Find by ID", + }, + "account_name": schema.StringAttribute{ + Optional: true, + Description: "Find by account name", + }, + "disk_max_size": schema.Int64Attribute{ + Optional: true, + Description: "Find by max disk size", + }, + "status": schema.StringAttribute{ + Optional: true, + Description: "Find by 
status", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "ID of the account the disks belong to", + }, + "sep_id": schema.Int64Attribute{ + Optional: true, + Description: "find by sep ID", + }, + "pool_name": schema.StringAttribute{ + Optional: true, + Description: "find by pool name", + }, + "type": schema.StringAttribute{ + Optional: true, + Description: "type of the disks", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "Page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "Page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "ckey": schema.StringAttribute{ + Computed: true, + }, + "meta": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "boot_partition": schema.Int64Attribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ + Computed: true, + }, + "disk_path": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "iqn": schema.StringAttribute{ + Computed: true, + }, + "login": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "passwd": schema.StringAttribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + 
					},
+					"present_to": schema.ListAttribute{
+						Computed:    true,
+						ElementType: types.Int64Type,
+					},
+					"purge_attempts": schema.Int64Attribute{
+						Computed: true,
+					},
+					"purge_time": schema.Int64Attribute{
+						Computed: true,
+					},
+					"reality_device_number": schema.Int64Attribute{
+						Computed: true,
+					},
+					"reference_id": schema.StringAttribute{
+						Computed: true,
+					},
+					"res_id": schema.StringAttribute{
+						Computed: true,
+					},
+					"res_name": schema.StringAttribute{
+						Computed: true,
+					},
+					"role": schema.StringAttribute{
+						Computed: true,
+					},
+					"sep_id": schema.Int64Attribute{
+						Computed: true,
+					},
+					"shareable": schema.BoolAttribute{
+						Computed: true,
+					},
+					"size_max": schema.Int64Attribute{
+						Computed: true,
+					},
+					"size_used": schema.Float64Attribute{
+						Computed: true,
+					},
+					"snapshots": schema.ListNestedAttribute{
+						Computed: true,
+						NestedObject: schema.NestedAttributeObject{
+							Attributes: map[string]schema.Attribute{
+								"guid": schema.StringAttribute{
+									Computed: true,
+								},
+								"label": schema.StringAttribute{
+									Computed: true,
+								},
+								"res_id": schema.StringAttribute{
+									Computed: true,
+								},
+								"snap_set_guid": schema.StringAttribute{
+									Computed: true,
+								},
+								"snap_set_time": schema.Int64Attribute{
+									Computed: true,
+								},
+								"timestamp": schema.Int64Attribute{
+									Computed: true,
+								},
+							},
+						},
+					},
+					"status": schema.StringAttribute{
+						Computed: true,
+					},
+					"tech_status": schema.StringAttribute{
+						Computed: true,
+					},
+					"type": schema.StringAttribute{
+						Computed: true,
+					},
+					"vmid": schema.Int64Attribute{
+						Computed: true,
+					},
+				},
+			},
+		},
+		"entry_count": schema.Int64Attribute{
+			Computed: true,
+		},
+	}
+}
diff --git a/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_replication.go b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_replication.go
new file mode 100644
index 0000000..fb89f59
--- /dev/null
+++ b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_replication.go
@@ -0,0 +1,251 @@
+package schemas
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+func MakeSchemaDataSourceDiskReplication() map[string]schema.Attribute {
+	return map[string]schema.Attribute{
+		"disk_id": schema.Int64Attribute{
+			Required:    true,
+			Description: "ID of the primary disk",
+		},
+		"status_replication": schema.StringAttribute{
+			Computed:    true,
+			Description: "Status of replication",
+		},
+		"account_id": schema.Int64Attribute{
+			Computed:    true,
+			Description: "The unique ID of the subscriber-owner of the disk",
+		},
+		"account_name": schema.StringAttribute{
+			Computed:    true,
+			Description: "The name of the subscriber ('account') to whom this disk belongs",
+		},
+		"acl": schema.StringAttribute{
+			Computed: true,
+		},
+		"boot_partition": schema.Int64Attribute{
+			Computed: true,
+		},
+		"computes": schema.ListNestedAttribute{
+			Computed: true,
+			NestedObject: schema.NestedAttributeObject{
+				Attributes: map[string]schema.Attribute{
+					"compute_id": schema.StringAttribute{
+						Computed: true,
+					},
+					"compute_name": schema.StringAttribute{
+						Computed: true,
+					},
+				},
+			},
+		},
+		"created_time": schema.Int64Attribute{
+			Computed: true,
+		},
+		"deleted_time": schema.Int64Attribute{
+			Computed: true,
+		},
+		"desc": schema.StringAttribute{
+			Computed: true,
+		},
+		"destruction_time": schema.Int64Attribute{
+			Computed: true,
+		},
+		"devicename": schema.StringAttribute{
+			Computed: true,
+		},
+		"disk_path": schema.StringAttribute{
+			Computed: true,
+		},
+		"gid":
schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "iqn": schema.StringAttribute{ + Computed: true, + }, + "login": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "passwd": schema.StringAttribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_attempts": schema.Int64Attribute{ + Computed: true, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "replication": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "pool_id": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "self_volume_id": schema.StringAttribute{ + Computed: true, + }, + "storage_id": schema.StringAttribute{ + Computed: true, + }, + "volume_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "reality_device_number": schema.Int64Attribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + 
"snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_snapshot.go b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_snapshot.go new file mode 100644 index 0000000..55f0b24 --- /dev/null +++ b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_snapshot.go @@ -0,0 +1,39 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceDiskSnapshot() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "disk_id": schema.Int64Attribute{ + Required: true, + Description: "The unique ID of the subscriber-owner of the disk", + }, + "label": schema.StringAttribute{ + Required: true, + Description: "Name of the snapshot", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_snapshot_list.go b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_snapshot_list.go new file mode 100644 index 0000000..d979eb2 --- /dev/null +++ b/internal/service/cloudbroker/disks/schemas/schema_data_source_cb_disk_snapshot_list.go @@ -0,0 +1,48 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceDiskSnapshotList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "disk_id": schema.Int64Attribute{ + Required: true, + Description: "The unique ID of the subscriber-owner of the disk", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/disks/schemas/schema_resource_cb_disk.go b/internal/service/cloudbroker/disks/schemas/schema_resource_cb_disk.go new file mode 100644 index 0000000..9fe3440 --- /dev/null +++ b/internal/service/cloudbroker/disks/schemas/schema_resource_cb_disk.go @@ -0,0 +1,303 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" 
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaResourceDisk() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "ID of the account", + }, + "disk_name": schema.StringAttribute{ + Required: true, + Description: "Iname of disk", + }, + "size_max": schema.Int64Attribute{ + Required: true, + Description: "size in GB, default is 10", + }, + "gid": schema.Int64Attribute{ + Required: true, + Description: "ID of the grid (platform)", + }, + "node_ids": schema.ListAttribute{ + ElementType: types.Int64Type, + Optional: true, + Description: "ID of the grid (platform)", + }, + "ssd_size": schema.Int64Attribute{ + Optional: true, + Description: "size in GB, default is 10", + }, + // optional attributes + "desc": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "description of disk", + }, + "pool": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Pool for disk location", + }, + "sep_id": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Storage endpoint provider ID to create disk", + }, + "type": schema.StringAttribute{ + Optional: true, + Computed: true, + Validators: []validator.String{ + stringvalidator.OneOf("B", "D", "T"), // case is not ignored + }, + Description: "(B;D;T) B=Boot;D=Data;T=Temp", + // default is D + }, + "detach": schema.BoolAttribute{ + Optional: true, + Description: "Detaching the disk from compute", + // default is false + }, + "permanently": schema.BoolAttribute{ + Optional: true, + Description: "Whether to completely delete the disk, works only with non attached disks", + // default is false + }, + "shareable": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "share disk", + }, + "iotune": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of bytes to read per second", + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum number of bytes to read", + }, + "read_iops_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of io read operations per second", + }, + "read_iops_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum number of io read operations", + }, + "size_iops_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Size of io operations", + }, + "total_bytes_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Total size bytes per second", + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum total size of bytes per second", + }, + "total_iops_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Total number of io operations per second", + }, + "total_iops_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum total number of io operations per second", + }, + "write_bytes_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of bytes to write per second", + }, + 
"write_bytes_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum number of bytes to write per second", + }, + "write_iops_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of write operations per second", + }, + "write_iops_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum number of write operations per second", + }, + }, + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "boot_partition": schema.Int64Attribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_id": schema.StringAttribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ + Computed: true, + }, + "devicename": schema.StringAttribute{ + Computed: true, + }, + "disk_path": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "last_updated": schema.StringAttribute{ + Computed: true, + Description: "Timestamp of the last Terraform update of the disk resource.", + }, + "iqn": schema.StringAttribute{ + Computed: true, + }, + "login": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "passwd": schema.StringAttribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "purge_attempts": schema.Int64Attribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "reality_device_number": schema.Int64Attribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + 
"tech_status": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/disks/schemas/schema_resource_cb_disk_replication.go b/internal/service/cloudbroker/disks/schemas/schema_resource_cb_disk_replication.go new file mode 100644 index 0000000..57ff2ad --- /dev/null +++ b/internal/service/cloudbroker/disks/schemas/schema_resource_cb_disk_replication.go @@ -0,0 +1,281 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaResourceDiskReplication() map[string]schema.Attribute { + // required attributes + return map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Required: true, + }, + "disk_name": schema.StringAttribute{ + Required: true, + }, + "sep_id": schema.Int64Attribute{ + Required: true, + }, + "pool_name": schema.StringAttribute{ + Required: true, + }, + + // optional attributes + "pause": schema.BoolAttribute{ + Optional: true, + }, + "reverse": schema.BoolAttribute{ + Optional: true, + }, + "start": schema.BoolAttribute{ + Optional: true, + }, + "detach": schema.BoolAttribute{ + Optional: true, + }, + "permanently": schema.BoolAttribute{ + Optional: true, + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "replica_disk_id": schema.Int64Attribute{ + Computed: true, + }, + "status_replication": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "boot_partition": schema.Int64Attribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_id": schema.StringAttribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ + Computed: true, + }, + "devicename": schema.StringAttribute{ + Computed: true, + }, + "disk_path": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + 
"total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "iqn": schema.StringAttribute{ + Computed: true, + }, + "login": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "passwd": schema.StringAttribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "purge_attempts": schema.Int64Attribute{ + Computed: true, + }, + "replication": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "pool_id": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "self_volume_id": schema.StringAttribute{ + Computed: true, + }, + "storage_id": schema.StringAttribute{ + Computed: true, + }, + "volume_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "reality_device_number": schema.Int64Attribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/disks/schemas/schema_resource_cb_disk_snapshot.go b/internal/service/cloudbroker/disks/schemas/schema_resource_cb_disk_snapshot.go new file mode 100644 index 0000000..5b11fbd --- /dev/null +++ b/internal/service/cloudbroker/disks/schemas/schema_resource_cb_disk_snapshot.go @@ -0,0 +1,58 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" +) + +func MakeSchemaResourceDiskSnapshot() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "disk_id": schema.Int64Attribute{ + Required: true, + Description: "The unique ID of the subscriber-owner of the disk", + }, + "label": schema.StringAttribute{ + Required: true, + Description: "Name of the snapshot", + }, + + // optional attributes + "rollback": schema.BoolAttribute{ + Optional: true, + Description: "Needed in order to make a snapshot rollback", + // default is false + }, + "timestamp": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Snapshot time", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + // id is generated as "#