Compare commits

4 commits

| Author | SHA1 | Date |
|--------|------|------|
|  | 6876b25f0e |  |
|  | db1760cb72 |  |
|  | 84b7a80e1b |  |
|  | 8857ed17be |  |

CHANGELOG.md (153 changed lines)

@@ -1,31 +1,128 @@
-## Version 4.5.3
+## Version 4.6.0
 
-### Bugfix
-- Change field Value in schema resource kvmvm in blocks affinity_rules and anti_affinity_rules from required to optional in cloudapi and cloudbroker
-- Change the format ID in resource k8s_wg from wg_id to k8s_id#wg_id
-- Fix scripts intall.bat and intall.sh
-- Increased timeouts for resources account, disk, cdrom_image, image, virtual_image, k8s_wg, vins, rg in cloudbroker so that they match the same from cloudapi
-- Increased timeouts for resources pcidevice, sep, sep_config in cloudbroker
-- Fix bug in affinity_rules in cloudbroker/kvmvm
-- Add field NeedReboot in cloudapi/kvmvm in data_source, data_source_list and resource schemas
-- Fix bug with field ExtNetID in CreateInRGRequest in cloudapi/vins and cloudbroker/vins
-- Add boolean field Safe in Restart func in resource cloudapi/lb
-- Add field force_resize in Resize func in resource cloudapi/kvmvm and cloudbroker/kvmvm
-- Fix bug in cloudBroker/kvmvm with disk and extra_disks
-- Change logic from disk delete and disk add to disk rename when disk.disk_name field is changed in resource cloudapi/kvmvm and cloudbroker/kvmvm
-- Fix allowed network plugin value from "weawenet" to "weavenet" for k8ci, k8s resources in cloudbroker and for k8s resource in cloudapi
-- Fix bug with deleting decort_bservice resource when setting enable=false in cloudapi/bservice
-- Fix panic in data source decort_bservice_snapshot_list in cloudapi/bservice
-- Fix panic in data source decort_rg_affinity_groups_list in cloudapi/rg
-- Fix change description in resource decort_cb_kvmvm in cloudbroker
-- Computed field bootdisk_size has been renamed to boot_disk_size in data sources decort_kvmvm, decort_kvmvm_list, decort_cb_kvmvm_list in cloudapi and cloudbroker
-- Change request logic for delete resource decort_cb_k8s_cp in cloudbroker
-- Fix schema in datasource decort_cb_kvmvm in cloudbroker, fields boot_disk_id, sep_id, pool, extra_disks, network, net_id, net_type, ip_address, mac have been removes
-- Fix panic with RG in cloudapi
-- Fix permanently field for disks delete in decort_kvmvm in cloudapi/kvmvm and decort_cb_kvmvm in cloudbroker/kvmvm
-- Fix bug with enable request duplication
 
 ### Feature
 
-- Add RAM divisibility validation in compute, k8s, k8s_cp and k8s_wg resources in cloudapi and cloudbroker
-- Add "permanently" flag in k8s, k8s_cp in cloudapi
+#### general
+- Add sort_by optional string field for the below data sources in cloudapi and cloudbroker:
+
+| group | cloudapi data sources | cloudbroker data sources |
+|---|---|---|
+| account | decort_account_computes_list</br>decort_account_deleted_list</br>decort_account_disks_list</br>decort_account_list</br>decort_account_rg_list</br>decort_account_templates_list</br>decort_account_vins_list | decort_cb_account_computes_list</br>decort_cb_account_disks_list</br>decort_cb_account_flipgroups_list</br>decort_cb_account_list</br>decort_cb_account_list_deleted</br>decort_cb_account_rg_list</br>decort_cb_account_vins_list |
+| audit | - | decort_cb_audit_list |
+| bservice | decort_bservice_deleted_list</br>decort_bservice_list | - |
+| disk | decort_disk_list</br>decort_disk_list_types</br>decort_disk_list_types_detailed</br>decort_disk_list_unattached</br>decort_disk_list_deleted | decort_cb_disk_list</br>decort_cb_disk_list_deleted</br>decort_cb_disk_list_types</br>decort_cb_disk_list_types_detailed</br>decort_cb_disk_list_unattached |
+| extnet | decort_extnet_computes_list</br>decort_extnet_list | decort_cb_extnet_list |
+| flipgroup | decort_flipgroup_list | decort_cb_flipgroup_list |
+| grid | - | decort_cb_grid_list |
+| image | decort_image_list | decort_cb_image_list</br>decort_cb_image_list_stacks |
+| k8ci | decort_k8ci_list | decort_cb_k8ci_list</br>decort_cb_k8ci_list_deleted |
+| k8s | decort_k8s_list</br>decort_k8s_list_deleted | decort_cb_k8s_list</br>decort_cb_k8s_list_deleted |
+| kvmvm | decort_kvmvm_list</br>decort_kvmvm_list_deleted</br>decort_kvmvm_pci_device_list</br>decort_kvmvm_vgpu_list | decort_cb_kvmvm_list</br>decort_cb_kvmvm_list_deleted</br>decort_cb_kvmvm_pci_device_list</br>decort_cb_kvmvm_vgpu_list |
+| lb | decort_lb_list</br>decort_lb_list_deleted | decort_cb_lb_list</br>decort_cb_lb_list_deleted |
+| locations | decort_locations_list | - |
+| rg | decort_rg_list</br>decort_rg_list_computes</br>decort_rg_list_deleted</br>decort_rg_list_lb</br>decort_rg_list_vins | decort_cb_rg_list</br>decort_cb_rg_list_computes</br>decort_cb_rg_list_deleted</br>decort_cb_rg_list_lb</br>decort_cb_rg_list_vins |
+| sep | - | decort_cb_sep_list |
+| stack | decort_stack_list | decort_cb_stack_list |
+| vfpool | decort_vfpool_list | decort_cb_vfpool_list |
+| vins | decort_vins_list</br>decort_vins_list_deleted | decort_cb_vins_list</br>decort_cb_vins_list_deleted |
+
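The sort_by entry above applies uniformly to every list data source in the table. A minimal sketch of how it might be used; the "+field"/"-field" direction syntax and the output shape are assumptions, not taken from this diff:

```terraform
# Hypothetical use of the new sort_by argument on one of the list data sources.
data "decort_kvmvm_list" "sorted" {
  sort_by = "+name" # assumed format: optional +/- direction prefix followed by a field name
}

output "sorted_computes" {
  value = data.decort_kvmvm_list.sorted
}
```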
+#### account:
+- Add available_templates optional field to resource decort_cb_account in cloudbroker/account
+- Add data source decort_cb_account_available_templates_list in cloudbroker/account
+- Add compute_features computed fields to data sources decort_account, decort_account_list, decort_account_deleted_list, decort_cb_account, decort_cb_account_list, decort_account_deleted_list and to resource decort_account in cloudapi/account and cloudbroker/account
+- Add compute_features optional field to resource decort_cb_account in cloudbroker/account
+- Add extnet_id, free_ips computed fields to data sources decort_account_vins_list and decort_cb_account_vins_list in cloudapi/account and cloudbroker/account
+
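A sketch of the cloudbroker account resource with the two new optional fields; only available_templates and compute_features come from the changelog, every other argument and value is an assumption:

```terraform
# Hypothetical sketch of decort_cb_account with the new optional fields.
resource "decort_cb_account" "acc" {
  account_name        = "demo-account"        # assumed required argument
  available_templates = [42, 43]              # new optional field (assumed: list of image IDs)
  compute_features    = ["hugepages", "numa"] # new optional field (example values are guesses)
}
```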
+#### audit:
+- Add guid computed field to data source decort_cb_audit_linked_jobs in cloudbroker/audit
+- Delete field status_code in data source decort_cb_audit_list in cloudbroker/audit
+- Add fields min_status_code and max_status_code in data source decort_cb_audit_list in cloudbroker/audit
+
+#### bservice:
+- Add validation of ram to be divisible by 128 to resource decort_bservice_group in cloudapi/bservice
+
+#### disks:
+- Add resource and data source decort_disk_replication and decort_cb_disk_replication
+- Add field replication in data sources and resource decort_disk, decort_disk_list, decort_disk_list_deleted, decort_cb_disk, decort_cb_disk_list, decort_cb_disk_list_deleted
+- Add node_ids optional field to resource decort_cb_disk in cloudbroker/disk
+
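The new disk replication resource named above could look roughly like this; every argument except the resource type is an assumption (the names are borrowed from the replication fields listed later in the kvmvm section):

```terraform
# Hypothetical sketch of the new decort_disk_replication resource.
resource "decort_disk_replication" "replica" {
  disk_id   = 1234                # assumed: ID of the source disk
  disk_name = "disk-1234-replica" # assumed
  sep_id    = 2                   # assumed: target storage endpoint provider
  pool_name = "data01"            # assumed: target pool
}
```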
+#### flipgroup:
+- Add conn_id, status and account_id optional fields to data sources decort_flipgroup_list and decort_cb_flipgroup_list in cloudapi/flipgroup and cloudbroker/flipgroup
+- Add client_ids optional field to data sources decort_flipgroup_list and decort_cb_flipgroup_list in cloudapi/flipgroup and cloudbroker/flipgroup
+
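The new flipgroup list filters might be combined like this; only the attribute names come from the changelog, the values and their types are guesses:

```terraform
# Hypothetical filtering on the flipgroup list data source with the new optional fields.
data "decort_flipgroup_list" "filtered" {
  account_id = 100        # new optional filter
  conn_id    = 5          # new optional filter
  status     = "CREATED"  # new optional filter (value is a guess)
  client_ids = [201, 202] # new optional filter (assumed: list of attached compute IDs)
}
```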
+#### kvmvm:
+- Add cd_image_id computed field to data sources decort_kvmvm, decort_kvmvm_list, decort_cb_kvmvm, decort_cb_kvmvm_list and resources decort_kvmvm, decort_cb_kvmvm in cloudapi/kvmvm and cloudbroker/kvmvm
+- Add natable_vins_id, natable_vins_ip, natable_vins_name, natable_vins_network and natable_vins_network_name computed fields to data source decort_cb_kvmvm and to resource decort_cb_kvmvm in cloudbroker/kvmvm
+- Change field local_port (in block port_forwarding) from required to optional in resources decort_kvmvm and decort_cb_kvmvm in cloudapi/kvmvm and cloudbroker/kvmvm
+- Add hp_backed, cpu_pin, numa_affinity and numa_node_id computed fields to data sources decort_kvmvm, decort_kvmvm_list, decort_cb_kvmvm, decort_cb_kvmvm_list in cloudapi/kvmvm and cloudbroker/kvmvm
+- Add hp_backed, cpu_pin, numa_affinity optional fields and numa_node_id computed field to resources decort_kvmvm, decort_cb_kvmvm in cloudapi/kvmvm and cloudbroker/kvmvm
+- Add validation of ram to be divisible by 128 to resources decort_kvmvm and decort_cb_kvmvm in cloudapi/kvmvm and cloudbroker/kvmvm
+- Add name optional field to data source decort_cb_kvmvm_pci_device_list in cloudbroker/kvmvm
+- Add fields image_name and virtual_image_name in resource and data source decort_cb_kvmvm in cloudbroker/kvmvm
+- Add fields enabled and node_id in resource and data sources decort_cb_kvmvm, decort_cb_kvmvm_list, decort_cb_kvmvm_list_deleted in cloudbroker/kvmvm. Add field node_id in resource and data sources decort_kvmvm, decort_kvmvm_list, decort_kvmvm_list_deleted in cloudapi/kvmvm
+- Delete network and extra_disk fields in data source decort_cb_kvmvm in cloudbroker/kvmvm
+- Add computed fields compute_id, description, guid, hwpath, device_id, name, rg_id, stack_id, status, system_name in data source decort_kvmvm_pci_device_list in cloudapi/kvmvm
+- Add computed fields account_id, created_time, deleted_time, gid, guid, vgpu_id, last_claimed_by, last_update_time, mode, pci_slot, pgpuid, profile_id, ram, reference_id, rg_id, status, type, vm_id in data sources decort_kvmvm_vgpu_list and decort_cb_kvmvm_vgpu_list in cloudapi/kvmvm and cloudbroker/kvmvm
+- Add field replication (disk_id, pool_id, role, self_volume_id, storage_id, volume_id) in data sources decort_kvmvm, decort_cb_kvmvm in cloudapi/kvmvm and cloudbroker/kvmvm
+- Add computed field reserved_node_cpus in data sources decort_kvmvm, decort_kvmvm_list, decort_kvmvm_list_deleted, decort_cb_kvmvm, decort_cb_kvmvm_list, decort_cb_kvmvm_list_deleted and in resources decort_kvmvm, decort_cb_kvmvm in cloudapi/kvmvm and cloudbroker/kvmvm
+- Add "VFNIC" as allowed net_type value in network block for resources decort_kvmvm and decort_cb_kvmvm (created with "KVM_X86" driver) in cloudapi/kvmvm and cloudbroker/kvmvm
+- Set optional field image_id in resources decort_kvmvm, decort_cb_kvmvm in cloudapi/kvmvm and cloudbroker/kvmvm
+- Add optional field without_boot_disk in resources decort_kvmvm, decort_cb_kvmvm in cloudapi/kvmvm and cloudbroker/kvmvm
+- Add optional fields depresent, node_ids in resource decort_cb_kvmvm in cloudbroker/kvmvm
+
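Several of the kvmvm additions above are plain optional arguments on the compute resource. A sketch that combines them; name, rg_id, driver, cpu, ram and boot_disk_size are assumed required arguments of decort_kvmvm and are not taken from this diff:

```terraform
# Hypothetical compute sketch using the new kvmvm fields from 4.6.0.
resource "decort_kvmvm" "vm" {
  name           = "demo-vm" # assumed required argument
  rg_id          = 7         # assumed required argument
  driver         = "KVM_X86"
  cpu            = 2
  ram            = 4096      # must stay divisible by 128 (new validation)
  boot_disk_size = 20
  image_id       = 42        # image_id is now optional

  hp_backed     = true       # new optional field (assumed boolean)
  cpu_pin       = true       # new optional field (assumed boolean)
  numa_affinity = "strict"   # new optional field (value is a guess); numa_node_id is computed

  network {
    net_type = "VFNIC"       # newly allowed net_type for KVM_X86 computes
    net_id   = 1             # assumed
  }
}
```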
+#### image:
+- Add cd_presented_to computed field to data sources decort_image, decort_cb_image and to resources decort_image, decort_cb_image in cloudapi/image and cloudbroker/image
+- Add account_id required field to resource decort_image in cloudapi/image
+- Add accounts optional field to resource decort_cb_image in cloudbroker/image
+- Add network_interface_naming computed field to data sources decort_image, decort_image_list, decort_cb_image, decort_cb_image_list and to resources decort_image, decort_cb_image in cloudapi/image and cloudbroker/image
+- Delete gid required field from resource decort_image in cloudapi/image
+- Delete meta and ckey computed fields from data sources and resource decort_cb_image and decort_cb_image_list in cloudbroker/image
+- Add resources decort_image_from_blank_compute and decort_cb_image_from_blank_compute in cloudapi/compute and cloudbroker/compute
+- Add resources decort_image_from_platoform_disk and decort_cb_image_from_platoform_disk in cloudapi/compute and cloudbroker/compute
+- Add file_path optional field to resource decort_cb_image in cloudbroker/image
+- Change url field from required to optional in resource decort_cb_image in cloudbroker/image
+
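With url now optional and file_path added, a cloudbroker image could plausibly be registered from a local file; all arguments other than file_path and the relaxed url are assumptions:

```terraform
# Hypothetical sketch of decort_cb_image created from a file instead of a URL.
resource "decort_cb_image" "from_file" {
  name       = "ubuntu-22-04"                   # assumed required argument
  boot_type  = "uefi"                           # assumed
  image_type = "linux"                          # assumed
  drivers    = ["KVM_X86"]                      # assumed
  file_path  = "/var/images/ubuntu-22.04.qcow2" # new optional field; url may now be omitted
}
```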
+#### k8s:
+- Change lb_sysctl_params optional field from string to array in resources decort_k8s and decort_k8s_cp in cloudapi/k8s
+- Add lb_sysctl_params optional field to resource decort_cb_k8s_cp in cloudbroker/k8s
+- Add validation of ram to be divisible by 128 to resources decort_k8s, decort_k8s_cp, decort_k8s_wg, decort_cb_k8s_cp and decort_cb_k8s_wg in cloudapi/k8s and cloudbroker/k8s
+- Change possible values of the num field (5 is now allowed) in resources decort_k8s (cloudapi/k8s), decort_k8s_cp (cloudapi/k8s) and decort_cb_k8s_cp (cloudbroker/k8s)
+
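lb_sysctl_params changing from a string to an array affects existing configurations. The element shape is not stated in this diff; a list of key/value objects is assumed below, and the other arguments of decort_k8s_cp are placeholders:

```terraform
# Hypothetical sketch of the array form of lb_sysctl_params on decort_k8s_cp.
resource "decort_k8s_cp" "cp" {
  name     = "demo-cp" # assumed required argument
  k8sci_id = 1         # assumed required argument
  ram      = 4096      # must stay divisible by 128 (new validation)

  lb_sysctl_params = [                           # was a single string in 4.5.x
    { key = "net.ipv4.ip_forward", value = "1" } # assumed element shape
  ]
}
```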
+#### lb:
+- Add user_managed, manager_id, manager_type and part_k8s computed fields to data sources decort_lb, decort_lb_list, decort_cb_lb, decort_cb_lb_list and to resources decort_lb, decort_cb_lb in cloudapi/lb and cloudbroker/lb
+- Add sysctl_params optional field to resources decort_lb and decort_cb_lb in cloudapi/lb and cloudbroker/lb
+- Add safe optional field to resource decort_cb_lb in cloudbroker/lb
+
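The load balancer gains a similar sysctl_params argument, plus a safe flag on the cloudbroker resource. A sketch with assumed required arguments and an assumed element shape for sysctl_params:

```terraform
# Hypothetical sketch of decort_lb with the new optional sysctl_params field.
resource "decort_lb" "lb" {
  lb_name   = "demo-lb" # assumed required argument name
  rg_id     = 7         # assumed
  extnet_id = 3         # assumed
  vins_id   = 11        # assumed
  start     = true      # assumed

  sysctl_params = [                                # new optional field
    { key = "net.ipv4.tcp_tw_reuse", value = "1" } # assumed element shape
  ]
}
```

On decort_cb_lb the new safe field is presumably a boolean toggled the same way.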
+#### node:
+- Add data sources decort_cb_node and decort_cb_node_list in cloudbroker/node
+
+#### rg:
+- Add compute_features computed fields to data sources decort_resgroup, decort_rg_list, decort_rg_list_deleted, decort_cb_rg, decort_cb_rg_list, decort_cb_rg_list_deleted and to resource decort_resgroup in cloudapi/rg and cloudbroker/rg
+- Add compute_features optional field to resource decort_cb_rg in cloudbroker/rg
+- Add extnet_id, free_ips computed fields to data sources decort_rg_vins_list and decort_cb_rg_vins_list in cloudapi/rg and cloudbroker/rg
+- Change resource quota handling logic in cloudapi to be like cloudbroker's. Added "cu_d" field into cloudapi resource quota schema. Deleted default values of "cpu", "ram", "disk", "ext_traffic", "ext_ips" and "gpu_units" fields in cloudapi resource schema
+
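For the reworked resource group quota, a sketch of what the quota block might look like after the change; the attribute names mirror the field names quoted above, but the block layout, the cu_d semantics and the required arguments (name, account_id, gid) are assumptions:

```terraform
# Hypothetical sketch of decort_resgroup with the reworked quota block.
resource "decort_resgroup" "rg" {
  name       = "demo-rg" # assumed required argument
  account_id = 100       # assumed
  gid        = 212       # assumed

  quota {
    cpu         = 16
    ram         = 32768
    disk        = 500
    cu_d        = 500  # newly added quota field (semantics assumed)
    ext_traffic = 1000
    ext_ips     = 4
    gpu_units   = 0    # defaults were removed in 4.6.0, so values should be set explicitly
  }
}
```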
+#### sep:
+- Add pools optional field to resource decort_cb_sep in cloudbroker/sep
+- Change config field from optional to required in resource decort_cb_sep in cloudbroker/sep
+
+#### user:
+- Add data sources decort_cb_user, decort_cb_user_list and decort_cb_user_get_audit in cloudbroker/user
+- Add resource decort_cb_user in cloudbroker/user
+
+#### vfpool:
+- Add data sources decort_vfpool, decort_vfpool_list and decort_cb_vfpool, decort_cb_vfpool_list in cloudapi/vfpool and cloudbroker/vfpool
+- Add resource decort_cb_vfpool in cloudbroker/vfpool
+
+#### vins:
+- Change field int_port (in block nat_rule) from required to optional in resource decort_cb_vins in cloudbroker/vins
+- Add extnet_id, free_ips computed fields to data sources decort_vins_list, decort_cb_vins_list in cloudapi/vins and cloudbroker/vins
+- Add dns optional field to resources decort_vins and decort_cb_vins in cloudapi/vins and cloudbroker/vins
+- Add node_id computed field to resources and data sources decort_vins and decort_cb_vins in cloudapi/vins and cloudbroker/vins
+- Add vnfdev_id optional field to data source decort_cb_vins_list in cloudbroker/vins
+- Add vnfdev_id optional field to data source decort_vins_list_deleted in cloudapi/vins
+
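The vins changes are mostly computed fields; the one new writable argument is dns. A sketch, with name and rg_id assumed as required arguments and the list-of-addresses type also assumed:

```terraform
# Hypothetical sketch of decort_vins with the new optional dns field.
resource "decort_vins" "net" {
  name  = "demo-vins" # assumed required argument
  rg_id = 7           # assumed

  dns = ["8.8.8.8", "1.1.1.1"] # new optional field (assumed: set of DNS server addresses)
}
```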
+### Bugfix
+- Fix boot_disk_size field to be saved correctly in state in order to stop unnecessary resource update attempts after `terraform apply` in resource decort_cb_kvmvm in cloudbroker/kvmvm
+- Rename computed field name to k8s_name in order to fix state in data source decort_k8s_list_deleted in cloudapi/k8s
+- Delete account_id field in data sources decort_rg_list_lb and decort_cb_rg_list_lb in cloudapi/rg and cloudbroker/rg

Makefile (2 changed lines)

@@ -7,7 +7,7 @@ ZIPDIR = ./zip
 BINARY=${NAME}
 WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
 MAINPATH = ./cmd/decort/
-VERSION=4.5.3
+VERSION=4.6.0
 OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
 
 FILES = ${BINARY}_${VERSION}_darwin_amd64\

@@ -6,6 +6,7 @@ Terraform provider for the Digital Energy Cloud Orchestration platform
 
 | DECORT API version | Terraform provider version |
 | ------ | ------ |
+| 4.4.0 | 4.6.x |
 | 3.8.9 | 4.5.x |
 | 3.8.8 | 4.4.x |
 | 3.8.7 | 4.3.x |
@@ -59,6 +60,7 @@ Terraform provider for the Digital Energy Cloud Orchestration platform
 - Working with pci device,
 - Working with resource groups,
 - Working with seps,
+- Working with user,
 - Working with stacks,
 - Working with VINS.
 

go.mod (48 changed lines)

@@ -1,27 +1,27 @@
 module repository.basistech.ru/BASIS/terraform-provider-decort
 
-go 1.18
+go 1.20
 
 require (
-	github.com/google/uuid v1.3.0
+	github.com/google/uuid v1.4.0
 	github.com/hashicorp/terraform-plugin-docs v0.13.0
-	github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0
+	github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0
 	github.com/sirupsen/logrus v1.9.0
-	golang.org/x/net v0.17.0
-	repository.basistech.ru/BASIS/decort-golang-sdk v1.7.7
+	golang.org/x/net v0.19.0
+	repository.basistech.ru/BASIS/decort-golang-sdk v1.8.1
 )
 
 require (
 	github.com/Masterminds/goutils v1.1.1 // indirect
 	github.com/Masterminds/semver/v3 v3.2.0 // indirect
 	github.com/Masterminds/sprig/v3 v3.2.3 // indirect
-	github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
+	github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect
 	github.com/agext/levenshtein v1.2.3 // indirect
 	github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
 	github.com/armon/go-radix v1.0.0 // indirect
 	github.com/bgentry/speakeasy v0.1.0 // indirect
-	github.com/cloudflare/circl v1.3.3 // indirect
-	github.com/fatih/color v1.13.0 // indirect
+	github.com/cloudflare/circl v1.3.7 // indirect
+	github.com/fatih/color v1.16.0 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
@@ -35,24 +35,24 @@ require (
 	github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect
 	github.com/hashicorp/go-hclog v1.5.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/go-plugin v1.5.1 // indirect
+	github.com/hashicorp/go-plugin v1.6.0 // indirect
 	github.com/hashicorp/go-uuid v1.0.3 // indirect
 	github.com/hashicorp/go-version v1.6.0 // indirect
-	github.com/hashicorp/hc-install v0.6.1 // indirect
+	github.com/hashicorp/hc-install v0.6.3 // indirect
 	github.com/hashicorp/hcl/v2 v2.19.1 // indirect
 	github.com/hashicorp/logutils v1.0.0 // indirect
-	github.com/hashicorp/terraform-exec v0.19.0 // indirect
-	github.com/hashicorp/terraform-json v0.17.1 // indirect
-	github.com/hashicorp/terraform-plugin-go v0.19.0 // indirect
+	github.com/hashicorp/terraform-exec v0.20.0 // indirect
+	github.com/hashicorp/terraform-json v0.21.0 // indirect
+	github.com/hashicorp/terraform-plugin-go v0.22.0 // indirect
 	github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect
-	github.com/hashicorp/terraform-registry-address v0.2.2 // indirect
+	github.com/hashicorp/terraform-registry-address v0.2.3 // indirect
 	github.com/hashicorp/terraform-svchost v0.1.1 // indirect
 	github.com/hashicorp/yamux v0.1.1 // indirect
 	github.com/huandu/xstrings v1.4.0 // indirect
 	github.com/imdario/mergo v0.3.15 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.16 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mitchellh/cli v1.1.5 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
@@ -65,16 +65,16 @@ require (
 	github.com/shopspring/decimal v1.3.1 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
-	github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
+	github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
 	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
-	github.com/zclconf/go-cty v1.14.1 // indirect
-	golang.org/x/crypto v0.15.0 // indirect
-	golang.org/x/mod v0.13.0 // indirect
-	golang.org/x/sys v0.14.0 // indirect
+	github.com/zclconf/go-cty v1.14.2 // indirect
+	golang.org/x/crypto v0.19.0 // indirect
+	golang.org/x/mod v0.15.0 // indirect
+	golang.org/x/sys v0.17.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
-	google.golang.org/grpc v1.57.1 // indirect
-	google.golang.org/protobuf v1.31.0 // indirect
+	google.golang.org/appengine v1.6.8 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect
+	google.golang.org/grpc v1.61.1 // indirect
+	google.golang.org/protobuf v1.32.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )

go.sum (113 changed lines)

@@ -8,9 +8,8 @@ github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFP
 github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
 github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
-github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
-github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
+github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0=
+github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
 github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
 github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
@@ -22,23 +21,23 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI
 github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
-github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
-github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
 github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
 github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
 github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
-github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY=
+github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
 github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
 github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
 github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
@@ -49,8 +48,8 @@ github.com/go-playground/validator/v10 v10.15.4/go.mod h1:9iXMNT7sEkjXb0I+enO7QX
 github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -62,8 +61,8 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -79,33 +78,33 @@ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-plugin v1.5.1 h1:oGm7cWBaYIp3lJpx1RUEfLWophprE2EV/KUeqBYo+6k=
-github.com/hashicorp/go-plugin v1.5.1/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4=
+github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A=
+github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
 github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
 github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/hc-install v0.6.1 h1:IGxShH7AVhPaSuSJpKtVi/EFORNjO+OYVJJrAtGG2mY=
-github.com/hashicorp/hc-install v0.6.1/go.mod h1:0fW3jpg+wraYSnFDJ6Rlie3RvLf1bIqVIkzoon4KoVE=
+github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs=
+github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0=
 github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI=
 github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE=
 github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM=
-github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg=
-github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA=
-github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o=
+github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo=
+github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw=
+github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U=
+github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk=
 github.com/hashicorp/terraform-plugin-docs v0.13.0 h1:6e+VIWsVGb6jYJewfzq2ok2smPzZrt1Wlm9koLeKazY=
 github.com/hashicorp/terraform-plugin-docs v0.13.0/go.mod h1:W0oCmHAjIlTHBbvtppWHe8fLfZ2BznQbuv8+UD8OucQ=
-github.com/hashicorp/terraform-plugin-go v0.19.0 h1:BuZx/6Cp+lkmiG0cOBk6Zps0Cb2tmqQpDM3iAtnhDQU=
-github.com/hashicorp/terraform-plugin-go v0.19.0/go.mod h1:EhRSkEPNoylLQntYsk5KrDHTZJh9HQoumZXbOGOXmec=
+github.com/hashicorp/terraform-plugin-go v0.22.0 h1:1OS1Jk5mO0f5hrziWJGXXIxBrMe2j/B8E+DVGw43Xmc=
+github.com/hashicorp/terraform-plugin-go v0.22.0/go.mod h1:mPULV91VKss7sik6KFEcEu7HuTogMLLO/EvWCuFkRVE=
 github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0=
 github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow=
-github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 h1:X7vB6vn5tON2b49ILa4W7mFAsndeqJ7bZFOGbVO+0Cc=
-github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0/go.mod h1:ydFcxbdj6klCqYEPkPvdvFKiNGKZLUs+896ODUXCyao=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 h1:qHprzXy/As0rxedphECBEQAh3R4yp6pKksKHcqZx5G8=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0/go.mod h1:H+8tjs9TjV2w57QFVSMBQacf8k/E1XwLXGCARgViC6A=
-github.com/hashicorp/terraform-registry-address v0.2.2 h1:lPQBg403El8PPicg/qONZJDC6YlgCVbWDtNmmZKtBno=
-github.com/hashicorp/terraform-registry-address v0.2.2/go.mod h1:LtwNbCihUoUZ3RYriyS2wF/lGPB6gF9ICLRtuDk7hSo=
+github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI=
+github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM=
 github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ=
 github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc=
 github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
@@ -137,8 +136,9 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng=
 github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
@@ -170,7 +170,7 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g
 github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
 github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
+github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
 github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
@@ -190,41 +190,34 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
 github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
 github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
-github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
-github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
+github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
+github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
 github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
 github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA=
-github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI=
+github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
-golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
-golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
-golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
-golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -239,43 +232,37 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
-golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA=
-google.golang.org/grpc v1.57.1 h1:upNTNqv0ES+2ZOOqACwVtS3Il8M12/+Hz41RCPzAjQg=
-google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY=
+google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -285,5 +272,5 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.7 h1:VIF+NXXfYec8DtlTcPae+CjB0fhH8ovBQxKMwEnLgE8=
|
repository.basistech.ru/BASIS/decort-golang-sdk v1.8.1 h1:wpxjnVO7hGaQVWbtFoYVbNoeFxk0QUnEfDsPuWjiNk0=
|
||||||
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.7/go.mod h1:7fj8sgGZFiiExewQeqckCS4WxwOmU0oP6BO6mi1Lpkw=
|
repository.basistech.ru/BASIS/decort-golang-sdk v1.8.1/go.mod h1:OaUynHHuSjWMzpfyoL4au6oLcUogqUkPPBKA15pbHWo=
|
||||||
|
|||||||
@@ -20,23 +20,23 @@ limitations under the License.
 
 package constants
 
-// LimitMaxVinsPerResgroup set maximum number of VINs instances per Resource Group
-const LimitMaxVinsPerResgroup = 4
+// LIMIT_MAX_VINS_PER_RESGROUP set maximum number of VINs instances per Resource Group
+const LIMIT_MAX_VINS_PER_RESGROUP = 4
 
-// MaxSshKeysPerCompute sets maximum number of user:ssh_key pairs to authorize when creating new compute
-const MaxSshKeysPerCompute = 12
+// MAX_SSHKEYS_PER_COMPUTE sets maximum number of user:ssh_key pairs to authorize when creating new compute
+const MAX_SSHKEYS_PER_COMPUTE = 12
 
-// MaxExtraDisksPerCompute sets maximum number of extra disks that can be added when creating new compute
-const MaxExtraDisksPerCompute = 12
+// MAX_EXTRA_DISKS_PER_COMPUTE sets maximum number of extra disks that can be added when creating new compute
+const MAX_EXTRA_DISKS_PER_COMPUTE = 12
 
-// MaxNetworksPerCompute sets maximum number of vNICs per compute
-const MaxNetworksPerCompute = 8
+// MAX_NETWORKS_PER_COMPUTE sets maximum number of vNICs per compute
+const MAX_NETWORKS_PER_COMPUTE = 8
 
-// MaxCpusPerCompute sets maximum number of vCPUs per compute
-const MaxCpusPerCompute = 128
+// MAX_CPUS_PER_COMPUTE sets maximum number of vCPUs per compute
+const MAX_CPUS_PER_COMPUTE = 128
 
-// MinRamPerCompute sets minimum amount of RAM per compute in MB
-const MinRamPerCompute = 128
+// MIN_RAM_PER_COMPUTE sets minimum amount of RAM per compute in MB
+const MIN_RAM_PER_COMPUTE = 128
 
-// RAMDivisibility sets divisibility of RAM value
-const RAMDivisibility = 128
+// RAM_DIVISIBILITY sets divisibility of RAM value
+const RAM_DIVISIBILITY = 128
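The renamed limits are plain integers, so a caller-side sanity check against them is straightforward. Below is a minimal, self-contained sketch of how a compute request might be validated against these values; the constant values mirror the diff above, but the helper itself is illustrative and not part of the provider (which wires these constants into schema validators instead, as shown further down).

```go
// Illustrative pre-flight check against the limits defined above.
package main

import "fmt"

const (
	MAX_NETWORKS_PER_COMPUTE = 8   // maximum number of vNICs per compute
	MAX_CPUS_PER_COMPUTE     = 128 // maximum number of vCPUs per compute
	MIN_RAM_PER_COMPUTE      = 128 // minimum RAM per compute, MB
	RAM_DIVISIBILITY         = 128 // RAM must be a multiple of this value, MB
)

// checkComputeRequest is a hypothetical helper, not the provider's API.
func checkComputeRequest(cpus, ramMB, networks int) error {
	switch {
	case cpus < 1 || cpus > MAX_CPUS_PER_COMPUTE:
		return fmt.Errorf("cpu count must be between 1 and %d", MAX_CPUS_PER_COMPUTE)
	case ramMB < MIN_RAM_PER_COMPUTE:
		return fmt.Errorf("ram must be at least %d MB", MIN_RAM_PER_COMPUTE)
	case ramMB%RAM_DIVISIBILITY != 0:
		return fmt.Errorf("ram must be a multiple of %d MB", RAM_DIVISIBILITY)
	case networks < 1 || networks > MAX_NETWORKS_PER_COMPUTE:
		return fmt.Errorf("number of networks must be between 1 and %d", MAX_NETWORKS_PER_COMPUTE)
	}
	return nil
}

func main() {
	fmt.Println(checkComputeRequest(4, 4096, 2)) // <nil>
	fmt.Println(checkComputeRequest(4, 1000, 2)) // ram must be a multiple of 128 MB
}
```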
@@ -20,4 +20,4 @@ limitations under the License.
 
 package location
 
-const LocationsListAPI = "/restmachine/cloudapi/locations/list" // Returns list of GridRecord on success
+const LOCATIONS_LIST_API = "/restmachine/cloudapi/locations/list" // Returns list of GridRecord on success
@@ -35,6 +35,7 @@ import (
 "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/rg"
 "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/snapshot"
 "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/stack"
+"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vfpool"
 "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins"
 
 cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account"
@@ -44,17 +45,19 @@ import (
 cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup"
 cb_grid "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/grid"
 cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image"
+cb_k8ci "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8ci"
+cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s"
 cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm"
 cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb"
+cb_node "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/node"
 cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice"
 cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg"
 cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep"
 cb_stack "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/stack"
+cb_user "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/user"
+cb_vfpool "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vfpool"
 cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins"
 
 // cb_vgpu "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vgpu"
-cb_k8ci "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8ci"
-cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s"
 )
 
 func newDataSourcesMap() map[string]*schema.Resource {
@@ -106,6 +109,7 @@ func newDataSourcesMap() map[string]*schema.Resource {
 "decort_disk_list_unattached": disks.DataSourceDiskListUnattached(),
 "decort_disk_snapshot": disks.DataSourceDiskSnapshot(),
 "decort_disk_snapshot_list": disks.DataSourceDiskSnapshotList(),
+"decort_disk_replication": disks.DataSourceDiskReplication(),
 "decort_account_list": account.DataSourceAccountList(),
 "decort_account_computes_list": account.DataSourceAccountComputesList(),
 "decort_account_disks_list": account.DataSourceAccountDisksList(),
@@ -138,6 +142,8 @@ func newDataSourcesMap() map[string]*schema.Resource {
 "decort_flipgroup_list": flipgroup.DataSourceFlipGroupList(),
 "decort_stack": stack.DataSourceStack(),
 "decort_stack_list": stack.DataSourceStackList(),
+"decort_vfpool": vfpool.DataSourceVFPool(),
+"decort_vfpool_list": vfpool.DataSourceVFPoolList(),
 "decort_account_resource_consumption_list": account.DataSourceAccountResourceConsumptionList(),
 "decort_account_resource_consumption_get": account.DataSourceAccountResourceConsumptionGet(),
 "decort_kvmvm_list_deleted": kvmvm.DataSourceComputeListDeleted(),
@@ -158,6 +164,7 @@ func newDataSourcesMap() map[string]*schema.Resource {
 "decort_cb_account_resource_consumption_get": cb_account.DataSourceAccountResourceConsumptionGet(),
 "decort_cb_account_resource_consumption_list": cb_account.DataSourceAccountResourceConsumptionList(),
 "decort_cb_account_audits_list": cb_account.DataSourceAccountAuditsList(),
+"decort_cb_account_available_templates_list": cb_account.DataSourceAccountAvailableTemplatesList(),
 "decort_cb_audit": cb_audit.DataSourceAudit(),
 "decort_cb_audit_list": cb_audit.DataSourceAuditList(),
 "decort_cb_audit_linked_jobs": cb_audit.DataSourceAuditLinkedJobs(),
@@ -194,6 +201,8 @@ func newDataSourcesMap() map[string]*schema.Resource {
 "decort_cb_kvmvm_snapshot_usage": cb_kvmvm.DataSourceComputeSnapshotUsage(),
 "decort_cb_kvmvm_user_list": cb_kvmvm.DataSourceComputeUserList(),
 "decort_cb_kvmvm_vgpu_list": cb_kvmvm.DataSourceComputeVGPUList(),
+"decort_cb_node": cb_node.DataSourceNode(),
+"decort_cb_node_list": cb_node.DataSourceNodeList(),
 "decort_cb_disk": cb_disks.DataSourceDisk(),
 "decort_cb_disk_list": cb_disks.DataSourceDiskList(),
 "decort_cb_disk_list_deleted": cb_disks.DataSourceDiskListDeleted(),
@@ -202,6 +211,7 @@ func newDataSourcesMap() map[string]*schema.Resource {
 "decort_cb_disk_list_unattached": cb_disks.DataSourceDiskListUnattached(),
 "decort_cb_disk_snapshot": cb_disks.DataSourceDiskSnapshot(),
 "decort_cb_disk_snapshot_list": cb_disks.DataSourceDiskSnapshotList(),
+"decort_cb_disk_replication": cb_disks.DataSourceDiskReplication(),
 "decort_cb_pcidevice": cb_pcidevice.DataSourcePcidevice(),
 "decort_cb_pcidevice_list": cb_pcidevice.DataSourcePcideviceList(),
 "decort_cb_rg": cb_rg.DataSourceResgroup(),
@@ -231,6 +241,11 @@ func newDataSourcesMap() map[string]*schema.Resource {
 "decort_cb_flipgroup": cb_flipgroup.DataSourceFlipgroup(),
 "decort_cb_stack_list": cb_stack.DataSourceStacksList(),
 "decort_cb_stack": cb_stack.DataSourceStack(),
+"decort_cb_user": cb_user.DataSourceUser(),
+"decort_cb_user_get_audit": cb_user.DataSourceUserGetAudit(),
+"decort_cb_user_list": cb_user.DataSourceUserList(),
+"decort_cb_vfpool": cb_vfpool.DataSourceVFPool(),
+"decort_cb_vfpool_list": cb_vfpool.DataSourceVFPoolList(),
 "decort_cb_vins": cb_vins.DataSourceVins(),
 "decort_cb_vins_list": cb_vins.DataSourceVinsList(),
 "decort_cb_vins_audits": cb_vins.DataSourceVinsAudits(),
@@ -40,64 +40,74 @@ import (
 cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet"
 cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup"
 cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image"
+cb_k8ci "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8ci"
 cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s"
 cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm"
 cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb"
 cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice"
 cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg"
 cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep"
-cb_k8ci "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8ci"
+cb_user "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/user"
+cb_vfpool "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vfpool"
 cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins"
 )
 
 func newResourcesMap() map[string]*schema.Resource {
 return map[string]*schema.Resource{
 "decort_resgroup": rg.ResourceResgroup(),
 "decort_kvmvm": kvmvm.ResourceCompute(),
 "decort_disk": disks.ResourceDisk(),
 "decort_disk_snapshot": disks.ResourceDiskSnapshot(),
+"decort_disk_replication": disks.ResourceDiskReplication(),
 "decort_vins": vins.ResourceVins(),
 "decort_pfw": pfw.ResourcePfw(),
 "decort_k8s": k8s.ResourceK8s(),
 "decort_k8s_wg": k8s.ResourceK8sWg(),
 "decort_k8s_cp": k8s.ResourceK8sCP(),
 "decort_snapshot": snapshot.ResourceSnapshot(),
 "decort_account": account.ResourceAccount(),
 "decort_bservice": bservice.ResourceBasicService(),
 "decort_bservice_group": bservice.ResourceBasicServiceGroup(),
 "decort_image": image.ResourceImage(),
+"decort_image_from_blank_compute": image.ResourceImageFromBlankCompute(),
+"decort_image_from_platform_disk": image.ResourceImageFromPlatformDisk(),
 "decort_image_virtual": image.ResourceImageVirtual(),
 "decort_lb": lb.ResourceLB(),
 "decort_lb_backend": lb.ResourceLBBackend(),
 "decort_lb_backend_server": lb.ResourceLBBackendServer(),
 "decort_lb_frontend": lb.ResourceLBFrontend(),
 "decort_lb_frontend_bind": lb.ResourceLBFrontendBind(),
 "decort_flipgroup": flipgroup.ResourceFlipgroup(),
 "decort_vins_static_route": vins.ResourceStaticRoute(),
 
 "decort_cb_account": cb_account.ResourceAccount(),
 "decort_cb_extnet": cb_extnet.ResourceExtnetCB(),
 "decort_cb_extnet_static_route": cb_extnet.ResourceStaticRoute(),
 "decort_cb_disk": cb_disks.ResourceDisk(),
 "decort_cb_disk_snapshot": cb_disks.ResourceDiskSnapshot(),
+"decort_cb_disk_replication": cb_disks.ResourceDiskReplication(),
 "decort_cb_image": cb_image.ResourceImage(),
+"decort_cb_image_from_blank_compute": cb_image.ResourceImageFromBlankCompute(),
+"decort_cb_image_from_platform_disk": cb_image.ResourceImageFromPlatformDisk(),
 "decort_cb_virtual_image": cb_image.ResourceVirtualImage(),
 "decort_cb_cdrom_image": cb_image.ResourceCDROMImage(),
 "decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(),
 "decort_cb_sep": cb_sep.ResourceSep(),
 "decort_cb_sep_config": cb_sep.ResourceSepConfig(),
 "decort_cb_kvmvm": cb_kvmvm.ResourceCompute(),
 "decort_cb_vins": cb_vins.ResourceVins(),
 "decort_cb_k8ci": cb_k8ci.ResourceK8CI(),
 "decort_cb_k8s_cp": cb_k8s.ResourceK8sCP(),
 "decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(),
 "decort_cb_vins_static_route": cb_vins.ResourceStaticRoute(),
 "decort_cb_flipgroup": cb_flipgroup.ResourceFlipgroup(),
 "decort_cb_lb": cb_lb.ResourceLB(),
 "decort_cb_lb_backend": cb_lb.ResourceLBBackend(),
 "decort_cb_lb_backend_server": cb_lb.ResourceLBBackendServer(),
 "decort_cb_lb_frontend": cb_lb.ResourceLBFrontend(),
 "decort_cb_lb_frontend_bind": cb_lb.ResourceLBFrontendBind(),
 "decort_cb_rg": cb_rg.ResourceResgroup(),
+"decort_cb_user": cb_user.ResourceUser(),
+"decort_cb_vfpool": cb_vfpool.ResourceVFPool(),
 }
 }
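As a quick sanity check of the registrations above, a unit test in the same provider package could assert that the newly added names are present in the map. This is only a sketch using the unexported `newResourcesMap` constructor shown in the diff; such a test is not part of this changeset.

```go
// Sketch of a provider-package test asserting the new registrations exist.
package provider

import "testing"

func TestNewResourcesRegistered(t *testing.T) {
	resources := newResourcesMap()
	for _, name := range []string{
		"decort_disk_replication",
		"decort_image_from_blank_compute",
		"decort_image_from_platform_disk",
		"decort_cb_user",
		"decort_cb_vfpool",
	} {
		if _, ok := resources[name]; !ok {
			t.Errorf("resource %q is not registered", name)
		}
	}
}
```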
@@ -307,6 +307,13 @@ func dataSourceAccountSchemaMake() map[string]*schema.Schema {
 Type: schema.TypeString,
 Computed: true,
 },
+"compute_features": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Schema{
+Type: schema.TypeString,
+},
+},
 "created_by": {
 Type: schema.TypeString,
 Computed: true,
@@ -135,6 +135,11 @@ func dataSourceAccountComputesListSchemaMake() map[string]*schema.Schema {
 Optional: true,
 Description: "Filter by extnet ID",
 },
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
@@ -101,6 +101,11 @@ func dataSourceAccountDisksListSchemaMake() map[string]*schema.Schema {
 Optional: true,
 Description: "Filter by disk type",
 },
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
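The new `sort_by` attribute follows the `+|-(field)` convention described in its schema description, e.g. `+name` for ascending or `-created_time` for descending order. The provider simply forwards the raw string to the SDK request (see the `req.SortBy` assignments further below); the sketch here only makes the expected format explicit and is not the provider's actual implementation.

```go
// Illustrative only: interpret a sort_by value of the form "+field" / "-field".
package main

import (
	"fmt"
	"strings"
)

type sortSpec struct {
	Field      string
	Descending bool
}

func parseSortBy(s string) (sortSpec, error) {
	if len(s) < 2 || (s[0] != '+' && s[0] != '-') {
		return sortSpec{}, fmt.Errorf("sort_by must look like +field or -field, got %q", s)
	}
	return sortSpec{Field: strings.TrimSpace(s[1:]), Descending: s[0] == '-'}, nil
}

func main() {
	spec, _ := parseSortBy("-created_time")
	fmt.Printf("%+v\n", spec) // {Field:created_time Descending:true}
}
```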
@@ -46,13 +46,14 @@ func flattenAccountList(al *account.ListAccounts) []map[string]interface{} {
 res := make([]map[string]interface{}, 0)
 for _, acc := range al.Data {
 temp := map[string]interface{}{
 "acl": flattenRgAcl(acc.ACL),
+"compute_features": acc.ComputeFeatures,
 "created_time": acc.CreatedTime,
 "deleted_time": acc.DeletedTime,
 "account_id": acc.ID,
 "account_name": acc.Name,
 "status": acc.Status,
 "updated_time": acc.UpdatedTime,
 }
 res = append(res, temp)
 }
@@ -112,6 +113,11 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
 Optional: true,
 Description: "Filter by status",
 },
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
@@ -159,6 +165,13 @@ func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
 },
 },
 },
+"compute_features": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Schema{
+Type: schema.TypeString,
+},
+},
 "created_time": {
 Type: schema.TypeInt,
 Computed: true,
@@ -138,6 +138,11 @@ func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
 Required: true,
 Description: "ID of the account",
 },
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
@@ -103,6 +103,11 @@ func dataSourceAccountTemplatesListSchemaMake() map[string]*schema.Schema {
 Optional: true,
 Description: "Filter by type",
 },
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
@@ -54,6 +54,8 @@ func flattenAccountVinsList(avl *account.ListVINS) []map[string]interface{} {
 "deleted_by": av.DeletedBy,
 "deleted_time": av.DeletedTime,
 "external_ip": av.ExternalIP,
+"extnet_id": av.ExtnetId,
+"free_ips": av.FreeIPs,
 "vin_id": av.ID,
 "vin_name": av.Name,
 "network": av.Network,
@@ -112,6 +114,11 @@ func dataSourceAccountVinsListSchemaMake() map[string]*schema.Schema {
 Optional: true,
 Description: "Filter by external IP",
 },
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
@@ -160,6 +167,14 @@ func dataSourceAccountVinsListSchemaMake() map[string]*schema.Schema {
 Type: schema.TypeString,
 Computed: true,
 },
+"extnet_id": {
+Type: schema.TypeInt,
+Computed: true,
+},
+"free_ips": {
+Type: schema.TypeInt,
+Computed: true,
+},
 "vin_id": {
 Type: schema.TypeInt,
 Computed: true,
@@ -12,6 +12,7 @@ func flattenAccount(d *schema.ResourceData, acc account.RecordAccount) error {
 d.Set("acl", flattenAccAcl(acc.ACL))
 d.Set("company", acc.Company)
 d.Set("companyurl", acc.CompanyURL)
+d.Set("compute_features", acc.ComputeFeatures)
 d.Set("created_by", acc.CreatedBy)
 d.Set("created_time", acc.CreatedTime)
 d.Set("deactivation_time", acc.DeactivationTime)
@@ -279,19 +279,6 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf
 }
 }
 
-if d.HasChange("restore") {
-restore := d.Get("restore").(bool)
-if restore && acc.Status == "DELETED" {
-req := account.RestoreRequest{
-AccountID: accountId,
-}
-_, err := c.CloudAPI().Account().Restore(ctx, req)
-if err != nil {
-return diag.FromErr(err)
-}
-}
-}
-
 if d.HasChange("users") {
 deletedUsers := make([]interface{}, 0)
 addedUsers := make([]interface{}, 0)
@@ -542,6 +529,13 @@ func resourceAccountSchemaMake() map[string]*schema.Schema {
 Type: schema.TypeString,
 Computed: true,
 },
+"compute_features": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Schema{
+Type: schema.TypeString,
+},
+},
 "created_by": {
 Type: schema.TypeString,
 Computed: true,
@@ -84,6 +84,10 @@ func utilityAccountComputesListCheckPresence(ctx context.Context, d *schema.Reso
 req.ExtNetID = uint64(extnet_id.(int))
 }
 
+if sortBy, ok := d.GetOk("sort_by"); ok {
+req.SortBy = sortBy.(string)
+}
+
 if page, ok := d.GetOk("page"); ok {
 req.Page = uint64(page.(int))
 }
@@ -67,6 +67,10 @@ func utilityAccountDeletedListCheckPresence(ctx context.Context, d *schema.Resou
 req.Name = name.(string)
 }
 
+if sortBy, ok := d.GetOk("sort_by"); ok {
+req.SortBy = sortBy.(string)
+}
+
 log.Debugf("utilityAccountDeletedListCheckPresence: load")
 accountDeletedList, err := c.CloudAPI().Account().ListDeleted(ctx, req)
 if err != nil {
@@ -67,6 +67,10 @@ func utilityAccountDisksListCheckPresence(ctx context.Context, d *schema.Resourc
 req.Type = typeVal.(string)
 }
 
+if sortBy, ok := d.GetOk("sort_by"); ok {
+req.SortBy = sortBy.(string)
+}
+
 if page, ok := d.GetOk("page"); ok {
 req.Page = uint64(page.(int))
 }
@@ -69,6 +69,10 @@ func utilityAccountListCheckPresence(ctx context.Context, d *schema.ResourceData
 req.Status = status.(string)
 }
 
+if sortBy, ok := d.GetOk("sort_by"); ok {
+req.SortBy = sortBy.(string)
+}
+
 log.Debugf("utilityAccountListCheckPresence: load account list")
 accountList, err := c.CloudAPI().Account().List(ctx, req)
 if err != nil {
@@ -80,6 +80,10 @@ func utilityAccountRGListCheckPresence(ctx context.Context, d *schema.ResourceDa
 req.Status = status.(string)
 }
 
+if sortBy, ok := d.GetOk("sort_by"); ok {
+req.SortBy = sortBy.(string)
+}
+
 log.Debugf("utilityAccountRGListCheckPresence: load account list")
 accountRGList, err := c.CloudAPI().Account().ListRG(ctx, req)
 if err != nil {
@@ -63,6 +63,9 @@ func utilityAccountTemplatesListCheckPresence(ctx context.Context, d *schema.Res
 if typeTemplates, ok := d.GetOk("type"); ok {
 req.Type = typeTemplates.(string)
 }
+if sortBy, ok := d.GetOk("sort_by"); ok {
+req.SortBy = sortBy.(string)
+}
 if page, ok := d.GetOk("page"); ok {
 req.Page = uint64(page.(int))
 }
@@ -60,6 +60,10 @@ func utilityAccountVinsListCheckPresence(ctx context.Context, d *schema.Resource
 req.RGID = uint64(rg_id.(int))
 }
 
+if sortBy, ok := d.GetOk("sort_by"); ok {
+req.SortBy = sortBy.(string)
+}
+
 if page, ok := d.GetOk("page"); ok {
 req.Page = uint64(page.(int))
 }
@@ -131,6 +131,11 @@ func dataSourceBasicServiceListSchemaMake() map[string]*schema.Schema {
 Optional: true,
 Description: "ID of the resource group to query for BasicService instances",
 },
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
@@ -34,7 +34,7 @@ package bservice
 
 import (
 "context"
-"fmt"
+"errors"
 "strconv"
 
 "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@@ -85,37 +85,38 @@ func resourceBasicServiceCreate(ctx context.Context, d *schema.ResourceData, m i
 return diag.FromErr(err)
 }
 
-warn := dc.Warnings{}
+warnings := dc.Warnings{}
+enable := d.Get("enable").(bool)
 
-if !d.Get("enable").(bool) && d.Get("start").(bool) {
-warn.Add(fmt.Errorf("the basic service is in tech_status %s, troubles can occur with the usage. Please, enable bservice first", service.TechStatus))
-}
-
-if d.Get("enable").(bool) && (service.Status == status.Disabled || service.Status == status.Created) {
+if enable && (service.Status == status.Disabled || service.Status == status.Created) {
 log.Debugf("trying to enable bservice %v", serviceId)
 _, err := c.CloudAPI().BService().Enable(ctx, bservice.EnableRequest{
 ServiceID: serviceId,
 })
 
 if err != nil {
-return diag.FromErr(err)
+warnings.Add(err)
 }
 
 }
-if d.Get("start").(bool) && d.Get("enable").(bool) {
+if d.Get("start").(bool) {
 
 log.Debugf("trying to start bservice %v", serviceId)
-_, err = c.CloudAPI().BService().Start(ctx, bservice.StartRequest{
-ServiceID: serviceId,
-})
-
-if err != nil {
-return diag.FromErr(err)
+if !enable {
+warnings.Add(errors.New("can not start bservice that is not enabled. Set enable = true and start = true to enable and start bservice"))
+}
+
+if enable {
+_, err := c.CloudAPI().BService().Start(ctx, bservice.StartRequest{
+ServiceID: serviceId,
+})
+
+if err != nil {
+warnings.Add(err)
+}
 }
 }
 
-return append(warn.Get(), resourceBasicServiceRead(ctx, d, m)...)
+return append(warnings.Get(), resourceBasicServiceRead(ctx, d, m)...)
 }
 
 func resourceBasicServiceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@@ -241,44 +242,35 @@ func resourceBasicServiceUpdate(ctx context.Context, d *schema.ResourceData, m i
 case status.Disabling:
 log.Debugf("The basic service is in status: %s, troubles can occur with the update.", bs.Status)
 case status.Deleted:
-id, _ := strconv.ParseUint(d.Id(), 10, 64)
-
-if restore, ok := d.GetOk("restore"); ok {
-if restore.(bool) {
-restoreReq := bservice.RestoreRequest{
-ServiceID: id,
-}
-_, err := c.CloudAPI().BService().Restore(ctx, restoreReq)
-if err != nil {
-return diag.FromErr(err)
-}
-}
-hasChanged = true
-}
+if d.Get("restore").(bool) {
+restoreReq := bservice.RestoreRequest{
+ServiceID: bs.ID,
+}
+_, err := c.CloudAPI().BService().Restore(ctx, restoreReq)
+if err != nil {
+return diag.FromErr(err)
+}
+}
+hasChanged = true
 
-if enable, ok := d.GetOk("enable"); ok {
-if enable.(bool) {
+if d.Get("enable").(bool) {
 enableReq := bservice.EnableRequest{
-ServiceID: id,
+ServiceID: bs.ID,
 }
 _, err = c.CloudAPI().BService().Enable(ctx, enableReq)
 if err != nil {
 return diag.FromErr(err)
 }
-hasChanged = true
-}
 }
 
-if start, ok := d.GetOk("start"); ok {
-if start.(bool) {
+if d.Get("start").(bool) {
 startReq := bservice.StartRequest{
-ServiceID: id,
+ServiceID: bs.ID,
 }
 _, err = c.CloudAPI().BService().Start(ctx, startReq)
 if err != nil {
 return diag.FromErr(err)
 }
-hasChanged = true
-}
 }
 case status.Deleting:
@@ -301,8 +293,7 @@ func resourceBasicServiceUpdate(ctx context.Context, d *schema.ResourceData, m i
 }
 
 if d.HasChange("enable") {
-enable := d.Get("enable").(bool)
-if enable {
+if d.Get("enable").(bool) {
 req := bservice.EnableRequest{
 ServiceID: uint64(d.Get("service_id").(int)),
 }
@@ -324,8 +315,7 @@ func resourceBasicServiceUpdate(ctx context.Context, d *schema.ResourceData, m i
 }
 
 if d.HasChange("start") {
-start := d.Get("start").(bool)
-if start {
+if d.Get("start").(bool) {
 req := bservice.StartRequest{
 ServiceID: uint64(d.Get("service_id").(int)),
 }
@@ -43,6 +43,7 @@ import (
 "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice"
 "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
 "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
+"repository.basistech.ru/BASIS/terraform-provider-decort/internal/validators"
 )
 
 func resourceBasicServiceGroupCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@@ -372,8 +373,12 @@ func resourceBasicServiceGroupSchemaMake() map[string]*schema.Schema {
 Description: "compute CPU number. All computes in the group have the same CPU count",
 },
 "ram": {
 Type: schema.TypeInt,
 Required: true,
+ValidateFunc: validation.All(
+validation.IntAtLeast(constants.MIN_RAM_PER_COMPUTE),
+validators.DivisibleBy(constants.RAM_DIVISIBILITY),
+),
 Description: "compute RAM volume in MB. All computes in the group have the same RAM volume",
 },
 "disk": {
@@ -52,6 +52,10 @@ func utilityBasicServiceDeletedListCheckPresence(ctx context.Context, d *schema.
 if rgId, ok := d.GetOk("rg_id"); ok {
 req.RGID = uint64(rgId.(int))
 }
 
+if sortBy, ok := d.GetOk("sort_by"); ok {
+req.SortBy = sortBy.(string)
+}
 if page, ok := d.GetOk("page"); ok {
 req.Page = uint64(page.(int))
 }
@@ -52,6 +52,9 @@ func utilityBasicServiceListCheckPresence(ctx context.Context, d *schema.Resourc
 if rgId, ok := d.GetOk("rg_id"); ok {
 req.RGID = uint64(rgId.(int))
 }
+if sortBy, ok := d.GetOk("sort_by"); ok {
+req.SortBy = sortBy.(string)
+}
 if page, ok := d.GetOk("page"); ok {
 req.Page = uint64(page.(int))
 }
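The `validators.DivisibleBy(constants.RAM_DIVISIBILITY)` call wired into the `ram` field above comes from the provider's internal validators package, whose implementation is not shown in this diff. A minimal sketch of what such a validator could look like, assuming the standard terraform-plugin-sdk v2 `schema.SchemaValidateFunc` signature (illustrative only, not the provider's confirmed code):

```go
// Sketch of a DivisibleBy-style validator for terraform-plugin-sdk v2.
package validators

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// DivisibleBy returns a validator that rejects values not divisible by divisor.
func DivisibleBy(divisor int) schema.SchemaValidateFunc {
	return func(i interface{}, k string) (warnings []string, errors []error) {
		v, ok := i.(int)
		if !ok {
			errors = append(errors, fmt.Errorf("expected type of %q to be int", k))
			return
		}
		if divisor == 0 || v%divisor != 0 {
			errors = append(errors, fmt.Errorf("expected %q to be divisible by %d, got %d", k, divisor, v))
		}
		return
	}
}
```

Combined with `validation.IntAtLeast(constants.MIN_RAM_PER_COMPUTE)` inside `validation.All(...)`, this rejects RAM values below 128 MB or not aligned to 128 MB at plan time rather than at API call time.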
@@ -294,6 +294,39 @@ func dataSourceDiskSchemaMake() map[string]*schema.Schema {
 Computed: true,
 Description: "Time of the last deletion attempt",
 },
+"replication": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Resource{
+Schema: map[string]*schema.Schema{
+"disk_id": {
+Type: schema.TypeInt,
+Computed: true,
+},
+"pool_id": {
+Type: schema.TypeString,
+Computed: true,
+},
+"role": {
+Type: schema.TypeString,
+Computed: true,
+},
+"self_volume_id": {
+Type: schema.TypeString,
+Computed: true,
+},
+"storage_id": {
+Type: schema.TypeString,
+Computed: true,
+},
+"volume_id": {
+Type: schema.TypeString,
+Computed: true,
+},
+},
+},
+Description: "Replication status",
+},
 // "reality_device_number": {
 // Type: schema.TypeInt,
 // Computed: true,
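The computed `replication` block mirrors the replication record returned by the SDK for a disk. As a rough illustration of how such a record maps onto the six schema fields above, here is a flatten-style sketch; `ItemReplication` is a stand-in struct, not the decort SDK's actual type, and the helper is not taken from this changeset.

```go
// Illustrative flatten helper for the "replication" block defined above.
package disks

// ItemReplication is a hypothetical stand-in for the SDK's replication record.
type ItemReplication struct {
	DiskID       uint64
	PoolID       string
	Role         string
	SelfVolumeID string
	StorageID    string
	VolumeID     string
}

// flattenReplicationBlock maps the record onto the schema field names shown in the diff.
func flattenReplicationBlock(r ItemReplication) []map[string]interface{} {
	return []map[string]interface{}{
		{
			"disk_id":        r.DiskID,
			"pool_id":        r.PoolID,
			"role":           r.Role,
			"self_volume_id": r.SelfVolumeID,
			"storage_id":     r.StorageID,
			"volume_id":      r.VolumeID,
		},
	}
}
```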
@@ -108,6 +108,11 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
 Optional: true,
 Description: "find by pool name",
 },
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
@@ -376,6 +381,39 @@ func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
 // Computed: true,
 // Description: "ID of the reference to the disk",
 // },
+"replication": {
+Type: schema.TypeList,
+Computed: true,
+Elem: &schema.Resource{
+Schema: map[string]*schema.Schema{
+"disk_id": {
+Type: schema.TypeInt,
+Computed: true,
+},
+"pool_id": {
+Type: schema.TypeString,
+Computed: true,
+},
+"role": {
+Type: schema.TypeString,
+Computed: true,
+},
+"self_volume_id": {
+Type: schema.TypeString,
+Computed: true,
+},
+"storage_id": {
+Type: schema.TypeString,
+Computed: true,
+},
+"volume_id": {
+Type: schema.TypeString,
+Computed: true,
+},
+},
+},
+Description: "Replication status",
+},
 "res_id": {
 Type: schema.TypeString,
 Computed: true,
@@ -57,6 +57,11 @@ func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m
 
 func dataSourceDiskListTypesSchemaMake() map[string]*schema.Schema {
 res := map[string]*schema.Schema{
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
@@ -87,6 +87,11 @@ func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.Resource
 
 func dataSourceDiskListTypesDetailedSchemaMake() map[string]*schema.Schema {
 res := map[string]*schema.Schema{
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
@@ -113,6 +113,11 @@ func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
 Optional: true,
 Description: "type of the disks",
 },
+"sort_by": {
+Type: schema.TypeString,
+Optional: true,
+Description: "sort by one of supported fields, format +|-(field)",
+},
 "page": {
 Type: schema.TypeInt,
 Optional: true,
480
internal/service/cloudapi/disks/data_source_disk_replication.go
Normal file
480
internal/service/cloudapi/disks/data_source_disk_replication.go
Normal file
@@ -0,0 +1,480 @@
|
|||||||
|
/*
|
||||||
|
Copyright (c) 2019-2024 Digital Energy Cloud Solutions LLC. All Rights Reserved.
|
||||||
|
Authors:
|
||||||
|
Petr Krutov, <petr.krutov@digitalenergy.online>
|
||||||
|
Stanislav Solovev, <spsolovev@digitalenergy.online>
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
|
||||||
|
Orchestration Technology) with Terraform by Hashicorp.
|
||||||
|
|
||||||
|
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
|
||||||
|
|
||||||
|
Please see README.md to learn where to place source code so that it
|
||||||
|
builds seamlessly.
|
||||||
|
|
||||||
|
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
|
||||||
|
*/
|
||||||
|
|
||||||
|
package disks
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
|
||||||
|
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
|
||||||
|
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||||
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
func dataSourceDiskReplicationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
|
log.Debugf("dataSourceDiskReplicationRead: called for disk with ID: %s", d.Id())
|
||||||
|
c := m.(*controller.ControllerCfg)
|
||||||
|
|
||||||
|
req := disks.ReplicationStatusRequest{
|
||||||
|
DiskID: uint64(d.Get("disk_id").(int)),
|
||||||
|
}
|
||||||
|
|
||||||
|
status, err := c.CloudAPI().Disks().ReplicationStatus(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
d.SetId("")
|
||||||
|
return diag.FromErr(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
id := uuid.New()
|
||||||
|
d.SetId(id.String())
|
||||||
|
|
||||||
|
disk, err := utilityDiskReplicaCheckPresence(ctx, d, m)
|
||||||
|
if err != nil {
|
||||||
|
d.SetId("")
|
||||||
|
return diag.FromErr(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
flattenDiskReplica(d, disk, status)
|
||||||
|
|
||||||
|
log.Debugf("dataSourceDiskReplicationRead: read complete for disk with ID: %s", d.Id())
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func dataSourceDiskReplicationSchemaMake() map[string]*schema.Schema {
  rets := map[string]*schema.Schema{
    "disk_id": {
      Type:        schema.TypeInt,
      Required:    true,
      Description: "Id of primary disk",
    },
    "replica_disk_id": {
      Type:        schema.TypeInt,
      Required:    true,
      Description: "Id of secondary disk",
    },
    "status_replication": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Status of replication",
    },
    "account_id": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "The unique ID of the subscriber-owner of the disk",
    },
    "account_name": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "The name of the subscriber (account) to whom this disk belongs",
    },
    "acl": {
      Type:     schema.TypeString,
      Computed: true,
    },
    // "boot_partition": {
    // 	Type:        schema.TypeInt,
    // 	Computed:    true,
    // 	Description: "Number of disk partitions",
    // },
    "computes": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "compute_id": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "compute_name": {
            Type:     schema.TypeString,
            Computed: true,
          },
        },
      },
    },
    "created_time": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Created time",
    },
    "deleted_time": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Deleted time",
    },
    "desc": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Description of disk",
    },
    "destruction_time": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Time of final deletion",
    },
    "devicename": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Name of the device",
    },
    // "disk_path": {
    // 	Type:        schema.TypeString,
    // 	Computed:    true,
    // 	Description: "Disk path",
    // },
    "gid": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "ID of the grid (platform)",
    },
    // "guid": {
    // 	Type:        schema.TypeInt,
    // 	Computed:    true,
    // 	Description: "Disk ID on the storage side",
    // },
    "image_id": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Image ID",
    },
    "images": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Schema{
        Type: schema.TypeString,
      },
      Description: "IDs of images using the disk",
    },
    "iotune": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "read_bytes_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Number of bytes to read per second",
          },
          "read_bytes_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum number of bytes to read",
          },
          "read_iops_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Number of io read operations per second",
          },
          "read_iops_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum number of io read operations",
          },
          "size_iops_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Size of io operations",
          },
          "total_bytes_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Total size bytes per second",
          },
          "total_bytes_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum total size of bytes per second",
          },
          "total_iops_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Total number of io operations per second",
          },
          "total_iops_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum total number of io operations per second",
          },
          "write_bytes_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Number of bytes to write per second",
          },
          "write_bytes_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum number of bytes to write per second",
          },
          "write_iops_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Number of write operations per second",
          },
          "write_iops_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum number of write operations per second",
          },
        },
      },
    },
    // "iqn": {
    // 	Type:        schema.TypeString,
    // 	Computed:    true,
    // 	Description: "Disk IQN",
    // },
    // "login": {
    // 	Type:        schema.TypeString,
    // 	Computed:    true,
    // 	Description: "Login to access the disk",
    // },
    // "milestones": {
    // 	Type:        schema.TypeInt,
    // 	Computed:    true,
    // 	Description: "Milestones",
    // },
    "disk_name": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Name of disk",
    },
    "order": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Disk order",
    },
    "params": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Disk params",
    },
    "parent_id": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "ID of the parent disk",
    },
    // "passwd": {
    // 	Type:        schema.TypeString,
    // 	Computed:    true,
    // 	Description: "Password to access the disk",
    // },
    "pci_slot": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "ID of the pci slot to which the disk is connected",
    },
    "pool": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Pool for disk location",
    },
    "present_to": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Schema{
        Type: schema.TypeInt,
      },
    },
    // "purge_attempts": {
    // 	Type:        schema.TypeInt,
    // 	Computed:    true,
    // 	Description: "Number of deletion attempts",
    // },
    "purge_time": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Time of the last deletion attempt",
    },
    "replication": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "disk_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "pool_id": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "role": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "self_volume_id": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "storage_id": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "volume_id": {
            Type:     schema.TypeString,
            Computed: true,
          },
        },
      },
      Description: "Replication status",
    },
    // "reality_device_number": {
    // 	Type:        schema.TypeInt,
    // 	Computed:    true,
    // 	Description: "Reality device number",
    // },
    // "reference_id": {
    // 	Type:        schema.TypeString,
    // 	Computed:    true,
    // 	Description: "ID of the reference to the disk",
    // },
    "res_id": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Resource ID",
    },
    "res_name": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Name of the resource",
    },
    "role": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Disk role",
    },
    "sep_id": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Storage endpoint provider ID to create disk",
    },
    "sep_type": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
    },
    "shareable": {
      Type:     schema.TypeBool,
      Computed: true,
    },
    "size_max": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Size in GB",
    },
    "size_used": {
      Type:        schema.TypeFloat,
      Computed:    true,
      Description: "Number of used space, in GB",
    },
    "snapshots": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "guid": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "ID of the snapshot",
          },
          "label": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "Name of the snapshot",
          },
          "res_id": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "Reference to the snapshot",
          },
          "snap_set_guid": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "The set snapshot ID",
          },
          "snap_set_time": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "The set time of the snapshot",
          },
          "timestamp": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Snapshot time",
          },
        },
      },
    },
    "status": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Disk status",
    },
    "tech_status": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Technical status of the disk",
    },
    "type": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
    },
    "vmid": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Virtual Machine ID (Deprecated)",
    },
  }

  return rets
}
func DataSourceDiskReplication() *schema.Resource {
  return &schema.Resource{
    SchemaVersion: 1,

    ReadContext: dataSourceDiskReplicationRead,

    Timeouts: &schema.ResourceTimeout{
      Read:    &constants.Timeout30s,
      Default: &constants.Timeout60s,
    },

    Schema: dataSourceDiskReplicationSchemaMake(),
  }
}
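For orientation only: wiring this data source into the provider happens outside this diff. The sketch below is an assumption about how that registration usually looks with SDK v2; the map location and the "decort_disk_replication" type name are illustrative, not taken from this changeset.

package sketch

import (
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks"
)

// newDataSourcesMapSketch shows how the new data source could be exposed.
// The real provider builds its data source map elsewhere in the tree.
func newDataSourcesMapSketch() map[string]*schema.Resource {
  return map[string]*schema.Resource{
    // The type name below is an assumption for illustration.
    "decort_disk_replication": disks.DataSourceDiskReplication(),
  }
}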
@@ -94,6 +94,11 @@ func dataSourceDiskDeletedListSchemaMake() map[string]*schema.Schema {
 			Optional:    true,
 			Description: "type of the disks",
 		},
+		"sort_by": {
+			Type:        schema.TypeString,
+			Optional:    true,
+			Description: "sort by one of supported fields, format +|-(field)",
+		},
 		"page": {
 			Type:     schema.TypeInt,
 			Optional: true,
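The sort_by fields added throughout this changeset all take a "+field" / "-field" string. A schema-level validator is not part of this changeset; the sketch below is a hypothetical helper showing how that format could be checked with the SDK's SchemaValidateFunc.

package sketch

import (
  "fmt"

  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// validateSortBy is a hypothetical helper (not in this changeset): it checks the
// "+|-(field)" format the sort_by descriptions refer to, against a caller-supplied
// list of allowed field names.
func validateSortBy(allowed []string) schema.SchemaValidateFunc {
  return func(v interface{}, key string) ([]string, []error) {
    s, ok := v.(string)
    if !ok || len(s) < 2 || (s[0] != '+' && s[0] != '-') {
      return nil, []error{fmt.Errorf("%q must look like +field or -field", key)}
    }
    for _, field := range allowed {
      if s[1:] == field {
        return nil, nil
      }
    }
    return nil, []error{fmt.Errorf("%q: unsupported sort field %q", key, s[1:])}
  }
}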
@@ -352,6 +357,39 @@ func dataSourceDiskDeletedListSchemaMake() map[string]*schema.Schema {
 			Computed:    true,
 			Description: "Time of the last deletion attempt",
 		},
+		"replication": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Elem: &schema.Resource{
+				Schema: map[string]*schema.Schema{
+					"disk_id": {
+						Type:     schema.TypeInt,
+						Computed: true,
+					},
+					"pool_id": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+					"role": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+					"self_volume_id": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+					"storage_id": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+					"volume_id": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+				},
+			},
+			Description: "Replication status",
+		},
 		// "reality_device_number": {
 		// 	Type:        schema.TypeInt,
 		// 	Computed:    true,
@@ -72,7 +72,6 @@ func flattenDiskListUnattached(ul *disks.ListDisksUnattached) []map[string]inter
-
 func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
 	diskAcl, _ := json.Marshal(disk.ACL)

 	d.Set("account_id", disk.AccountID)
 	d.Set("account_name", disk.AccountName)
 	d.Set("acl", string(diskAcl))
@@ -103,6 +102,7 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
 	d.Set("present_to", disk.PresentTo)
 	// d.Set("purge_attempts", disk.PurgeAttempts)
 	d.Set("purge_time", disk.PurgeTime)
+	d.Set("replication", flattenDiskReplication(disk.Replication))
 	// d.Set("reality_device_number", disk.RealityDeviceNumber)
 	// d.Set("reference_id", disk.ReferenceID)
 	d.Set("res_id", disk.ResID)
@@ -120,6 +120,71 @@ func flattenDisk(d *schema.ResourceData, disk *disks.RecordDisk) {
 	d.Set("vmid", disk.VMID)
 }

+func flattenDiskReplication(rep disks.ItemReplication) []map[string]interface{} {
+	res := []map[string]interface{}{
+		{
+			"disk_id":        rep.DiskID,
+			"pool_id":        rep.PoolID,
+			"role":           rep.Role,
+			"self_volume_id": rep.SelfVolumeID,
+			"storage_id":     rep.StorageID,
+			"volume_id":      rep.VolumeID,
+		},
+	}
+	return res
+}
+
+func flattenDiskReplica(d *schema.ResourceData, disk *disks.RecordDisk, statusReplication string) {
+	diskAcl, _ := json.Marshal(disk.ACL)
+	d.Set("account_id", disk.AccountID)
+	d.Set("account_name", disk.AccountName)
+	d.Set("acl", string(diskAcl))
+	// d.Set("boot_partition", disk.BootPartition)
+	d.Set("computes", flattenDiskComputes(disk.Computes))
+	d.Set("created_time", disk.CreatedTime)
+	d.Set("deleted_time", disk.DeletedTime)
+	d.Set("desc", disk.Description)
+	d.Set("destruction_time", disk.DestructionTime)
+	d.Set("devicename", disk.DeviceName)
+	// d.Set("disk_path", disk.DiskPath)
+	d.Set("gid", disk.GID)
+	// d.Set("guid", disk.GUID)
+	d.Set("replica_disk_id", disk.ID)
+	d.Set("image_id", disk.ImageID)
+	d.Set("images", disk.Images)
+	d.Set("iotune", flattenIOTune(disk.IOTune))
+	// d.Set("iqn", disk.IQN)
+	// d.Set("login", disk.Login)
+	// d.Set("milestones", disk.Milestones)
+	d.Set("disk_name", disk.Name)
+	d.Set("order", disk.Order)
+	d.Set("params", disk.Params)
+	d.Set("parent_id", disk.ParentID)
+	// d.Set("passwd", disk.Passwd)
+	d.Set("pci_slot", disk.PCISlot)
+	d.Set("pool", disk.Pool)
+	d.Set("present_to", disk.PresentTo)
+	// d.Set("purge_attempts", disk.PurgeAttempts)
+	d.Set("purge_time", disk.PurgeTime)
+	d.Set("replication", flattenDiskReplication(disk.Replication))
+	// d.Set("reality_device_number", disk.RealityDeviceNumber)
+	// d.Set("reference_id", disk.ReferenceID)
+	d.Set("res_id", disk.ResID)
+	d.Set("res_name", disk.ResName)
+	d.Set("role", disk.Role)
+	d.Set("sep_id", disk.SepID)
+	d.Set("sep_type", disk.SepType)
+	d.Set("size_max", disk.SizeMax)
+	d.Set("size_used", disk.SizeUsed)
+	d.Set("shareable", disk.Shareable)
+	d.Set("snapshots", flattenDiskSnapshotList(disk.Snapshots))
+	d.Set("status", disk.Status)
+	d.Set("status_replication", statusReplication)
+	d.Set("tech_status", disk.TechStatus)
+	d.Set("type", disk.Type)
+	d.Set("vmid", disk.VMID)
+}
+
 func flattenDiskSnapshotList(sl disks.ListSnapshots) []interface{} {
 	res := make([]interface{}, 0, len(sl))
 	for _, snapshot := range sl {
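The nested object is flattened as a one-element TypeList, the usual SDK v2 convention for a single nested block. A sketch only, assuming the standard access pattern rather than code from this diff, of how a consumer reads one of its fields back out of state:

package sketch

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// replicationRole shows how a reader of the flattened "replication" block would
// pull out a single field; illustrative only, not part of the provider.
func replicationRole(d *schema.ResourceData) string {
  items, ok := d.Get("replication").([]interface{})
  if !ok || len(items) == 0 {
    return ""
  }
  return items[0].(map[string]interface{})["role"].(string)
}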
@@ -166,6 +231,7 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
 			"pool":        disk.Pool,
 			"present_to":  disk.PresentTo,
 			"purge_time":  disk.PurgeTime,
+			"replication": flattenDiskReplication(disk.Replication),
 			"res_id":      disk.ResID,
 			"res_name":    disk.ResName,
 			"role":        disk.Role,
@@ -2,9 +2,11 @@ package disks

 import (
 	"context"
+	"fmt"

 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
 	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/locations"
 	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
 )
@@ -34,3 +36,26 @@ func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool,

 	return len(locationList.FilterByGID(gid).Data) != 0, nil
 }
+
+func existDiskID(ctx context.Context, diskId uint64, m interface{}) error {
+	c := m.(*controller.ControllerCfg)
+
+	req := disks.ListRequest{
+		ByID: diskId,
+	}
+
+	diskList, err := c.CloudAPI().Disks().List(ctx, req)
+	if err != nil {
+		return err
+	}
+
+	if len(diskList.Data) == 0 {
+		return fmt.Errorf("resourceDiskReplication: can't create or update Disk replication because DiskID %d is not allowed or does not exist", diskId)
+	}
+
+	if diskList.Data[0].SepType != "TATLIN" {
+		return fmt.Errorf("resourceDiskReplication: can't create or update Disk replication because DiskID %d is not TATLIN SEP Type", diskId)
+	}
+
+	return nil
+}
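existDiskID is the guard the new replication resource relies on: per this changeset, replication is only attempted for disks backed by a TATLIN SEP. A usage sketch mirroring what resourceDiskReplicationCreate below does; it is illustrative, not new provider code.

package disks

import (
  "context"

  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// replicateGuarded is an illustrative wrapper showing the intended call order:
// validate the source disk first, then replicate. Not part of this changeset.
func replicateGuarded(ctx context.Context, d *schema.ResourceData, m interface{}) error {
  diskId := uint64(d.Get("disk_id").(int))
  if err := existDiskID(ctx, diskId, m); err != nil {
    return err // disk does not exist or is not on a TATLIN SEP
  }
  // A real caller would now issue c.CloudAPI().Disks().Replicate(...),
  // exactly as resourceDiskReplicationCreate does below.
  return nil
}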
@@ -671,6 +671,39 @@ func resourceDiskSchemaMake() map[string]*schema.Schema {
 			Computed:    true,
 			Description: "Time of the last deletion attempt",
 		},
+		"replication": {
+			Type:     schema.TypeList,
+			Computed: true,
+			Elem: &schema.Resource{
+				Schema: map[string]*schema.Schema{
+					"disk_id": {
+						Type:     schema.TypeInt,
+						Computed: true,
+					},
+					"pool_id": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+					"role": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+					"self_volume_id": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+					"storage_id": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+					"volume_id": {
+						Type:     schema.TypeString,
+						Computed: true,
+					},
+				},
+			},
+			Description: "Replication status",
+		},
 		// "reality_device_number": {
 		// 	Type:        schema.TypeInt,
 		// 	Computed:    true,
642  internal/service/cloudapi/disks/resource_disk_replication.go  Normal file
@@ -0,0 +1,642 @@
/*
Copyright (c) 2019-2024 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package disks

import (
  "context"
  "strconv"

  log "github.com/sirupsen/logrus"
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"

  "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func resourceDiskReplicationCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  diskId := uint64(d.Get("disk_id").(int))

  log.Debugf("resourceDiskReplicationCreate: called for disk with ID: %d", diskId)

  c := m.(*controller.ControllerCfg)

  err := existDiskID(ctx, diskId, m)
  if err != nil {
    return diag.FromErr(err)
  }

  reqCreate := disks.ReplicateRequest{
    DiskID:   diskId,
    Name:     d.Get("disk_name").(string),
    SepID:    uint64(d.Get("sep_id").(int)),
    PoolName: d.Get("pool_name").(string),
  }

  diskReplicaId, err := c.CloudAPI().Disks().Replicate(ctx, reqCreate)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

  d.SetId(strconv.FormatUint(diskReplicaId, 10))
  d.Set("replica_disk_id", diskReplicaId)

  log.Debugf("resourceDiskReplicationCreate: create replica complete for disk with ID: %d", diskId)

  warnings := dc.Warnings{}

  if start, ok := d.GetOk("start"); ok && !start.(bool) {
    log.Debugf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d, try to stop", diskId, diskReplicaId)
    reqStop := disks.ReplicationStopRequest{
      DiskID: diskId,
    }
    _, err = c.CloudAPI().Disks().ReplicationStop(ctx, reqStop)
    if err != nil {
      warnings.Add(err)
    }
    log.Debugf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d, stopped", diskId, diskReplicaId)
  }
  return append(resourceDiskReplicationRead(ctx, d, m), warnings.Get()...)
}

func resourceDiskReplicationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  log.Debugf("resourceDiskReplicationRead: called for disk with ID: %s", d.Id())
  c := m.(*controller.ControllerCfg)

  req := disks.ReplicationStatusRequest{
    DiskID: uint64(d.Get("disk_id").(int)),
  }

  status, err := c.CloudAPI().Disks().ReplicationStatus(ctx, req)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

  diskReplica, err := utilityDiskReplicaCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

  flattenDiskReplica(d, diskReplica, status)

  log.Debugf("resourceDiskReplicationRead: read complete for disk with ID: %s", d.Id())
  return nil
}
func resourceDiskReplicationUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  diskId := uint64(d.Get("disk_id").(int))
  log.Debugf("resourceDiskReplicationUpdate: called for disk with ID: %d", diskId)

  err := existDiskID(ctx, diskId, m)
  if err != nil {
    return diag.FromErr(err)
  }

  if d.HasChange("start") {
    if err := utilityDiskReplicationUpdateStartStop(ctx, d, m); err != nil {
      return diag.FromErr(err)
    }
  }

  if d.HasChange("pause") {
    if err := utilityDiskReplicationUpdatePause(ctx, d, m); err != nil {
      return diag.FromErr(err)
    }
  }

  if d.HasChange("reverse") {
    if err := utilityDiskReplicationUpdateReverse(ctx, d, m); err != nil {
      return diag.FromErr(err)
    }
  }

  log.Debugf("resourceDiskReplicationUpdate: update complete for disk with ID: %d", diskId)
  return resourceDiskReplicationRead(ctx, d, m)
}

func resourceDiskReplicationDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
  diskId := uint64(d.Get("disk_id").(int))
  log.Debugf("resourceDiskReplicationDelete: called for disk with ID: %d", diskId)

  disk, err := utilityDiskReplicaCheckPresence(ctx, d, m)
  if err != nil {
    d.SetId("")
    return diag.FromErr(err)
  }

  c := m.(*controller.ControllerCfg)

  if d.Get("start").(bool) {
    reqStop := disks.ReplicationStopRequest{
      DiskID: uint64(d.Get("disk_id").(int)),
    }

    log.Debugf("resourceDiskReplicationDelete: stop replication for disk with ID: %d", diskId)
    _, err = c.CloudAPI().Disks().ReplicationStop(ctx, reqStop)
    if err != nil {
      return diag.FromErr(err)
    }
    log.Debugf("resourceDiskReplicationDelete: stop replication for disk with ID: %d, complete", diskId)
  }

  reqDelete := disks.DeleteRequest{
    DiskID:      disk.ID,
    Detach:      d.Get("detach").(bool),
    Permanently: d.Get("permanently").(bool),
    Reason:      d.Get("reason").(string),
  }

  log.Debugf("resourceDiskReplicationDelete: delete disk replica for disk with ID: %d", diskId)
  _, err = c.CloudAPI().Disks().Delete(ctx, reqDelete)
  if err != nil {
    return diag.FromErr(err)
  }
  log.Debugf("resourceDiskReplicationDelete: delete disk replica for disk with ID: %d, complete", diskId)

  d.SetId("")

  return nil
}
func resourceDiskReplicationSchemaMake() map[string]*schema.Schema {
  rets := map[string]*schema.Schema{
    "disk_id": {
      Type:        schema.TypeInt,
      Required:    true,
      Description: "Id of primary disk",
    },
    "disk_name": {
      Type:        schema.TypeString,
      Required:    true,
      Description: "Name of disk replica",
    },
    "sep_id": {
      Type:        schema.TypeInt,
      Required:    true,
      Description: "Storage endpoint provider ID to create disk replica",
    },
    "pool_name": {
      Type:        schema.TypeString,
      Required:    true,
      Description: "Pool for disk location",
    },
    "pause": {
      Type:        schema.TypeBool,
      Optional:    true,
      Description: "Pause or resume replication",
    },
    "reverse": {
      Type:        schema.TypeBool,
      Optional:    true,
      Default:     false,
      Description: "Reverse replication",
    },
    "start": {
      Type:        schema.TypeBool,
      Optional:    true,
      Default:     true,
      Description: "Start/Stop replication",
    },
    "detach": {
      Type:        schema.TypeBool,
      Optional:    true,
      Default:     false,
      Description: "Detach disk from machine first",
    },
    "permanently": {
      Type:        schema.TypeBool,
      Optional:    true,
      Default:     false,
      Description: "Delete disk permanently",
    },
    "reason": {
      Type:        schema.TypeString,
      Optional:    true,
      Description: "Reason for disk deletion",
    },
    "replica_disk_id": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Id of replica disk",
    },
    "status_replication": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Status of replication",
    },
    "account_id": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "The unique ID of the subscriber-owner of the disk",
    },
    "account_name": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "The name of the subscriber (account) to whom this disk belongs",
    },
    "acl": {
      Type:     schema.TypeString,
      Computed: true,
    },
    // "boot_partition": {
    // 	Type:        schema.TypeInt,
    // 	Computed:    true,
    // 	Description: "Number of disk partitions",
    // },
    "computes": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "compute_id": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "compute_name": {
            Type:     schema.TypeString,
            Computed: true,
          },
        },
      },
    },
    "created_time": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Created time",
    },
    "deleted_time": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Deleted time",
    },
    "desc": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Description of disk",
    },
    "destruction_time": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Time of final deletion",
    },
    "devicename": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Name of the device",
    },
    // "disk_path": {
    // 	Type:        schema.TypeString,
    // 	Computed:    true,
    // 	Description: "Disk path",
    // },
    "gid": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "ID of the grid (platform)",
    },
    // "guid": {
    // 	Type:        schema.TypeInt,
    // 	Computed:    true,
    // 	Description: "Disk ID on the storage side",
    // },
    "image_id": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Image ID",
    },
    "images": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Schema{
        Type: schema.TypeString,
      },
      Description: "IDs of images using the disk",
    },
    "iotune": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "read_bytes_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Number of bytes to read per second",
          },
          "read_bytes_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum number of bytes to read",
          },
          "read_iops_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Number of io read operations per second",
          },
          "read_iops_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum number of io read operations",
          },
          "size_iops_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Size of io operations",
          },
          "total_bytes_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Total size bytes per second",
          },
          "total_bytes_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum total size of bytes per second",
          },
          "total_iops_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Total number of io operations per second",
          },
          "total_iops_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum total number of io operations per second",
          },
          "write_bytes_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Number of bytes to write per second",
          },
          "write_bytes_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum number of bytes to write per second",
          },
          "write_iops_sec": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Number of write operations per second",
          },
          "write_iops_sec_max": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Maximum number of write operations per second",
          },
        },
      },
    },
    // "iqn": {
    // 	Type:        schema.TypeString,
    // 	Computed:    true,
    // 	Description: "Disk IQN",
    // },
    // "login": {
    // 	Type:        schema.TypeString,
    // 	Computed:    true,
    // 	Description: "Login to access the disk",
    // },
    // "milestones": {
    // 	Type:        schema.TypeInt,
    // 	Computed:    true,
    // 	Description: "Milestones",
    // },
    "order": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Disk order",
    },
    "params": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Disk params",
    },
    "parent_id": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "ID of the parent disk",
    },
    // "passwd": {
    // 	Type:        schema.TypeString,
    // 	Computed:    true,
    // 	Description: "Password to access the disk",
    // },
    "pci_slot": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "ID of the pci slot to which the disk is connected",
    },
    "present_to": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Schema{
        Type: schema.TypeInt,
      },
    },
    // "purge_attempts": {
    // 	Type:        schema.TypeInt,
    // 	Computed:    true,
    // 	Description: "Number of deletion attempts",
    // },
    "purge_time": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Time of the last deletion attempt",
    },
    "replication": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "disk_id": {
            Type:     schema.TypeInt,
            Computed: true,
          },
          "pool_id": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "role": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "self_volume_id": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "storage_id": {
            Type:     schema.TypeString,
            Computed: true,
          },
          "volume_id": {
            Type:     schema.TypeString,
            Computed: true,
          },
        },
      },
      Description: "Replication status",
    },
    // "reality_device_number": {
    // 	Type:        schema.TypeInt,
    // 	Computed:    true,
    // 	Description: "Reality device number",
    // },
    // "reference_id": {
    // 	Type:        schema.TypeString,
    // 	Computed:    true,
    // 	Description: "ID of the reference to the disk",
    // },
    "res_id": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Resource ID",
    },
    "res_name": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Name of the resource",
    },
    "role": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Disk role",
    },
    "sep_type": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Type SEP. Defines the type of storage system and contains one of the values set in the cloud platform",
    },
    "shareable": {
      Type:     schema.TypeBool,
      Computed: true,
    },
    "size_max": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Size in GB",
    },
    "size_used": {
      Type:        schema.TypeFloat,
      Computed:    true,
      Description: "Number of used space, in GB",
    },
    "snapshots": {
      Type:     schema.TypeList,
      Computed: true,
      Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
          "guid": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "ID of the snapshot",
          },
          "label": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "Name of the snapshot",
          },
          "res_id": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "Reference to the snapshot",
          },
          "snap_set_guid": {
            Type:        schema.TypeString,
            Computed:    true,
            Description: "The set snapshot ID",
          },
          "snap_set_time": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "The set time of the snapshot",
          },
          "timestamp": {
            Type:        schema.TypeInt,
            Computed:    true,
            Description: "Snapshot time",
          },
        },
      },
    },
    "status": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Disk status",
    },
    "tech_status": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "Technical status of the disk",
    },
    "type": {
      Type:        schema.TypeString,
      Computed:    true,
      Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
    },
    "vmid": {
      Type:        schema.TypeInt,
      Computed:    true,
      Description: "Virtual Machine ID (Deprecated)",
    },
  }

  return rets
}
func ResourceDiskReplication() *schema.Resource {
  return &schema.Resource{
    SchemaVersion: 1,

    CreateContext: resourceDiskReplicationCreate,
    ReadContext:   resourceDiskReplicationRead,
    UpdateContext: resourceDiskReplicationUpdate,
    DeleteContext: resourceDiskReplicationDelete,

    Importer: &schema.ResourceImporter{
      StateContext: schema.ImportStatePassthroughContext,
    },

    Timeouts: &schema.ResourceTimeout{
      Create:  &constants.Timeout600s,
      Read:    &constants.Timeout300s,
      Update:  &constants.Timeout300s,
      Delete:  &constants.Timeout300s,
      Default: &constants.Timeout300s,
    },

    Schema: resourceDiskReplicationSchemaMake(),
  }
}
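Outside Terraform, the same SDK surface this resource wraps can be driven directly. A minimal sketch, assuming the file lives inside the provider module, a configured *controller.ControllerCfg is already available (how it is built is not part of this diff), and with placeholder field values:

package sketch

import (
  "context"
  "fmt"

  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)

// replicateOnce sketches the SDK sequence the resource drives: create a replica,
// query replication status, then stop replication. Illustrative only.
func replicateOnce(ctx context.Context, c *controller.ControllerCfg, diskID uint64) error {
  replicaID, err := c.CloudAPI().Disks().Replicate(ctx, disks.ReplicateRequest{
    DiskID:   diskID,
    Name:     "replica-of-disk", // placeholder name
    SepID:    1,                 // placeholder SEP ID
    PoolName: "pool-a",          // placeholder pool
  })
  if err != nil {
    return err
  }

  status, err := c.CloudAPI().Disks().ReplicationStatus(ctx, disks.ReplicationStatusRequest{DiskID: diskID})
  if err != nil {
    return err
  }
  fmt.Printf("replica %d created, replication status: %s\n", replicaID, status)

  _, err = c.CloudAPI().Disks().ReplicationStop(ctx, disks.ReplicationStopRequest{DiskID: diskID})
  return err
}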
@@ -77,6 +77,9 @@ func utilityDiskListCheckPresence(ctx context.Context, d *schema.ResourceData, m
 	if pool_name, ok := d.GetOk("pool_name"); ok {
 		req.Pool = pool_name.(string)
 	}
+	if sortBy, ok := d.GetOk("sort_by"); ok {
+		req.SortBy = sortBy.(string)
+	}
 	if page, ok := d.GetOk("page"); ok {
 		req.Page = uint64(page.(int))
 	}
@@ -54,6 +54,10 @@ func utilityDiskListDeletedCheckPresence(ctx context.Context, d *schema.Resource
 		req.Type = typev.(string)
 	}

+	if sortBy, ok := d.GetOk("sort_by"); ok {
+		req.SortBy = sortBy.(string)
+	}
+
 	if page, ok := d.GetOk("page"); ok {
 		req.Page = uint64(page.(int))
 	}
@@ -37,6 +37,9 @@ func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.Resou
 	if pool_name, ok := d.GetOk("pool_name"); ok {
 		req.Pool = pool_name.(string)
 	}
+	if sortBy, ok := d.GetOk("sort_by"); ok {
+		req.SortBy = sortBy.(string)
+	}
 	if page, ok := d.GetOk("page"); ok {
 		req.Page = uint64(page.(int))
 	}
174  internal/service/cloudapi/disks/utility_disk_replica.go  Normal file
@@ -0,0 +1,174 @@
/*
Copyright (c) 2019-2024 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package disks

import (
  "context"
  "strconv"

  log "github.com/sirupsen/logrus"
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"

  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func utilityDiskReplicationUpdateStartStop(ctx context.Context, d *schema.ResourceData, m interface{}) error {
  diskId := uint64(d.Get("disk_id").(int))
  targetDiskId, _ := strconv.ParseUint(d.Id(), 10, 64)

  log.Debugf("utilityDiskReplicationUpdateStartStop: start update for disk replica with ID: %d", diskId)
  c := m.(*controller.ControllerCfg)

  start, ok := d.GetOk("start")

  if ok && start.(bool) {
    log.Debugf("utilityDiskReplicationUpdateStartStop: start disk replication from Disk with ID: %d to Disk with ID: %d", diskId, targetDiskId)
    req := disks.ReplicationStartRequest{
      DiskID:       diskId,
      TargetDiskID: targetDiskId,
    }
    _, err := c.CloudAPI().Disks().ReplicationStart(ctx, req)
    if err != nil {
      return err
    }
    log.Debugf("utilityDiskReplicationUpdateStartStop: start disk replication from Disk with ID: %d to Disk with ID: %d, complete", diskId, targetDiskId)
  }

  if ok && !start.(bool) {
    log.Debugf("utilityDiskReplicationUpdateStartStop: stop disk replication from Disk with ID: %d to Disk with ID: %d", targetDiskId, diskId)
    req := disks.ReplicationStopRequest{
      DiskID: targetDiskId,
    }
    _, err := c.CloudAPI().Disks().ReplicationStop(ctx, req)
    if err != nil {
      return err
    }
    log.Debugf("utilityDiskReplicationUpdateStartStop: stop disk replication from Disk with ID: %d to Disk with ID: %d, complete", targetDiskId, diskId)
  }

  log.Debugf("utilityDiskReplicationUpdateStartStop: complete update for disk replica with ID: %d", diskId)
  return nil
}

func utilityDiskReplicationUpdatePause(ctx context.Context, d *schema.ResourceData, m interface{}) error {
  diskId := uint64(d.Get("disk_id").(int))
  log.Debugf("utilityDiskReplicationUpdatePause: start update for disk replica with ID: %d", diskId)
  c := m.(*controller.ControllerCfg)

  pause, ok := d.GetOk("pause")

  if ok && pause.(bool) {
    log.Debugf("utilityDiskReplicationUpdatePause: pause disk replication with ID: %d", diskId)
    req := disks.ReplicationSuspendRequest{
      DiskID: diskId,
    }
    _, err := c.CloudAPI().Disks().ReplicationSuspend(ctx, req)
    if err != nil {
      return err
    }
    log.Debugf("utilityDiskReplicationUpdatePause: pause disk replication with ID: %d, complete", diskId)
  }

  if ok && !pause.(bool) {
    log.Debugf("utilityDiskReplicationUpdatePause: resume disk replication with ID: %d", diskId)
    req := disks.ReplicationResumeRequest{
      DiskID: diskId,
    }
    _, err := c.CloudAPI().Disks().ReplicationResume(ctx, req)
    if err != nil {
      return err
    }
    log.Debugf("utilityDiskReplicationUpdatePause: resume disk replication with ID: %d, complete", diskId)
  }

  log.Debugf("utilityDiskReplicationUpdatePause: complete update for disk replica with ID: %d", diskId)
  return nil
}

func utilityDiskReplicationUpdateReverse(ctx context.Context, d *schema.ResourceData, m interface{}) error {
  diskId := uint64(d.Get("disk_id").(int))
  targetDiskId, _ := strconv.ParseUint(d.Id(), 10, 64)

  log.Debugf("utilityDiskReplicaUpdateReverse: start update for disk replica with ID: %d", diskId)
  c := m.(*controller.ControllerCfg)

  reverse, ok := d.GetOk("reverse")

  if ok && reverse.(bool) {
    log.Debugf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d", diskId, targetDiskId)
    req := disks.ReplicationReverseRequest{
      DiskID: diskId,
    }
    _, err := c.CloudAPI().Disks().ReplicationReverse(ctx, req)
    if err != nil {
      return err
    }
    log.Debugf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d, complete", diskId, targetDiskId)
  }

  if ok && !reverse.(bool) {
    log.Debugf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d", targetDiskId, diskId)
    req := disks.ReplicationReverseRequest{
      DiskID: targetDiskId,
    }
    _, err := c.CloudAPI().Disks().ReplicationReverse(ctx, req)
    if err != nil {
      return err
    }
    log.Debugf("utilityDiskReplicaUpdateReverse: reverse disk replication from Disk with ID: %d to Disk with ID: %d, complete", targetDiskId, diskId)
  }

  log.Debugf("utilityDiskReplicaUpdateReverse: complete update for disk replica with ID: %d", diskId)
  return nil
}

func utilityDiskReplicaCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.RecordDisk, error) {
  c := m.(*controller.ControllerCfg)

  req := disks.GetRequest{}

  if d.Id() != "" {
    diskId, _ := strconv.ParseUint(d.Id(), 10, 64)
    req.DiskID = diskId
  } else {
    req.DiskID = uint64(d.Get("replica_disk_id").(int))
  }

  log.Debugf("utilityDiskReplicaCheckPresence: load disk")
  disk, err := c.CloudAPI().Disks().Get(ctx, req)
  if err != nil {
    return nil, err
  }

  return disk, nil
}
@@ -47,6 +47,9 @@ func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.Re
 		Detailed: true,
 	}

+	if sortBy, ok := d.GetOk("sort_by"); ok {
+		req.SortBy = sortBy.(string)
+	}
 	if page, ok := d.GetOk("page"); ok {
 		req.Page = uint64(page.(int))
 	}
@@ -47,6 +47,9 @@ func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceDa
 		Detailed: false,
 	}

+	if sortBy, ok := d.GetOk("sort_by"); ok {
+		req.SortBy = sortBy.(string)
+	}
 	if page, ok := d.GetOk("page"); ok {
 		req.Page = uint64(page.(int))
 	}
@@ -72,6 +72,11 @@ func dataSourceExtnetComputesListSchemaMake() map[string]*schema.Schema {
 			Optional:    true,
 			Description: "Filter by compute ID",
 		},
+		"sort_by": {
+			Type:        schema.TypeString,
+			Optional:    true,
+			Description: "sort by one of supported fields, format +|-(field)",
+		},
 		"page": {
 			Type:     schema.TypeInt,
 			Optional: true,
@@ -92,6 +92,11 @@ func dataSourceExtnetListSchemaMake() map[string]*schema.Schema {
 			Optional:    true,
 			Description: "Find by status",
 		},
+		"sort_by": {
+			Type:        schema.TypeString,
+			Optional:    true,
+			Description: "sort by one of supported fields, format +|-(field)",
+		},
 		"page": {
 			Type:     schema.TypeInt,
 			Optional: true,
@@ -54,6 +54,9 @@ func utilityExtnetComputesListCheckPresence(ctx context.Context, d *schema.Resou
 	if compute_id, ok := d.GetOk("compute_id"); ok {
 		req.ComputeID = uint64(compute_id.(int))
 	}
+	if sortBy, ok := d.GetOk("sort_by"); ok {
+		req.SortBy = sortBy.(string)
+	}
 	if page, ok := d.GetOk("page"); ok {
 		req.Page = uint64(page.(int))
 	}
@@ -61,7 +64,6 @@ func utilityExtnetComputesListCheckPresence(ctx context.Context, d *schema.Resou
 		req.Size = uint64(size.(int))
 	}

-
 	log.Debugf("utilityExtnetComputesListCheckPresence")
 	extnetComputesList, err := c.CloudAPI().ExtNet().ListComputes(ctx, req)
 	if err != nil {
@@ -67,6 +67,9 @@ func utilityExtnetListCheckPresence(ctx context.Context, d *schema.ResourceData,
|
|||||||
if status, ok := d.GetOk("status"); ok {
|
if status, ok := d.GetOk("status"); ok {
|
||||||
req.Status = status.(string)
|
req.Status = status.(string)
|
||||||
}
|
}
|
||||||
|
if sortBy, ok := d.GetOk("sort_by"); ok {
|
||||||
|
req.SortBy = sortBy.(string)
|
||||||
|
}
|
||||||
if page, ok := d.GetOk("page"); ok {
|
if page, ok := d.GetOk("page"); ok {
|
||||||
req.Page = uint64(page.(int))
|
req.Page = uint64(page.(int))
|
||||||
}
|
}
|
||||||
|
|||||||
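Every list data source in this group gains the same optional `sort_by` argument in the `+|-(field)` format; the utilities above simply copy it into the SDK request when the user sets it. A minimal sketch of the resulting request, only to illustrate the format (the exact request type name `disks.ListTypesRequest` is an assumption here; the concrete types are the ones each utility already builds):

```go
// Schematic only: SortBy carries the raw "+field" / "-field" string straight
// through to the platform API, which performs the actual sorting.
req := disks.ListTypesRequest{
	Detailed: true,
	SortBy:   "-name", // descending by name; "+name" would sort ascending
	Page:     1,
}
```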
@@ -95,6 +95,11 @@ func dataSourceFlipgroupListSchemaMake() map[string]*schema.Schema {
 			Optional:    true,
 			Description: "Filter by RG ID",
 		},
+		"sort_by": {
+			Type:        schema.TypeString,
+			Optional:    true,
+			Description: "sort by one of supported fields, format +|-(field)",
+		},
 		"page": {
 			Type:     schema.TypeInt,
 			Optional: true,

@@ -105,6 +110,29 @@ func dataSourceFlipgroupListSchemaMake() map[string]*schema.Schema {
 			Optional:    true,
 			Description: "Page size",
 		},
+		"account_id": {
+			Type:        schema.TypeInt,
+			Optional:    true,
+			Description: "Account id",
+		},
+		"conn_id": {
+			Type:        schema.TypeInt,
+			Optional:    true,
+			Description: "Conn id",
+		},
+		"client_ids": {
+			Type:     schema.TypeList,
+			Optional: true,
+			Elem: &schema.Schema{
+				Type: schema.TypeInt,
+			},
+			Description: "client_ids",
+		},
+		"status": {
+			Type:        schema.TypeString,
+			Optional:    true,
+			Description: "Status",
+		},
 		"items": {
 			Type:     schema.TypeList,
 			Computed: true,

@@ -153,38 +153,48 @@ func utilityFlipgroupListCheckPresence(ctx context.Context, d *schema.ResourceDa
 	if name, ok := d.GetOk("name"); ok {
 		req.Name = name.(string)
 	}
 
 	if vinsId, ok := d.GetOk("vins_id"); ok {
 		req.VINSID = uint64(vinsId.(int))
 	}
 
 	if vinsName, ok := d.GetOk("vins_name"); ok {
 		req.VINSName = vinsName.(string)
 	}
 
 	if extNetId, ok := d.GetOk("extnet_id"); ok {
 		req.ExtNetID = uint64(extNetId.(int))
 	}
 
 	if byIp, ok := d.GetOk("by_ip"); ok {
 		req.ByIP = byIp.(string)
 	}
 
 	if rgId, ok := d.GetOk("rg_id"); ok {
 		req.RGID = uint64(rgId.(int))
 	}
 
 	if byId, ok := d.GetOk("by_id"); ok {
 		req.ByID = uint64(byId.(int))
 	}
+	if sortBy, ok := d.GetOk("sort_by"); ok {
+		req.SortBy = sortBy.(string)
+	}
 	if page, ok := d.GetOk("page"); ok {
 		req.Page = uint64(page.(int))
 	}
 
 	if size, ok := d.GetOk("size"); ok {
 		req.Size = uint64(size.(int))
 	}
+	if accountId, ok := d.GetOk("account_id"); ok {
+		req.AccountId = uint64(accountId.(int))
+	}
+	if connId, ok := d.GetOk("conn_id"); ok {
+		req.ConnId = uint64(connId.(int))
+	}
+	if cliensId, ok := d.GetOk("client_ids"); ok {
+		cliensIds := cliensId.([]interface{})
+		for _, elem := range cliensIds {
+			req.ClientIDs = append(req.ClientIDs, uint64(elem.(int)))
+		}
+	}
+	if status, ok := d.GetOk("status"); ok {
+		req.Status = status.(string)
+	}
 
 	fg_list, err := c.CloudAPI().FLIPGroup().List(ctx, req)
 	if err != nil {
@@ -117,6 +117,11 @@ func dataSourceImageListSchemaMake() map[string]*schema.Schema {
 			Optional:    true,
 			Description: "Find bootable images",
 		},
+		"sort_by": {
+			Type:        schema.TypeString,
+			Optional:    true,
+			Description: "sort by one of supported fields, format +|-(field)",
+		},
 		"page": {
 			Type:     schema.TypeInt,
 			Optional: true,
@@ -1,9 +1,11 @@
 package image
 
 import (
+	"encoding/json"
+	"strconv"
+
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image"
-	"strconv"
 )
 
 func flattenHistory(history []image.History) []map[string]interface{} {

@@ -21,6 +23,8 @@ func flattenHistory(history []image.History) []map[string]interface{} {
 }
 
 func flattenImage(d *schema.ResourceData, img *image.RecordImage) {
+	cdPresentedTo, _ := json.Marshal(img.CdPresentedTo)
+
 	d.Set("unc_path", img.UNCPath)
 	d.Set("ckey", img.CKey)
 	d.Set("account_id", img.AccountID)

@@ -29,6 +33,7 @@ func flattenImage(d *schema.ResourceData, img *image.RecordImage) {
 	d.Set("boot_type", img.BootType)
 	d.Set("bootable", img.Bootable)
 	d.Set("compute_ci_id", img.ComputeCIID)
+	d.Set("cd_presented_to", string(cdPresentedTo))
 	d.Set("deleted_time", img.DeletedTime)
 	d.Set("desc", img.Description)
 	d.Set("drivers", img.Drivers)

@@ -42,6 +47,7 @@ func flattenImage(d *schema.ResourceData, img *image.RecordImage) {
 	d.Set("link_to", img.LinkTo)
 	d.Set("milestones", img.Milestones)
 	d.Set("image_name", img.Name)
+	d.Set("network_interface_naming", img.NetworkInterfaceNaming)
 	d.Set("password", img.Password)
 	d.Set("pool_name", img.Pool)
 	d.Set("provider_name", img.ProviderName)

@@ -63,24 +69,25 @@ func flattenImageList(il *image.ListImages) []map[string]interface{} {
 	res := make([]map[string]interface{}, 0)
 	for _, img := range il.Data {
 		temp := map[string]interface{}{
 			"account_id":   img.AccountID,
 			"architecture": img.Architecture,
 			"boot_type":    img.BootType,
 			"bootable":     img.Bootable,
 			"cdrom":        img.CDROM,
 			"desc":         img.Description,
 			"drivers":      img.Drivers,
 			"hot_resize":   img.HotResize,
 			"image_id":     img.ID,
 			"link_to":      img.LinkTo,
 			"image_name":   img.Name,
+			"network_interface_naming": img.NetworkInterfaceNaming,
 			"pool_name":    img.Pool,
 			"sep_id":       img.SepID,
 			"size":         img.Size,
 			"status":       img.Status,
 			"type":         img.Type,
 			"username":     img.Username,
 			"virtual":      img.Virtual,
 		}
 		res = append(res, temp)
 	}
@@ -45,7 +45,6 @@ func dataSourceImageExtendSchemaMake() map[string]*schema.Schema {
 			Default:  false,
 			Optional: true,
 		},
-
 		"unc_path": {
 			Type:     schema.TypeString,
 			Computed: true,

@@ -78,6 +77,10 @@ func dataSourceImageExtendSchemaMake() map[string]*schema.Schema {
 			Type:     schema.TypeInt,
 			Computed: true,
 		},
+		"cd_presented_to": {
+			Type:     schema.TypeString,
+			Computed: true,
+		},
 		"deleted_time": {
 			Type:     schema.TypeInt,
 			Computed: true,

@@ -129,7 +132,6 @@ func dataSourceImageExtendSchemaMake() map[string]*schema.Schema {
 			Type:     schema.TypeBool,
 			Computed: true,
 		},
-
 		"last_modified": {
 			Type:     schema.TypeInt,
 			Computed: true,

@@ -146,6 +148,10 @@ func dataSourceImageExtendSchemaMake() map[string]*schema.Schema {
 			Type:     schema.TypeString,
 			Computed: true,
 		},
+		"network_interface_naming": {
+			Type:     schema.TypeString,
+			Computed: true,
+		},
 		"password": {
 			Type:     schema.TypeString,
 			Computed: true,

@@ -94,6 +94,10 @@ func dataSourceImageSchemaMake() map[string]*schema.Schema {
 			Computed:    true,
 			Description: "Image name",
 		},
+		"network_interface_naming": {
+			Type:     schema.TypeString,
+			Computed: true,
+		},
 		"pool_name": {
 			Type:     schema.TypeString,
 			Computed: true,
@@ -51,12 +51,6 @@ func resourceImageSchemaMake(sch map[string]*schema.Schema) map[string]*schema.S
 		Description: "URL where to download media from",
 	}
 
-	sch["gid"] = &schema.Schema{
-		Type:        schema.TypeInt,
-		Required:    true,
-		Description: "grid (platform) ID where this template should be create in",
-	}
-
 	sch["image_id"] = &schema.Schema{
 		Type:     schema.TypeInt,
 		Optional: true,

@@ -101,8 +95,7 @@ func resourceImageSchemaMake(sch map[string]*schema.Schema) map[string]*schema.S
 
 	sch["account_id"] = &schema.Schema{
 		Type:        schema.TypeInt,
-		Optional:    true,
-		Computed:    true,
+		Required:    true,
 		Description: "AccountId to make the image exclusive",
 	}
 

@@ -155,5 +148,13 @@ func resourceImageSchemaMake(sch map[string]*schema.Schema) map[string]*schema.S
 		Description: "whether to completely delete the image",
 	}
 
+	sch["network_interface_naming"] = &schema.Schema{
+		Type:         schema.TypeString,
+		Optional:     true,
+		Computed:     true,
+		ValidateFunc: validation.StringInSlice([]string{"eth", "ens"}, true),
+		Description:  "select a network interface naming pattern for your Linux machine. eth - onboard, ens - pci slot naming",
+	}
+
 	return sch
 }
@@ -2,9 +2,12 @@ package image
 
 import (
 	"context"
+	"fmt"
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
 	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/locations"
 	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
 )

@@ -34,3 +37,46 @@ func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool,
 
 	return len(locationList.FilterByGID(gid).Data) != 0, nil
 }
+
+func existComputeID(ctx context.Context, computeId uint64, m interface{}) error {
+	c := m.(*controller.ControllerCfg)
+	req := compute.GetRequest{ComputeID: computeId}
+
+	// check for compute existence
+	computeRecord, err := c.CloudAPI().Compute().Get(ctx, req)
+	if err != nil {
+		return fmt.Errorf("ComputeID %d is not allowed or does not exist", computeId)
+	}
+
+	// check if compute was created as blank
+	computeImageId := computeRecord.ImageID
+	bootImageId := -1
+	for _, d := range computeRecord.Disks {
+		if d.Type == "B" {
+			bootImageId = int(d.ImageID)
+			break
+		}
+	}
+
+	if computeImageId != 0 && bootImageId != 0 {
+		return fmt.Errorf("ComputeID %d is not allowed because it is not blank compute (either compute imageId or boot imageId are not zero)", computeId)
+	}
+
+	return nil
+}
+
+func existDiskID(ctx context.Context, diskId uint64, m interface{}) error {
+	c := m.(*controller.ControllerCfg)
+	req := disks.ListRequest{ByID: diskId}
+
+	diskList, err := c.CloudAPI().Disks().List(ctx, req)
+	if err != nil {
+		return err
+	}
+
+	if len(diskList.Data) != 1 {
+		return fmt.Errorf("diskId %d is not allowed or doesn't exist", diskId)
+	}
+
+	return nil
+}
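The two helpers above are consumed by the new image resources added further down in this changeset (image from blank compute, image from platform disk), which call them at the top of their Create functions to fail fast. A condensed sketch of that call pattern; the `validateImageSources` wrapper name and the error wrapping are illustrative additions, not part of this diff:

```go
// validateImageSources rejects the create early if the referenced compute or
// disk cannot be used as an image source. Illustrative only: the real
// resources call existComputeID / existDiskID directly from Create.
func validateImageSources(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	if computeId, ok := d.GetOk("compute_id"); ok {
		if err := existComputeID(ctx, uint64(computeId.(int)), m); err != nil {
			return fmt.Errorf("compute check failed: %w", err)
		}
	}
	if diskId, ok := d.GetOk("disk_id"); ok {
		if err := existDiskID(ctx, uint64(diskId.(int)), m); err != nil {
			return fmt.Errorf("disk check failed: %w", err)
		}
	}
	return nil
}
```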
@@ -48,34 +48,23 @@ import (
 func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string))
 
-	haveGID, err := existGID(ctx, d, m)
+	haveAccountID, err := existAccountID(ctx, d, m)
 	if err != nil {
 		return diag.FromErr(err)
 	}
 
-	if !haveGID {
-		return diag.Errorf("resourceImageCreate: can't create Image because GID %d is not allowed or does not exist", d.Get("gid").(int))
-	}
-
-	if _, ok := d.GetOk("account_id"); ok {
-		haveAccountID, err := existAccountID(ctx, d, m)
-		if err != nil {
-			return diag.FromErr(err)
-		}
-
-		if !haveAccountID {
-			return diag.Errorf("resourceImageCreate: can't create Image because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
-		}
+	if !haveAccountID {
+		return diag.Errorf("resourceImageCreate: can't create Image because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
 	}
 
 	c := m.(*controller.ControllerCfg)
-	req := image.CreateRequest{}
-	req.Name = d.Get("name").(string)
-	req.URL = d.Get("url").(string)
-	req.GID = uint64(d.Get("gid").(int))
-	req.BootType = d.Get("boot_type").(string)
-	req.ImageType = d.Get("type").(string)
+	req := image.CreateRequest{
+		AccountID: uint64(d.Get("account_id").(int)),
+		Name:      d.Get("name").(string),
+		URL:       d.Get("url").(string),
+		BootType:  d.Get("boot_type").(string),
+		ImageType: d.Get("type").(string),
+	}
 
 	drivers := []string{}
 	for _, driver := range d.Get("drivers").([]interface{}) {

@@ -93,9 +82,6 @@ func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interfac
 	if password, ok := d.GetOk("password"); ok {
 		req.Password = password.(string)
 	}
-	if accountId, ok := d.GetOk("account_id"); ok {
-		req.AccountID = uint64(accountId.(int))
-	}
 	if usernameDL, ok := d.GetOk("username_dl"); ok {
 		req.UsernameDL = usernameDL.(string)
 	}

@@ -111,6 +97,9 @@ func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interfac
 	if architecture, ok := d.GetOk("architecture"); ok {
 		req.Architecture = architecture.(string)
 	}
+	if networkInterfaceNaming, ok := d.GetOk("network_interface_naming"); ok {
+		req.NetworkInterfaceNaming = networkInterfaceNaming.(string)
+	}
 
 	imageId, err := c.CloudAPI().Image().Create(ctx, req)
 	if err != nil {

@@ -210,15 +199,13 @@ func resourceImageUpdate(ctx context.Context, d *schema.ResourceData, m interfac
 		return diag.Errorf("resourceImageUpdate: can't update Image because GID %d is not allowed or does not exist", d.Get("gid").(int))
 	}
 
-	if _, ok := d.GetOk("account_id"); ok {
-		haveAccountID, err := existAccountID(ctx, d, m)
-		if err != nil {
-			return diag.FromErr(err)
-		}
+	haveAccountID, err := existAccountID(ctx, d, m)
+	if err != nil {
+		return diag.FromErr(err)
+	}
 
-		if !haveAccountID {
-			return diag.Errorf("resourceImageUpdate: can't update Image because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
-		}
+	if !haveAccountID {
+		return diag.Errorf("resourceImageUpdate: can't update Image because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
 	}
 
 	image, err := utilityImageCheckPresence(ctx, d, m)
@@ -0,0 +1,508 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package image

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
	log "github.com/sirupsen/logrus"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/compute"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)

func resourceImageFromBlankComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	computeId := uint64(d.Get("compute_id").(int))
	name := d.Get("name").(string)

	log.Debugf("resourceImageFromBlankComputeCreate: called for image %s", name)

	err := existComputeID(ctx, computeId, m)
	if err != nil {
		return diag.Errorf("resourceImageFromBlankComputeCreate: can't create Image: %v", err)
	}

	if _, ok := d.GetOk("account_id"); ok {
		haveAccountID, err := existAccountID(ctx, d, m)
		if err != nil {
			return diag.FromErr(err)
		}

		if !haveAccountID {
			return diag.Errorf("resourceImageFromBlankComputeCreate: can't create Image because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
		}
	}

	c := m.(*controller.ControllerCfg)
	req := compute.CreateTemplateFromBlankRequest{
		ComputeID: computeId,
		Name:      name,
		BootType:  d.Get("boot_type").(string),
		ImageType: d.Get("type").(string),
	}

	if username, ok := d.GetOk("username"); ok {
		req.Username = username.(string)
	}
	if password, ok := d.GetOk("password"); ok {
		req.Password = password.(string)
	}
	if accountId, ok := d.GetOk("account_id"); ok {
		req.AccountID = uint64(accountId.(int))
	}
	if sepId, ok := d.GetOk("sep_id"); ok {
		req.SepID = uint64(sepId.(int))
	}
	if poolName, ok := d.GetOk("pool_name"); ok {
		req.PoolName = poolName.(string)
	}
	if hotresize, ok := d.GetOk("hot_resize"); ok {
		req.HotResize = hotresize.(bool)
	}

	var imageId uint64
	asyncMode := d.Get("async_mode").(bool)
	if !asyncMode {
		imageId, err = c.CloudAPI().Compute().CreateTemplateFromBlank(ctx, req)
		if err != nil {
			d.SetId("")
			return diag.FromErr(err)
		}
	} else {
		taskId, err := c.CloudAPI().Compute().CreateTemplateFromBlankAsync(ctx, req)
		if err != nil {
			d.SetId("")
			return diag.FromErr(err)
		}

		taskReq := tasks.GetRequest{
			AuditID: strings.Trim(taskId, `"`),
		}

		for {
			task, err := c.CloudAPI().Tasks().Get(ctx, taskReq)
			if err != nil {
				return diag.FromErr(err)
			}

			log.Debugf("resourceImageFromBlankComputeCreate: instance creating - %s", task.Stage)

			if task.Completed {
				if task.Error != "" {
					return diag.FromErr(fmt.Errorf("cannot create image instance: %v", task.Error))
				}

				id, err := task.Result.ID()
				imageId = uint64(id)
				if err != nil {
					return diag.FromErr(err)
				}
				break
			}

			time.Sleep(time.Second * 20)
		}
	}

	d.SetId(strconv.FormatUint(imageId, 10))
	d.Set("image_id", imageId)

	_, err = utilityImageCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	return resourceImageFromBlankComputeRead(ctx, d, m)
}

func resourceImageFromBlankComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	log.Debugf("resourceImageFromBlankComputeRead: called for %s id: %s", d.Get("name").(string), d.Id())

	img, err := utilityImageCheckPresence(ctx, d, m)
	if img == nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	switch img.Status {
	case status.Modeled:
		return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
	case status.Creating:
	case status.Created:
	case status.Destroyed, status.Purged:
		d.SetId("")
		return diag.Errorf("The resource cannot be updated because it has been destroyed")
		// return resourceImageCreate(ctx, d, m)
	}

	flattenImage(d, img)

	return nil
}

func resourceImageFromBlankComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	log.Debugf("resourceImageFromBlankComputeDelete: called for %s, id: %s", d.Get("name").(string), d.Id())

	_, err := utilityImageCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	c := m.(*controller.ControllerCfg)
	req := image.DeleteRequest{
		ImageID: uint64(d.Get("image_id").(int)),
	}

	if permanently, ok := d.GetOk("permanently"); ok {
		req.Permanently = permanently.(bool)
	}

	_, err = c.CloudAPI().Image().Delete(ctx, req)
	if err != nil {
		return diag.FromErr(err)
	}

	d.SetId("")

	return nil
}

func resourceImageFromBlankComputeRename(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	log.Debugf("resourceImageFromBlankComputeRename: called for %s, id: %s", d.Get("name").(string), d.Id())
	c := m.(*controller.ControllerCfg)
	req := image.RenameRequest{
		ImageID: uint64(d.Get("image_id").(int)),
		Name:    d.Get("name").(string),
	}

	_, err := c.CloudAPI().Image().Rename(ctx, req)
	if err != nil {
		return err
	}

	return nil
}

func resourceImageFromBlankComputeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	log.Debugf("resourceImageFromBlankComputeUpdate: called for %s, id: %s", d.Get("name").(string), d.Id())

	// we do not allow change of compute_id, but allow resource update after import
	old, _ := d.GetChange("compute_id")
	if old.(int) != 0 && d.HasChange("compute_id") {
		return diag.Errorf("resourceImageFromBlankComputeUpdate: can't update Image because compute_id is not allowed to be changed")
	}

	image, err := utilityImageCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	switch image.Status {
	case status.Modeled:
		return diag.Errorf("The image is in status: %s, please, contact support for more information", image.Status)
	case status.Creating:
	case status.Created:
	case status.Destroyed, status.Purged:
		d.SetId("")
		return diag.Errorf("The resource cannot be updated because it has been destroyed")
		// return resourceImageCreate(ctx, d, m)
	}

	if d.HasChange("name") {
		err := resourceImageFromBlankComputeRename(ctx, d, m)
		if err != nil {
			return diag.FromErr(err)
		}
	}

	return resourceImageFromBlankComputeRead(ctx, d, m)
}

func ResourceImageFromBlankCompute() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		CreateContext: resourceImageFromBlankComputeCreate,
		ReadContext:   resourceImageFromBlankComputeRead,
		UpdateContext: resourceImageFromBlankComputeUpdate,
		DeleteContext: resourceImageFromBlankComputeDelete,

		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},

		Timeouts: &schema.ResourceTimeout{
			Create:  &constants.Timeout30m,
			Read:    &constants.Timeout900s,
			Update:  &constants.Timeout900s,
			Delete:  &constants.Timeout900s,
			Default: &constants.Timeout900s,
		},

		Schema: resourceImageFromBlankComputeSchemaMake(),
	}
}

func resourceImageFromBlankComputeSchemaMake() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"compute_id": {Type: schema.TypeInt, Required: true, Description: "Compute Id"},
		"name":       {Type: schema.TypeString, Required: true, Description: "Name of the rescue disk"},
		"boot_type": {
			Type:         schema.TypeString,
			Required:     true,
			ValidateFunc: validation.StringInSlice([]string{"bios", "uefi"}, true),
			Description:  "Boot type of image BIOS or UEFI",
		},
		"type": {
			Type:         schema.TypeString,
			Required:     true,
			ValidateFunc: validation.StringInSlice([]string{"linux", "windows", "other"}, true),
			Description:  "Image type linux, windows or other",
		},

		"username":    {Type: schema.TypeString, Optional: true, Computed: true, Description: "Optional username for the image"},
		"password":    {Type: schema.TypeString, Optional: true, Computed: true, Description: "Optional password for the image"},
		"account_id":  {Type: schema.TypeInt, Optional: true, Computed: true, Description: "AccountId to make the image exclusive"},
		"sep_id":      {Type: schema.TypeInt, Optional: true, Computed: true, Description: "storage endpoint provider ID"},
		"pool_name":   {Type: schema.TypeString, Optional: true, Computed: true, Description: "pool for image create"},
		"hot_resize":  {Type: schema.TypeBool, Optional: true, Computed: true, Description: "Does this machine supports hot resize"},
		"async_mode":  {Type: schema.TypeBool, Optional: true, Default: false, Description: "create an image in async/sync mode"},
		"permanently": {Type: schema.TypeBool, Optional: true, Default: false, Description: "whether to completely delete the image"},

		"image_id":        {Type: schema.TypeInt, Computed: true},
		"unc_path":        {Type: schema.TypeString, Computed: true},
		"ckey":            {Type: schema.TypeString, Computed: true},
		"acl":             {Type: schema.TypeString, Computed: true},
		"architecture":    {Type: schema.TypeString, Computed: true},
		"bootable":        {Type: schema.TypeBool, Computed: true},
		"compute_ci_id":   {Type: schema.TypeInt, Computed: true},
		"cd_presented_to": {Type: schema.TypeString, Computed: true},
		"deleted_time":    {Type: schema.TypeInt, Computed: true},
		"desc":            {Type: schema.TypeString, Computed: true},
		"drivers":         {Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}},
		"enabled":         {Type: schema.TypeBool, Computed: true},
		"gid":             {Type: schema.TypeInt, Computed: true},
		"guid":            {Type: schema.TypeInt, Computed: true},
		"history": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"guid":      {Type: schema.TypeString, Computed: true},
					"id":        {Type: schema.TypeInt, Computed: true},
					"timestamp": {Type: schema.TypeInt, Computed: true},
				},
			},
		},
		"last_modified":            {Type: schema.TypeInt, Computed: true},
		"link_to":                  {Type: schema.TypeInt, Computed: true},
		"milestones":               {Type: schema.TypeInt, Computed: true},
		"image_name":               {Type: schema.TypeString, Computed: true},
		"network_interface_naming": {Type: schema.TypeString, Computed: true},
		"provider_name":            {Type: schema.TypeString, Computed: true},
		"purge_attempts":           {Type: schema.TypeInt, Computed: true},
		"present_to":               {Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeInt}},
		"res_id":                   {Type: schema.TypeString, Computed: true},
		"rescuecd":                 {Type: schema.TypeBool, Computed: true},
		"shared_with":              {Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeInt}},
		"size":                     {Type: schema.TypeInt, Computed: true},
		"status":                   {Type: schema.TypeString, Computed: true},
		"tech_status":              {Type: schema.TypeString, Computed: true},
		"version":                  {Type: schema.TypeString, Computed: true},
	}
}
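In async mode the Create function above polls the Tasks API every 20 seconds until the task completes. A hedged sketch of the same loop factored into a helper that also honors context cancellation; the `waitForTask` name is hypothetical, while the SDK calls are the ones already used in the resource:

```go
// waitForTask resolves an audit/task ID to the created image ID, mirroring the
// polling loop in resourceImageFromBlankComputeCreate, but it stops early when
// the context is cancelled instead of sleeping unconditionally.
func waitForTask(ctx context.Context, c *controller.ControllerCfg, taskId string) (uint64, error) {
	req := tasks.GetRequest{AuditID: strings.Trim(taskId, `"`)}
	for {
		task, err := c.CloudAPI().Tasks().Get(ctx, req)
		if err != nil {
			return 0, err
		}
		if task.Completed {
			if task.Error != "" {
				return 0, fmt.Errorf("cannot create image instance: %v", task.Error)
			}
			id, err := task.Result.ID()
			if err != nil {
				return 0, err
			}
			return uint64(id), nil
		}
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		case <-time.After(20 * time.Second):
		}
	}
}
```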
@@ -0,0 +1,506 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.

Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort

Please see README.md to learn where to place source code so that it
builds seamlessly.

Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/

package image

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
	log "github.com/sirupsen/logrus"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/tasks"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)

func resourceImageFromPlatformDiskCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	diskId := uint64(d.Get("disk_id").(int))
	name := d.Get("name").(string)

	log.Debugf("resourceImageFromPlatformDiskCreate: called for image %s", name)

	err := existDiskID(ctx, diskId, m)
	if err != nil {
		return diag.Errorf("resourceImageFromPlatformDiskCreate: can't create Image: %v", err)
	}

	if _, ok := d.GetOk("account_id"); ok {
		haveAccountID, err := existAccountID(ctx, d, m)
		if err != nil {
			return diag.FromErr(err)
		}

		if !haveAccountID {
			return diag.Errorf("resourceImageFromPlatformDiskCreate: can't create Image because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
		}
	}

	c := m.(*controller.ControllerCfg)
	req := disks.FromPlatformDiskRequest{
		DiskID:       diskId,
		Name:         name,
		BootType:     d.Get("boot_type").(string),
		ImageType:    d.Get("type").(string),
		Architecture: d.Get("architecture").(string),
		Bootable:     d.Get("bootable").(bool), // default value - true
	}

	if username, ok := d.GetOk("username"); ok {
		req.Username = username.(string)
	}
	if password, ok := d.GetOk("password"); ok {
		req.Password = password.(string)
	}
	if accountId, ok := d.GetOk("account_id"); ok {
		req.AccountID = uint64(accountId.(int))
	}
	if sepId, ok := d.GetOk("sep_id"); ok {
		req.SepID = uint64(sepId.(int))
	}
	if poolName, ok := d.GetOk("pool_name"); ok {
		req.PoolName = poolName.(string)
	}
	if driversInterface, ok := d.GetOk("drivers"); ok {
		for _, d := range driversInterface.([]interface{}) {
			req.Drivers = append(req.Drivers, d.(string))
		}
	}
	if hotresize, ok := d.GetOk("hot_resize"); ok {
		req.HotResize = hotresize.(bool)
	}

	var imageId uint64
	asyncMode := d.Get("async_mode").(bool)
	if !asyncMode {
		imageId, err = c.CloudAPI().Disks().FromPlatformDisk(ctx, req)
		if err != nil {
			d.SetId("")
			return diag.FromErr(err)
		}
	} else {
		taskId, err := c.CloudAPI().Disks().FromPlatformDiskAsync(ctx, req)
		if err != nil {
			d.SetId("")
			return diag.FromErr(err)
		}

		taskReq := tasks.GetRequest{
			AuditID: strings.Trim(taskId, `"`),
		}

		for {
			task, err := c.CloudAPI().Tasks().Get(ctx, taskReq)
			if err != nil {
				return diag.FromErr(err)
			}

			log.Debugf("resourceImageFromPlatformDiskCreate: instance creating - %s", task.Stage)

			if task.Completed {
				if task.Error != "" {
					return diag.FromErr(fmt.Errorf("cannot create image instance: %v", task.Error))
				}

				id, err := task.Result.ID()
				imageId = uint64(id)
				if err != nil {
					return diag.FromErr(err)
				}
				break
			}

			time.Sleep(time.Second * 20)
		}
	}

	d.SetId(strconv.FormatUint(imageId, 10))
	d.Set("image_id", imageId)

	_, err = utilityImageCheckPresence(ctx, d, m)
	if err != nil {
		return diag.FromErr(err)
	}

	return resourceImageFromPlatformDiskRead(ctx, d, m)
}

func resourceImageFromPlatformDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	log.Debugf("resourceImageFromPlatformDiskRead: called for %s id: %s", d.Get("name").(string), d.Id())

	img, err := utilityImageCheckPresence(ctx, d, m)
	if img == nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	switch img.Status {
	case status.Modeled:
		return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
	case status.Creating:
	case status.Created:
	case status.Destroyed, status.Purged:
		d.SetId("")
		return diag.Errorf("The resource cannot be updated because it has been destroyed")
	}

	flattenImage(d, img)

	return nil
}

func resourceImageFromPlatformDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	log.Debugf("resourceImageFromPlatformDiskDelete: called for %s, id: %s", d.Get("name").(string), d.Id())

	_, err := utilityImageCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	c := m.(*controller.ControllerCfg)
	req := image.DeleteRequest{
		ImageID: uint64(d.Get("image_id").(int)),
	}

	if permanently, ok := d.GetOk("permanently"); ok {
		req.Permanently = permanently.(bool)
	}

	_, err = c.CloudAPI().Image().Delete(ctx, req)
	if err != nil {
		return diag.FromErr(err)
	}

	d.SetId("")

	return nil
}

func resourceImageFromPlatformDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	log.Debugf("resourceImageFromPlatformDiskUpdate: called for %s, id: %s", d.Get("name").(string), d.Id())

	// we do not allow change of disk_id, but allow resource update after import
	old, _ := d.GetChange("disk_id")
	if old.(int) != 0 && d.HasChange("disk_id") {
		return diag.Errorf("resourceImageFromPlatformDiskUpdate: can't update Image because disk_id is not allowed to be changed")
	}

	image, err := utilityImageCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return diag.FromErr(err)
	}

	switch image.Status {
	case status.Modeled:
		return diag.Errorf("The image is in status: %s, please, contact support for more information", image.Status)
	case status.Creating:
	case status.Created:
	case status.Destroyed, status.Purged:
		d.SetId("")
		return diag.Errorf("The resource cannot be updated because it has been destroyed")
	}

	if d.HasChange("name") {
		err := resourceImageRename(ctx, d, m)
		if err != nil {
			return diag.FromErr(err)
		}
	}

	return resourceImageFromPlatformDiskRead(ctx, d, m)
}

func ResourceImageFromPlatformDisk() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,

		CreateContext: resourceImageFromPlatformDiskCreate,
		ReadContext:   resourceImageFromPlatformDiskRead,
		UpdateContext: resourceImageFromPlatformDiskUpdate,
		DeleteContext: resourceImageFromPlatformDiskDelete,

		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},

		Timeouts: &schema.ResourceTimeout{
			Create:  &constants.Timeout30m,
			Read:    &constants.Timeout900s,
			Update:  &constants.Timeout900s,
			Delete:  &constants.Timeout900s,
			Default: &constants.Timeout900s,
		},

		Schema: resourceImageFromPlatformDiskSchemaMake(),
	}
}

func resourceImageFromPlatformDiskSchemaMake() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"disk_id": {Type: schema.TypeInt, Required: true, Description: "Disk Id"},
		"name":    {Type: schema.TypeString, Required: true, Description: "Name of the rescue disk"},
		"boot_type": {
			Type:         schema.TypeString,
			Required:     true,
			ValidateFunc: validation.StringInSlice([]string{"bios", "uefi"}, true),
			Description:  "Boot type of image BIOS or UEFI",
		},
		"type": {
			Type:         schema.TypeString,
			Required:     true,
			ValidateFunc: validation.StringInSlice([]string{"linux", "windows", "other"}, true),
			Description:  "Image type linux, windows or other",
		},
		"architecture": {
			Type:         schema.TypeString,
			Required:     true,
			ValidateFunc: validation.StringInSlice([]string{"X86_64", "PPC64_LE"}, true),
			Description:  "binary architecture of this image, one of X86_64 of PPC64_LE",
		},

		"username":   {Type: schema.TypeString, Optional: true, Computed: true, Description: "Optional username for the image"},
		"password":   {Type: schema.TypeString, Optional: true, Computed: true, Description: "Optional password for the image"},
		"account_id": {Type: schema.TypeInt, Optional: true, Computed: true, Description: "AccountId to make the image exclusive"},
		"sep_id":     {Type: schema.TypeInt, Optional: true, Computed: true, Description: "storage endpoint provider ID"},
		"pool_name":  {Type: schema.TypeString, Optional: true, Computed: true, Description: "pool for image create"},
		"drivers": {
			Type:     schema.TypeList,
			Optional: true,
			Computed: true,
			Elem: &schema.Schema{
				StateFunc:    statefuncs.StateFuncToUpper,
				ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
				Type:         schema.TypeString,
			},
			Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
		},
		"bootable":    {Type: schema.TypeBool, Optional: true, Default: true, Description: "bootable image"},
		"hot_resize":  {Type: schema.TypeBool, Optional: true, Computed: true, Description: "Does this machine supports hot resize"},
		"async_mode":  {Type: schema.TypeBool, Optional: true, Default: false, Description: "create an image in async/sync mode"},
		"permanently": {Type: schema.TypeBool, Optional: true, Default: false, Description: "whether to completely delete the image"},

		"image_id":        {Type: schema.TypeInt, Computed: true},
		"unc_path":        {Type: schema.TypeString, Computed: true},
		"ckey":            {Type: schema.TypeString, Computed: true},
		"acl":             {Type: schema.TypeString, Computed: true},
		"compute_ci_id":   {Type: schema.TypeInt, Computed: true},
		"cd_presented_to": {Type: schema.TypeString, Computed: true},
		"deleted_time":    {Type: schema.TypeInt, Computed: true},
		"desc":            {Type: schema.TypeString, Computed: true},
		"enabled":         {Type: schema.TypeBool, Computed: true},
		"gid":             {Type: schema.TypeInt, Computed: true},
		"guid":            {Type: schema.TypeInt, Computed: true},
		"history": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"guid":      {Type: schema.TypeString, Computed: true},
					"id":        {Type: schema.TypeInt, Computed: true},
					"timestamp": {Type: schema.TypeInt, Computed: true},
				},
			},
		},
		"last_modified":            {Type: schema.TypeInt, Computed: true},
		"link_to":                  {Type: schema.TypeInt, Computed: true},
		"milestones":               {Type: schema.TypeInt, Computed: true},
		"image_name":               {Type: schema.TypeString, Computed: true},
		"network_interface_naming": {Type: schema.TypeString, Computed: true},
		"provider_name":            {Type: schema.TypeString, Computed: true},
		"purge_attempts":           {Type: schema.TypeInt, Computed: true},
		"present_to":               {Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeInt}},
		"res_id":                   {Type: schema.TypeString, Computed: true},
		"rescuecd":                 {Type: schema.TypeBool, Computed: true},
		"shared_with":              {Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeInt}},
		"size":                     {Type: schema.TypeInt, Computed: true},
		"status":                   {Type: schema.TypeString, Computed: true},
		"tech_status":              {Type: schema.TypeString, Computed: true},
		"version":                  {Type: schema.TypeString, Computed: true},
	}
}
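Both new resources follow the layout of the existing image resource and expose exported constructors (ResourceImageFromBlankCompute, ResourceImageFromPlatformDisk). A sketch of how they might be registered in the provider's resource map; the `decort_*` type names, the import path and the `newResourcesMap` helper are assumptions for illustration, since the real registration lives outside this diff:

```go
package provider

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	// import path assumed; use the package that actually contains the image resources
	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image"
)

// newResourcesMap shows how the two image resources added in this changeset
// could be exposed to Terraform users; check the provider's real resource map
// for the actual type names.
func newResourcesMap() map[string]*schema.Resource {
	return map[string]*schema.Resource{
		"decort_image_from_blank_compute": image.ResourceImageFromBlankCompute(),
		"decort_image_from_platform_disk": image.ResourceImageFromPlatformDisk(),
	}
}
```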
@@ -94,6 +94,10 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
 		req.Bootable = bootable.(bool)
 	}
 
+	if sortBy, ok := d.GetOk("sort_by"); ok {
+		req.SortBy = sortBy.(string)
+	}
+
 	if page, ok := d.GetOk("page"); ok {
 		req.Page = uint64(page.(int))
 	}
@@ -94,6 +94,11 @@ func dataSourceK8CIListSchemaMake() map[string]*schema.Schema {
    Default: false,
    Description: "Include deleted k8cis in result",
  },
+ "sort_by": {
+   Type: schema.TypeString,
+   Optional: true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type: schema.TypeInt,
    Optional: true,
@@ -179,6 +179,11 @@ func createK8sListSchema() map[string]*schema.Schema {
    Type: schema.TypeBool,
    Optional: true,
  },
+ "sort_by": {
+   Type: schema.TypeString,
+   Optional: true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type: schema.TypeInt,
    Optional: true,
@@ -75,8 +75,8 @@ func mastersSchemaMake() map[string]*schema.Schema {
  masters["num"] = &schema.Schema{
    Type: schema.TypeInt,
    Required: true,
-   ValidateFunc: validation.IntInSlice([]int{1, 3}),
-   Description: "Number of nodes to create. Can be either 1 or 3",
+   ValidateFunc: validation.IntInSlice([]int{1, 3, 5}),
+   Description: "Number of nodes to create. Can be either 1, 3 or 5",
  }
  masters["sep_id"] = &schema.Schema{
    Type: schema.TypeInt,
@@ -97,8 +97,8 @@ func mastersSchemaMake() map[string]*schema.Schema {
    Required: true,
    //ForceNew: true,
    ValidateFunc: validation.All(
-     validation.IntAtLeast(constants.MinRamPerCompute),
-     validators.DivisibleBy(constants.RAMDivisibility),
+     validation.IntAtLeast(constants.MIN_RAM_PER_COMPUTE),
+     validators.DivisibleBy(constants.RAM_DIVISIBILITY),
    ),
    Description: "Node RAM in MB.",
  }
@@ -125,8 +125,8 @@ func workersSchemaMake() map[string]*schema.Schema {
  "ram": {
    Type: schema.TypeInt,
    ValidateFunc: validation.All(
-     validation.IntAtLeast(constants.MinRamPerCompute),
-     validators.DivisibleBy(constants.RAMDivisibility),
+     validation.IntAtLeast(constants.MIN_RAM_PER_COMPUTE),
+     validators.DivisibleBy(constants.RAM_DIVISIBILITY),
    ),
    Required: true,
  },
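The renamed RAM constants above feed a custom divisibility validator. A minimal sketch of such a check against the terraform-plugin-sdk v2 ValidateFunc signature is shown below; the body is illustrative only and is not the provider's actual validators.DivisibleBy implementation.

```go
package validators

import "fmt"

// DivisibleBy builds a ValidateFunc-style check that an int schema value is a
// multiple of divisor. The error wording here is an assumption.
func DivisibleBy(divisor int) func(interface{}, string) ([]string, []error) {
	return func(i interface{}, k string) ([]string, []error) {
		v, ok := i.(int)
		if !ok {
			return nil, []error{fmt.Errorf("expected %q to be int, got %T", k, i)}
		}
		if divisor == 0 || v%divisor != 0 {
			return nil, []error{fmt.Errorf("%q (%d) must be divisible by %d", k, v, divisor)}
		}
		return nil, nil
	}
}
```

It would be wired exactly as in the hunks above, e.g. `validators.DivisibleBy(constants.RAM_DIVISIBILITY)` inside `validation.All(...)`.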
@@ -101,7 +101,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
  }
  createReq.MasterNum = uint(masterNode.Num)
  createReq.MasterCPU = uint(masterNode.Cpu)
- createReq.MasterRAM = uint(masterNode.Ram)
+ createReq.MasterRAM = uint64(masterNode.Ram)
  createReq.MasterDisk = uint(masterNode.Disk)
  createReq.MasterSEPID = uint64(masterNode.SepID)
  createReq.MasterSEPPool = masterNode.SepPool
@@ -115,7 +115,7 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{

  createReq.WorkerNum = uint(workerNode.Num)
  createReq.WorkerCPU = uint(workerNode.Cpu)
- createReq.WorkerRAM = uint(workerNode.Ram)
+ createReq.WorkerRAM = uint64(workerNode.Ram)
  createReq.WorkerDisk = uint(workerNode.Disk)
  createReq.WorkerSEPID = uint64(workerNode.SepID)
  createReq.WorkerSEPPool = workerNode.SepPool
@@ -183,12 +183,26 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
  }

  if lbSysctlParams, ok := d.GetOk("lb_sysctl_params"); ok {
-   createReq.LbSysctlParams = lbSysctlParams.(string)
+   syscrlSliceMaps := lbSysctlParams.([]map[string]string)
+   res := make([]map[string]interface{}, 0, len(syscrlSliceMaps))
+   for _, syscrlMap := range syscrlSliceMaps {
+     tempMap := make(map[string]interface{})
+     for k, v := range syscrlMap {
+       if intVal, err := strconv.Atoi(v); err == nil {
+         tempMap[k] = intVal
+         continue
+       }
+       tempMap[k] = v
+     }
+     res = append(res, tempMap)
+   }
+   createReq.LbSysctlParams = res
  }

  if oidcCertificate, ok := d.GetOk("oidc_cert"); ok {
    createReq.OidcCertificate = oidcCertificate.(string)
  }
+ ///

  createReq.ExtNetOnly = d.Get("extnet_only").(bool)

@@ -231,7 +245,11 @@ func resourceK8sCreate(ctx context.Context, d *schema.ResourceData, m interface{
    return diag.FromErr(fmt.Errorf("cannot create k8s instance: %v", task.Error))
  }

- d.SetId(strconv.Itoa(int(task.Result)))
+ id, err := task.Result.ID()
+ if err != nil {
+   return diag.FromErr(err)
+ }
+ d.SetId(strconv.Itoa(id))
  break
  }
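The same string-to-int coercion for lb_sysctl_params recurs in the create and update paths of both k8s and k8s_cp below. A hypothetical helper capturing the repeated loop could look like this; the name convertSysctlParams is an assumption, not a function that exists in the provider.

```go
package k8s

import "strconv"

// convertSysctlParams mirrors the lb_sysctl_params conversion loop: values
// that parse as integers are coerced to int, everything else stays a string.
func convertSysctlParams(in []map[string]string) []map[string]interface{} {
	out := make([]map[string]interface{}, 0, len(in))
	for _, m := range in {
		tmp := make(map[string]interface{}, len(m))
		for k, v := range m {
			if n, err := strconv.Atoi(v); err == nil {
				tmp[k] = n
				continue
			}
			tmp[k] = v
		}
		out = append(out, tmp)
	}
	return out
}
```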
@@ -515,6 +533,31 @@ func resourceK8sUpdate(ctx context.Context, d *schema.ResourceData, m interface{
    }
  }

+ if d.HasChange("lb_sysctl_params") && d.Get("with_lb").(bool) {
+   lbSysctlParams := d.Get("lb_sysctl_params").([]map[string]string)
+   res := make([]map[string]interface{}, 0, len(lbSysctlParams))
+   for _, syscrlMap := range lbSysctlParams {
+     tempMap := make(map[string]interface{})
+     for k, v := range syscrlMap {
+       if intVal, err := strconv.Atoi(v); err == nil {
+         tempMap[k] = intVal
+         continue
+       }
+       tempMap[k] = v
+     }
+     res = append(res, tempMap)
+   }
+
+   req := lb.UpdateSysctParamsRequest{
+     LBID: cluster.LBID,
+     SysctlParams: res,
+   }
+   _, err := c.CloudAPI().LB().UpdateSysctlParams(ctx, req)
+   if err != nil {
+     return diag.FromErr(err)
+   }
+ }
+
  return resourceK8sRead(ctx, d, m)
  }
@@ -685,15 +728,22 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
    Description: "Use Highly Available schema for LB deploy",
  },
  "lb_sysctl_params": {
-   Type: schema.TypeString,
+   Type: schema.TypeList,
    Optional: true,
    Description: "Custom sysctl values for Load Balancer instance. Applied on boot.",
+   Elem: &schema.Schema{
+     Type: schema.TypeMap,
+     Elem: &schema.Schema{
+       Type: schema.TypeString,
+     },
+   },
  },
  "oidc_cert": {
    Type: schema.TypeString,
    Optional: true,
    Description: "insert ssl certificate in x509 pem format",
  },

  "desc": {
    Type: schema.TypeString,
    Optional: true,
@@ -705,6 +755,7 @@ func resourceK8sSchemaMake() map[string]*schema.Schema {
    Default: true,
    Description: "Start k8s cluster",
  },
+
  "acl": {
    Type: schema.TypeList,
    Computed: true,
@@ -109,7 +109,7 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
  }

  if ram, ok := d.GetOk("ram"); ok {
-   createReq.MasterRAM = uint(ram.(int))
+   createReq.MasterRAM = uint64(ram.(int))
  } else {
    createReq.MasterRAM = 2048
  }
@@ -168,7 +168,20 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
  }

  if lbSysctlParams, ok := d.GetOk("lb_sysctl_params"); ok {
-   createReq.LbSysctlParams = lbSysctlParams.(string)
+   syscrlSliceMaps := lbSysctlParams.([]map[string]string)
+   res := make([]map[string]interface{}, 0, len(syscrlSliceMaps))
+   for _, syscrlMap := range syscrlSliceMaps {
+     tempMap := make(map[string]interface{})
+     for k, v := range syscrlMap {
+       if intVal, err := strconv.Atoi(v); err == nil {
+         tempMap[k] = intVal
+         continue
+       }
+       tempMap[k] = v
+     }
+     res = append(res, tempMap)
+   }
+   createReq.LbSysctlParams = res
  }

  if oidcCertificate, ok := d.GetOk("oidc_cert"); ok {
@@ -176,6 +189,8 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
    log.Debug(createReq.OidcCertificate)
  }

+ ///
+
  createReq.ExtNetOnly = d.Get("extnet_only").(bool)

  if extNet, ok := d.GetOk("extnet_id"); ok {
@@ -217,7 +232,11 @@ func resourceK8sCPCreate(ctx context.Context, d *schema.ResourceData, m interfac
    return diag.FromErr(fmt.Errorf("cannot create k8s instance: %v", task.Error))
  }

- d.SetId(strconv.Itoa(int(task.Result)))
+ id, err := task.Result.ID()
+ if err != nil {
+   return diag.FromErr(err)
+ }
+ d.SetId(strconv.Itoa(id))
  break
  }
@@ -532,6 +551,31 @@ func resourceK8sCPUpdate(ctx context.Context, d *schema.ResourceData, m interfac
    }
  }

+ if d.HasChange("lb_sysctl_params") && d.Get("with_lb").(bool) {
+   lbSysctlParams := d.Get("lb_sysctl_params").([]map[string]string)
+   res := make([]map[string]interface{}, 0, len(lbSysctlParams))
+   for _, syscrlMap := range lbSysctlParams {
+     tempMap := make(map[string]interface{})
+     for k, v := range syscrlMap {
+       if intVal, err := strconv.Atoi(v); err == nil {
+         tempMap[k] = intVal
+         continue
+       }
+       tempMap[k] = v
+     }
+     res = append(res, tempMap)
+   }
+
+   req := lb.UpdateSysctParamsRequest{
+     LBID: cluster.LBID,
+     SysctlParams: res,
+   }
+   _, err := c.CloudAPI().LB().UpdateSysctlParams(ctx, req)
+   if err != nil {
+     return diag.FromErr(err)
+   }
+ }
+
  return resourceK8sCPRead(ctx, d, m)
  }
@@ -592,8 +636,8 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeInt,
    Optional: true,
    Computed: true,
-   ValidateFunc: validation.IntInSlice([]int{1, 3}),
-   Description: "Number of VMs to create. Can be either 1 or 3",
+   ValidateFunc: validation.IntInSlice([]int{1, 3, 5}),
+   Description: "Number of VMs to create. Can be either 1, 3 or 5",
  },
  "cpu": {
    Type: schema.TypeInt,
@@ -606,8 +650,8 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
    Optional: true,
    Computed: true,
    ValidateFunc: validation.All(
-     validation.IntAtLeast(constants.MinRamPerCompute),
-     validators.DivisibleBy(constants.RAMDivisibility),
+     validation.IntAtLeast(constants.MIN_RAM_PER_COMPUTE),
+     validators.DivisibleBy(constants.RAM_DIVISIBILITY),
    ),
    Description: "Node RAM in MB.",
  },
@@ -680,9 +724,15 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
    Description: "Use Highly Available schema for LB deploy",
  },
  "lb_sysctl_params": {
-   Type: schema.TypeString,
+   Type: schema.TypeList,
    Optional: true,
    Description: "Custom sysctl values for Load Balancer instance. Applied on boot.",
+   Elem: &schema.Schema{
+     Type: schema.TypeMap,
+     Elem: &schema.Schema{
+       Type: schema.TypeString,
+     },
+   },
  },
  "oidc_cert": {
    Type: schema.TypeString,
@@ -724,6 +774,12 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
    Computed: true,
    Description: "Master group name.",
  },
+ "permanently": {
+   Type: schema.TypeBool,
+   Optional: true,
+   Default: false,
+   Description: "Determines if cluster should be destroyed",
+ },
  "acl": {
    Type: schema.TypeList,
    Computed: true,
@@ -776,12 +832,6 @@ func resourceK8sCPSchemaMake() map[string]*schema.Schema {
    Computed: true,
    Description: "IP address of default load balancer.",
  },
- "permanently": {
-   Type: schema.TypeBool,
-   Optional: true,
-   Default: false,
-   Description: "Determines if cluster should be destroyed",
- },
  "rg_name": {
    Type: schema.TypeString,
    Computed: true,
@@ -267,8 +267,8 @@ func resourceK8sWgSchemaMake() map[string]*schema.Schema {
    //ForceNew: true,
    Default: 1024,
    ValidateFunc: validation.All(
-     validation.IntAtLeast(constants.MinRamPerCompute),
-     validators.DivisibleBy(constants.RAMDivisibility),
+     validation.IntAtLeast(constants.MIN_RAM_PER_COMPUTE),
+     validators.DivisibleBy(constants.RAM_DIVISIBILITY),
    ),
    Description: "Worker node RAM in MB.",
  },
@@ -67,6 +67,9 @@ func utilityK8CIListCheckPresence(ctx context.Context, d *schema.ResourceData, m
  if include_disabled, ok := d.GetOk("include_disabled"); ok {
    req.IncludeDisabled = include_disabled.(bool)
  }
+ if sortBy, ok := d.GetOk("sort_by"); ok {
+   req.SortBy = sortBy.(string)
+ }
  if page, ok := d.GetOk("page"); ok {
    req.Page = uint64(page.(int))
  }
@@ -265,6 +265,10 @@ func utilityK8sListCheckPresence(ctx context.Context, d *schema.ResourceData, m
    req.IncludeDeleted = includedeleted.(bool)
  }

+ if sortBy, ok := d.GetOk("sort_by"); ok {
+   req.SortBy = sortBy.(string)
+ }
+
  if page, ok := d.GetOk("page"); ok {
    req.Page = uint64(page.(int))
  }
@@ -329,6 +333,10 @@ func utilityK8sListDeletedCheckPresence(ctx context.Context, d *schema.ResourceD
    req.TechStatus = tech_status.(string)
  }

+ if sortBy, ok := d.GetOk("sort_by"); ok {
+   req.SortBy = sortBy.(string)
+ }
+
  if page, ok := d.GetOk("page"); ok {
    req.Page = uint64(page.(int))
  }
@@ -360,6 +360,39 @@ func computeListDisksSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeInt,
    Computed: true,
  },
+ "replication": {
+   Type: schema.TypeList,
+   Computed: true,
+   Elem: &schema.Resource{
+     Schema: map[string]*schema.Schema{
+       "disk_id": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "pool_id": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "role": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "self_volume_id": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "storage_id": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "volume_id": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+     },
+   },
+   Description: "Replication status",
+ },
  "reality_device_number": {
    Type: schema.TypeInt,
    Computed: true,
@@ -489,6 +522,10 @@ func computeInterfacesSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeString,
    Computed: true,
  },
+ "node_id": {
+   Type: schema.TypeInt,
+   Computed: true,
+ },
  "pci_slot": {
    Type: schema.TypeInt,
    Computed: true,
@@ -615,7 +652,11 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
      Type: schema.TypeString,
    },
  },
- "boot_disk_size": {
+ "bootdisk_size": {
+   Type: schema.TypeInt,
+   Computed: true,
+ },
+ "cd_image_id": {
    Type: schema.TypeInt,
    Computed: true,
  },
@@ -634,6 +675,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeInt,
    Computed: true,
  },
+ "cpu_pin": {
+   Type: schema.TypeBool,
+   Computed: true,
+ },
  "cpus": {
    Type: schema.TypeInt,
    Computed: true,
@@ -685,6 +730,10 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeInt,
    Computed: true,
  },
+ "hp_backed": {
+   Type: schema.TypeBool,
+   Computed: true,
+ },
  "image_id": {
    Type: schema.TypeInt,
    Computed: true,
@@ -720,10 +769,18 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeString,
    Computed: true,
  },
- "need_reboot" : {
+ "need_reboot": {
    Type: schema.TypeBool,
    Computed: true,
  },
+ "numa_affinity": {
+   Type: schema.TypeString,
+   Computed: true,
+ },
+ "numa_node_id": {
+   Type: schema.TypeInt,
+   Computed: true,
+ },
  "natable_vins_id": {
    Type: schema.TypeInt,
    Computed: true,
@@ -771,6 +828,13 @@ func dataSourceComputeSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeString,
    Computed: true,
  },
+ "reserved_node_cpus": {
+   Type: schema.TypeList,
+   Computed: true,
+   Elem: &schema.Schema{
+     Type: schema.TypeInt,
+   },
+ },
  "rg_id": {
    Type: schema.TypeInt,
    Computed: true,
@@ -124,7 +124,11 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
      Type: schema.TypeString,
    },
  },
- "boot_disk_size": {
+ "bootdisk_size": {
+   Type: schema.TypeInt,
+   Computed: true,
+ },
+ "cd_image_id": {
    Type: schema.TypeInt,
    Computed: true,
  },
@@ -143,6 +147,10 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeInt,
    Computed: true,
  },
+ "cpu_pin": {
+   Type: schema.TypeBool,
+   Computed: true,
+ },
  "cpus": {
    Type: schema.TypeInt,
    Computed: true,
@@ -194,6 +202,10 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeInt,
    Computed: true,
  },
+ "hp_backed": {
+   Type: schema.TypeBool,
+   Computed: true,
+ },
  "compute_id": {
    Type: schema.TypeInt,
    Computed: true,
@@ -237,6 +249,14 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeBool,
    Computed: true,
  },
+ "numa_affinity": {
+   Type: schema.TypeString,
+   Computed: true,
+ },
+ "numa_node_id": {
+   Type: schema.TypeInt,
+   Computed: true,
+ },
  "pinned": {
    Type: schema.TypeBool,
    Computed: true,
@@ -257,6 +277,13 @@ func itemComputeSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeString,
    Computed: true,
  },
+ "reserved_node_cpus": {
+   Type: schema.TypeList,
+   Computed: true,
+   Elem: &schema.Schema{
+     Type: schema.TypeInt,
+   },
+ },
  "rg_id": {
    Type: schema.TypeInt,
    Computed: true,
@@ -394,6 +421,11 @@ func dataSourceComputeListSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeBool,
    Optional: true,
  },
+ "sort_by": {
+   Type: schema.TypeString,
+   Optional: true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type: schema.TypeInt,
    Optional: true,
@@ -109,6 +109,11 @@ func dataSourceComputeListDeletedSchemaMake() map[string]*schema.Schema {
    Optional: true,
    Description: "Find by Extnet ID",
  },
+ "sort_by": {
+   Type: schema.TypeString,
+   Optional: true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type: schema.TypeInt,
    Optional: true,
@@ -80,6 +80,11 @@ func dataSourceComputePCIDeviceListSchemaMake() map[string]*schema.Schema {
    Computed: true,
    Description: "Find by status",
  },
+ "sort_by": {
+   Type: schema.TypeString,
+   Optional: true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type: schema.TypeInt,
    Optional: true,
@@ -93,8 +98,49 @@ func dataSourceComputePCIDeviceListSchemaMake() map[string]*schema.Schema {
  "items": {
    Type: schema.TypeList,
    Computed: true,
-   Elem: &schema.Schema{
-     Type: schema.TypeString,
+   Elem: &schema.Resource{
+     Schema: map[string]*schema.Schema{
+       "compute_id": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "description": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "guid": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "hwpath": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "device_id": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "name": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "rg_id": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "stack_id": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "status": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "system_name": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+     },
    },
  },
  "entry_count": {
@@ -80,6 +80,11 @@ func dataSourceComputeVGPUListSchemaMake() map[string]*schema.Schema {
    Optional: true,
    Description: "Include deleted computes. If using field 'status', then includedeleted will be ignored",
  },
+ "sort_by": {
+   Type: schema.TypeString,
+   Optional: true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type: schema.TypeInt,
    Optional: true,
@@ -93,8 +98,81 @@ func dataSourceComputeVGPUListSchemaMake() map[string]*schema.Schema {
  "items": {
    Type: schema.TypeList,
    Computed: true,
-   Elem: &schema.Schema{
-     Type: schema.TypeString,
+   Elem: &schema.Resource{
+     Schema: map[string]*schema.Schema{
+       "account_id": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "created_time": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "deleted_time": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "gid": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "guid": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "vgpu_id": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "last_claimed_by": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "last_update_time": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "mode": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "pci_slot": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "pgpuid": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "profile_id": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "ram": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "reference_id": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "rg_id": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+       "status": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "type": {
+         Type: schema.TypeString,
+         Computed: true,
+       },
+       "vm_id": {
+         Type: schema.TypeInt,
+         Computed: true,
+       },
+     },
    },
  },
  "entry_count": {
@@ -33,7 +33,6 @@ Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/w
  package kvmvm

  import (
-   "context"
    "encoding/json"
    "sort"
    "strconv"
@@ -83,6 +82,7 @@ func flattenInterfaces(interfaces compute.ListInterfaces) []map[string]interface
    "net_id": interfaceItem.NetID,
    "netmask": interfaceItem.NetMask,
    "net_type": interfaceItem.NetType,
+   "node_id": interfaceItem.NodeID,
    "pci_slot": interfaceItem.PCISlot,
    "qos": flattenQOS(interfaceItem.QOS),
    "target": interfaceItem.Target,
@@ -164,10 +164,12 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
    "anti_affinity_rules": flattenListRules(compute.AntiAffinityRules),
    "arch": compute.Architecture,
    "boot_order": compute.BootOrder,
-   "boot_disk_size": compute.BootDiskSize,
+   "bootdisk_size": compute.BootDiskSize,
+   "cd_image_id": compute.CdImageId,
    "clone_reference": compute.CloneReference,
    "clones": compute.Clones,
    "computeci_id": compute.ComputeCIID,
+   "cpu_pin": compute.CPUPin,
    "cpus": compute.CPU,
    "created_by": compute.CreatedBy,
    "created_time": compute.CreatedTime,
@@ -180,6 +182,7 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
    "driver": compute.Driver,
    "gid": compute.GID,
    "guid": compute.GUID,
+   "hp_backed": compute.HPBacked,
    "compute_id": compute.ID,
    "image_id": compute.ImageID,
    "interfaces": flattenInterfaces(compute.Interfaces),
@@ -190,11 +193,14 @@ func flattenComputeList(computes *compute.ListComputes) []map[string]interface{}
    "milestones": compute.Milestones,
    "name": compute.Name,
    "need_reboot": compute.NeedReboot,
+   "numa_affinity": compute.NumaAffinity,
+   "numa_node_id": compute.NumaNodeId,
    "pinned": compute.Pinned,
    "ram": compute.RAM,
    "reference_id": compute.ReferenceID,
    "registered": compute.Registered,
    "res_name": compute.ResName,
+   "reserved_node_cpus": compute.ReservedNodeCpus,
    "rg_id": compute.RGID,
    "rg_name": compute.RGName,
    "snap_sets": flattenSnapSets(compute.SnapSets),
@@ -239,17 +245,25 @@ func flattenBootDisk(bootDisk *compute.ItemComputeDisk) []map[string]interface{}
  return res
  }

- func flattenComputeDisksDemo(ctx context.Context, d *schema.ResourceData, disksList compute.ListComputeDisks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
+ func flattenComputeDisksDemo(disksList compute.ListComputeDisks, disksBlocks, extraDisks []interface{}, bootDiskId uint64) []map[string]interface{} {
  res := make([]map[string]interface{}, 0, len(disksList))

+ if len(disksBlocks) == 0 {
+   return res
+ }
+
+ sort.Slice(disksList, func(i, j int) bool {
+   return disksList[i].ID < disksList[j].ID
+ })
+
+ indexDataDisks := 0
+
  for _, disk := range disksList {
    if disk.ID == bootDiskId || findInExtraDisks(uint(disk.ID), extraDisks) { //skip main bootdisk and extraDisks
      continue
    }

-   permanently, ok := ctx.Value(DiskKey(strconv.Itoa(int(disk.ID)))).(bool) // get permamently from Create or Update context
-   if !ok {
-     permanently = getPermanentlyByDiskID(d, disk.ID) // get permanently from state when Read is not after Create/Update
-   }
+   pernamentlyValue := disksBlocks[indexDataDisks].(map[string]interface{})["permanently"].(bool)

    temp := map[string]interface{}{
      "disk_name": disk.Name,
@@ -263,32 +277,15 @@ func flattenComputeDisksDemo(ctx context.Context, d *schema.ResourceData, disksL
      "desc": disk.Description,
      "image_id": disk.ImageID,
      "size": disk.SizeMax,
-     "permanently": permanently,
+     "permanently": pernamentlyValue,
    }
    res = append(res, temp)
+   indexDataDisks++
  }
- sort.Slice(res, func(i, j int) bool {
-   return res[i]["disk_id"].(uint64) < res[j]["disk_id"].(uint64)
- })

  return res
  }

- // getPermanentlyByDiskID gets permanently value of specific disk (by diskId) from disks current state
- func getPermanentlyByDiskID(d *schema.ResourceData, diskId uint64) bool {
-   disks := d.Get("disks").([]interface{})
-
-   for _, diskItem := range disks {
-     disk := diskItem.(map[string]interface{})
-     if uint64(disk["disk_id"].(int)) == diskId {
-       return disk["permanently"].(bool)
-     }
-   }
-
-   log.Infof("getPermanentlyByDiskID: disk with id %d not found in state", diskId)
-   return false
- }
-
  func flattenNetwork(interfaces compute.ListInterfaces) []map[string]interface{} {
  res := make([]map[string]interface{}, 0, len(interfaces))
@@ -310,10 +307,10 @@ func findBootDisk(disks compute.ListComputeDisks) *compute.ItemComputeDisk {
      return &disk
    }
  }
- return nil
+ return &compute.ItemComputeDisk{}
  }

- func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec compute.RecordCompute) error {
+ func flattenCompute(d *schema.ResourceData, computeRec compute.RecordCompute) error {
  // This function expects that compFacts string contains response from API compute/get,
  // i.e. detailed information about compute instance.
  //
@@ -336,6 +333,7 @@ func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec comp
  d.Set("boot_disk_size", bootDisk.SizeMax)
  d.Set("boot_disk", flattenBootDisk(bootDisk))
  d.Set("boot_disk_id", bootDisk.ID)
+ d.Set("cd_image_id", computeRec.CdImageId)
  d.Set("sep_id", bootDisk.SepID)
  d.Set("pool", bootDisk.Pool)
  d.Set("clone_reference", computeRec.CloneReference)
@@ -348,7 +346,7 @@ func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec comp
  d.Set("deleted_time", computeRec.DeletedTime)
  d.Set("description", computeRec.Description)
  d.Set("devices", string(devices))
- err := d.Set("disks", flattenComputeDisksDemo(ctx, d, computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
+ err := d.Set("disks", flattenComputeDisksDemo(computeRec.Disks, d.Get("disks").([]interface{}), d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
  if err != nil {
    return err
  }
@@ -370,6 +368,7 @@ func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec comp
  d.Set("milestones", computeRec.Milestones)
  d.Set("name", computeRec.Name)
  d.Set("need_reboot", computeRec.NeedReboot)
+ d.Set("numa_node_id", computeRec.NumaNodeId)
  d.Set("natable_vins_id", computeRec.NatableVINSID)
  d.Set("natable_vins_ip", computeRec.NatableVINSIP)
  d.Set("natable_vins_name", computeRec.NatableVINSName)
@@ -383,6 +382,7 @@ func flattenCompute(ctx context.Context, d *schema.ResourceData, computeRec comp
  d.Set("reference_id", computeRec.ReferenceID)
  d.Set("registered", computeRec.Registered)
  d.Set("res_name", computeRec.ResName)
+ d.Set("reserved_node_cpus", computeRec.ReservedNodeCpus)
  d.Set("rg_id", computeRec.RGID)
  d.Set("rg_name", computeRec.RGName)
  d.Set("snap_sets", flattenSnapSets(computeRec.SnapSets))
@@ -513,6 +513,7 @@ func flattenListComputeDisks(disks compute.ListComputeDisks) []map[string]interf
  "pool": disk.Pool,
  "present_to": disk.PresentTo,
  "purge_time": disk.PurgeTime,
+ "replication": flattenDiskReplication(disk.Replication),
  "reality_device_number": disk.RealityDeviceNumber,
  "res_id": disk.ResID,
  "role": disk.Role,
@@ -532,6 +533,20 @@ func flattenListComputeDisks(disks compute.ListComputeDisks) []map[string]interf
  return res
  }

+ func flattenDiskReplication(rep compute.ItemReplication) []map[string]interface{} {
+   res := []map[string]interface{}{
+     {
+       "disk_id": rep.DiskID,
+       "pool_id": rep.PoolID,
+       "role": rep.Role,
+       "self_volume_id": rep.SelfVolumeID,
+       "storage_id": rep.StorageID,
+       "volume_id": rep.VolumeID,
+     },
+   }
+   return res
+ }
+
  func flattenCustomFields(customFields map[string]interface{}) string {
  encoded, _ := json.Marshal(customFields)
  return string(encoded)
@@ -563,10 +578,12 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
  d.Set("anti_affinity_rules", flattenListRules(computeRec.AntiAffinityRules))
  d.Set("arch", computeRec.Architecture)
  d.Set("boot_order", computeRec.BootOrder)
- d.Set("boot_disk_size", computeRec.BootDiskSize)
+ d.Set("bootdisk_size", computeRec.BootDiskSize)
+ d.Set("cd_image_id", computeRec.CdImageId)
  d.Set("clone_reference", computeRec.CloneReference)
  d.Set("clones", computeRec.Clones)
  d.Set("computeci_id", computeRec.ComputeCIID)
+ d.Set("cpu_pin", computeRec.CPUPin)
  d.Set("cpus", computeRec.CPU)
  d.Set("created_by", computeRec.CreatedBy)
  d.Set("created_time", computeRec.CreatedTime)
@@ -579,6 +596,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
  d.Set("driver", computeRec.Driver)
  d.Set("gid", computeRec.GID)
  d.Set("guid", computeRec.GUID)
+ d.Set("hp_backed", computeRec.HPBacked)
  d.Set("compute_id", computeRec.ID)
  d.Set("image_id", computeRec.ImageID)
  d.Set("image_name", computeRec.ImageName)
@@ -590,6 +608,8 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
  d.Set("milestones", computeRec.Milestones)
  d.Set("name", computeRec.Name)
  d.Set("need_reboot", computeRec.NeedReboot)
+ d.Set("numa_affinity", computeRec.NumaAffinity)
+ d.Set("numa_node_id", computeRec.NumaNodeId)
  d.Set("natable_vins_id", computeRec.NatableVINSID)
  d.Set("natable_vins_ip", computeRec.NatableVINSIP)
  d.Set("natable_vins_name", computeRec.NatableVINSName)
@@ -601,6 +621,7 @@ func flattenDataCompute(d *schema.ResourceData, computeRec compute.RecordCompute
  d.Set("reference_id", computeRec.ReferenceID)
  d.Set("registered", computeRec.Registered)
  d.Set("res_name", computeRec.ResName)
+ d.Set("reserved_node_cpus", computeRec.ReservedNodeCpus)
  d.Set("rg_id", computeRec.RGID)
  d.Set("rg_name", computeRec.RGName)
  d.Set("snap_sets", flattenSnapSets(computeRec.SnapSets))
@@ -696,40 +717,50 @@ func flattenSnapshotUsage(computeSnapshotUsages compute.ListUsageSnapshots) []ma
  // return res
  // }

- func flattenVGPU(m []interface{}) []string {
-   output := []string{}
-   for _, item := range m {
-     switch d := item.(type) {
-     case string:
-       output = append(output, d)
-     case int:
-       output = append(output, strconv.Itoa(d))
-     case int64:
-       output = append(output, strconv.FormatInt(d, 10))
-     case float64:
-       output = append(output, strconv.FormatInt(int64(d), 10))
-     default:
-       output = append(output, "")
+ func flattenVGPU(vgpuList []compute.ItemVGPU) []map[string]interface{} {
+   res := make([]map[string]interface{}, 0, len(vgpuList))
+   for _, dev := range vgpuList {
+     temp := map[string]interface{}{
+       "account_id": dev.AccountID,
+       "created_time": dev.CreatedTime,
+       "deleted_time": dev.DeletedTime,
+       "gid": dev.GID,
+       "guid": dev.GUID,
+       "vgpu_id": dev.ID,
+       "last_claimed_by": dev.LastClaimedBy,
+       "last_update_time": dev.LastUpdateTime,
+       "mode": dev.Mode,
+       "pci_slot": dev.PCISlot,
+       "pgpuid": dev.PGPUID,
+       "profile_id": dev.ProfileID,
+       "ram": dev.RAM,
+       "reference_id": dev.ReferenceID,
+       "rg_id": dev.RGID,
+       "status": dev.Status,
+       "type": dev.Type,
+       "vm_id": dev.VMID,
    }
+   res = append(res, temp)
  }
- return output
+ return res
  }

- func flattenPCIDevice(m []interface{}) []string {
-   output := []string{}
-   for _, item := range m {
-     switch d := item.(type) {
-     case string:
-       output = append(output, d)
-     case int:
-       output = append(output, strconv.Itoa(d))
-     case int64:
-       output = append(output, strconv.FormatInt(d, 10))
-     case float64:
-       output = append(output, strconv.FormatInt(int64(d), 10))
-     default:
-       output = append(output, "")
+ func flattenPCIDevice(deviceList []compute.ItemPCIDevice) []map[string]interface{} {
+   res := make([]map[string]interface{}, 0, len(deviceList))
+   for _, dev := range deviceList {
+     temp := map[string]interface{}{
+       "compute_id": dev.ComputeID,
+       "description": dev.Description,
+       "guid": dev.GUID,
+       "hwpath": dev.HwPath,
+       "device_id": dev.ID,
+       "name": dev.Name,
+       "rg_id": dev.RGID,
+       "stack_id": dev.StackID,
+       "status": dev.Status,
+       "system_name": dev.SystemName,
    }
+   res = append(res, temp)
  }
- return output
+ return res
  }
@@ -128,7 +128,7 @@ func networkSubresourceSchemaMake() map[string]*schema.Schema {
    Type: schema.TypeString,
    Required: true,
    StateFunc: statefuncs.StateFuncToUpper,
-   ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS"}, false), // observe case while validating
+   ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS", "VFNIC"}, false), // observe case while validating
    Description: "Type of the network for this connection, either EXTNET or VINS.",
  },
@@ -33,13 +33,13 @@ func resourceComputeResourceV1() *schema.Resource {
  "cpu": {
    Type: schema.TypeInt,
    Required: true,
-   ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute),
+   ValidateFunc: validation.IntBetween(1, constants.MAX_CPUS_PER_COMPUTE),
    Description: "Number of CPUs to allocate to this compute instance.",
  },
  "ram": {
    Type: schema.TypeInt,
    Required: true,
-   ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute),
+   ValidateFunc: validation.IntAtLeast(constants.MIN_RAM_PER_COMPUTE),
    Description: "Amount of RAM in MB to allocate to this compute instance.",
  },
  "image_id": {
@@ -191,7 +191,7 @@ func resourceComputeResourceV1() *schema.Resource {
  "extra_disks": {
    Type: schema.TypeSet,
    Optional: true,
-   MaxItems: constants.MaxExtraDisksPerCompute,
+   MaxItems: constants.MAX_EXTRA_DISKS_PER_COMPUTE,
    Elem: &schema.Schema{
      Type: schema.TypeInt,
    },
@@ -202,7 +202,7 @@ func resourceComputeResourceV1() *schema.Resource {
    Type: schema.TypeSet,
    Optional: true,
    MinItems: 1,
-   MaxItems: constants.MaxNetworksPerCompute,
+   MaxItems: constants.MAX_NETWORKS_PER_COMPUTE,
    Elem: &schema.Resource{
      Schema: networkSubresourceSchemaMake(),
    },
@@ -575,13 +575,13 @@ func resourceComputeResourceV2() *schema.Resource {
  "cpu": {
    Type: schema.TypeInt,
    Required: true,
-   ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute),
+   ValidateFunc: validation.IntBetween(1, constants.MAX_CPUS_PER_COMPUTE),
    Description: "Number of CPUs to allocate to this compute instance.",
  },
  "ram": {
    Type: schema.TypeInt,
    Required: true,
-   ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute),
+   ValidateFunc: validation.IntAtLeast(constants.MIN_RAM_PER_COMPUTE),
    Description: "Amount of RAM in MB to allocate to this compute instance.",
  },
  "image_id": {
@@ -853,7 +853,7 @@ func resourceComputeResourceV2() *schema.Resource {
    Type: schema.TypeSet,
    Optional: true,
    Computed: true,
-   MaxItems: constants.MaxExtraDisksPerCompute,
+   MaxItems: constants.MAX_EXTRA_DISKS_PER_COMPUTE,
    Elem: &schema.Schema{
      Type: schema.TypeInt,
    },
@@ -864,7 +864,7 @@ func resourceComputeResourceV2() *schema.Resource {
    Type: schema.TypeSet,
    Optional: true,
    MinItems: 1,
-   MaxItems: constants.MaxNetworksPerCompute,
+   MaxItems: constants.MAX_NETWORKS_PER_COMPUTE,
    Elem: &schema.Resource{
      Schema: map[string]*schema.Schema{
        "net_type": {
@@ -41,6 +41,7 @@ import (
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/extnet"
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/image"
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/rg"
+ "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/vfpool"
  "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/vins"
  "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
  )
@@ -128,3 +129,36 @@ func existExtNetId(ctx context.Context, m interface{}, id int) (int, bool) {

  return 0, true
  }
+
+ func existVFPoolId(ctx context.Context, m interface{}, id int) (int, bool) {
+   c := m.(*controller.ControllerCfg)
+   req := vfpool.ListRequest{ByID: uint64(id)}
+
+   vfpoolList, err := c.CloudAPI().VFPool().List(ctx, req)
+   if err != nil {
+     log.Debugf("Unable to retrieve vfpool list, %s", err)
+     return id, false
+   }
+
+   if len(vfpoolList.Data) == 1 {
+     return 0, true
+   }
+
+   return id, false
+ }
+
+ func isMoreThanOneDisksTypeB(ctx context.Context, disks interface{}) bool {
+   count := 0
+
+   for _, elem := range disks.([]interface{}) {
+     diskVal := elem.(map[string]interface{})
+     if diskVal["disk_type"].(string) == "B" {
+       count++
+     }
+     if count > 1 {
+       return true
+     }
+   }
+
+   return false
+ }
@@ -55,9 +55,6 @@ import (
|
|||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
||||||
)
|
)
|
||||||
|
|
||||||
// DiskKey is custom string type to set up context Key for Disk ID
|
|
||||||
type DiskKey string
|
|
||||||
|
|
||||||
func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))
|
log.Debugf("resourceComputeCreate: called for Compute name %q, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int))
|
||||||
c := m.(*controller.ControllerCfg)
|
c := m.(*controller.ControllerCfg)
|
||||||
@@ -82,6 +79,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
     return diag.Errorf("resourceComputeCreate: can't create Compute because imageID %d is not allowed or does not exist", d.Get("image_id").(int))
   }
 
+  if disks, ok := d.GetOk("disks"); ok {
+    if isMoreThanOneDisksTypeB(ctx, disks) {
+      return diag.Errorf("resourceComputeCreate: can't create Compute because block disks have more 1 disk type 'B'")
+    }
+  }
+
   if network, ok := d.GetOk("network"); ok {
     networkList := network.(*schema.Set).List()
     for _, elem := range networkList {
@@ -96,7 +99,13 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
       if extNetId, ok := existExtNetId(ctx, m, networkData["net_id"].(int)); !ok {
         return diag.Errorf("resourceComputeCreate: can't create compute because extnet ID %d is not allowed or does not exist", extNetId)
       }
+    case "VFNIC":
+      if d.Get("driver").(string) == "KVM_PPC" {
+        return diag.Errorf("resourceComputeCreate: can't create compute because 'VFNIC' net_type is not allowed for driver 'KVM_PPC'")
+      }
+      if vfpoolId, ok := existVFPoolId(ctx, m, networkData["net_id"].(int)); !ok {
+        return diag.Errorf("resourceComputeCreate: can't create compute because vfpool ID %d is not allowed or does not exist", vfpoolId)
+      }
     default:
       continue
     }
@@ -186,6 +195,57 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
     }
   }
 
+  if disks, ok := d.GetOk("disks"); ok {
+    disksX86 := make([]kvmx86.DataDisk, 0)
+    disksPPC := make([]kvmppc.DataDisk, 0)
+
+    for _, elem := range disks.([]interface{}) {
+      diskVal := elem.(map[string]interface{})
+      reqDataDisk := kvmx86.DataDisk{
+        DiskName: diskVal["disk_name"].(string),
+        Size:     uint64(diskVal["size"].(int)),
+      }
+      if sepId, ok := diskVal["sep_id"]; ok {
+        reqDataDisk.SepID = uint64(sepId.(int))
+      }
+      if pool, ok := diskVal["pool"]; ok {
+        reqDataDisk.Pool = pool.(string)
+      }
+      if desc, ok := diskVal["desc"]; ok {
+        reqDataDisk.Description = desc.(string)
+      }
+      if imageID, ok := diskVal["image_id"]; ok {
+        reqDataDisk.ImageID = uint64(imageID.(int))
+      }
+      disksX86 = append(disksX86, reqDataDisk)
+    }
+
+    createReqX86.DataDisks = disksX86
+
+    for _, elem := range disks.([]interface{}) {
+      diskVal := elem.(map[string]interface{})
+      reqDataDisk := kvmppc.DataDisk{
+        DiskName: diskVal["disk_name"].(string),
+        Size:     uint64(diskVal["size"].(int)),
+      }
+      if sepId, ok := diskVal["sep_id"]; ok {
+        reqDataDisk.SepID = uint64(sepId.(int))
+      }
+      if pool, ok := diskVal["pool"]; ok {
+        reqDataDisk.Pool = pool.(string)
+      }
+      if desc, ok := diskVal["desc"]; ok {
+        reqDataDisk.Description = desc.(string)
+      }
+      if imageID, ok := diskVal["image_id"]; ok {
+        reqDataDisk.ImageID = uint64(imageID.(int))
+      }
+      disksPPC = append(disksPPC, reqDataDisk)
+    }
+
+    createReqPPC.DataDisks = disksPPC
+  }
+
   argVal, ok = d.GetOk("cloud_init")
   if ok {
     userdata := argVal.(string)
@@ -217,10 +277,16 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
   createReqX86.Name = d.Get("name").(string)
   createReqX86.CPU = uint64(d.Get("cpu").(int))
   createReqX86.RAM = uint64(d.Get("ram").(int))
-  createReqX86.ImageID = uint64(d.Get("image_id").(int))
   createReqX86.Driver = driver
 
+  if image, ok := d.GetOk("image_id"); ok {
+    createReqX86.ImageID = uint64(image.(int))
+  }
+  if withoutBootDisk, ok := d.GetOk("without_boot_disk"); ok {
+    createReqX86.WithoutBootDisk = withoutBootDisk.(bool)
+  }
+
   if custom_fields, ok := d.GetOk("custom_fields"); ok {
     val := custom_fields.(string)
     val = strings.ReplaceAll(val, "\\", "")
@@ -231,6 +297,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
     createReqX86.CustomFields = val
   }
 
+  if numaAffinity, ok := d.GetOk("numa_affinity"); ok {
+    createReqX86.NumaAffinity = numaAffinity.(string)
+  }
+  createReqX86.CPUPin = d.Get("cpu_pin").(bool)
+  createReqX86.HPBacked = d.Get("hp_backed").(bool)
+
   log.Debugf("resourceComputeCreate: creating Compute of type KVM VM x86")
   apiResp, err := c.CloudAPI().KVMX86().Create(ctx, createReqX86)
   if err != nil {
@@ -273,46 +345,8 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
     }
   }
 
-  if disks, ok := d.GetOk("disks"); ok {
-    log.Debugf("resourceComputeCreate: Create disks on ComputeID: %d", computeId)
-    addedDisks := disks.([]interface{})
-    if len(addedDisks) > 0 {
-      for _, disk := range addedDisks {
-        diskConv := disk.(map[string]interface{})
-        req := compute.DiskAddRequest{
-          ComputeID: computeId,
-          DiskName:  diskConv["disk_name"].(string),
-          Size:      uint64(diskConv["size"].(int)),
-        }
-
-        if diskConv["sep_id"].(int) != 0 {
-          req.SepID = uint64(diskConv["sep_id"].(int))
-        }
-        if diskConv["disk_type"].(string) != "" {
-          req.DiskType = diskConv["disk_type"].(string)
-        }
-        if diskConv["pool"].(string) != "" {
-          req.Pool = diskConv["pool"].(string)
-        }
-        if diskConv["desc"].(string) != "" {
-          req.Description = diskConv["desc"].(string)
-        }
-        if diskConv["image_id"].(int) != 0 {
-          req.ImageID = uint64(diskConv["image_id"].(int))
-        }
-
-        diskId, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
-        if err != nil {
-          cleanup = true
-          return diag.FromErr(err)
-        }
-
-        ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskId))), diskConv["permanently"].(bool))
-      }
-    }
-  }
-
   if !cleanup {
 
     if enabled, ok := d.GetOk("enabled"); ok {
       if enabled.(bool) {
         req := compute.EnableRequest{ComputeID: computeId}
@@ -435,9 +469,12 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf
       req := compute.PFWAddRequest{
         ComputeID:       computeId,
         PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
-        LocalBasePort:   uint64(pfwItem["local_port"].(int)),
         Proto:           pfwItem["proto"].(string),
       }
 
+      if pfwItem["local_port"].(int) != 0 {
+        req.LocalBasePort = uint64(pfwItem["local_port"].(int))
+      }
       if pfwItem["public_port_end"].(int) != 0 {
        req.PublicPortEnd = int64(pfwItem["public_port_end"].(int))
       }
@@ -587,7 +624,7 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac
     }
   }
 
-  if err = flattenCompute(ctx, d, computeRec); err != nil {
+  if err = flattenCompute(d, computeRec); err != nil {
     return diag.FromErr(err)
   }
 
@@ -621,6 +658,12 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
     return diag.Errorf("resourceComputeUpdate: can't update Compute bacause imageID %d not allowed or does not exist", d.Get("image_id").(int))
   }
 
+  if disks, ok := d.GetOk("disks"); ok {
+    if isMoreThanOneDisksTypeB(ctx, disks) {
+      return diag.Errorf("resourceComputeUpdate: can't update Compute because block disks have more 1 disk type 'B'")
+    }
+  }
+
   if network, ok := d.GetOk("network"); ok {
     networkList := network.(*schema.Set).List()
     for _, elem := range networkList {
@@ -629,13 +672,19 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
       switch networkData["net_type"].(string) {
       case "VINS":
         if vinsId, ok := existVinsId(ctx, m, networkData["net_id"].(int)); !ok {
-          return diag.Errorf("resourceComputeCreate: can't update compute because vins ID %d is not allowed or does not exist", vinsId)
+          return diag.Errorf("resourceComputeUpdate: can't update compute because vins ID %d is not allowed or does not exist", vinsId)
         }
       case "EXTNET":
         if extNetId, ok := existExtNetId(ctx, m, networkData["net_id"].(int)); !ok {
-          return diag.Errorf("resourceComputeCreate: can't update compute because extnet ID %d is not allowed or does not exist", extNetId)
+          return diag.Errorf("resourceComputeUpdate: can't update compute because extnet ID %d is not allowed or does not exist", extNetId)
+        }
+      case "VFNIC":
+        if d.Get("driver").(string) == "KVM_PPC" {
+          return diag.Errorf("resourceComputeUpdate: can't create compute because 'VFNIC' net_type is not allowed for driver 'KVM_PPC'")
+        }
+        if vfpoolId, ok := existVFPoolId(ctx, m, networkData["net_id"].(int)); !ok {
+          return diag.Errorf("resourceComputeUpdate: can't create compute because vfpool ID %d is not allowed or does not exist", vfpoolId)
         }
-
       default:
         continue
       }
@@ -839,7 +888,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
     }
   }
 
-  if d.HasChanges("description", "name") {
+  if d.HasChanges("description", "name", "numa_affinity", "cpu_pin", "hp_backed") {
     req := compute.UpdateRequest{
       ComputeID: computeRec.ID,
     }
@@ -850,10 +899,39 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
     if d.HasChange("description") {
       req.Description = d.Get("description").(string)
     }
+    if d.HasChange("numa_affinity") {
+      req.NumaAffinity = d.Get("numa_affinity").(string)
+    }
+    if d.HasChange("cpu_pin") {
+      req.CPUPin = d.Get("cpu_pin").(bool)
+    }
+    if d.HasChange("hp_backed") {
+      req.HPBacked = d.Get("hp_backed").(bool)
+    }
+
+    // Note bene: numa_affinity, cpu_pin and hp_backed are not allowed to be changed for compute in STARTED tech status.
+    // If STARTED, we need to stop it before update
+    var isStopRequired bool
+    if d.HasChanges("numa_affinity", "cpu_pin", "hp_backed") && d.Get("started").(bool) {
+      isStopRequired = true
+    }
+    if isStopRequired {
+      if _, err := c.CloudAPI().Compute().Stop(ctx, compute.StopRequest{ComputeID: computeRec.ID}); err != nil {
+        return diag.FromErr(err)
+      }
+    }
+
+    // perform update
     if _, err := c.CloudAPI().Compute().Update(ctx, req); err != nil {
       return diag.FromErr(err)
     }
+
+    // If used to be STARTED, we need to start it after update
+    if isStopRequired {
+      if _, err := c.CloudAPI().Compute().Start(ctx, compute.StartRequest{ComputeID: computeRec.ID}); err != nil {
+        return diag.FromErr(err)
+      }
+    }
   }
 
   if d.HasChange("disks") {
@@ -862,14 +940,6 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
     resizedDisks := make([]interface{}, 0)
     renamedDisks := make([]interface{}, 0)
 
-    // save permanently in disks based on disk_id to context
-    for _, diskItemInterface := range d.Get("disks").([]interface{}) {
-      diskItem := diskItemInterface.(map[string]interface{})
-      diskId := diskItem["disk_id"].(int)
-      permanently := diskItem["permanently"].(bool)
-      ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(diskId)), permanently)
-    }
-
     oldDisks, newDisks := d.GetChange("disks")
     oldConv := oldDisks.([]interface{})
     newConv := newDisks.([]interface{})
@@ -975,12 +1045,10 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
         if diskConv["image_id"].(int) != 0 {
          req.ImageID = uint64(diskConv["image_id"].(int))
         }
-        diskId, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
+        _, err := c.CloudAPI().Compute().DiskAdd(ctx, req)
         if err != nil {
          return diag.FromErr(err)
         }
-
-        ctx = context.WithValue(ctx, DiskKey(strconv.Itoa(int(diskId))), diskConv["permanently"].(bool))
       }
     }
 
@@ -1244,10 +1312,11 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
       req := compute.PFWDelRequest{
         ComputeID:       computeRec.ID,
         PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
-        LocalBasePort:   uint64(pfwItem["local_port"].(int)),
         Proto:           pfwItem["proto"].(string),
       }
+      if pfwItem["local_port"].(int) != 0 {
+        req.LocalBasePort = uint64(pfwItem["local_port"].(int))
+      }
       if pfwItem["public_port_end"].(int) == -1 {
         req.PublicPortEnd = req.PublicPortStart
       } else {
@@ -1269,10 +1338,11 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
        ComputeID:       computeRec.ID,
        PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
        PublicPortEnd:   int64(pfwItem["public_port_end"].(int)),
-       LocalBasePort:   uint64(pfwItem["local_port"].(int)),
        Proto:           pfwItem["proto"].(string),
       }
+      if pfwItem["local_port"].(int) != 0 {
+        req.LocalBasePort = uint64(pfwItem["local_port"].(int))
+      }
       _, err := c.CloudAPI().Compute().PFWAdd(ctx, req)
       if err != nil {
        return diag.FromErr(err)
@@ -1711,7 +1781,7 @@ func portForwardingSubresourceSchemaMake() map[string]*schema.Schema {
   },
   "local_port": {
     Type:     schema.TypeInt,
-    Required: true,
+    Optional: true,
   },
   "proto": {
     Type: schema.TypeString,
@@ -1786,24 +1856,30 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
   "cpu": {
     Type:         schema.TypeInt,
     Required:     true,
-    ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute),
+    ValidateFunc: validation.IntBetween(1, constants.MAX_CPUS_PER_COMPUTE),
     Description:  "Number of CPUs to allocate to this compute instance.",
   },
   "ram": {
     Type:     schema.TypeInt,
     Required: true,
     ValidateFunc: validation.All(
-      validation.IntAtLeast(constants.MinRamPerCompute),
-      validators.DivisibleBy(constants.RAMDivisibility),
+      validation.IntAtLeast(constants.MIN_RAM_PER_COMPUTE),
+      validators.DivisibleBy(constants.RAM_DIVISIBILITY),
     ),
     Description: "Amount of RAM in MB to allocate to this compute instance.",
   },
   "image_id": {
     Type:     schema.TypeInt,
-    Required: true,
+    Optional: true,
     //ForceNew: true, //REDEPLOY
     Description: "ID of the OS image to base this compute instance on.",
   },
+  "without_boot_disk": {
+    Type:        schema.TypeBool,
+    Optional:    true,
+    Default:     false,
+    Description: "If True, the imageId, bootDisk, sepId, pool parameters are ignored and the compute is created without a boot disk in the stopped state.",
+  },
   "boot_disk_size": {
     Type:     schema.TypeInt,
     Optional: true,
@@ -1926,29 +2002,26 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
     // ForceNew: true,
     Description: "Pool to use if sepId is set, can be also empty if needed to be chosen by system.",
   },
-
   "extra_disks": {
     Type:     schema.TypeSet,
     Optional: true,
     Computed: true,
-    MaxItems: constants.MaxExtraDisksPerCompute,
+    MaxItems: constants.MAX_EXTRA_DISKS_PER_COMPUTE,
     Elem: &schema.Schema{
       Type: schema.TypeInt,
     },
     Description: "Optional list of IDs of extra disks to attach to this compute. You may specify several extra disks.",
   },
-
   "network": {
     Type:     schema.TypeSet,
     Optional: true,
     MinItems: 1,
-    MaxItems: constants.MaxNetworksPerCompute,
+    MaxItems: constants.MAX_NETWORKS_PER_COMPUTE,
     Elem: &schema.Resource{
       Schema: networkSubresourceSchemaMake(),
     },
     Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.",
   },
-
   "tags": {
     Type:     schema.TypeSet,
     Optional: true,
@@ -1956,7 +2029,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
       Schema: tagsSubresourceSchemaMake(),
     },
   },
-
   "port_forwarding": {
     Type:     schema.TypeSet,
     Optional: true,
@@ -1964,7 +2036,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
       Schema: portForwardingSubresourceSchemaMake(),
     },
   },
-
   "user_access": {
     Type:     schema.TypeSet,
     Optional: true,
@@ -1972,7 +2043,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
       Schema: userAccessSubresourceSchemaMake(),
     },
   },
-
   "snapshot": {
     Type:     schema.TypeSet,
     Optional: true,
@@ -1980,7 +2050,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
       Schema: snapshotSubresourceSchemaMake(),
     },
   },
-
   "rollback": {
     Type:     schema.TypeSet,
     MaxItems: 1,
@@ -1989,7 +2058,6 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
       Schema: snapshotRollbackSubresourceSchemaMake(),
     },
   },
-
   "cd": {
     Type:     schema.TypeSet,
     Optional: true,
@@ -1998,50 +2066,42 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
      Schema: cdSubresourceSchemaMake(),
    },
  },
-
  "pin_to_stack": {
    Type:     schema.TypeBool,
    Optional: true,
    Default:  false,
  },
-
  "description": {
    Type:        schema.TypeString,
    Optional:    true,
    Description: "Optional text description of this compute instance.",
  },
-
  "cloud_init": {
    Type:        schema.TypeString,
    Optional:    true,
    Description: "Optional cloud_init parameters. Applied when creating new compute instance only, ignored in all other cases.",
  },
-
  "enabled": {
    Type:        schema.TypeBool,
    Optional:    true,
    Computed:    true,
    Description: "If true - enable compute, else - disable",
  },
-
  "pause": {
    Type:     schema.TypeBool,
    Optional: true,
    Default:  false,
  },
-
  "reset": {
    Type:     schema.TypeBool,
    Optional: true,
    Default:  false,
  },
-
  "restore": {
    Type:     schema.TypeBool,
    Optional: true,
    Default:  false,
  },
-
  "auto_start": {
    Type:     schema.TypeBool,
    Optional: true,
@@ -2093,6 +2153,26 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
    Optional:    true,
    Description: "compute purpose",
  },
+ "numa_affinity": {
+   Type:         schema.TypeString,
+   Optional:     true,
+   Default:      "none",
+   ValidateFunc: validation.StringInSlice([]string{"none", "strict", "loose"}, false), // observe case while validating
+   Description:  "Rule for VM placement with NUMA affinity.",
+ },
+ "cpu_pin": {
+   Type:        schema.TypeBool,
+   Optional:    true,
+   Default:     false,
+   Description: "Run VM on dedicated CPUs. To use this feature, the system must be pre-configured by allocating CPUs on the physical node.",
+ },
+ "hp_backed": {
+   Type:        schema.TypeBool,
+   Optional:    true,
+   Default:     false,
+   Description: "Use Huge Pages to allocate RAM of the virtual machine. The system must be pre-configured by allocating Huge Pages on the physical node.",
+ },
+
  // The rest are Compute properties, which are "computed" once it is created
  "account_id": {
    Type: schema.TypeInt,
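The new numa_affinity field is validated with `validation.StringInSlice(..., false)`, so the value must match "none", "strict", or "loose" exactly (case is observed). The following standalone sketch only illustrates how that validator behaves when called directly; it assumes terraform-plugin-sdk/v2 is on the module path and is not provider code:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func main() {
	// Same validator shape as the numa_affinity schema field:
	// case-sensitive match against the three allowed placement rules.
	validate := validation.StringInSlice([]string{"none", "strict", "loose"}, false)

	for _, v := range []string{"strict", "Strict"} {
		_, errs := validate(v, "numa_affinity")
		fmt.Printf("%q -> %d validation error(s)\n", v, len(errs))
	}
	// "strict" passes; "Strict" is rejected because case is observed.
}
```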
@@ -2124,6 +2204,10 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
    Computed:    true,
    Description: "This compute instance boot disk ID.",
  },
+ "cd_image_id": {
+   Type:     schema.TypeInt,
+   Computed: true,
+ },
  "clone_reference": {
    Type:     schema.TypeInt,
    Computed: true,
@@ -2202,6 +2286,10 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
    Type:     schema.TypeBool,
    Computed: true,
  },
+ "numa_node_id": {
+   Type:     schema.TypeInt,
+   Computed: true,
+ },
  "natable_vins_id": {
    Type:     schema.TypeInt,
    Computed: true,
@@ -2246,6 +2334,13 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
    Type:     schema.TypeString,
    Computed: true,
  },
+ "reserved_node_cpus": {
+   Type:     schema.TypeList,
+   Computed: true,
+   Elem: &schema.Schema{
+     Type: schema.TypeInt,
+   },
+ },
  "rg_name": {
    Type:     schema.TypeString,
    Computed: true,
@@ -77,6 +77,9 @@ func utilityDataComputeListCheckPresence(ctx context.Context, d *schema.Resource
  if includeDeleted, ok := d.GetOk("includedeleted"); ok {
    req.IncludeDeleted = includeDeleted.(bool)
  }
+ if sortBy, ok := d.GetOk("sort_by"); ok {
+   req.SortBy = sortBy.(string)
+ }
  if page, ok := d.GetOk("page"); ok {
    req.Page = uint64(page.(int))
  }
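Throughout these data sources the sort_by value uses the +|-(field) format: an optional leading "+" or "-" selects ascending or descending order on the named field. A hedged sketch of parsing that format follows; parseSortBy is a hypothetical helper written only to illustrate the convention, not part of the provider or the SDK:

```go
package main

import (
	"fmt"
	"strings"
)

// parseSortBy splits a "+field" / "-field" value into the field name and a
// descending flag; a bare field name defaults to ascending order.
func parseSortBy(s string) (field string, desc bool) {
	switch {
	case strings.HasPrefix(s, "-"):
		return strings.TrimPrefix(s, "-"), true
	case strings.HasPrefix(s, "+"):
		return strings.TrimPrefix(s, "+"), false
	default:
		return s, false
	}
}

func main() {
	for _, v := range []string{"+name", "-created_time", "status"} {
		field, desc := parseSortBy(v)
		fmt.Printf("%-14s -> field=%q desc=%v\n", v, field, desc)
	}
}
```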
@@ -71,6 +71,9 @@ func utilityDataComputeListDeletedCheckPresence(ctx context.Context, d *schema.R
  if extnetId, ok := d.GetOk("extnet_id"); ok {
    req.ExtNetID = uint64(extnetId.(int))
  }
+ if sortBy, ok := d.GetOk("sort_by"); ok {
+   req.SortBy = sortBy.(string)
+ }
  if page, ok := d.GetOk("page"); ok {
    req.Page = uint64(page.(int))
  }
@@ -57,6 +57,9 @@ func utilityComputePCIDeviceListCheckPresence(ctx context.Context, d *schema.Res
  if status, ok := d.GetOk("status"); ok {
    req.Status = status.(string)
  }
+ if sortBy, ok := d.GetOk("sort_by"); ok {
+   req.SortBy = sortBy.(string)
+ }
  if page, ok := d.GetOk("page"); ok {
    req.Page = uint64(page.(int))
  }
@@ -57,6 +57,9 @@ func utilityComputeVGPUListCheckPresence(ctx context.Context, d *schema.Resource
  if includeDeleted, ok := d.GetOk("includedeleted"); ok {
    req.IncludeDeleted = includeDeleted.(bool)
  }
+ if sortBy, ok := d.GetOk("sort_by"); ok {
+   req.SortBy = sortBy.(string)
+ }
  if page, ok := d.GetOk("page"); ok {
    req.Page = uint64(page.(int))
  }
@@ -104,6 +104,8 @@ func flattenResourceLB(d *schema.ResourceData, lb *lb.RecordLB) {
  d.Set("frontends", flattenFrontends(lb.Frontends))
  d.Set("gid", lb.GID)
  d.Set("guid", lb.GUID)
+ d.Set("manager_id", lb.ManagerId)
+ d.Set("manager_type", lb.ManagerType)
  d.Set("lb_id", lb.ID)
  d.Set("image_id", lb.ImageID)
  d.Set("milestones", lb.Milestones)
@@ -117,6 +119,7 @@ func flattenResourceLB(d *schema.ResourceData, lb *lb.RecordLB) {
  d.Set("tech_status", lb.TechStatus)
  d.Set("updated_by", lb.UpdatedBy)
  d.Set("updated_time", lb.UpdatedTime)
+ d.Set("user_managed", lb.UserManaged)
  d.Set("vins_id", lb.VINSID)
 
 }
@@ -136,6 +139,8 @@ func flattenLB(d *schema.ResourceData, lb *lb.RecordLB) {
  d.Set("frontends", flattenFrontends(lb.Frontends))
  d.Set("gid", lb.GID)
  d.Set("guid", lb.GUID)
+ d.Set("manager_id", lb.ManagerId)
+ d.Set("manager_type", lb.ManagerType)
  d.Set("image_id", lb.ImageID)
  d.Set("milestones", lb.Milestones)
  d.Set("name", lb.Name)
@@ -148,6 +153,7 @@ func flattenLB(d *schema.ResourceData, lb *lb.RecordLB) {
  d.Set("tech_status", lb.TechStatus)
  d.Set("updated_by", lb.UpdatedBy)
  d.Set("updated_time", lb.UpdatedTime)
+ d.Set("user_managed", lb.UserManaged)
  d.Set("vins_id", lb.VINSID)
 }
 
@@ -266,9 +272,12 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} {
    "frontends":    flattenFrontends(lb.Frontends),
    "gid":          lb.GID,
    "guid":         lb.GUID,
+   "manager_id":   lb.ManagerId,
+   "manager_type": lb.ManagerType,
    "image_id":     lb.ImageID,
    "milestones":   lb.Milestones,
    "name":         lb.Name,
+   "part_k8s":     lb.PartK8s,
    "primary_node": flattenNode(lb.PrimaryNode),
    "rg_id":        lb.RGID,
    "rg_name":      lb.RGName,
@@ -277,6 +286,7 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} {
    "tech_status":  lb.TechStatus,
    "updated_by":   lb.UpdatedBy,
    "updated_time": lb.UpdatedTime,
+   "user_managed": lb.UserManaged,
    "vins_id":      lb.VINSID,
    "lb_id":        lb.ID,
  }
@@ -80,6 +80,11 @@ func dsLBListDeletedSchemaMake() map[string]*schema.Schema {
    Optional:    true,
    Description: "Filter by BackIP",
  },
+ "sort_by": {
+   Type:        schema.TypeString,
+   Optional:    true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type:     schema.TypeInt,
    Optional: true,
@@ -151,6 +156,11 @@ func dsLBListSchemaMake() map[string]*schema.Schema {
    Optional: true,
    Default:  false,
  },
+ "sort_by": {
+   Type:        schema.TypeString,
+   Optional:    true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type:     schema.TypeInt,
    Optional: true,
@@ -181,6 +191,5 @@ func dsLBItemSchemaMake() map[string]*schema.Schema {
    Type:     schema.TypeString,
    Computed: true,
  }
- delete(sch, "part_k8s")
  return sch
 }
@@ -94,9 +94,15 @@ func lbResourceSchemaMake() map[string]*schema.Schema {
    Type:     schema.TypeBool,
    Optional: true,
  }
- sch["lb_sysctl_params"] = &schema.Schema{
-   Type:     schema.TypeString,
+ sch["sysctl_params"] = &schema.Schema{
+   Type:     schema.TypeList,
    Optional: true,
+   Elem: &schema.Schema{
+     Type: schema.TypeMap,
+     Elem: &schema.Schema{
+       Type: schema.TypeString,
+     },
+   },
  }
 
 ///
@@ -42,7 +42,7 @@ func createLBSchema() map[string]*schema.Schema {
  },
  "backend_haip": {
    Type:     schema.TypeString,
    Computed: true,
  },
  "backends": {
    Type: schema.TypeList,
@@ -210,7 +210,7 @@ func createLBSchema() map[string]*schema.Schema {
  },
  "frontend_haip": {
    Type:     schema.TypeString,
    Computed: true,
  },
  "frontends": {
    Type: schema.TypeList,
@@ -268,6 +268,14 @@ func createLBSchema() map[string]*schema.Schema {
    Type:     schema.TypeInt,
    Computed: true,
  },
+ "manager_id": {
+   Type:     schema.TypeInt,
+   Computed: true,
+ },
+ "manager_type": {
+   Type:     schema.TypeString,
+   Computed: true,
+ },
  "image_id": {
    Type:     schema.TypeInt,
    Computed: true,
@@ -372,6 +380,10 @@ func createLBSchema() map[string]*schema.Schema {
    Type:     schema.TypeInt,
    Computed: true,
  },
+ "user_managed": {
+   Type:     schema.TypeBool,
+   Computed: true,
+ },
  "vins_id": {
    Type:     schema.TypeInt,
    Computed: true,
@@ -88,10 +88,25 @@ func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}
  if desc, ok := d.GetOk("desc"); ok {
    req.Description = desc.(string)
  }
 
  if haMode, ok := d.GetOk("ha_mode"); ok {
    req.HighlyAvailable = haMode.(bool)
  }
+ if sysctlParams, ok := d.GetOk("sysctl_params"); ok {
+   syscrlSliceMaps := sysctlParams.([]map[string]string)
+   res := make([]map[string]interface{}, 0, len(syscrlSliceMaps))
+   for _, syscrlMap := range syscrlSliceMaps {
+     tempMap := make(map[string]interface{})
+     for k, v := range syscrlMap {
+       if intVal, err := strconv.Atoi(v); err == nil {
+         tempMap[k] = intVal
+         continue
+       }
+       tempMap[k] = v
+     }
+     res = append(res, tempMap)
+   }
+   req.SysctlParams = res
+ }
 
  lbId, err := c.CloudAPI().LB().Create(ctx, req)
  if err != nil {
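The sysctl_params handling above converts each string value to an int when strconv.Atoi succeeds and keeps the original string otherwise. For illustration, here is a self-contained sketch of that normalization step; the function name and sample values are hypothetical, not provider code:

```go
package main

import (
	"fmt"
	"strconv"
)

// normalizeSysctl converts numeric string values to ints and leaves the rest
// as strings, mirroring the conversion applied before SysctlParams is sent.
func normalizeSysctl(params []map[string]string) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(params))
	for _, m := range params {
		out := make(map[string]interface{}, len(m))
		for k, v := range m {
			if n, err := strconv.Atoi(v); err == nil {
				out[k] = n
				continue
			}
			out[k] = v
		}
		res = append(res, out)
	}
	return res
}

func main() {
	sample := []map[string]string{{"net.ipv4.tcp_syncookies": "1", "kernel.hostname": "lb-1"}}
	fmt.Println(normalizeSysctl(sample)) // numeric "1" becomes int 1, "lb-1" stays a string
}
```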
@@ -348,6 +363,31 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}
    }
  }
 
+ if d.HasChange("sysctl_params") {
+   syscrlSliceMaps := d.Get("sysctl_params").([]map[string]string)
+   res := make([]map[string]interface{}, 0, len(syscrlSliceMaps))
+   for _, syscrlMap := range syscrlSliceMaps {
+     tempMap := make(map[string]interface{})
+     for k, v := range syscrlMap {
+       if intVal, err := strconv.Atoi(v); err == nil {
+         tempMap[k] = intVal
+         continue
+       }
+       tempMap[k] = v
+     }
+     res = append(res, tempMap)
+   }
+
+   req := lb.UpdateSysctParamsRequest{
+     LBID:         uint64(d.Get("lb_id").(int)),
+     SysctlParams: res,
+   }
+   _, err := c.CloudAPI().LB().UpdateSysctlParams(ctx, req)
+   if err != nil {
+     return diag.FromErr(err)
+   }
+ }
+
  if d.HasChange("enable") {
    enable := d.Get("enable").(bool)
    req := lb.DisableEnableRequest{
@@ -402,9 +442,9 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}
    req := lb.RestartRequest{
      LBID: uint64(d.Get("lb_id").(int)),
    }
-   if safe, ok:= d.GetOk("safe"); ok {
+   if safe, ok := d.GetOk("safe"); ok {
      req.Safe = safe.(bool)
    }
 
    _, err := c.CloudAPI().LB().Restart(ctx, req)
    if err != nil {
@@ -82,6 +82,10 @@ func utilityLBListCheckPresence(ctx context.Context, d *schema.ResourceData, m i
    req.IncludeDeleted = includedeleted.(bool)
  }
 
+ if sortBy, ok := d.GetOk("sort_by"); ok {
+   req.SortBy = sortBy.(string)
+ }
+
  if page, ok := d.GetOk("page"); ok {
    req.Page = uint64(page.(int))
  }
@@ -74,6 +74,10 @@ func utilityLBListDeletedCheckPresence(ctx context.Context, d *schema.ResourceDa
    req.BackIP = back_ip.(string)
  }
 
+ if sortBy, ok := d.GetOk("sort_by"); ok {
+   req.SortBy = sortBy.(string)
+ }
+
  if page, ok := d.GetOk("page"); ok {
    req.Page = uint64(page.(int))
  }
@@ -101,6 +101,11 @@ func dataSourceLocationsListSchemaMake() map[string]*schema.Schema {
    Optional:    true,
    Description: "Filter by location code",
  },
+ "sort_by": {
+   Type:        schema.TypeString,
+   Optional:    true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type:     schema.TypeInt,
    Optional: true,
@@ -46,6 +46,10 @@ func utilityLocationsListCheckPresence(ctx context.Context, d *schema.ResourceDa
  c := m.(*controller.ControllerCfg)
  req := locations.ListRequest{}
 
+ if sortBy, ok := d.GetOk("sort_by"); ok {
+   req.SortBy = sortBy.(string)
+ }
+
  if page, ok := d.GetOk("page"); ok {
    req.Page = uint64(page.(int))
  }
@@ -68,7 +68,6 @@ func DataSourceResgroup() *schema.Resource {
    }
  }
 
-
 // func sepsSchemaMake() map[string]*schema.Schema {
 // 	res := map[string]*schema.Schema{
 // 		"sep_id": {
@@ -300,6 +299,13 @@ func dataSourceRgSchemaMake() map[string]*schema.Schema {
      Schema: aclSchemaMake(),
    },
  },
+ "compute_features": {
+   Type:     schema.TypeList,
+   Computed: true,
+   Elem: &schema.Schema{
+     Type: schema.TypeString,
+   },
+ },
  "created_by": {
    Type:     schema.TypeString,
    Computed: true,
@@ -418,5 +424,3 @@ func dataSourceRgSchemaMake() map[string]*schema.Schema {
  }
  return res
 }
-
-
@@ -83,8 +83,17 @@ func dataSourceRgAffinityGroupsListSchemaMake() map[string]*schema.Schema {
  "ids": {
    Type:     schema.TypeList,
    Computed: true,
-   Elem: &schema.Schema{
-     Type: schema.TypeInt,
+   Elem: &schema.Resource{
+     Schema: map[string]*schema.Schema{
+       "id": {
+         Type:     schema.TypeInt,
+         Computed: true,
+       },
+       "node_id": {
+         Type:     schema.TypeInt,
+         Computed: true,
+       },
+     },
    },
  },
 },
@@ -104,6 +104,11 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema {
    Default:     false,
    Description: "included deleted resource groups",
  },
+ "sort_by": {
+   Type:        schema.TypeString,
+   Optional:    true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type:     schema.TypeInt,
    Optional: true,
@@ -134,6 +139,13 @@ func dataSourceRgListSchemaMake() map[string]*schema.Schema {
    Type:     schema.TypeString,
    Computed: true,
  },
+ "compute_features": {
+   Type:     schema.TypeList,
+   Computed: true,
+   Elem: &schema.Schema{
+     Type: schema.TypeString,
+   },
+ },
  "created_by": {
    Type:     schema.TypeString,
    Computed: true,
@@ -135,6 +135,11 @@ func dataSourceRgListComputesSchemaMake() map[string]*schema.Schema {
    Optional:    true,
    Description: "Filter by extnet ID",
  },
+ "sort_by": {
+   Type:        schema.TypeString,
+   Optional:    true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type:     schema.TypeInt,
    Optional: true,
|||||||
@@ -93,6 +93,11 @@ func dataSourceRgListDeletedSchemaMake() map[string]*schema.Schema {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
Description: "Filter by lock status",
|
Description: "Filter by lock status",
|
||||||
},
|
},
|
||||||
|
"sort_by": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Description: "sort by one of supported fields, format +|-(field)",
|
||||||
|
},
|
||||||
"page": {
|
"page": {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@@ -123,6 +128,13 @@ func dataSourceRgListDeletedSchemaMake() map[string]*schema.Schema {
    Type:     schema.TypeString,
    Computed: true,
  },
+ "compute_features": {
+   Type:     schema.TypeList,
+   Computed: true,
+   Elem: &schema.Schema{
+     Type: schema.TypeString,
+   },
+ },
  "cpu_allocation_parameter": {
    Type:     schema.TypeString,
    Computed: true,
|||||||
@@ -257,11 +257,6 @@ func dataSourceRgListLbSchemaMake() map[string]*schema.Schema {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
Description: "Filter by name",
|
Description: "Filter by name",
|
||||||
},
|
},
|
||||||
"account_id": {
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Optional: true,
|
|
||||||
Description: "Filter by account ID",
|
|
||||||
},
|
|
||||||
"tech_status": {
|
"tech_status": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@@ -282,6 +277,11 @@ func dataSourceRgListLbSchemaMake() map[string]*schema.Schema {
    Optional:    true,
    Description: "Filter by backend IP",
  },
+ "sort_by": {
+   Type:        schema.TypeString,
+   Optional:    true,
+   Description: "sort by one of supported fields, format +|-(field)",
+ },
  "page": {
    Type:     schema.TypeInt,
    Optional: true,
Some files were not shown because too many files have changed in this diff.