From e2ee45ee140803cd1ded0c7af9d61155f0843418 Mon Sep 17 00:00:00 2001 From: loskutovanl Date: Mon, 18 Dec 2023 18:36:55 +0300 Subject: [PATCH] 4.5.1 --- CHANGELOG.md | 24 +- Makefile | 2 +- README.md | 53 +- README_EN.md | 2 +- go.mod | 9 +- go.sum | 18 +- internal/controller/controller.go | 16 +- internal/provider/data_sources_map.go | 415 ++-- internal/provider/provider.go | 318 +-- internal/provider/resource_map.go | 207 +- .../service/cloudapi/k8s/utility_k8s_wg.go | 2 +- .../cloudapi/kvmvm/resource_compute.go | 10 +- .../account/data_source_account.go | 172 +- .../data_source_account_audits_list.go | 179 +- .../data_source_account_computes_list.go | 296 +-- .../data_source_account_deleted_list.go | 355 +--- .../account/data_source_account_disks_list.go | 221 +- .../data_source_account_flipgroups_list.go | 289 +-- ...source_account_get_resource_consumption.go | 168 +- .../account/data_source_account_list.go | 364 +--- ...ource_account_resource_consumption_list.go | 143 +- .../account/data_source_account_rg_list.go | 433 +--- .../account/data_source_account_vins_list.go | 263 +-- .../service/cloudbroker/account/flattens.go | 20 +- .../cloudbroker/account/resource_account.go | 883 ++++---- .../cloudbroker/account/utility_account.go | 251 +++ .../cloudbroker/disks/data_source_disk.go | 294 +-- .../disks/data_source_disk_list.go | 513 +---- .../disks/data_source_disk_list_types.go | 19 +- .../data_source_disk_list_types_detailed.go | 50 +- .../disks/data_source_disk_list_unattached.go | 399 +--- .../disks/data_source_disk_snapshot.go | 46 +- .../disks/data_source_disk_snapshot_list.go | 54 +- .../service/cloudbroker/disks/flattens.go | 11 +- .../disks/resource_check_input_values.go | 49 +- .../cloudbroker/disks/resource_disk.go | 578 ++---- .../disks/resource_disk_snapshot.go | 162 +- .../disks/utility_disk_list_deleted.go | 2 +- .../disks/utility_disk_list_types.go | 7 + .../disks/utility_disk_list_types_detailed.go | 14 +- .../disks/utility_disk_list_unattached.go | 2 +- .../cloudbroker/extnet/data_source_extnet.go | 227 +- .../extnet/data_source_extnet_default.go | 11 +- .../extnet/data_source_extnet_list.go | 184 +- .../extnet/data_source_extnet_static_route.go | 40 +- .../data_source_extnet_static_route_list.go | 51 +- .../cloudbroker/extnet/resource_extnet.go | 392 +--- .../extnet/resource_extnet_static_route.go | 100 +- .../extnet/utility_extnet_resource.go | 42 +- .../extnet/utility_extnet_static_route.go | 52 +- .../flipgroup/data_source_flipgroup.go | 147 -- .../flipgroup/data_sourse_flipgroup_list.go | 169 -- .../flipgroup/resource_check_input_values.go | 40 +- .../flipgroup/resource_flipgroup.go | 227 +- .../flipgroup/utility_flipgroup.go | 2 +- .../cloudbroker/grid/data_source_grid.go | 168 +- .../data_source_grid_get_post_diagnosis.go | 33 +- .../grid/data_source_grid_get_post_status.go | 20 +- .../cloudbroker/grid/data_source_grid_list.go | 334 +-- .../grid/data_source_grid_list_emails.go | 31 +- .../grid/data_sourse_grid_get_consumption.go | 132 +- .../grid/data_sourse_grid_list_consumption.go | 149 +- internal/service/cloudbroker/grid/flattens.go | 5 +- .../service/cloudbroker/grid/utility_grid.go | 126 +- .../grid/utility_grid_get_post_diagnosis.go | 8 +- .../service/cloudbroker/ic/input_checks.go | 246 ++- .../cloudbroker/image/data_source_image.go | 217 +- .../image/data_source_image_list.go | 167 +- .../image/data_source_image_list_stacks.go | 236 +-- .../service/cloudbroker/image/flattens.go | 297 ++- .../cloudbroker/image/resource_cdrom_image.go | 639 +++--- 
.../cloudbroker/image/resource_image.go | 952 ++++----- .../image/resource_virtual_image.go | 551 ++--- .../cloudbroker/image/utility_image.go | 120 +- .../cloudbroker/image/utility_image_list.go | 164 +- .../image/utility_image_list_stacks.go | 130 +- .../cloudbroker/k8s/resource_k8s_wg.go | 214 +- .../service/cloudbroker/k8s/utility_k8s.go | 25 +- .../service/cloudbroker/k8s/utility_k8s_wg.go | 121 +- .../cloudbroker/kvmvm/data_source_compute.go | 260 +-- .../service/cloudbroker/kvmvm/flattens.go | 584 +++++- .../cloudbroker/kvmvm/resource_compute.go | 1817 +++-------------- .../cloudbroker/kvmvm/utility_compute.go | 921 +++++++++ .../kvmvm/utility_compute_boot_disk.go | 4 +- .../service/cloudbroker/lb/data_source_lb.go | 4 +- .../cloudbroker/lb/data_source_lb_list.go | 2 + .../lb/data_source_lb_list_deleted.go | 2 + internal/service/cloudbroker/lb/flattens.go | 8 + .../lb/resource_check_input_values.go | 64 +- .../service/cloudbroker/lb/resource_lb.go | 436 ++-- .../cloudbroker/lb/resource_lb_backend.go | 194 +- .../lb/resource_lb_backend_server.go | 122 +- .../cloudbroker/lb/resource_lb_frontend.go | 72 +- .../lb/resource_lb_frontend_bind.go | 71 +- internal/service/cloudbroker/lb/utility_lb.go | 3 +- .../cloudbroker/lb/utility_lb_backend.go | 6 +- .../pcidevice/data_source_pcidevice.go | 209 +- .../pcidevice/data_source_pcidevice_list.go | 221 +- .../pcidevice/resource_check_input_vales.go | 53 +- .../pcidevice/resource_pcidevice.go | 521 +++-- .../service/cloudbroker/rg/data_source_rg.go | 269 --- .../data_source_rg_affinity_group_computes.go | 109 +- .../rg/data_source_rg_affinity_groups_get.go | 46 +- .../rg/data_source_rg_affinity_groups_list.go | 24 +- .../cloudbroker/rg/data_source_rg_audits.go | 74 +- .../cloudbroker/rg/data_source_rg_list.go | 364 +--- .../rg/resource_check_input_values.go | 57 +- .../service/cloudbroker/rg/resource_rg.go | 889 +++----- .../rg/utility_rg_affinity_groups_list.go | 56 +- .../service/cloudbroker/rg/utility_rg_list.go | 155 +- .../cloudbroker/sep/data_source_sep.go | 235 +-- .../cloudbroker/sep/data_source_sep_config.go | 160 +- .../sep/data_source_sep_consumption.go | 245 +-- .../sep/data_source_sep_disk_list.go | 163 +- .../cloudbroker/sep/data_source_sep_list.go | 285 +-- .../cloudbroker/sep/data_source_sep_pool.go | 216 +- internal/service/cloudbroker/sep/flattens.go | 32 +- .../sep/resource_check_input_values.go | 32 +- .../service/cloudbroker/sep/resource_sep.go | 1044 +++++----- .../cloudbroker/sep/resource_sep_config.go | 324 ++- .../cloudbroker/sep/utility_sep_list.go | 168 +- .../cloudbroker/sep/utility_sep_pool.go | 116 +- .../stack/data_source_stack_list.go | 51 +- .../cloudbroker/stack/data_sourse_stack.go | 252 +-- .../service/cloudbroker/stack/flattens.go | 118 +- .../cloudbroker/stack/utility_stack.go | 2 +- .../cloudbroker/stack/utility_stack_list.go | 8 +- .../cloudbroker/vins/data_source_vins.go | 120 +- .../cloudbroker/vins/data_source_vins_list.go | 261 +-- .../service/cloudbroker/vins/resource_vins.go | 809 ++++++-- .../service/cloudbroker/vins/utility_vins.go | 80 +- .../cloudbroker/vins/utility_vins_list.go | 142 +- samples/cloudbroker/data_disk/main.tf | 10 +- samples/cloudbroker/data_disk_list/main.tf | 44 +- samples/cloudbroker/data_grid/main.tf | 6 +- samples/cloudbroker/data_grid_list/main.tf | 14 +- samples/cloudbroker/data_image/main.tf | 4 +- samples/cloudbroker/data_image_list/main.tf | 76 +- .../data_image_list_stacks/main.tf | 19 +- .../cloudbroker/data_pcidevice_list/main.tf | 37 +- 
samples/cloudbroker/data_sep/main.tf | 6 +- samples/cloudbroker/data_sep_config/main.tf | 6 +- .../cloudbroker/data_sep_consumption/main.tf | 4 +- .../cloudbroker/data_sep_disk_list/main.tf | 5 +- samples/cloudbroker/data_sep_list/main.tf | 40 +- samples/cloudbroker/data_sep_pool/main.tf | 13 +- samples/cloudbroker/resource_account/main.tf | 8 +- samples/cloudbroker/resource_disk/main.tf | 95 +- samples/cloudbroker/resource_extnet/main.tf | 60 +- samples/cloudbroker/resource_image/main.tf | 28 +- .../cloudbroker/resource_image_cdrom/main.tf | 32 +- .../cloudbroker/resource_pcidevice/main.tf | 7 +- samples/cloudbroker/resource_sep/main.tf | 50 +- .../cloudbroker/resource_sep_config/main.tf | 12 +- .../resource_virtual_image/main.tf | 26 +- 155 files changed, 10040 insertions(+), 17124 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 211bd65..fc991bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,24 @@ -## Version 4.5.0 +## Version 4.5.1 ## Feature -- Added support for authorization using the Basis.Virtual Security system. Add client and config +- Refactoring BVS config +- Added and updated data sources and resources for cloudbroker groups: + * account + * audit + * disks + * extnet + * flipgroup + * grid + * image + * k8ci + * k8s + * kvmvm (compute) + * lb (load balancer) + * pcidevice + * rg (resource group) + * sep + * stack + * vins ### Bugfix -- Fixed bservice and rg schema and flatten in cloudapi -- Add stateUpgrader for k8s_cp in cloudapi/k8s +- Fixed description update for compute in cloudapi/kvmvm diff --git a/Makefile b/Makefile index aef969e..f17baf6 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ ZIPDIR = ./zip BINARY=${NAME} WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH} MAINPATH = ./cmd/decort/ -VERSION=4.5.0 +VERSION=4.5.1 OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH) FILES = ${BINARY}_${VERSION}_darwin_amd64\ diff --git a/README.md b/README.md index c5e0a07..efa48f5 100644 --- a/README.md +++ b/README.md @@ -27,23 +27,42 @@ Terraform provider для платформы Digital Energy Cloud Orchestration ## Возможности провайдера -- Работа с Compute instances, -- Работа с disks, -- Работа с k8s, -- Работа с image, -- Работа с flipgroups, -- Работа с stacks, -- Работа с reource groups, -- Работа с VINS, -- Работа с pfw, -- Работа с accounts, -- Работа с snapshots, -- Работа с bservice, -- Работа с extnets, -- Работа с locations, -- Работа с load balancer. - -Вики проекта: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +- Режим пользователя: + - Работа с accounts, + - Работа с bservice, + - Работа с disks, + - Работа с extnets, + - Работа с flipgroups, + - Работа с image, + - Работа с k8s, + - Работа с Compute instances, + - Работа с load balancer, + - Работа с locations, + - Работа с pfw, + - Работа с resource groups, + - Работа с snapshots, + - Работа с stacks, + - Работа с VINS. + +- Режим администратора: + - Работа с accounts, + - Работа с audits, + - Работа с disks, + - Работа с extnets, + - Работа с flipgroups, + - Работа с grids, + - Работа с images, + - Работа с k8ci, + - Работа с k8s, + - Работа с Compute instances, + - Работа с load balancer, + - Работа с pci device, + - Работа с resource groups, + - Работа с seps, + - Работа с stacks, + - Работа с VINS. 
+ +Со списком и описанием функционала всех групп можно ознамоиться на Вики проекта: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki ## Установка Начиная с версии провайдера `4.3.0` в релизном архиве находятся скрипты-инсталляторы. diff --git a/README_EN.md b/README_EN.md index f6c342a..061a176 100644 --- a/README_EN.md +++ b/README_EN.md @@ -58,7 +58,7 @@ Two ways for starting: ```terraform provider "decort" { - authenticator = "oauth2" + authenticator = "decs3o" #controller_url = controller_url = "https://ds1.digitalenergy.online" #oauth2_url = diff --git a/go.mod b/go.mod index 7a5a849..208badd 100644 --- a/go.mod +++ b/go.mod @@ -9,8 +9,7 @@ require ( github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 github.com/sirupsen/logrus v1.9.0 golang.org/x/net v0.16.0 - golang.org/x/oauth2 v0.13.0 - repository.basistech.ru/BASIS/decort-golang-sdk v1.7.0 + repository.basistech.ru/BASIS/decort-golang-sdk v1.7.2 ) require ( @@ -68,9 +67,9 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/zclconf/go-cty v1.12.1 // indirect - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/crypto v0.15.0 // indirect + golang.org/x/sys v0.14.0 // indirect + golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect google.golang.org/grpc v1.51.0 // indirect diff --git a/go.sum b/go.sum index f4d743d..e5859a3 100644 --- a/go.sum +++ b/go.sum @@ -249,8 +249,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -268,8 +268,6 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -295,8 +293,8 @@ 
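The README_EN hunk above switches the example authenticator from "oauth2" to "decs3o". A minimal sketch of a full provider block under the new naming follows; only the authenticator and controller_url values come from the patch, the remaining values are placeholders, and each of them can instead be supplied through the DECORT_OAUTH2_URL, DECORT_APP_ID and DECORT_APP_SECRET environment variables defined in the provider schema.

```terraform
# Sketch of a 'decs3o' provider configuration; credential values are placeholders.
provider "decort" {
  authenticator  = "decs3o"
  controller_url = "https://ds1.digitalenergy.online"
  oauth2_url     = "<oauth2 application url>" # or set DECORT_OAUTH2_URL
  app_id         = "<application id>"         # or set DECORT_APP_ID
  app_secret     = "<application secret>"     # or set DECORT_APP_SECRET
}
```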
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -306,8 +304,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= @@ -341,5 +339,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -repository.basistech.ru/BASIS/decort-golang-sdk v1.7.0 h1:aGDg9hQXs70m4Llx8hw9Y50M1C2apDqSsNMsE8isyIM= -repository.basistech.ru/BASIS/decort-golang-sdk v1.7.0/go.mod h1:mwcpnw0dT/PQf6AOJShjlbDNDfNitr0WM77LNKL1qjo= +repository.basistech.ru/BASIS/decort-golang-sdk v1.7.2 h1:Ll8MBcmDcElxxgxOUUaYbbafTSbIm4dcPEDLl4fdF8Q= +repository.basistech.ru/BASIS/decort-golang-sdk v1.7.2/go.mod h1:7fj8sgGZFiiExewQeqckCS4WxwOmU0oP6BO6mi1Lpkw= diff --git a/internal/controller/controller.go b/internal/controller/controller.go index 3e1bf3b..d9614e7 100644 --- a/internal/controller/controller.go +++ b/internal/controller/controller.go @@ -32,7 +32,6 @@ import ( "strings" log "github.com/sirupsen/logrus" - "golang.org/x/oauth2" decort "repository.basistech.ru/BASIS/decort-golang-sdk" "repository.basistech.ru/BASIS/decort-golang-sdk/config" "repository.basistech.ru/BASIS/decort-golang-sdk/interfaces" @@ -60,7 +59,10 @@ type ControllerCfg struct { bvs_user string // required for bvs mode bvs_password string // required for bvs mode domain string // required for bvs mode - token oauth2.Token // obtained from BVS provider on successful login in bvs mode + token config.Token // obtained from BVS provider on successful login in bvs mode + path_cfg string // the path of the configuration file 
entry + path_token string // the path of the token file entry + time_to_refresh int64 // the number of minutes before the expiration of the token, a refresh will be made legacy_user string // required for legacy mode legacy_password string // required for legacy mode legacy_sid string // obtained from DECORT controller on successful login in legacy mode @@ -99,7 +101,10 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) { app_secret: d.Get("app_secret").(string), oauth2_url: d.Get("oauth2_url").(string), decort_username: "", - token: oauth2.Token{}, + token: config.Token{}, + path_cfg: d.Get("path_cfg").(string), + path_token: d.Get("path_token").(string), + time_to_refresh: int64(d.Get("time_to_refresh").(int)), } allow_unverified_ssl := d.Get("allow_unverified_ssl").(bool) @@ -231,7 +236,7 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) { ret_config.caller = decort.New(sdkConf) case MODE_BVS: - + sdkConf := config.BVSConfig{ AppID: ret_config.app_id, AppSecret: ret_config.app_secret, @@ -242,6 +247,9 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) { Password: ret_config.bvs_password, Domain: ret_config.domain, Token: ret_config.token, + PathCfg: ret_config.path_cfg, + PathToken: ret_config.path_token, + TimeToRefresh: ret_config.time_to_refresh, } ret_config.caller = decort.NewBVS(sdkConf) diff --git a/internal/provider/data_sources_map.go b/internal/provider/data_sources_map.go index 3c6f18b..be4e279 100644 --- a/internal/provider/data_sources_map.go +++ b/internal/provider/data_sources_map.go @@ -1,161 +1,254 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, -Kasim Baybikov, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package provider - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/bservice" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/extnet" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/flipgroup" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/k8s" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/lb" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/locations" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/rg" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/snapshot" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/stack" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins" - - cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account" - // cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks" - cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet" - // cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup" - // cb_grid "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/grid" - // cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image" - // cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb" - // cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice" - // cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg" - // cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep" - // cb_stack "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/stack" - // cb_vgpu "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vgpu" -) - -func newDataSourcesMap() map[string]*schema.Resource { - return map[string]*schema.Resource{ - "decort_account": account.DataSourceAccount(), - "decort_resgroup": rg.DataSourceResgroup(), - "decort_kvmvm": kvmvm.DataSourceCompute(), - "decort_kvmvm_list": kvmvm.DataSourceComputeList(), - "decort_kvmvm_audits": kvmvm.DataSourceComputeAudits(), - "decort_kvmvm_get_audits": kvmvm.DataSourceComputeGetAudits(), - "decort_kvmvm_get_console_url": kvmvm.DataSourceComputeGetConsoleUrl(), - "decort_kvmvm_get_log": kvmvm.DataSourceComputeGetLog(), - "decort_kvmvm_pfw_list": kvmvm.DataSourceComputePfwList(), - "decort_kvmvm_user_list": kvmvm.DataSourceComputeUserList(), - "decort_kvmvm_snapshot_usage": kvmvm.DataSourceComputeSnapshotUsage(), - "decort_k8s": k8s.DataSourceK8s(), - "decort_k8s_list": k8s.DataSourceK8sList(), - "decort_k8s_list_deleted": 
k8s.DataSourceK8sListDeleted(), - "decort_k8s_wg": k8s.DataSourceK8sWg(), - "decort_k8s_wg_list": k8s.DataSourceK8sWgList(), - "decort_k8s_computes": k8s.DataSourceK8sComputes(), - "decort_k8ci_list": k8s.DataSourceK8CIList(), - "decort_vins": vins.DataSourceVins(), - "decort_vins_list": vins.DataSourceVinsList(), - "decort_vins_audits": vins.DataSourceVinsAudits(), - "decort_vins_ip_list": vins.DataSourceVinsIpList(), - "decort_vins_list_deleted": vins.DataSourceVinsListDeleted(), - "decort_vins_ext_net_list": vins.DataSourceVinsExtNetList(), - "decort_vins_nat_rule_list": vins.DataSourceVinsNatRuleList(), - "decort_vins_static_route_list": vins.DataSourceStaticRouteList(), - "decort_vins_static_route": vins.DataSourceStaticRoute(), - "decort_snapshot_list": snapshot.DataSourceSnapshotList(), - "decort_disk": disks.DataSourceDisk(), - "decort_disk_list": disks.DataSourceDiskList(), - "decort_rg_list": rg.DataSourceRgList(), - "decort_rg_affinity_group_computes": rg.DataSourceRgAffinityGroupComputes(), - "decort_rg_affinity_groups_list": rg.DataSourceRgAffinityGroupsList(), - "decort_rg_affinity_groups_get": rg.DataSourceRgAffinityGroupsGet(), - "decort_rg_audits": rg.DataSourceRgAudits(), - "decort_rg_list_computes": rg.DataSourceRgListComputes(), - "decort_rg_list_deleted": rg.DataSourceRgListDeleted(), - "decort_rg_list_lb": rg.DataSourceRgListLb(), - "decort_rg_list_pfw": rg.DataSourceRgListPfw(), - "decort_rg_list_vins": rg.DataSourceRgListVins(), - "decort_rg_usage": rg.DataSourceRgUsage(), - "decort_disk_list_types_detailed": disks.DataSourceDiskListTypesDetailed(), - "decort_disk_list_types": disks.DataSourceDiskListTypes(), - "decort_disk_list_deleted": disks.DataSourceDiskListDeleted(), - "decort_disk_list_unattached": disks.DataSourceDiskListUnattached(), - "decort_disk_snapshot": disks.DataSourceDiskSnapshot(), - "decort_disk_snapshot_list": disks.DataSourceDiskSnapshotList(), - "decort_account_list": account.DataSourceAccountList(), - "decort_account_computes_list": account.DataSourceAccountComputesList(), - "decort_account_disks_list": account.DataSourceAccountDisksList(), - "decort_account_vins_list": account.DataSourceAccountVinsList(), - "decort_account_audits_list": account.DataSourceAccountAuditsList(), - "decort_account_rg_list": account.DataSourceAccountRGList(), - "decort_account_consumed_units": account.DataSourceAccountConsumedUnits(), - "decort_account_consumed_units_by_type": account.DataSourceAccountConsumedUnitsByType(), - "decort_account_reserved_units": account.DataSourceAccountReservedUnits(), - "decort_account_templates_list": account.DataSourceAccountTemplatessList(), - "decort_account_deleted_list": account.DataSourceAccountDeletedList(), - "decort_account_flipgroups_list": account.DataSourceAccountFlipGroupsList(), - "decort_bservice_list": bservice.DataSourceBasicServiceList(), - "decort_bservice": bservice.DataSourceBasicService(), - "decort_bservice_snapshot_list": bservice.DataSourceBasicServiceSnapshotList(), - "decort_bservice_group": bservice.DataSourceBasicServiceGroup(), - "decort_bservice_deleted_list": bservice.DataSourceBasicServiceDeletedList(), - "decort_extnet_list": extnet.DataSourceExtnetList(), - "decort_extnet_computes_list": extnet.DataSourceExtnetComputesList(), - "decort_extnet": extnet.DataSourceExtnet(), - "decort_extnet_default": extnet.DataSourceExtnetDefault(), - "decort_locations_list": locations.DataSourceLocationsList(), - "decort_location_url": locations.DataSourceLocationUrl(), - "decort_image_list": 
image.DataSourceImageList(), - "decort_image": image.DataSourceImage(), - "decort_lb": lb.DataSourceLB(), - "decort_lb_list": lb.DataSourceLBList(), - "decort_lb_list_deleted": lb.DataSourceLBListDeleted(), - "decort_flipgroup": flipgroup.DataSourceFlipgroup(), - "decort_flipgroup_list": flipgroup.DataSourceFlipGroupList(), - "decort_stack": stack.DataSourceStack(), - "decort_stack_list": stack.DataSourceStackList(), - "decort_account_resource_consumption_list": account.DataSourceAccountResourceConsumptionList(), - "decort_account_resource_consumption_get": account.DataSourceAccountResourceConsumptionGet(), - "decort_kvmvm_list_deleted": kvmvm.DataSourceComputeListDeleted(), - "decort_kvmvm_vgpu_list": kvmvm.DataSourceComputeVGPUList(), - "decort_kvmvm_pci_device_list": kvmvm.DataSourceComputePCIDeviceList(), - "decort_k8s_wg_cloud_init": k8s.DataSourceK8sWgCloudInit(), - "decort_rg_resource_consumption_list": rg.DataSourceRGResourceConsumptionList(), - "decort_rg_resource_consumption_get": rg.DataSourceRGResourceConsumptionGet(), - - "decort_cb_account": cb_account.DataSourceAccount(), - "decort_cb_account_list": cb_account.DataSourceAccountList(), - "decort_cb_account_computes_list": cb_account.DataSourceAccountComputesList(), - "decort_cb_account_deleted_list": cb_account.DataSourceAccountDeletedList(), - "decort_cb_account_disks_list": cb_account.DataSourceAccountDisksList(), - "decort_cb_account_flipgroups_list": cb_account.DataSourceAccountFlipGroupsList(), - "decort_cb_account_rg_list": cb_account.DataSourceAccountRGList(), - "decort_cb_account_vins_list": cb_account.DataSourceAccountVinsList(), - "decort_cb_account_resource_consumption_get": cb_account.DataSourceAccountResourceConsumptionGet(), - "decort_cb_account_resource_consumption_list": cb_account.DataSourceAccountResourceConsumptionList(), - "decort_cb_account_audits_list": cb_account.DataSourceAccountAuditsList(), - "decort_cb_extnet": cb_extnet.DataSourceExtnetCB(), - "decort_cb_extnet_list": cb_extnet.DataSourceExtnetListCB(), - "decort_cb_extnet_default": cb_extnet.DataSourceExtnetDefaultCB(), - "decort_cb_extnet_static_route_list": cb_extnet.DataSourceStaticRouteList(), - "decort_cb_extnet_static_route": cb_extnet.DataSourceStaticRoute(), - } -} +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/extnet" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/flipgroup" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/k8s" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/locations" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/rg" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/snapshot" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/stack" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins" + + cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account" + cb_audit "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/audit" + cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks" + cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet" + cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup" + cb_grid "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/grid" + cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image" + cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm" + cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb" + cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice" + cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg" + cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep" + cb_stack "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/stack" + cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins" + + // cb_vgpu "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vgpu" + cb_k8ci "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8ci" + cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s" +) + +func newDataSourcesMap() map[string]*schema.Resource { + return map[string]*schema.Resource{ + "decort_account": account.DataSourceAccount(), + "decort_resgroup": rg.DataSourceResgroup(), + "decort_kvmvm": kvmvm.DataSourceCompute(), + "decort_kvmvm_list": kvmvm.DataSourceComputeList(), + "decort_kvmvm_audits": kvmvm.DataSourceComputeAudits(), + "decort_kvmvm_get_audits": 
kvmvm.DataSourceComputeGetAudits(), + "decort_kvmvm_get_console_url": kvmvm.DataSourceComputeGetConsoleUrl(), + "decort_kvmvm_get_log": kvmvm.DataSourceComputeGetLog(), + "decort_kvmvm_pfw_list": kvmvm.DataSourceComputePfwList(), + "decort_kvmvm_user_list": kvmvm.DataSourceComputeUserList(), + "decort_kvmvm_snapshot_usage": kvmvm.DataSourceComputeSnapshotUsage(), + "decort_k8s": k8s.DataSourceK8s(), + "decort_k8s_list": k8s.DataSourceK8sList(), + "decort_k8s_list_deleted": k8s.DataSourceK8sListDeleted(), + "decort_k8s_wg": k8s.DataSourceK8sWg(), + "decort_k8s_wg_list": k8s.DataSourceK8sWgList(), + "decort_k8s_computes": k8s.DataSourceK8sComputes(), + "decort_k8ci_list": k8s.DataSourceK8CIList(), + "decort_vins": vins.DataSourceVins(), + "decort_vins_list": vins.DataSourceVinsList(), + "decort_vins_audits": vins.DataSourceVinsAudits(), + "decort_vins_ip_list": vins.DataSourceVinsIpList(), + "decort_vins_list_deleted": vins.DataSourceVinsListDeleted(), + "decort_vins_ext_net_list": vins.DataSourceVinsExtNetList(), + "decort_vins_nat_rule_list": vins.DataSourceVinsNatRuleList(), + "decort_vins_static_route_list": vins.DataSourceStaticRouteList(), + "decort_vins_static_route": vins.DataSourceStaticRoute(), + "decort_snapshot_list": snapshot.DataSourceSnapshotList(), + "decort_disk": disks.DataSourceDisk(), + "decort_disk_list": disks.DataSourceDiskList(), + "decort_rg_list": rg.DataSourceRgList(), + "decort_rg_affinity_group_computes": rg.DataSourceRgAffinityGroupComputes(), + "decort_rg_affinity_groups_list": rg.DataSourceRgAffinityGroupsList(), + "decort_rg_affinity_groups_get": rg.DataSourceRgAffinityGroupsGet(), + "decort_rg_audits": rg.DataSourceRgAudits(), + "decort_rg_list_computes": rg.DataSourceRgListComputes(), + "decort_rg_list_deleted": rg.DataSourceRgListDeleted(), + "decort_rg_list_lb": rg.DataSourceRgListLb(), + "decort_rg_list_pfw": rg.DataSourceRgListPfw(), + "decort_rg_list_vins": rg.DataSourceRgListVins(), + "decort_rg_usage": rg.DataSourceRgUsage(), + "decort_disk_list_types_detailed": disks.DataSourceDiskListTypesDetailed(), + "decort_disk_list_types": disks.DataSourceDiskListTypes(), + "decort_disk_list_deleted": disks.DataSourceDiskListDeleted(), + "decort_disk_list_unattached": disks.DataSourceDiskListUnattached(), + "decort_disk_snapshot": disks.DataSourceDiskSnapshot(), + "decort_disk_snapshot_list": disks.DataSourceDiskSnapshotList(), + "decort_account_list": account.DataSourceAccountList(), + "decort_account_computes_list": account.DataSourceAccountComputesList(), + "decort_account_disks_list": account.DataSourceAccountDisksList(), + "decort_account_vins_list": account.DataSourceAccountVinsList(), + "decort_account_audits_list": account.DataSourceAccountAuditsList(), + "decort_account_rg_list": account.DataSourceAccountRGList(), + "decort_account_consumed_units": account.DataSourceAccountConsumedUnits(), + "decort_account_consumed_units_by_type": account.DataSourceAccountConsumedUnitsByType(), + "decort_account_reserved_units": account.DataSourceAccountReservedUnits(), + "decort_account_templates_list": account.DataSourceAccountTemplatessList(), + "decort_account_deleted_list": account.DataSourceAccountDeletedList(), + "decort_account_flipgroups_list": account.DataSourceAccountFlipGroupsList(), + "decort_bservice_list": bservice.DataSourceBasicServiceList(), + "decort_bservice": bservice.DataSourceBasicService(), + "decort_bservice_snapshot_list": bservice.DataSourceBasicServiceSnapshotList(), + "decort_bservice_group": bservice.DataSourceBasicServiceGroup(), + 
"decort_bservice_deleted_list": bservice.DataSourceBasicServiceDeletedList(), + "decort_extnet_list": extnet.DataSourceExtnetList(), + "decort_extnet_computes_list": extnet.DataSourceExtnetComputesList(), + "decort_extnet": extnet.DataSourceExtnet(), + "decort_extnet_default": extnet.DataSourceExtnetDefault(), + "decort_locations_list": locations.DataSourceLocationsList(), + "decort_location_url": locations.DataSourceLocationUrl(), + "decort_image_list": image.DataSourceImageList(), + "decort_image": image.DataSourceImage(), + "decort_lb": lb.DataSourceLB(), + "decort_lb_list": lb.DataSourceLBList(), + "decort_lb_list_deleted": lb.DataSourceLBListDeleted(), + "decort_flipgroup": flipgroup.DataSourceFlipgroup(), + "decort_flipgroup_list": flipgroup.DataSourceFlipGroupList(), + "decort_stack": stack.DataSourceStack(), + "decort_stack_list": stack.DataSourceStackList(), + "decort_account_resource_consumption_list": account.DataSourceAccountResourceConsumptionList(), + "decort_account_resource_consumption_get": account.DataSourceAccountResourceConsumptionGet(), + "decort_kvmvm_list_deleted": kvmvm.DataSourceComputeListDeleted(), + "decort_kvmvm_vgpu_list": kvmvm.DataSourceComputeVGPUList(), + "decort_kvmvm_pci_device_list": kvmvm.DataSourceComputePCIDeviceList(), + "decort_k8s_wg_cloud_init": k8s.DataSourceK8sWgCloudInit(), + "decort_rg_resource_consumption_list": rg.DataSourceRGResourceConsumptionList(), + "decort_rg_resource_consumption_get": rg.DataSourceRGResourceConsumptionGet(), + + "decort_cb_account": cb_account.DataSourceAccount(), + "decort_cb_account_list": cb_account.DataSourceAccountList(), + "decort_cb_account_computes_list": cb_account.DataSourceAccountComputesList(), + "decort_cb_account_list_deleted": cb_account.DataSourceAccountDeletedList(), + "decort_cb_account_disks_list": cb_account.DataSourceAccountDisksList(), + "decort_cb_account_flipgroups_list": cb_account.DataSourceAccountFlipGroupsList(), + "decort_cb_account_rg_list": cb_account.DataSourceAccountRGList(), + "decort_cb_account_vins_list": cb_account.DataSourceAccountVinsList(), + "decort_cb_account_resource_consumption_get": cb_account.DataSourceAccountResourceConsumptionGet(), + "decort_cb_account_resource_consumption_list": cb_account.DataSourceAccountResourceConsumptionList(), + "decort_cb_account_audits_list": cb_account.DataSourceAccountAuditsList(), + "decort_cb_audit": cb_audit.DataSourceAudit(), + "decort_cb_audit_list": cb_audit.DataSourceAuditList(), + "decort_cb_audit_linked_jobs": cb_audit.DataSourceAuditLinkedJobs(), + "decort_cb_extnet": cb_extnet.DataSourceExtnetCB(), + "decort_cb_extnet_list": cb_extnet.DataSourceExtnetListCB(), + "decort_cb_extnet_default": cb_extnet.DataSourceExtnetDefaultCB(), + "decort_cb_extnet_static_route_list": cb_extnet.DataSourceStaticRouteList(), + "decort_cb_extnet_static_route": cb_extnet.DataSourceStaticRoute(), + "decort_cb_image": cb_image.DataSourceImage(), + "decort_cb_grid": cb_grid.DataSourceGrid(), + "decort_cb_grid_get_status": cb_grid.DataSourceGridGetStatus(), + "decort_cb_grid_post_status": cb_grid.DataSourceGridPostStatus(), + "decort_cb_grid_get_diagnosis": cb_grid.DataSourceGridGetDiagnosis(), + "decort_cb_grid_post_diagnosis": cb_grid.DataSourceGridPostDiagnosis(), + "decort_cb_grid_list": cb_grid.DataSourceGridList(), + "decort_cb_grid_list_emails": cb_grid.DataSourceGridListEmails(), + "decort_cb_grid_list_consumption": cb_grid.DataSourceGridListConsumption(), + "decort_cb_grid_get_consumption": cb_grid.DataSourceGridGetConsumption(), + 
"decort_cb_image_list": cb_image.DataSourceImageList(), + "decort_cb_image_list_stacks": cb_image.DataSourceImageListStacks(), + "decort_cb_kvmvm": cb_kvmvm.DataSourceCompute(), + "decort_cb_kvmvm_affinity_relations": cb_kvmvm.DataSourceComputeAffinityRelations(), + "decort_cb_kvmvm_audits": cb_kvmvm.DataSourceComputeAudits(), + "decort_cb_kvmvm_boot_order_get": cb_kvmvm.DataSourceComputeBootOrderGet(), + "decort_cb_kvmvm_get_audits": cb_kvmvm.DataSourceComputeGetAudits(), + "decort_cb_kvmvm_get_console_url": cb_kvmvm.DataSourceComputeGetConsoleUrl(), + "decort_cb_kvmvm_get_log": cb_kvmvm.DataSourceComputeGetLog(), + "decort_cb_kvmvm_list": cb_kvmvm.DataSourceComputeList(), + "decort_cb_kvmvm_list_deleted": cb_kvmvm.DataSourceComputeListDeleted(), + "decort_cb_kvmvm_migrate_storage_info": cb_kvmvm.DataSourceComputeMigrateStorageInfo(), + "decort_cb_kvmvm_pci_device_list": cb_kvmvm.DataSourceComputePCIDeviceList(), + "decort_cb_kvmvm_pfw_list": cb_kvmvm.DataSourceComputePfwList(), + "decort_cb_kvmvm_snapshot_list": cb_kvmvm.DataSourceComputeSnapshotList(), + "decort_cb_kvmvm_snapshot_usage": cb_kvmvm.DataSourceComputeSnapshotUsage(), + "decort_cb_kvmvm_user_list": cb_kvmvm.DataSourceComputeUserList(), + "decort_cb_kvmvm_vgpu_list": cb_kvmvm.DataSourceComputeVGPUList(), + "decort_cb_disk": cb_disks.DataSourceDisk(), + "decort_cb_disk_list": cb_disks.DataSourceDiskList(), + "decort_cb_disk_list_deleted": cb_disks.DataSourceDiskListDeleted(), + "decort_cb_disk_list_types": cb_disks.DataSourceDiskListTypes(), + "decort_cb_disk_list_types_detailed": cb_disks.DataSourceDiskListTypesDetailed(), + "decort_cb_disk_list_unattached": cb_disks.DataSourceDiskListUnattached(), + "decort_cb_disk_snapshot": cb_disks.DataSourceDiskSnapshot(), + "decort_cb_disk_snapshot_list": cb_disks.DataSourceDiskSnapshotList(), + "decort_cb_pcidevice": cb_pcidevice.DataSourcePcidevice(), + "decort_cb_pcidevice_list": cb_pcidevice.DataSourcePcideviceList(), + "decort_cb_rg": cb_rg.DataSourceResgroup(), + "decort_cb_rg_affinity_group_computes": cb_rg.DataSourceRgAffinityGroupComputes(), + "decort_cb_rg_affinity_groups_get": cb_rg.DataSourceRgAffinityGroupsGet(), + "decort_cb_rg_affinity_groups_list": cb_rg.DataSourceRgAffinityGroupsList(), + "decort_cb_rg_resource_consumption_get": cb_rg.DataSourceRGResourceConsumptionGet(), + "decort_cb_rg_resource_consumption_list": cb_rg.DataSourceRGResourceConsumptionList(), + "decort_cb_rg_audits": cb_rg.DataSourceRgAudits(), + "decort_cb_rg_list": cb_rg.DataSourceRgList(), + "decort_cb_rg_list_deleted": cb_rg.DataSourceRgListDeleted(), + "decort_cb_rg_list_computes": cb_rg.DataSourceRgListComputes(), + "decort_cb_rg_list_lb": cb_rg.DataSourceRgListLb(), + "decort_cb_rg_list_pfw": cb_rg.DataSourceRgListPfw(), + "decort_cb_rg_list_vins": cb_rg.DataSourceRgListVins(), + "decort_cb_rg_usage": cb_rg.DataSourceRgUsage(), + "decort_cb_sep_list": cb_sep.DataSourceSepList(), + "decort_cb_sep": cb_sep.DataSourceSep(), + "decort_cb_sep_consumption": cb_sep.DataSourceSepConsumption(), + "decort_cb_sep_disk_list": cb_sep.DataSourceSepDiskList(), + "decort_cb_sep_config": cb_sep.DataSourceSepConfig(), + "decort_cb_sep_pool": cb_sep.DataSourceSepPool(), + "decort_cb_lb": cb_lb.DataSourceLB(), + "decort_cb_lb_list": cb_lb.DataSourceLBList(), + "decort_cb_lb_list_deleted": cb_lb.DataSourceLBListDeleted(), + "decort_cb_flipgroup_list": cb_flipgroup.DataSourceFlipgroupList(), + "decort_cb_flipgroup": cb_flipgroup.DataSourceFlipgroup(), + "decort_cb_stack_list": cb_stack.DataSourceStacksList(), + 
"decort_cb_stack": cb_stack.DataSourceStack(), + "decort_cb_vins": cb_vins.DataSourceVins(), + "decort_cb_vins_list": cb_vins.DataSourceVinsList(), + "decort_cb_vins_audits": cb_vins.DataSourceVinsAudits(), + "decort_cb_vins_ip_list": cb_vins.DataSourceVinsIpList(), + "decort_cb_vins_list_deleted": cb_vins.DataSourceVinsListDeleted(), + "decort_cb_vins_ext_net_list": cb_vins.DataSourceVinsExtNetList(), + "decort_cb_vins_nat_rule_list": cb_vins.DataSourceVinsNatRuleList(), + "decort_cb_vins_static_route": cb_vins.DataSourceStaticRoute(), + "decort_cb_vins_static_route_list": cb_vins.DataSourceStaticRouteList(), + "decort_cb_k8ci": cb_k8ci.DataSourceK8CI(), + "decort_cb_k8ci_list": cb_k8ci.DataSourceK8CIList(), + "decort_cb_k8ci_list_deleted": cb_k8ci.DataSourceK8CIListDeleted(), + "decort_cb_k8s": cb_k8s.DataSourceK8s(), + "decort_cb_k8s_list": cb_k8s.DataSourceK8sList(), + "decort_cb_k8s_list_deleted": cb_k8s.DataSourceK8sListDeleted(), + "decort_cb_k8s_wg": cb_k8s.DataSourceK8sWg(), + "decort_cb_k8s_wg_cloud_init": cb_k8s.DataSourceK8sWgCloudInit(), + "decort_cb_k8s_wg_list": cb_k8s.DataSourceK8sWgList(), + "decort_cb_k8s_computes": cb_k8s.DataSourceK8sComputes(), + } +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 259a078..dafe852 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1,150 +1,168 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, -Kasim Baybikov, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package provider - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "golang.org/x/net/context" - - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/location" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs" -) - -func Provider() *schema.Provider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - "authenticator": { - Type: schema.TypeString, - Required: true, - StateFunc: statefuncs.StateFuncToLower, - ValidateFunc: validation.StringInSlice([]string{"decs3o", "legacy", "jwt", "bvs"}, true), // ignore case while validating - Description: "Authentication mode to use when connecting to DECORT cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.", - }, - - "oauth2_url": { - Type: schema.TypeString, - Optional: true, - StateFunc: statefuncs.StateFuncToLower, - DefaultFunc: schema.EnvDefaultFunc("DECORT_OAUTH2_URL", nil), - Description: "OAuth2 application URL in 'decs3o' and 'bvs' authentication mode.", - }, - - "controller_url": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: statefuncs.StateFuncToLower, - Description: "URL of DECORT Cloud controller to use. 
API calls will be directed to this URL.", - }, - - "user": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DECORT_USER", nil), - Description: "User name for DECORT cloud API operations in 'legacy' authentication mode.", - }, - - "password": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DECORT_PASSWORD", nil), - Description: "User password for DECORT cloud API operations in 'legacy' authentication mode.", - }, - - "bvs_user": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DECORT_BVS_USER", nil), - Description: "User name for DECORT cloud API operations in 'bvs' authentication mode.", - }, - - "bvs_password": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DECORT_BVS_PASSWORD", nil), - Description: "User password for DECORT cloud API operations in 'bvs' authentication mode.", - }, - - "domain": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DECORT_DOMAIN", nil), - Description: "User password for DECORT cloud API operations in 'bvs' authentication mode.", - }, - - "app_id": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DECORT_APP_ID", nil), - Description: "Application ID to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.", - }, - - "app_secret": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DECORT_APP_SECRET", nil), - Description: "Application secret to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.", - }, - - "jwt": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("DECORT_JWT", nil), - Description: "JWT to access DECORT cloud API in 'jwt' authentication mode.", - }, - - "allow_unverified_ssl": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only!", - }, - }, - - ResourcesMap: newResourcesMap(), - - DataSourcesMap: newDataSourcesMap(), - - ConfigureContextFunc: providerConfigure, - } -} - -func providerConfigure(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { - decsController, err := controller.ControllerConfigure(d) - if err != nil { - return nil, diag.FromErr(err) - } - - gridId, err := location.UtilityLocationGetDefaultGridID(ctx, decsController) - if err != nil { - return nil, diag.FromErr(err) - } - if gridId == 0 { - return nil, diag.FromErr(fmt.Errorf("providerConfigure: invalid default Grid ID = 0")) - } - - return decsController, nil -} +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package provider
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+	"golang.org/x/net/context"
+
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/location"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
+)
+
+func Provider() *schema.Provider {
+	return &schema.Provider{
+		Schema: map[string]*schema.Schema{
+			"authenticator": {
+				Type: schema.TypeString,
+				Required: true,
+				StateFunc: statefuncs.StateFuncToLower,
+				ValidateFunc: validation.StringInSlice([]string{"decs3o", "legacy", "jwt", "bvs"}, true), // ignore case while validating
+				Description: "Authentication mode to use when connecting to DECORT cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.",
+			},
+
+			"oauth2_url": {
+				Type: schema.TypeString,
+				Optional: true,
+				StateFunc: statefuncs.StateFuncToLower,
+				DefaultFunc: schema.EnvDefaultFunc("DECORT_OAUTH2_URL", nil),
+				Description: "OAuth2 application URL in 'decs3o' and 'bvs' authentication mode.",
+			},
+
+			"controller_url": {
+				Type: schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				StateFunc: statefuncs.StateFuncToLower,
+				Description: "URL of DECORT Cloud controller to use. API calls will be directed to this URL.",
+			},
+
+			"user": {
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("DECORT_USER", nil),
+				Description: "User name for DECORT cloud API operations in 'legacy' authentication mode.",
+			},
+
+			"password": {
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("DECORT_PASSWORD", nil),
+				Description: "User password for DECORT cloud API operations in 'legacy' authentication mode.",
+			},
+
+			"bvs_user": {
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("DECORT_BVS_USER", nil),
+				Description: "User name for DECORT cloud API operations in 'bvs' authentication mode.",
+			},
+
+			"bvs_password": {
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("DECORT_BVS_PASSWORD", nil),
+				Description: "User password for DECORT cloud API operations in 'bvs' authentication mode.",
+			},
+
+			"domain": {
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("DECORT_DOMAIN", nil),
+				Description: "Domain name for DECORT cloud API operations in 'bvs' authentication mode.",
+			},
+
+			"app_id": {
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("DECORT_APP_ID", nil),
+				Description: "Application ID to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.",
+			},
+
+			"app_secret": {
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("DECORT_APP_SECRET", nil),
+				Description: "Application secret to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.",
+			},
+
+			"jwt": {
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("DECORT_JWT", nil),
+				Description: "JWT to access DECORT cloud API in 'jwt' authentication mode.",
+			},
+
+			"allow_unverified_ssl": {
+				Type: schema.TypeBool,
+				Optional: true,
+				Default: false,
+				Description: "If true, DECORT API will not verify SSL certificates.
Use this with caution and in trusted environments only!", + }, + + "path_cfg": { + Type: schema.TypeString, + Optional: true, + Description: "The path of the configuration file entry", + }, + + "path_token": { + Type: schema.TypeString, + Optional: true, + Description: "The path of the token file entry", + }, + + "time_to_refresh": { + Type: schema.TypeInt, + Optional: true, + Description: "The number of minutes before the expiration of the token, a refresh will be made", + }, + }, + + ResourcesMap: newResourcesMap(), + + DataSourcesMap: newDataSourcesMap(), + + ConfigureContextFunc: providerConfigure, + } +} + +func providerConfigure(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { + decsController, err := controller.ControllerConfigure(d) + if err != nil { + return nil, diag.FromErr(err) + } + + gridId, err := location.UtilityLocationGetDefaultGridID(ctx, decsController) + if err != nil { + return nil, diag.FromErr(err) + } + if gridId == 0 { + return nil, diag.FromErr(fmt.Errorf("providerConfigure: invalid default Grid ID = 0")) + } + + return decsController, nil +} diff --git a/internal/provider/resource_map.go b/internal/provider/resource_map.go index e6a8ae2..90558b7 100644 --- a/internal/provider/resource_map.go +++ b/internal/provider/resource_map.go @@ -1,104 +1,103 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, -Kasim Baybikov, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
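The path_cfg, path_token and time_to_refresh arguments added above are passed through ControllerCfg into config.BVSConfig, so a 'bvs' provider block can cache its token on disk and refresh it before expiry. The sketch below is illustrative only: the file names and the refresh interval are placeholders, and the credential values can also come from the DECORT_BVS_USER, DECORT_BVS_PASSWORD and DECORT_DOMAIN environment variables.

```terraform
# Sketch of a 'bvs' provider configuration with the new token-cache settings; all values are placeholders.
provider "decort" {
  authenticator   = "bvs"
  controller_url  = "https://ds1.digitalenergy.online"
  oauth2_url      = "<bvs application url>" # or set DECORT_OAUTH2_URL
  app_id          = "<application id>"      # or set DECORT_APP_ID
  app_secret      = "<application secret>"  # or set DECORT_APP_SECRET
  bvs_user        = "<user>"                # or set DECORT_BVS_USER
  bvs_password    = "<password>"            # or set DECORT_BVS_PASSWORD
  domain          = "<domain>"              # or set DECORT_DOMAIN
  path_cfg        = "bvs_config.json"       # where the configuration is stored
  path_token      = "bvs_token.json"        # where the issued token is cached
  time_to_refresh = 5                       # refresh the token 5 minutes before it expires
}
```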
-*/ - -package provider - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/bservice" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/flipgroup" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/k8s" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/lb" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/pfw" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/rg" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/snapshot" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins" - - cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account" - // cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks" - cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet" - // cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup" - // cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image" - // cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s" - // cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm" - // cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb" - // cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice" - // cb_pfw "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pfw" - // cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg" - // cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep" - // cb_snapshot "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/snapshot" - // cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins" -) - -func newResourcesMap() map[string]*schema.Resource { - return map[string]*schema.Resource{ - "decort_resgroup": rg.ResourceResgroup(), - "decort_kvmvm": kvmvm.ResourceCompute(), - "decort_disk": disks.ResourceDisk(), - "decort_disk_snapshot": disks.ResourceDiskSnapshot(), - "decort_vins": vins.ResourceVins(), - "decort_pfw": pfw.ResourcePfw(), - "decort_k8s": k8s.ResourceK8s(), - "decort_k8s_wg": k8s.ResourceK8sWg(), - "decort_k8s_cp": k8s.ResourceK8sCP(), - "decort_snapshot": snapshot.ResourceSnapshot(), - "decort_account": account.ResourceAccount(), - "decort_bservice": bservice.ResourceBasicService(), - "decort_bservice_group": bservice.ResourceBasicServiceGroup(), - "decort_image": image.ResourceImage(), - "decort_image_virtual": image.ResourceImageVirtual(), - "decort_lb": lb.ResourceLB(), - "decort_lb_backend": 
lb.ResourceLBBackend(), - "decort_lb_backend_server": lb.ResourceLBBackendServer(), - "decort_lb_frontend": lb.ResourceLBFrontend(), - "decort_lb_frontend_bind": lb.ResourceLBFrontendBind(), - "decort_flipgroup": flipgroup.ResourceFlipgroup(), - "decort_vins_static_route": vins.ResourceStaticRoute(), - - "decort_cb_account": cb_account.ResourceAccount(), - "decort_cb_extnet": cb_extnet.ResourceExtnetCB(), - // "decort_cb_disk": cb_disks.ResourceDisk(), - // "decort_cb_image": cb_image.ResourceImage(), - // "decort_cb_virtual_image": cb_image.ResourceVirtualImage(), - // "decort_cb_cdrom_image": cb_image.ResourceCDROMImage(), - // "decort_cb_delete_images": cb_image.ResourceDeleteImages(), - // "decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(), - // "decort_cb_sep": cb_sep.ResourceSep(), - // "decort_cb_sep_config": cb_sep.ResourceSepConfig(), - // "decort_cb_resgroup": cb_rg.ResourceResgroup(), - // "decort_cb_kvmvm": cb_kvmvm.ResourceCompute(), - // "decort_cb_vins": cb_vins.ResourceVins(), - // "decort_cb_pfw": cb_pfw.ResourcePfw(), - // "decort_cb_k8s": cb_k8s.ResourceK8s(), - // "decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(), - // "decort_cb_snapshot": cb_snapshot.ResourceSnapshot(), - // "decort_cb_flipgroup": cb_flipgroup.ResourceFlipgroup(), - // "decort_cb_lb": cb_lb.ResourceLB(), - // "decort_cb_lb_backend": cb_lb.ResourceLBBackend(), - // "decort_cb_lb_backend_server": cb_lb.ResourceLBBackendServer(), - // "decort_cb_lb_frontend": cb_lb.ResourceLBFrontend(), - // "decort_cb_lb_frontend_bind": cb_lb.ResourceLBFrontendBind(), - "decort_cb_extnet_static_route": cb_extnet.ResourceStaticRoute(), - } -} +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/flipgroup" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/k8s" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/lb" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/pfw" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/rg" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/snapshot" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins" + + cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account" + cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks" + cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet" + cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup" + cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image" + cb_k8ci "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8ci" + cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s" + cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm" + cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb" + cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice" + cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg" + cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep" + cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins" +) + +func newResourcesMap() map[string]*schema.Resource { + return map[string]*schema.Resource{ + "decort_resgroup": rg.ResourceResgroup(), + "decort_kvmvm": kvmvm.ResourceCompute(), + "decort_disk": disks.ResourceDisk(), + "decort_disk_snapshot": disks.ResourceDiskSnapshot(), + "decort_vins": vins.ResourceVins(), + "decort_pfw": pfw.ResourcePfw(), + "decort_k8s": k8s.ResourceK8s(), + "decort_k8s_wg": k8s.ResourceK8sWg(), + "decort_k8s_cp": k8s.ResourceK8sCP(), + "decort_snapshot": snapshot.ResourceSnapshot(), + "decort_account": account.ResourceAccount(), + "decort_bservice": bservice.ResourceBasicService(), + "decort_bservice_group": bservice.ResourceBasicServiceGroup(), + "decort_image": image.ResourceImage(), + "decort_image_virtual": image.ResourceImageVirtual(), + "decort_lb": lb.ResourceLB(), + "decort_lb_backend": lb.ResourceLBBackend(), + "decort_lb_backend_server": lb.ResourceLBBackendServer(), + "decort_lb_frontend": lb.ResourceLBFrontend(), + "decort_lb_frontend_bind": 
lb.ResourceLBFrontendBind(), + "decort_flipgroup": flipgroup.ResourceFlipgroup(), + "decort_vins_static_route": vins.ResourceStaticRoute(), + + "decort_cb_account": cb_account.ResourceAccount(), + "decort_cb_extnet": cb_extnet.ResourceExtnetCB(), + "decort_cb_extnet_static_route": cb_extnet.ResourceStaticRoute(), + "decort_cb_disk": cb_disks.ResourceDisk(), + "decort_cb_disk_snapshot": cb_disks.ResourceDiskSnapshot(), + "decort_cb_image": cb_image.ResourceImage(), + "decort_cb_virtual_image": cb_image.ResourceVirtualImage(), + "decort_cb_cdrom_image": cb_image.ResourceCDROMImage(), + "decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(), + "decort_cb_sep": cb_sep.ResourceSep(), + "decort_cb_sep_config": cb_sep.ResourceSepConfig(), + "decort_cb_kvmvm": cb_kvmvm.ResourceCompute(), + "decort_cb_vins": cb_vins.ResourceVins(), + "decort_cb_k8ci": cb_k8ci.ResourceK8CI(), + "decort_cb_k8s_cp": cb_k8s.ResourceK8sCP(), + "decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(), + "decort_cb_vins_static_route": cb_vins.ResourceStaticRoute(), + "decort_cb_flipgroup": cb_flipgroup.ResourceFlipgroup(), + "decort_cb_lb": cb_lb.ResourceLB(), + "decort_cb_lb_backend": cb_lb.ResourceLBBackend(), + "decort_cb_lb_backend_server": cb_lb.ResourceLBBackendServer(), + "decort_cb_lb_frontend": cb_lb.ResourceLBFrontend(), + "decort_cb_lb_frontend_bind": cb_lb.ResourceLBFrontendBind(), + "decort_cb_rg": cb_rg.ResourceResgroup(), + } +} diff --git a/internal/service/cloudapi/k8s/utility_k8s_wg.go b/internal/service/cloudapi/k8s/utility_k8s_wg.go index 1b2a36f..824762d 100644 --- a/internal/service/cloudapi/k8s/utility_k8s_wg.go +++ b/internal/service/cloudapi/k8s/utility_k8s_wg.go @@ -142,7 +142,7 @@ func utilityK8sWgListCheckPresence(ctx context.Context, d *schema.ResourceData, func utilityK8sWgCloudInitCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) { c := m.(*controller.ControllerCfg) req := k8s.GetWorkerNodesMetaDataRequest{ - K8SID: uint64(d.Get("k8s_id").(int)), + K8SID: uint64(d.Get("k8s_id").(int)), WorkersGroupID: uint64(d.Get("wg_id").(int)), } diff --git a/internal/service/cloudapi/kvmvm/resource_compute.go b/internal/service/cloudapi/kvmvm/resource_compute.go index 52046a5..d799ebf 100644 --- a/internal/service/cloudapi/kvmvm/resource_compute.go +++ b/internal/service/cloudapi/kvmvm/resource_compute.go @@ -735,7 +735,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf Name: d.Get("name").(string), } - if desc, ok := d.GetOk("desc"); ok { + if desc, ok := d.GetOk("description"); ok { req.Description = desc.(string) } @@ -1613,15 +1613,15 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema { Description: "Name of this compute. 
Compute names are case sensitive and must be unique in the resource group.", }, "rg_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, // ForceNew: true, ValidateFunc: validation.IntAtLeast(1), Description: "ID of the resource group where this compute should be deployed.", }, "driver": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, // ForceNew: true, StateFunc: statefuncs.StateFuncToUpper, ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating diff --git a/internal/service/cloudbroker/account/data_source_account.go b/internal/service/cloudbroker/account/data_source_account.go index 65bd378..80cb11c 100644 --- a/internal/service/cloudbroker/account/data_source_account.go +++ b/internal/service/cloudbroker/account/data_source_account.go @@ -44,6 +44,7 @@ import ( func dataSourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { acc, err := utilityAccountCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -67,174 +68,3 @@ func DataSourceAccount() *schema.Resource { Schema: dataSourceAccountSchemaMake(), } } - -func dataSourceAccountSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - }, - "dc_location": { - Type: schema.TypeString, - Computed: true, - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "acl": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "explicit": { - Type: schema.TypeBool, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "right": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "user_group_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "company": { - Type: schema.TypeString, - Computed: true, - }, - "companyurl": { - Type: schema.TypeString, - Computed: true, - }, - "cpu_allocation_parameter": { - Type: schema.TypeString, - Computed: true, - }, - "cpu_allocation_ratio": { - Type: schema.TypeFloat, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deactivation_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "displayname": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "resource_limits": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cu_c": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_d": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_dm": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_i": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_m": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_np": { - Type: schema.TypeFloat, - Computed: true, - }, - "gpu_units": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - "resource_types": { - Type: schema.TypeList, - Computed: true, - Elem: 
&schema.Schema{ - Type: schema.TypeString, - }, - }, - "send_access_emails": { - Type: schema.TypeBool, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "uniq_pools": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "version": { - Type: schema.TypeInt, - Computed: true, - }, - "vins": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - } -} - diff --git a/internal/service/cloudbroker/account/data_source_account_audits_list.go b/internal/service/cloudbroker/account/data_source_account_audits_list.go index ae49ff8..2f5e3c9 100644 --- a/internal/service/cloudbroker/account/data_source_account_audits_list.go +++ b/internal/service/cloudbroker/account/data_source_account_audits_list.go @@ -1,109 +1,70 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceAccountAuditsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - accountAuditsList, err := utilityAccountAuditsListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenAccountAuditsList(accountAuditsList)) - - return nil -} - -func dataSourceAccountAuditsListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the account", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "Search Result", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "call": { - Type: schema.TypeString, - Computed: true, - }, - "responsetime": { - Type: schema.TypeFloat, - Computed: true, - }, - "statuscode": { - Type: schema.TypeInt, - Computed: true, - }, - "timestamp": { - Type: schema.TypeFloat, - Computed: true, - }, - "user": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - } - return res -} - -func DataSourceAccountAuditsList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceAccountAuditsListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceAccountAuditsListSchemaMake(), - } -} \ No newline at end of file +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceAccountAuditsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + accountAuditsList, err := utilityAccountAuditsListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenAccountAuditsList(accountAuditsList)) + + return nil +} + +func DataSourceAccountAuditsList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountAuditsListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountAuditsListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/data_source_account_computes_list.go b/internal/service/cloudbroker/account/data_source_account_computes_list.go index 78e61fc..62b21a6 100644 --- a/internal/service/cloudbroker/account/data_source_account_computes_list.go +++ b/internal/service/cloudbroker/account/data_source_account_computes_list.go @@ -1,225 +1,71 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceAccountComputesListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - accountComputesList, err := utilityAccountComputesListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenAccountComputesList(accountComputesList)) - d.Set("entry_count", accountComputesList.EntryCount) - - return nil -} - -func dataSourceAccountComputesListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the account", - }, - "compute_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by compute ID", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by compute name", - }, - "rg_name": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by RG name", - }, - "rg_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by RG ID", - }, - "tech_status": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by tech. status", - }, - "ip_address": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by IP address", - }, - "extnet_name": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by extnet name", - }, - "extnet_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by extnet ID", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "Search Result", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "cpus": { - Type: schema.TypeInt, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "compute_id": { - Type: schema.TypeInt, - Computed: true, - }, - "compute_name": { - Type: schema.TypeString, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "registered": { - Type: schema.TypeBool, - Computed: true, - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_name": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - }, - "total_disks_size": { - Type: schema.TypeInt, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "user_managed": { - Type: schema.TypeBool, - Computed: true, - }, - "vins_connected": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - 
Computed: true, - }, - } - return res -} - - -func DataSourceAccountComputesList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceAccountComputesListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceAccountComputesListSchemaMake(), - } -} \ No newline at end of file +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceAccountComputesListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + accountComputesList, err := utilityAccountComputesListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenAccountComputesList(accountComputesList)) + d.Set("entry_count", accountComputesList.EntryCount) + + return nil +} + +func DataSourceAccountComputesList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountComputesListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountComputesListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/data_source_account_deleted_list.go b/internal/service/cloudbroker/account/data_source_account_deleted_list.go index 61838fa..87043f4 100644 --- a/internal/service/cloudbroker/account/data_source_account_deleted_list.go +++ b/internal/service/cloudbroker/account/data_source_account_deleted_list.go @@ -1,284 +1,71 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func DataSourceAccountDeletedList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceAccountDeletedListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceAccountListDeletedSchemaMake(), - } -} - -func dataSourceAccountDeletedListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - accountDeletedList, err := utilityAccountDeletedListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenListDeleted(accountDeletedList)) - d.Set("entry_count", accountDeletedList.EntryCount) - - return nil -} - -func dataSourceAccountListDeletedSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "by_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by ID", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by name", - }, - "acl": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by ACL", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dc_location": { - Type: schema.TypeString, - Computed: true, - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "acl": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "explicit": { - Type: schema.TypeBool, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "right": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "user_group_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "company": { - Type: schema.TypeString, - Computed: true, - }, - "companyurl": { - Type: schema.TypeString, - Computed: true, - }, - "cpu_allocation_parameter": { - Type: schema.TypeString, - Computed: true, - }, - "cpu_allocation_ratio": { - Type: schema.TypeFloat, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deactivation_time": { - Type: schema.TypeFloat, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, 
- "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "displayname": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "resource_limits": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cu_c": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_d": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_dm": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_i": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_m": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_np": { - Type: schema.TypeFloat, - Computed: true, - }, - "gpu_units": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - "resource_types": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "send_access_emails": { - Type: schema.TypeBool, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "uniq_pools": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "version": { - Type: schema.TypeInt, - Computed: true, - }, - "vins": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceAccountDeletedListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + accountDeletedList, err := utilityAccountDeletedListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenListDeleted(accountDeletedList)) + d.Set("entry_count", accountDeletedList.EntryCount) + + return nil +} + +func DataSourceAccountDeletedList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountDeletedListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountListDeletedSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/data_source_account_disks_list.go b/internal/service/cloudbroker/account/data_source_account_disks_list.go index d30f2e3..09b86f8 100644 --- a/internal/service/cloudbroker/account/data_source_account_disks_list.go +++ b/internal/service/cloudbroker/account/data_source_account_disks_list.go @@ -1,151 +1,70 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ -package account - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceAccountDisksListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - accountDisksList, err := utilityAccountDisksListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenAccountDisksList(accountDisksList)) - d.Set("entry_count", accountDisksList.EntryCount) - - return nil -} - -func dataSourceAccountDisksListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the account", - }, - "disk_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by disk ID", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by disk name", - }, - "disk_max_size": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by disk max size", - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by disk type", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "Search Result", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk_id": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_name": { - Type: schema.TypeString, - Computed: true, - }, - "pool_name": { - Type: schema.TypeString, - Computed: true, - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - }, - "shareable": { - Type: schema.TypeBool, - Computed: true, - }, - "size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } - return res -} - -func DataSourceAccountDisksList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceAccountDisksListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceAccountDisksListSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. 
+ +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ +package account + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceAccountDisksListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + accountDisksList, err := utilityAccountDisksListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenAccountDisksList(accountDisksList)) + d.Set("entry_count", accountDisksList.EntryCount) + + return nil +} + +func DataSourceAccountDisksList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountDisksListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountDisksListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/data_source_account_flipgroups_list.go b/internal/service/cloudbroker/account/data_source_account_flipgroups_list.go index df151a1..b124cb0 100644 --- a/internal/service/cloudbroker/account/data_source_account_flipgroups_list.go +++ b/internal/service/cloudbroker/account/data_source_account_flipgroups_list.go @@ -1,218 +1,71 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceAccountFlipGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - accountFlipGroupsList, err := utilityAccountFlipGroupsListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenAccountFlipGroupsList(accountFlipGroupsList)) - d.Set("entry_count", accountFlipGroupsList.EntryCount) - - return nil -} - -func dataSourceAccountFlipGroupsListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the account", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by name", - }, - "vins_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by ViNS ID", - }, - "vins_name": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by ViNS name", - }, - "extnet_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by extnet ID", - }, - "by_ip": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by IP", - }, - "flipgroup_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by flipgroup ID", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "Search Result", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "client_type": { - Type: schema.TypeString, - Computed: true, - }, - "conn_type": { - Type: schema.TypeString, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "default_gw": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "fg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "ip": { - Type: schema.TypeString, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "fg_name": { - Type: schema.TypeString, - Computed: true, - }, - "net_id": { - Type: schema.TypeInt, - Computed: true, - }, - "net_type": { - Type: schema.TypeString, - Computed: true, - }, - "netmask": { - Type: schema.TypeInt, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } - return res -} - -func DataSourceAccountFlipGroupsList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: 
dataSourceAccountFlipGroupsListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceAccountFlipGroupsListSchemaMake(), - } -} \ No newline at end of file +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceAccountFlipGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + accountFlipGroupsList, err := utilityAccountFlipGroupsListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenAccountFlipGroupsList(accountFlipGroupsList)) + d.Set("entry_count", accountFlipGroupsList.EntryCount) + + return nil +} + +func DataSourceAccountFlipGroupsList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountFlipGroupsListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountFlipGroupsListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/data_source_account_get_resource_consumption.go b/internal/service/cloudbroker/account/data_source_account_get_resource_consumption.go index baef099..4533b39 100644 --- a/internal/service/cloudbroker/account/data_source_account_get_resource_consumption.go +++ b/internal/service/cloudbroker/account/data_source_account_get_resource_consumption.go @@ -44,6 +44,7 @@ import ( func dataSourceAccountResourceConsumptionGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { accountResourceConsumptionRec, err := utilityAccountResourceConsumptionGetCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -53,173 +54,6 @@ func dataSourceAccountResourceConsumptionGetRead(ctx context.Context, d *schema. 
return nil } -func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - }, - "consumed": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeFloat, - Computed: true, - }, - "disksizemax": { - Type: schema.TypeFloat, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "reserved": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeFloat, - Computed: true, - }, - "disksizemax": { - Type: schema.TypeFloat, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "resource_limits": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cu_c": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_d": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_dm": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_i": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_m": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_np": { - Type: schema.TypeFloat, - Computed: true, - }, - "gpu_units": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - } - - return res -} - func DataSourceAccountResourceConsumptionGet() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/account/data_source_account_list.go b/internal/service/cloudbroker/account/data_source_account_list.go index f038320..e506c44 100644 --- a/internal/service/cloudbroker/account/data_source_account_list.go +++ b/internal/service/cloudbroker/account/data_source_account_list.go @@ -1,292 +1,72 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, -Nikita Sorokin, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func DataSourceAccountList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceAccountListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceAccountListSchemaMake(), - } -} - -func dataSourceAccountListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - accountList, err := utilityAccountListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenAccountList(accountList)) - d.Set("entry_count", accountList.EntryCount) - - return nil -} - -func dataSourceAccountListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "by_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by ID", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by name", - }, - "acl": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by ACL", - }, - "status": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by status", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dc_location": { - Type: schema.TypeString, - Computed: true, - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "acl": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "explicit": { - Type: schema.TypeBool, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "right": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "user_group_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "company": { - Type: schema.TypeString, - Computed: true, - }, - "companyurl": { - Type: schema.TypeString, - Computed: true, - }, - "cpu_allocation_parameter": { - Type: schema.TypeString, - Computed: true, - }, - 
"cpu_allocation_ratio": { - Type: schema.TypeFloat, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deactivation_time": { - Type: schema.TypeFloat, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "displayname": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "resource_limits": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cu_c": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_d": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_dm": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_i": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_m": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_np": { - Type: schema.TypeFloat, - Computed: true, - }, - "gpu_units": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - "resource_types": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "send_access_emails": { - Type: schema.TypeBool, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "uniq_pools": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "version": { - Type: schema.TypeInt, - Computed: true, - }, - "vins": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } - return res -} - +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Nikita Sorokin, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceAccountListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + accountList, err := utilityAccountListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenAccountList(accountList)) + d.Set("entry_count", accountList.EntryCount) + + return nil +} + +func DataSourceAccountList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/data_source_account_resource_consumption_list.go b/internal/service/cloudbroker/account/data_source_account_resource_consumption_list.go index a58f212..ba54fcd 100644 --- a/internal/service/cloudbroker/account/data_source_account_resource_consumption_list.go +++ b/internal/service/cloudbroker/account/data_source_account_resource_consumption_list.go @@ -44,6 +44,7 @@ import ( func dataSourceAccountResourceConsumptionListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { accountResourceConsumptionList, err := utilityAccountResourceConsumptionListCheckPresence(ctx, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -54,148 +55,6 @@ func dataSourceAccountResourceConsumptionListRead(ctx context.Context, d *schema return nil } -func dataSourceAccountResourceConsumptionListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "consumed": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeFloat, - Computed: true, - }, - "disksizemax": { - Type: schema.TypeFloat, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "reserved": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeFloat, - Computed: true, - }, - "disksizemax": { - Type: schema.TypeFloat, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - 
"exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } - return res -} - func DataSourceAccountResourceConsumptionList() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/account/data_source_account_rg_list.go b/internal/service/cloudbroker/account/data_source_account_rg_list.go index c30390e..a2dfda5 100644 --- a/internal/service/cloudbroker/account/data_source_account_rg_list.go +++ b/internal/service/cloudbroker/account/data_source_account_rg_list.go @@ -1,362 +1,71 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func DataSourceAccountRGList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceAccountRGListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceAccountRGListSchemaMake(), - } -} - -func dataSourceAccountRGListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - accountRGList, err := utilityAccountRGListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenAccountRGList(accountRGList)) - d.Set("entry_count", accountRGList.EntryCount) - - return nil -} - -func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the account", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "rg_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by RG ID", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by name", - }, - "vins_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by ViNS ID", - }, - "vm_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by VM ID", - }, - - "status": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by status", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "Search Result", - Elem: &schema.Resource{ - Schema: dataSourceAccountRGSchemaMake(), - }, - }, - "entry_count": { - Type: schema.TypeInt, - Optional: true, - }, - } - return res -} - -func dataSourceAccountRGSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "computes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "started": { - Type: schema.TypeInt, - Computed: true, - }, - "stopped": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "resources": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "consumed": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeFloat, - Computed: true, - }, - "disksizemax": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - 
"disk_size_max": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "limits": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "disksizemax": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "reserved": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disksize": { - Type: schema.TypeInt, - Computed: true, - }, - "disksizemax": { - Type: schema.TypeInt, - Computed: true, - }, - "extips": { - Type: schema.TypeInt, - Computed: true, - }, - "exttraffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_name": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "vinses": { - Type: schema.TypeInt, - Computed: true, - }, - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceAccountRGListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + accountRGList, err := utilityAccountRGListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenAccountRGList(accountRGList)) + d.Set("entry_count", accountRGList.EntryCount) + + return nil +} + +func DataSourceAccountRGList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountRGListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountRGListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/data_source_account_vins_list.go b/internal/service/cloudbroker/account/data_source_account_vins_list.go index d87228f..391c79a 100644 --- a/internal/service/cloudbroker/account/data_source_account_vins_list.go +++ b/internal/service/cloudbroker/account/data_source_account_vins_list.go @@ -1,192 +1,71 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceAccountVinsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - accountVinsList, err := utilityAccountVinsListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenAccountVinsList(accountVinsList)) - d.Set("entry_count", accountVinsList.EntryCount) - - return nil -} - -func dataSourceAccountVinsListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the account", - }, - "vins_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by ViNS ID", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by name", - }, - "rg_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Filter by RG ID", - }, - "ext_ip": { - Type: schema.TypeString, - Optional: true, - Description: "Filter by external IP", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "Search Result", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "computes": { - Type: schema.TypeInt, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "external_ip": { - Type: schema.TypeString, - Computed: true, - }, - "vin_id": { - Type: schema.TypeInt, - Computed: true, - }, - "vin_name": { - Type: schema.TypeString, - Computed: true, - }, - "network": { - Type: schema.TypeString, - Computed: true, - }, - "pri_vnf_dev_id": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_name": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } - return res -} - -func DataSourceAccountVinsList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceAccountVinsListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceAccountVinsListSchemaMake(), - } -} \ No newline at end of file +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package account + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceAccountVinsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + accountVinsList, err := utilityAccountVinsListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenAccountVinsList(accountVinsList)) + d.Set("entry_count", accountVinsList.EntryCount) + + return nil +} + +func DataSourceAccountVinsList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceAccountVinsListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceAccountVinsListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/flattens.go b/internal/service/cloudbroker/account/flattens.go index a5576be..61c34d2 100644 --- a/internal/service/cloudbroker/account/flattens.go +++ b/internal/service/cloudbroker/account/flattens.go @@ -62,7 +62,7 @@ func flattenDataAccount(d *schema.ResourceData, acc *account.RecordAccount) { } func flattenAccountRGList(argl *account.ListRG) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(argl.Data)) for _, arg := range argl.Data { temp := map[string]interface{}{ "computes": flattenAccRGComputes(arg.Computes), @@ -139,7 +139,7 @@ func flattenAccResource(r account.Resource) []map[string]interface{} { } func flattenAccAcl(acls []account.ACL) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(acls)) for _, acls := range acls { temp := map[string]interface{}{ "explicit": acls.Explicit, @@ -187,7 +187,7 @@ func flattenRgAcl(rgAcls []account.ACL) []map[string]interface{} { } func flattenListDeleted(al *account.ListAccounts) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(al.Data)) for _, acc := range al.Data { temp := map[string]interface{}{ "dc_location": acc.DCLocation, @@ -222,7 +222,7 @@ func flattenListDeleted(al *account.ListAccounts) []map[string]interface{} { } func flattenAccountList(al *account.ListAccounts) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(al.Data)) for _, acc := range al.Data { temp := map[string]interface{}{ "dc_location": acc.DCLocation, @@ -257,7 
+257,7 @@ func flattenAccountList(al *account.ListAccounts) []map[string]interface{} { } func flattenAccountAuditsList(aal account.ListAudits) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(aal)) for _, aa := range aal { temp := map[string]interface{}{ "call": aa.Call, @@ -272,7 +272,7 @@ func flattenAccountAuditsList(aal account.ListAudits) []map[string]interface{} { } func flattenAccountComputesList(acl *account.ListComputes) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(acl.Data)) for _, acc := range acl.Data { temp := map[string]interface{}{ "account_id": acc.AccountID, @@ -302,7 +302,7 @@ func flattenAccountComputesList(acl *account.ListComputes) []map[string]interfac } func flattenAccountDisksList(adl *account.ListDisks) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(adl.Data)) for _, ad := range adl.Data { temp := map[string]interface{}{ "disk_id": ad.ID, @@ -319,7 +319,7 @@ func flattenAccountDisksList(adl *account.ListDisks) []map[string]interface{} { } func flattenAccountFlipGroupsList(afgl *account.ListFLIPGroups) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(afgl.Data)) for _, afg := range afgl.Data { temp := map[string]interface{}{ "account_id": afg.AccountID, @@ -350,7 +350,7 @@ func flattenAccountFlipGroupsList(afgl *account.ListFLIPGroups) []map[string]int } func flattenAccountVinsList(avl *account.ListVINS) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(avl.Data)) for _, av := range avl.Data { temp := map[string]interface{}{ "account_id": av.AccountID, @@ -384,7 +384,7 @@ func flattenResourceConsumption(d *schema.ResourceData, acc *account.RecordResou } func flattenAccountSeps(seps map[string]map[string]account.DiskUsage) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + res := make([]map[string]interface{}, 0, len(seps)) for sepKey, sepVal := range seps { for dataKey, dataVal := range sepVal { temp := map[string]interface{}{ diff --git a/internal/service/cloudbroker/account/resource_account.go b/internal/service/cloudbroker/account/resource_account.go index faefeed..eed1de9 100644 --- a/internal/service/cloudbroker/account/resource_account.go +++ b/internal/service/cloudbroker/account/resource_account.go @@ -1,541 +1,342 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package account - -import ( - "context" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" -) - -func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourseAccountCreate") - - c := m.(*controller.ControllerCfg) - - req := account.CreateRequest{} - - req.Name = d.Get("account_name").(string) - req.Username = d.Get("username").(string) - - if emailaddress, ok := d.GetOk("emailaddress"); ok { - req.EmailAddress = emailaddress.(string) - } - - if sendAccessEmails, ok := d.GetOk("send_access_emails"); ok { - req.SendAccessEmails = sendAccessEmails.(bool) - } - - if uniqPools, ok := d.GetOk("uniq_pools"); ok { - uniqPools := uniqPools.([]interface{}) - for _, pool := range uniqPools { - req.UniqPools = append(req.UniqPools, pool.(string)) - } - } - - if resLimits, ok := d.GetOk("resource_limits"); ok { - resLimits := resLimits.([]interface{})[0] - resLimitsConv := resLimits.(map[string]interface{}) - if resLimitsConv["cu_m"] != nil { - maxMemCap := int64(resLimitsConv["cu_m"].(float64)) - if maxMemCap == 0 { - req.MaxMemoryCapacity = -1 - } else { - req.MaxMemoryCapacity = maxMemCap - } - } - if resLimitsConv["cu_dm"] != nil { - maxDiskCap := int64(resLimitsConv["cu_dm"].(float64)) - if maxDiskCap == 0 { - req.MaxVDiskCapacity = -1 - } else { - req.MaxVDiskCapacity = maxDiskCap - } - } - if resLimitsConv["cu_c"] != nil { - maxCPUCap := int64(resLimitsConv["cu_c"].(float64)) - if maxCPUCap == 0 { - req.MaxCPUCapacity = -1 - } else { - req.MaxCPUCapacity = maxCPUCap - } - } - if resLimitsConv["cu_i"] != nil { - maxNumPublicIP := int64(resLimitsConv["cu_i"].(float64)) - if maxNumPublicIP == 0 { - req.MaxNumPublicIP = -1 - } else { - req.MaxNumPublicIP = maxNumPublicIP - } - } - if resLimitsConv["cu_np"] != nil { - maxNP := int64(resLimitsConv["cu_np"].(float64)) - if maxNP == 0 { - req.MaxNetworkPeerTransfer = -1 - } else { - req.MaxNetworkPeerTransfer = maxNP - } - } - if resLimitsConv["gpu_units"] != nil { - gpuUnits := int64(resLimitsConv["gpu_units"].(float64)) - if gpuUnits == 0 { - req.GPUUnits = -1 - } else { - req.GPUUnits = gpuUnits - } - } - } - - accountId, err := c.CloudBroker().Account().Create(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId(strconv.FormatUint(accountId, 10)) - - var w dc.Warnings - - if users, ok := d.GetOk("users"); ok { - addedUsers := users.([]interface{}) - - for _, user := range addedUsers { - userConv := user.(map[string]interface{}) - - req := account.AddUserRequest{ - AccountID: accountId, - Username: userConv["user_id"].(string), - AccessType: userConv["access_type"].(string), - } - - _, err := c.CloudBroker().Account().AddUser(ctx, req) - if err != nil { - w.Add(err) - } - } - } - - if cpuAllocationParameter, ok := d.GetOk("cpu_allocation_parameter"); ok { - cpuAllocationParameter := cpuAllocationParameter.(string) - - req := account.SetCPUAllocationParameterRequest{ - 
AccountID: accountId, - StrictLoose: cpuAllocationParameter, - } - - log.Debugf("setting account cpu allocation parameter") - _, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, req) - if err != nil { - w.Add(err) - } - } - - if cpuAllocationRatio, ok := d.GetOk("cpu_allocation_ratio"); ok { - cpuAllocationRatio := cpuAllocationRatio.(float64) - - req := account.SetCPUAllocationRatioRequest{ - AccountID: accountId, - Ratio: cpuAllocationRatio, - } - - log.Debugf("setting account cpu allocation ratio") - _, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, req) - if err != nil { - w.Add(err) - } - } - - if !d.Get("enable").(bool) { - _, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{ - AccountID: accountId, - }) - - if err != nil { - w.Add(err) - } - } - - diags := resourceAccountRead(ctx, d, m) - diags = append(diags, w.Get()...) - - return diags -} - -func resourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceAccountRead") - - acc, err := utilityAccountCheckPresence(ctx, d, m) - if err != nil { - d.SetId("") - return diag.FromErr(err) - } - - flattenResourceAccount(d, acc) - - return nil -} - -func resourceAccountDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceAccountDelete") - - accountData, err := utilityAccountCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - c := m.(*controller.ControllerCfg) - req := account.DeleteRequest{ - AccountID: accountData.ID, - Permanently: d.Get("permanently").(bool), - } - - _, err = c.CloudBroker().Account().Delete(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId("") - - return nil -} - -func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceAccountUpdate") - c := m.(*controller.ControllerCfg) - - acc, err := utilityAccountCheckPresence(ctx, d, m) - if err != nil { - d.SetId("") - return diag.FromErr(err) - } - - accountId, _ := strconv.ParseUint(d.Id(), 10, 64) - - hasChanged := false - - switch acc.Status { - case status.Destroyed: - d.SetId("") - // return resourceAccountCreate(ctx, d, m) - return diag.Errorf("The resource cannot be updated because it has been destroyed") - case status.Destroying: - return diag.Errorf("The account is in progress with status: %s", acc.Status) - case status.Deleted: - _, err := c.CloudBroker().Account().Restore(ctx, account.RestoreRequest{ - AccountID: accountId, - }) - - if err != nil { - return diag.FromErr(err) - } - - hasChanged = true - case status.Disabled: - log.Debugf("The account is in status: %s, troubles may occur with update. 
Please, enable account first.", acc.Status) - case status.Confirmed: - } - - if hasChanged { - acc, err = utilityAccountCheckPresence(ctx, d, m) - if err != nil { - d.SetId("") - return diag.FromErr(err) - } - } - - if d.HasChange("enable") { - enable := d.Get("enable").(bool) - - if enable && acc.Status == status.Disabled { - _, err := c.CloudBroker().Account().Enable(ctx, account.EnableRequest{ - AccountID: accountId, - }) - - if err != nil { - return diag.FromErr(err) - } - } else if !enable && acc.Status == status.Enabled { - _, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{ - AccountID: accountId, - }) - - if err != nil { - return diag.FromErr(err) - } - } - } - - req := account.UpdateRequest{ - AccountID: accountId, - } - - updated := false - - if d.HasChange("account_name") { - req.Name = d.Get("account_name").(string) - updated = true - } - - if d.HasChange("send_access_emails") { - req.SendAccessEmails = d.Get("send_access_emails").(bool) - updated = true - } - - if d.HasChange("uniq_pools") { - uniq_pools := d.Get("uniq_pools").([]interface{}) - - for _, pool := range uniq_pools { - req.UniqPools = append(req.UniqPools, pool.(string)) - } - - updated = true - } - - if d.HasChange("resource_limits") { - resLimit := d.Get("resource_limits").([]interface{})[0] - resLimitConv := resLimit.(map[string]interface{}) - - if resLimitConv["cu_m"] != nil { - maxMemCap := int(resLimitConv["cu_m"].(float64)) - if maxMemCap == 0 { - req.MaxMemoryCapacity = -1 - } else { - req.MaxMemoryCapacity = int64(maxMemCap) - } - } - if resLimitConv["cu_dm"] != nil { - maxDiskCap := int(resLimitConv["cu_dm"].(float64)) - if maxDiskCap == 0 { - req.MaxVDiskCapacity = -1 - } else { - req.MaxVDiskCapacity = int64(maxDiskCap) - } - } - if resLimitConv["cu_c"] != nil { - maxCPUCap := int(resLimitConv["cu_c"].(float64)) - if maxCPUCap == 0 { - req.MaxCPUCapacity = -1 - } else { - req.MaxCPUCapacity = int64(maxCPUCap) - } - } - if resLimitConv["cu_i"] != nil { - maxNumPublicIP := int(resLimitConv["cu_i"].(float64)) - if maxNumPublicIP == 0 { - req.MaxNumPublicIP = -1 - } else { - req.MaxNumPublicIP = int64(maxNumPublicIP) - } - } - if resLimitConv["cu_np"] != nil { - maxNP := int(resLimitConv["cu_np"].(float64)) - if maxNP == 0 { - req.MaxNetworkPeerTransfer = -1 - } else { - req.MaxNetworkPeerTransfer = int64(maxNP) - } - } - if resLimitConv["gpu_units"] != nil { - gpuUnits := int(resLimitConv["gpu_units"].(float64)) - if gpuUnits == 0 { - req.GPUUnits = -1 - } else { - req.GPUUnits = int64(gpuUnits) - } - } - - updated = true - } - - if updated { - _, err := c.CloudBroker().Account().Update(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("cpu_allocation_parameter") { - cpuAllocationParameter := d.Get("cpu_allocation_parameter").(string) - - _, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, account.SetCPUAllocationParameterRequest{ - AccountID: acc.ID, - StrictLoose: cpuAllocationParameter, - }) - - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("cpu_allocation_ratio") { - cpuAllocacationRatio := d.Get("cpu_allocation_ratio").(float64) - - _, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, account.SetCPUAllocationRatioRequest{ - AccountID: acc.ID, - Ratio: cpuAllocacationRatio, - }) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("users") { - deletedUsers := make([]interface{}, 0) - addedUsers := make([]interface{}, 0) - updatedUsers := make([]interface{}, 0) - - old, new := 
d.GetChange("users") - oldConv := old.([]interface{}) - newConv := new.([]interface{}) - for _, el := range oldConv { - if !isContainsUser(newConv, el) { - deletedUsers = append(deletedUsers, el) - } - } - - for _, el := range newConv { - if !isContainsUser(oldConv, el) { - duplicate := false - for _, user := range acc.ACL { - if user.UserGroupID == el.(map[string]interface{})["user_id"].(string) { - duplicate = true - } - } - if !duplicate { - addedUsers = append(addedUsers, el) - } else if isChangedUser(oldConv, el) { - updatedUsers = append(updatedUsers, el) - } - } - } - for _, user := range deletedUsers { - userConv := user.(map[string]interface{}) - - _, err := c.CloudBroker().Account().DeleteUser(ctx, account.DeleteUserRequest{ - AccountID: accountId, - UserName: userConv["user_id"].(string), - RecursiveDelete: userConv["recursive_delete"].(bool), - }) - - if err != nil { - return diag.FromErr(err) - } - } - - for _, user := range addedUsers { - userConv := user.(map[string]interface{}) - - _, err := c.CloudBroker().Account().AddUser(ctx, account.AddUserRequest{ - AccountID: accountId, - Username: userConv["user_id"].(string), - AccessType: strings.ToUpper(userConv["access_type"].(string)), - }) - - if err != nil { - return diag.FromErr(err) - } - } - - for _, user := range updatedUsers { - userConv := user.(map[string]interface{}) - - _, err := c.CloudBroker().Account().UpdateUser(ctx, account.UpdateUserRequest{ - AccountID: accountId, - UserID: userConv["user_id"].(string), - AccessType: strings.ToUpper(userConv["access_type"].(string)), - }) - - if err != nil { - return diag.FromErr(err) - } - } - } - - return resourceAccountRead(ctx, d, m) -} - -func isContainsUser(els []interface{}, el interface{}) bool { - for _, elOld := range els { - elOldConv := elOld.(map[string]interface{}) - elConv := el.(map[string]interface{}) - if elOldConv["user_id"].(string) == elConv["user_id"].(string) { - return true - } - } - return false -} - -func isChangedUser(els []interface{}, el interface{}) bool { - for _, elOld := range els { - elOldConv := elOld.(map[string]interface{}) - elConv := el.(map[string]interface{}) - if elOldConv["user_id"].(string) == elConv["user_id"].(string) && - (!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string)) || - elOldConv["recursive_delete"].(bool) != elConv["recursive_delete"].(bool)) { - return true - } - } - return false -} - -func ResourceAccount() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - CreateContext: resourceAccountCreate, - ReadContext: resourceAccountRead, - UpdateContext: resourceAccountUpdate, - DeleteContext: resourceAccountDelete, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, - }, - - Schema: resourceAccountSchemaMake(), - } -} +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
+Orchestration Technology) with Terraform by Hashicorp.
+
+Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
+
+Please see README.md to learn where to place source code so that it
+builds seamlessly.
+
+Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
+*/
+
+package account
+
+import (
+	"context"
+	"strconv"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	log "github.com/sirupsen/logrus"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
+)
+
+func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Debugf("resourceAccountCreate")
+
+	c := m.(*controller.ControllerCfg)
+
+	req := account.CreateRequest{
+		Name:     d.Get("account_name").(string),
+		Username: d.Get("username").(string),
+	}
+
+	if emailaddress, ok := d.GetOk("emailaddress"); ok {
+		req.EmailAddress = emailaddress.(string)
+	}
+
+	if sendAccessEmails, ok := d.GetOk("send_access_emails"); ok {
+		req.SendAccessEmails = sendAccessEmails.(bool)
+	}
+
+	if uniqPools, ok := d.GetOk("uniq_pools"); ok {
+		uniqPools := uniqPools.([]interface{})
+		for _, pool := range uniqPools {
+			req.UniqPools = append(req.UniqPools, pool.(string))
+		}
+	}
+
+	if resLimits, ok := d.GetOk("resource_limits"); ok {
+		resLimits := resLimits.([]interface{})[0]
+		resLimitsConv := resLimits.(map[string]interface{})
+		if resLimitsConv["cu_m"] != nil {
+			maxMemCap := int64(resLimitsConv["cu_m"].(float64))
+			if maxMemCap == 0 {
+				req.MaxMemoryCapacity = -1
+			} else {
+				req.MaxMemoryCapacity = maxMemCap
+			}
+		}
+		if resLimitsConv["cu_dm"] != nil {
+			maxDiskCap := int64(resLimitsConv["cu_dm"].(float64))
+			if maxDiskCap == 0 {
+				req.MaxVDiskCapacity = -1
+			} else {
+				req.MaxVDiskCapacity = maxDiskCap
+			}
+		}
+		if resLimitsConv["cu_c"] != nil {
+			maxCPUCap := int64(resLimitsConv["cu_c"].(float64))
+			if maxCPUCap == 0 {
+				req.MaxCPUCapacity = -1
+			} else {
+				req.MaxCPUCapacity = maxCPUCap
+			}
+		}
+		if resLimitsConv["cu_i"] != nil {
+			maxNumPublicIP := int64(resLimitsConv["cu_i"].(float64))
+			if maxNumPublicIP == 0 {
+				req.MaxNumPublicIP = -1
+			} else {
+				req.MaxNumPublicIP = maxNumPublicIP
+			}
+		}
+		if resLimitsConv["cu_np"] != nil {
+			maxNP := int64(resLimitsConv["cu_np"].(float64))
+			if maxNP == 0 {
+				req.MaxNetworkPeerTransfer = -1
+			} else {
+				req.MaxNetworkPeerTransfer = maxNP
+			}
+		}
+		if resLimitsConv["gpu_units"] != nil {
+			gpuUnits := int64(resLimitsConv["gpu_units"].(float64))
+			if gpuUnits == 0 {
+				req.GPUUnits = -1
+			} else {
+				req.GPUUnits = gpuUnits
+			}
+		}
+	}
+
+	accountId, err := c.CloudBroker().Account().Create(ctx, req)
+	if err != nil {
+		return
diag.FromErr(err) + } + + d.SetId(strconv.FormatUint(accountId, 10)) + + var w dc.Warnings + + if users, ok := d.GetOk("users"); ok { + addedUsers := users.([]interface{}) + + for _, user := range addedUsers { + userConv := user.(map[string]interface{}) + + req := account.AddUserRequest{ + AccountID: accountId, + Username: userConv["user_id"].(string), + AccessType: userConv["access_type"].(string), + } + + _, err := c.CloudBroker().Account().AddUser(ctx, req) + if err != nil { + w.Add(err) + } + } + } + + if cpuAllocationParameter, ok := d.GetOk("cpu_allocation_parameter"); ok { + cpuAllocationParameter := cpuAllocationParameter.(string) + + req := account.SetCPUAllocationParameterRequest{ + AccountID: accountId, + StrictLoose: cpuAllocationParameter, + } + + log.Debugf("setting account cpu allocation parameter") + _, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, req) + if err != nil { + w.Add(err) + } + } + + if cpuAllocationRatio, ok := d.GetOk("cpu_allocation_ratio"); ok { + cpuAllocationRatio := cpuAllocationRatio.(float64) + + req := account.SetCPUAllocationRatioRequest{ + AccountID: accountId, + Ratio: cpuAllocationRatio, + } + + log.Debugf("setting account cpu allocation ratio") + _, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, req) + if err != nil { + w.Add(err) + } + } + + if !d.Get("enable").(bool) { + _, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{ + AccountID: accountId, + }) + + if err != nil { + w.Add(err) + } + } + + return append(resourceAccountRead(ctx, d, m), w.Get()...) +} + +func resourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceAccountRead") + + acc, err := utilityAccountCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + flattenResourceAccount(d, acc) + + return nil +} + +func resourceAccountDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceAccountDelete") + + accountData, err := utilityAccountCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + c := m.(*controller.ControllerCfg) + req := account.DeleteRequest{ + AccountID: accountData.ID, + Permanently: d.Get("permanently").(bool), + } + + _, err = c.CloudBroker().Account().Delete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceAccountUpdate") + c := m.(*controller.ControllerCfg) + + acc, err := utilityAccountCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + accountId, _ := strconv.ParseUint(d.Id(), 10, 64) + + hasChanged := false + + switch acc.Status { + case status.Destroyed: + d.SetId("") + // return resourceAccountCreate(ctx, d, m) + return diag.Errorf("The resource cannot be updated because it has been destroyed") + case status.Destroying: + return diag.Errorf("The account is in progress with status: %s", acc.Status) + case status.Deleted: + if d.Get("restore").(bool) { + _, err := c.CloudBroker().Account().Restore(ctx, account.RestoreRequest{ + AccountID: accountId, + }) + + if err != nil { + return diag.FromErr(err) + } + if _, ok := d.GetOk("enable"); ok { + if err := utilityAccountEnableUpdate(ctx, d, m, acc); err != nil { + return diag.FromErr(err) + } + } + hasChanged = true + } + case status.Disabled: + log.Debugf("The account 
is in status: %s, troubles may occur with update. Please, enable account first.", acc.Status) + case status.Confirmed: + } + + if hasChanged { + acc, err = utilityAccountCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + } + + if d.HasChange("enable") { + if err := utilityAccountEnableUpdate(ctx, d, m, acc); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChanges("account_name", "send_access_emails", "uniq_pools", "resource_limits") { + if err := utilityAccountUpdate(ctx, d, m); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("cpu_allocation_parameter") { + if err := utilityAccountCPUParameterUpdate(ctx, d, m); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("cpu_allocation_ratio") { + if err := utilityAccountCPURatioUpdate(ctx, d, m); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("users") { + if err := utilityAccountUsersUpdate(ctx, d, m, acc); err != nil { + return diag.FromErr(err) + } + } + + return resourceAccountRead(ctx, d, m) +} + +func ResourceAccount() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceAccountCreate, + ReadContext: resourceAccountRead, + UpdateContext: resourceAccountUpdate, + DeleteContext: resourceAccountDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout60s, + Read: &constants.Timeout30s, + Update: &constants.Timeout60s, + Delete: &constants.Timeout60s, + Default: &constants.Timeout60s, + }, + + Schema: resourceAccountSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/account/utility_account.go b/internal/service/cloudbroker/account/utility_account.go index 94c6e7d..966025d 100644 --- a/internal/service/cloudbroker/account/utility_account.go +++ b/internal/service/cloudbroker/account/utility_account.go @@ -34,10 +34,12 @@ package account import ( "context" "strconv" + "strings" log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -63,3 +65,252 @@ func utilityAccountCheckPresence(ctx context.Context, d *schema.ResourceData, m return account, nil } + +func utilityAccountEnableUpdate(ctx context.Context, d *schema.ResourceData, m interface{}, acc *account.RecordAccount) error { + c := m.(*controller.ControllerCfg) + + enable := d.Get("enable").(bool) + + if enable && acc.Status == status.Disabled { + _, err := c.CloudBroker().Account().Enable(ctx, account.EnableRequest{ + AccountID: acc.ID, + }) + + if err != nil { + return err + } + } else if !enable && acc.Status == status.Enabled { + _, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{ + AccountID: acc.ID, + }) + + if err != nil { + return err + } + } + + return nil +} + +func utilityAccountCPUParameterUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + accountId, _ := strconv.ParseUint(d.Id(), 10, 64) + + cpuAllocationParameter := d.Get("cpu_allocation_parameter").(string) + + _, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, account.SetCPUAllocationParameterRequest{ + AccountID: accountId, + StrictLoose: cpuAllocationParameter, + }) + if err != nil { + return 
err + } + + return nil +} + +func utilityAccountCPURatioUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + accountId, _ := strconv.ParseUint(d.Id(), 10, 64) + + cpuAllocacationRatio := d.Get("cpu_allocation_ratio").(float64) + + _, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, account.SetCPUAllocationRatioRequest{ + AccountID: accountId, + Ratio: cpuAllocacationRatio, + }) + if err != nil { + return err + } + + return nil +} + +func utilityAccountUsersUpdate(ctx context.Context, d *schema.ResourceData, m interface{}, acc *account.RecordAccount) error { + c := m.(*controller.ControllerCfg) + + deletedUsers := make([]interface{}, 0) + addedUsers := make([]interface{}, 0) + updatedUsers := make([]interface{}, 0) + + old, new := d.GetChange("users") + oldConv := old.([]interface{}) + newConv := new.([]interface{}) + for _, el := range oldConv { + if !isContainsUser(newConv, el) { + deletedUsers = append(deletedUsers, el) + } + } + + for _, el := range newConv { + if !isContainsUser(oldConv, el) { + duplicate := false + for _, user := range acc.ACL { + if user.UserGroupID == el.(map[string]interface{})["user_id"].(string) { + duplicate = true + } + } + if !duplicate { + addedUsers = append(addedUsers, el) + } else if isChangedUser(oldConv, el) { + updatedUsers = append(updatedUsers, el) + } + } + } + for _, user := range deletedUsers { + userConv := user.(map[string]interface{}) + + _, err := c.CloudBroker().Account().DeleteUser(ctx, account.DeleteUserRequest{ + AccountID: acc.ID, + UserName: userConv["user_id"].(string), + RecursiveDelete: userConv["recursive_delete"].(bool), + }) + + if err != nil { + return err + } + } + + for _, user := range addedUsers { + userConv := user.(map[string]interface{}) + + _, err := c.CloudBroker().Account().AddUser(ctx, account.AddUserRequest{ + AccountID: acc.ID, + Username: userConv["user_id"].(string), + AccessType: strings.ToUpper(userConv["access_type"].(string)), + }) + + if err != nil { + return err + } + } + + for _, user := range updatedUsers { + userConv := user.(map[string]interface{}) + + _, err := c.CloudBroker().Account().UpdateUser(ctx, account.UpdateUserRequest{ + AccountID: acc.ID, + UserID: userConv["user_id"].(string), + AccessType: strings.ToUpper(userConv["access_type"].(string)), + }) + + if err != nil { + return err + } + } + + return nil +} + +func utilityAccountUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + accountId, _ := strconv.ParseUint(d.Id(), 10, 64) + + req := account.UpdateRequest{ + AccountID: accountId, + } + + if d.HasChange("account_name") { + req.Name = d.Get("account_name").(string) + } + + if d.HasChange("send_access_emails") { + req.SendAccessEmails = d.Get("send_access_emails").(bool) + } + + if d.HasChange("uniq_pools") { + uniq_pools := d.Get("uniq_pools").([]interface{}) + + for _, pool := range uniq_pools { + req.UniqPools = append(req.UniqPools, pool.(string)) + } + } + + if d.HasChange("resource_limits") { + resLimit := d.Get("resource_limits").([]interface{})[0] + resLimitConv := resLimit.(map[string]interface{}) + + if resLimitConv["cu_m"] != nil { + maxMemCap := int(resLimitConv["cu_m"].(float64)) + if maxMemCap == 0 { + req.MaxMemoryCapacity = -1 + } else { + req.MaxMemoryCapacity = int64(maxMemCap) + } + } + if resLimitConv["cu_dm"] != nil { + maxDiskCap := int(resLimitConv["cu_dm"].(float64)) + if maxDiskCap == 0 { + req.MaxVDiskCapacity = -1 + } else { + 
req.MaxVDiskCapacity = int64(maxDiskCap) + } + } + if resLimitConv["cu_c"] != nil { + maxCPUCap := int(resLimitConv["cu_c"].(float64)) + if maxCPUCap == 0 { + req.MaxCPUCapacity = -1 + } else { + req.MaxCPUCapacity = int64(maxCPUCap) + } + } + if resLimitConv["cu_i"] != nil { + maxNumPublicIP := int(resLimitConv["cu_i"].(float64)) + if maxNumPublicIP == 0 { + req.MaxNumPublicIP = -1 + } else { + req.MaxNumPublicIP = int64(maxNumPublicIP) + } + } + if resLimitConv["cu_np"] != nil { + maxNP := int(resLimitConv["cu_np"].(float64)) + if maxNP == 0 { + req.MaxNetworkPeerTransfer = -1 + } else { + req.MaxNetworkPeerTransfer = int64(maxNP) + } + } + if resLimitConv["gpu_units"] != nil { + gpuUnits := int(resLimitConv["gpu_units"].(float64)) + if gpuUnits == 0 { + req.GPUUnits = -1 + } else { + req.GPUUnits = int64(gpuUnits) + } + } + } + + _, err := c.CloudBroker().Account().Update(ctx, req) + if err != nil { + return err + } + + return nil +} + +func isContainsUser(els []interface{}, el interface{}) bool { + for _, elOld := range els { + elOldConv := elOld.(map[string]interface{}) + elConv := el.(map[string]interface{}) + if elOldConv["user_id"].(string) == elConv["user_id"].(string) { + return true + } + } + return false +} + +func isChangedUser(els []interface{}, el interface{}) bool { + for _, elOld := range els { + elOldConv := elOld.(map[string]interface{}) + elConv := el.(map[string]interface{}) + if elOldConv["user_id"].(string) == elConv["user_id"].(string) && + (!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string)) || + elOldConv["recursive_delete"].(bool) != elConv["recursive_delete"].(bool)) { + return true + } + } + return false +} diff --git a/internal/service/cloudbroker/disks/data_source_disk.go b/internal/service/cloudbroker/disks/data_source_disk.go index 5b7028a..9afc791 100644 --- a/internal/service/cloudbroker/disks/data_source_disk.go +++ b/internal/service/cloudbroker/disks/data_source_disk.go @@ -46,6 +46,7 @@ import ( func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { disk, err := utilityDiskCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -57,299 +58,6 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface return nil } -func dataSourceDiskSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "disk_id": { - Type: schema.TypeInt, - Required: true, - }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "acl": { - Type: schema.TypeString, - Computed: true, - }, - "boot_partition": { - Type: schema.TypeInt, - Computed: true, - }, - "computes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "compute_id": { - Type: schema.TypeString, - Computed: true, - }, - "compute_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "destruction_time": { - Type: schema.TypeInt, - Computed: true, - }, - "devicename": { - Type: schema.TypeString, - Computed: true, - }, - "disk_path": { - Type: schema.TypeString, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - 
"image_id": { - Type: schema.TypeInt, - Computed: true, - }, - "images": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "iotune": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "read_bytes_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "read_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - "read_iops_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "read_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - "size_iops_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "total_bytes_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "total_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - "total_iops_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "total_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - "write_bytes_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "write_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - "write_iops_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "write_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "iqn": { - Type: schema.TypeString, - Computed: true, - }, - "login": { - Type: schema.TypeString, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_name": { - Type: schema.TypeString, - Computed: true, - }, - "order": { - Type: schema.TypeInt, - Computed: true, - }, - "params": { - Type: schema.TypeString, - Computed: true, - }, - "parent_id": { - Type: schema.TypeInt, - Computed: true, - }, - "passwd": { - Type: schema.TypeString, - Computed: true, - }, - "pci_slot": { - Type: schema.TypeInt, - Computed: true, - }, - "pool": { - Type: schema.TypeString, - Computed: true, - }, - "present_to": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "purge_attempts": { - Type: schema.TypeInt, - Computed: true, - }, - "purge_time": { - Type: schema.TypeInt, - Computed: true, - }, - "reality_device_number": { - Type: schema.TypeInt, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_name": { - Type: schema.TypeString, - Computed: true, - }, - "role": { - Type: schema.TypeString, - Computed: true, - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - }, - "sep_type": { - Type: schema.TypeString, - Computed: true, - }, - "shareable": { - Type: schema.TypeString, - Computed: true, - }, - "size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "size_used": { - Type: schema.TypeFloat, - Computed: true, - }, - "snapshots": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "label": { - Type: schema.TypeString, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - }, - "snap_set_guid": { - Type: schema.TypeString, - Computed: true, - }, - "snap_set_time": { - Type: schema.TypeInt, - Computed: true, - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, 
- Computed: true, - }, - "vmid": { - Type: schema.TypeInt, - Computed: true, - }, - } - - return rets -} - func DataSourceDisk() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/disks/data_source_disk_list.go b/internal/service/cloudbroker/disks/data_source_disk_list.go index b010930..2a20b3c 100644 --- a/internal/service/cloudbroker/disks/data_source_disk_list.go +++ b/internal/service/cloudbroker/disks/data_source_disk_list.go @@ -1,442 +1,71 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package disks - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - diskList, err := utilityDiskListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenDiskList(diskList)) - d.Set("entry_count", diskList.EntryCount) - - return nil -} - -func dataSourceDiskListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "by_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Find by ID", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Find by name", - }, - "account_name": { - Type: schema.TypeString, - Optional: true, - Description: "Find by account name", - }, - "disk_max_size": { - Type: schema.TypeInt, - Optional: true, - Description: "Find by max disk size", - }, - "status": { - Type: schema.TypeString, - Optional: true, - Description: "Find by status", - }, - "shared": { - Type: schema.TypeBool, - Optional: true, - Description: "Find by shared field", - }, - "account_id": { - Type: schema.TypeInt, - Optional: true, - Description: "ID of the account the disks belong to", - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: "type of the disks", - }, - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Find by sep id", - }, - "pool": { - Type: schema.TypeString, - Optional: true, - Description: "Find by pool name", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - 
Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "acl": { - Type: schema.TypeString, - Computed: true, - }, - "boot_partition": { - Type: schema.TypeInt, - Computed: true, - }, - "computes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "compute_id": { - Type: schema.TypeString, - Computed: true, - }, - "compute_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "destruction_time": { - Type: schema.TypeInt, - Computed: true, - }, - "devicename": { - Type: schema.TypeString, - Computed: true, - }, - "disk_path": { - Type: schema.TypeString, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_id": { - Type: schema.TypeInt, - Computed: true, - }, - "image_id": { - Type: schema.TypeInt, - Computed: true, - }, - "images": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "iotune": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "read_bytes_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "read_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - "read_iops_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "read_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - "size_iops_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "total_bytes_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "total_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - "total_iops_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "total_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - "write_bytes_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "write_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - "write_iops_sec": { - Type: schema.TypeInt, - Computed: true, - }, - "write_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "iqn": { - Type: schema.TypeString, - Computed: true, - }, - "login": { - Type: schema.TypeString, - Computed: true, - }, - "machine_id": { - Type: schema.TypeInt, - Computed: true, - }, - "machine_name": { - Type: schema.TypeString, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_name": { - Type: schema.TypeString, - Computed: true, - }, - "order": { - Type: schema.TypeInt, - Computed: true, - }, - "params": { - Type: schema.TypeString, - Computed: true, - }, - "parent_id": { - Type: schema.TypeInt, - Computed: true, - }, - "passwd": { - Type: schema.TypeString, - Computed: true, - }, - "pci_slot": { - Type: schema.TypeInt, - Computed: true, - }, - "pool": { - Type: schema.TypeString, - Computed: true, - }, - "present_to": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "purge_attempts": { - Type: schema.TypeInt, - Computed: true, - }, - "purge_time": { - Type: schema.TypeInt, - Computed: true, - }, - "reality_device_number": { - Type: schema.TypeInt, - Computed: true, - }, - "reference_id": { 
- Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_name": { - Type: schema.TypeString, - Computed: true, - }, - "role": { - Type: schema.TypeString, - Computed: true, - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - }, - "sep_type": { - Type: schema.TypeString, - Computed: true, - }, - "shareable": { - Type: schema.TypeBool, - Computed: true, - }, - "size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "size_used": { - Type: schema.TypeFloat, - Computed: true, - }, - "snapshots": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "label": { - Type: schema.TypeString, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - }, - "snap_set_guid": { - Type: schema.TypeString, - Computed: true, - }, - "snap_set_time": { - Type: schema.TypeInt, - Computed: true, - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "vmid": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } - return res -} - -func DataSourceDiskList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceDiskListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceDiskListSchemaMake(), - } -} +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package disks + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + diskList, err := utilityDiskListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenDiskList(diskList)) + d.Set("entry_count", diskList.EntryCount) + + return nil +} + +func DataSourceDiskList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceDiskListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceDiskListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/disks/data_source_disk_list_types.go b/internal/service/cloudbroker/disks/data_source_disk_list_types.go index dfabdae..4e2e10d 100644 --- a/internal/service/cloudbroker/disks/data_source_disk_list_types.go +++ b/internal/service/cloudbroker/disks/data_source_disk_list_types.go @@ -12,6 +12,7 @@ import ( func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -23,24 +24,6 @@ func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m return nil } -func dataSourceDiskListTypesSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } - return res -} - func DataSourceDiskListTypes() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/disks/data_source_disk_list_types_detailed.go b/internal/service/cloudbroker/disks/data_source_disk_list_types_detailed.go index c3c43d5..49adfe2 100644 --- a/internal/service/cloudbroker/disks/data_source_disk_list_types_detailed.go +++ b/internal/service/cloudbroker/disks/data_source_disk_list_types_detailed.go @@ -12,6 +12,7 @@ import ( func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -32,53 +33,6 @@ func DataSourceDiskListTypesDetailed() *schema.Resource { Default: &constants.Timeout60s, }, - Schema: map[string]*schema.Schema{ - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pools": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, - Description: "Pool name", - }, - "system": { - Type: schema.TypeString, - Computed: true, - }, - "types": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "The types of disk 
in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", - }, - }, - }, - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - Description: "Storage endpoint provider ID to create disk", - }, - "sep_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - }, + Schema: dataSourceDiskListTypesDetailedSchemaMake(), } } diff --git a/internal/service/cloudbroker/disks/data_source_disk_list_unattached.go b/internal/service/cloudbroker/disks/data_source_disk_list_unattached.go index d3cad73..dff8895 100644 --- a/internal/service/cloudbroker/disks/data_source_disk_list_unattached.go +++ b/internal/service/cloudbroker/disks/data_source_disk_list_unattached.go @@ -45,6 +45,7 @@ import ( func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -70,401 +71,3 @@ func DataSourceDiskListUnattached() *schema.Resource { Schema: dataSourceDiskListUnattachedSchemaMake(), } } - -func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "by_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Find by ID", - }, - "account_name": { - Type: schema.TypeString, - Optional: true, - Description: "Find by account name", - }, - "disk_max_size": { - Type: schema.TypeInt, - Optional: true, - Description: "Find by max disk size", - }, - "status": { - Type: schema.TypeString, - Optional: true, - Description: "Find by status", - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: "type of the disks", - }, - "account_id": { - Type: schema.TypeInt, - Optional: true, - Description: "ID of the account the disks belong to", - }, - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Description: "ID of SEP", - }, - "pool": { - Type: schema.TypeString, - Optional: true, - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "_ckey": { - Type: schema.TypeString, - Computed: true, - Description: "CKey", - }, - "_meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "Meta parameters", - }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the account the disks belong to", - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "The name of the subscriber '(account') to whom this disk belongs", - }, - "acl": { - Type: schema.TypeString, - Computed: true, - }, - "boot_partition": { - Type: schema.TypeInt, - Computed: true, - Description: "Number of disk partitions", - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - Description: "Created time", - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - Description: "Deleted time", - }, - "desc": { - Type: schema.TypeString, - Computed: true, - Description: "Description of disk", - }, - "destruction_time": { - Type: schema.TypeInt, - Computed: true, - Description: "Time of final deletion", - }, - "disk_path": { - Type: schema.TypeString, - Computed: true, - Description: "Disk path", - }, - "gid": { - Type: 
schema.TypeInt, - Computed: true, - Description: "ID of the grid (platform)", - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - Description: "Disk ID on the storage side", - }, - "disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "The unique ID of the subscriber-owner of the disk", - }, - "image_id": { - Type: schema.TypeInt, - Computed: true, - Description: "Image ID", - }, - "images": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "IDs of images using the disk", - }, - "iotune": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "read_bytes_sec": { - Type: schema.TypeInt, - Computed: true, - Description: "Number of bytes to read per second", - }, - "read_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, - Description: "Maximum number of bytes to read", - }, - "read_iops_sec": { - Type: schema.TypeInt, - Computed: true, - Description: "Number of io read operations per second", - }, - "read_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, - Description: "Maximum number of io read operations", - }, - "size_iops_sec": { - Type: schema.TypeInt, - Computed: true, - Description: "Size of io operations", - }, - "total_bytes_sec": { - Type: schema.TypeInt, - Computed: true, - Description: "Total size bytes per second", - }, - "total_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, - Description: "Maximum total size of bytes per second", - }, - "total_iops_sec": { - Type: schema.TypeInt, - Computed: true, - Description: "Total number of io operations per second", - }, - "total_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, - Description: "Maximum total number of io operations per second", - }, - "write_bytes_sec": { - Type: schema.TypeInt, - Computed: true, - Description: "Number of bytes to write per second", - }, - "write_bytes_sec_max": { - Type: schema.TypeInt, - Computed: true, - Description: "Maximum number of bytes to write per second", - }, - "write_iops_sec": { - Type: schema.TypeInt, - Computed: true, - Description: "Number of write operations per second", - }, - "write_iops_sec_max": { - Type: schema.TypeInt, - Computed: true, - Description: "Maximum number of write operations per second", - }, - }, - }, - }, - "iqn": { - Type: schema.TypeString, - Computed: true, - Description: "Disk IQN", - }, - "login": { - Type: schema.TypeString, - Computed: true, - Description: "Login to access the disk", - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - Description: "Milestones", - }, - "disk_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of disk", - }, - "order": { - Type: schema.TypeInt, - Computed: true, - Description: "Disk order", - }, - "params": { - Type: schema.TypeString, - Computed: true, - Description: "Disk params", - }, - "parent_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the parent disk", - }, - "passwd": { - Type: schema.TypeString, - Computed: true, - Description: "Password to access the disk", - }, - "pci_slot": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the pci slot to which the disk is connected", - }, - "pool": { - Type: schema.TypeString, - Computed: true, - Description: "Pool for disk location", - }, - "present_to": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "purge_attempts": { - Type: schema.TypeInt, - Computed: true, - 
Description: "Number of deletion attempts", - }, - "purge_time": { - Type: schema.TypeInt, - Computed: true, - Description: "Time of the last deletion attempt", - }, - "reality_device_number": { - Type: schema.TypeInt, - Computed: true, - Description: "Reality device number", - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the reference to the disk", - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - Description: "Resource ID", - }, - "res_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the resource", - }, - "role": { - Type: schema.TypeString, - Computed: true, - Description: "Disk role", - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - Description: "Storage endpoint provider ID to create disk", - }, - "shareable": { - Type: schema.TypeBool, - Computed: true, - }, - "size_max": { - Type: schema.TypeInt, - Computed: true, - Description: "Size in GB", - }, - "size_used": { - Type: schema.TypeFloat, - Computed: true, - Description: "Number of used space, in GB", - }, - "snapshots": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "guid": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the snapshot", - }, - "label": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the snapshot", - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - Description: "Reference to the snapshot", - }, - "snap_set_guid": { - Type: schema.TypeString, - Computed: true, - Description: "The set snapshot ID", - }, - "snap_set_time": { - Type: schema.TypeInt, - Computed: true, - Description: "The set time of the snapshot", - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - Description: "Snapshot time", - }, - }, - }, - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: "Disk status", - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - Description: "Technical status of the disk", - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", - }, - "vmid": { - Type: schema.TypeInt, - Computed: true, - Description: "Virtual Machine ID (Deprecated)", - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } - return res -} diff --git a/internal/service/cloudbroker/disks/data_source_disk_snapshot.go b/internal/service/cloudbroker/disks/data_source_disk_snapshot.go index daa7923..8064f4b 100644 --- a/internal/service/cloudbroker/disks/data_source_disk_snapshot.go +++ b/internal/service/cloudbroker/disks/data_source_disk_snapshot.go @@ -45,6 +45,7 @@ import ( func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { disk, err := utilityDiskCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -82,48 +83,3 @@ func DataSourceDiskSnapshot() *schema.Resource { Schema: dataSourceDiskSnapshotSchemaMake(), } } - -func dataSourceDiskSnapshotSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "disk_id": { - Type: schema.TypeInt, - Required: true, - Description: "The unique ID of the subscriber-owner of the disk", - }, - "label": { - Type: schema.TypeString, - Required: true, - Description: "Name of the snapshot", - }, - "guid": { - Type: schema.TypeString, - 
Computed: true, - Description: "ID of the snapshot", - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - Description: "Snapshot time", - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - Description: "Reference to the snapshot", - }, - "snap_set_guid": { - Type: schema.TypeString, - Computed: true, - Description: "The set snapshot ID", - }, - "snap_set_time": { - Type: schema.TypeInt, - Computed: true, - Description: "The set time of the snapshot", - }, - } - return rets -} diff --git a/internal/service/cloudbroker/disks/data_source_disk_snapshot_list.go b/internal/service/cloudbroker/disks/data_source_disk_snapshot_list.go index e9471e7..eef364f 100644 --- a/internal/service/cloudbroker/disks/data_source_disk_snapshot_list.go +++ b/internal/service/cloudbroker/disks/data_source_disk_snapshot_list.go @@ -44,6 +44,7 @@ import ( func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { disk, err := utilityDiskCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -67,56 +68,3 @@ func DataSourceDiskSnapshotList() *schema.Resource { Schema: dataSourceDiskSnapshotListSchemaMake(), } } - -func dataSourceDiskSnapshotListSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "disk_id": { - Type: schema.TypeInt, - Required: true, - Description: "The unique ID of the subscriber-owner of the disk", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "label": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the snapshot", - }, - "guid": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the snapshot", - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - Description: "Snapshot time", - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - Description: "Reference to the snapshot", - }, - "snap_set_guid": { - Type: schema.TypeString, - Computed: true, - Description: "The set snapshot ID", - }, - "snap_set_time": { - Type: schema.TypeInt, - Computed: true, - Description: "The set time of the snapshot", - }, - }, - }, - }, - } - return rets -} diff --git a/internal/service/cloudbroker/disks/flattens.go b/internal/service/cloudbroker/disks/flattens.go index 291edcf..6be53b3 100644 --- a/internal/service/cloudbroker/disks/flattens.go +++ b/internal/service/cloudbroker/disks/flattens.go @@ -106,12 +106,11 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} { for _, disk := range dl.Data { diskAcl, _ := json.Marshal(disk.ACL) temp := map[string]interface{}{ - "account_id": disk.AccountID, - "account_name": disk.AccountName, - "acl": string(diskAcl), - "boot_partition": disk.BootPartition, - // "compute_id": disk.MachineID, - // "compute_name": disk.MachineName, + "account_id": disk.AccountID, + "account_name": disk.AccountName, + "acl": string(diskAcl), + "boot_partition": disk.BootPartition, + "computes": flattenDiskComputes(disk.Computes), "created_time": disk.CreatedTime, "deleted_time": disk.DeletedTime, "desc": disk.Description, diff --git a/internal/service/cloudbroker/disks/resource_check_input_values.go b/internal/service/cloudbroker/disks/resource_check_input_values.go index a27b70b..fe57df4 100644 --- a/internal/service/cloudbroker/disks/resource_check_input_values.go +++ 
b/internal/service/cloudbroker/disks/resource_check_input_values.go @@ -2,54 +2,27 @@ package disks import ( "context" - "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" ) -func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) error { - c := m.(*controller.ControllerCfg) - accountID := uint64(d.Get("account_id").(int)) - - accountList, err := c.CloudBroker().Account().List(ctx, account.ListRequest{}) - if err != nil { - return err - } - - if len(accountList.FilterByID(accountID).Data) == 0 { - return fmt.Errorf("resourceDiskCreate: can't create/update Disk because AccountID %d is not allowed or does not exist", accountID) - } - - return nil -} - -func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) error { - c := m.(*controller.ControllerCfg) +func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics { + var errs []error + accountID := uint64(d.Get("account_id").(int)) gid := uint64(d.Get("gid").(int)) - gidList, err := c.CloudBroker().Grid().List(ctx, grid.ListRequest{}) - if err != nil { - return err - } - - for _, elem := range gidList.Data { - if elem.GID == gid { - return nil - } + if err := ic.ExistAccount(ctx, accountID, c); err != nil { + errs = append(errs, err) } - return fmt.Errorf("resourceDiskCreate: can't create/update Disk because GID %d is not allowed or does not exist", gid) -} - -func checkParamsExists(ctx context.Context, d *schema.ResourceData, m interface{}) error { - err := existAccountID(ctx, d, m) - if err != nil { - return err + if err := ic.ExistGID(ctx, gid, c); err != nil { + errs = append(errs, err) } - return existGID(ctx, d, m) + return dc.ErrorsToDiagnostics(errs) } diff --git a/internal/service/cloudbroker/disks/resource_disk.go b/internal/service/cloudbroker/disks/resource_disk.go index eb5ad31..be0e027 100644 --- a/internal/service/cloudbroker/disks/resource_disk.go +++ b/internal/service/cloudbroker/disks/resource_disk.go @@ -44,16 +44,15 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" log "github.com/sirupsen/logrus" ) func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceDiskCreate: called for disk %s", d.Get("disk_name").(string)) c := m.(*controller.ControllerCfg) - err := checkParamsExists(ctx, d, m) - if err != nil { - return diag.FromErr(err) + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags } req := disks.CreateRequest{ @@ -72,6 +71,10 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface req.SSDSize = uint64(ssdSize.(int)) } + if iops, ok := d.GetOk("iops"); ok { + req.IOPS = uint64(iops.(int)) + } + if sepID, ok := d.GetOk("sep_id"); ok { req.SEPID = uint64(sepID.(int)) } @@ -82,45 +85,23 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface diskID, err := 
c.CloudBroker().Disks().Create(ctx, req) if err != nil { + d.SetId("") return diag.FromErr(err) } d.SetId(strconv.FormatUint(diskID, 10)) + d.Set("disk_id", diskID) w := dc.Warnings{} - if iotuneRaw, ok := d.GetOk("iotune"); ok { - iot := iotuneRaw.([]interface{})[0] - iotune := iot.(map[string]interface{}) - req := disks.LimitIORequest{ - DiskID: diskID, - IOPS: uint64(iotune["total_iops_sec"].(int)), - ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)), - ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)), - ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)), - ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)), - SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)), - TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)), - TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)), - TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)), - TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)), - WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)), - WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)), - WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)), - WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)), - } - - _, err := c.CloudBroker().Disks().LimitIO(ctx, req) - if err != nil { + if _, ok := d.GetOk("iotune"); ok { + if err := resourceDiskChangeIotune(ctx, d, m); err != nil { w.Add(err) } } if shareable := d.Get("shareable"); shareable.(bool) { - _, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{ - DiskID: diskID, - }) - if err != nil { + if err := resourceDiskChangeShareable(ctx, d, m); err != nil { w.Add(err) } } @@ -129,7 +110,7 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface } func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - c := m.(*controller.ControllerCfg) + log.Debugf("resourceDiskRead: called for disk_id %d", d.Get("disk_id").(int)) w := dc.Warnings{} disk, err := utilityDiskCheckPresence(ctx, d, m) @@ -144,23 +125,13 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{} case status.Destroyed, status.Purged: d.Set("disk_id", 0) d.SetId("") - return resourceDiskCreate(ctx, d, m) + return diag.Errorf("The resource cannot be read because it has been destroyed") + //return resourceDiskCreate(ctx, d, m) case status.Deleted: - hasChangeState = true - req := disks.RestoreRequest{ - DiskID: disk.ID, - } - - if reason, ok := d.GetOk("reason"); ok { - req.Reason = reason.(string) - } else { - req.Reason = "Terraform automatic restore" - } - - _, err := c.CloudBroker().Disks().Restore(ctx, req) - if err != nil { - w.Add(err) - } + //hasChangeState = true + //if err := resourceDiskRestore(ctx, d, m); err != nil { + // w.Add(err) + //} case status.Assigned: case status.Modeled: return diag.Errorf("The disk is in status: %s, please, contact support for more information", disk.Status) @@ -173,6 +144,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{} if hasChangeState { disk, err = utilityDiskCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } } @@ -183,12 +155,12 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{} } func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceDiskUpdate: called for disk_id %d", d.Get("disk_id").(int)) c := m.(*controller.ControllerCfg) w := dc.Warnings{} - err := checkParamsExists(ctx, d, m) - if err != nil { - 
return diag.FromErr(err) + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags } disk, err := utilityDiskCheckPresence(ctx, d, m) @@ -203,21 +175,11 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface case status.Destroyed, status.Purged: d.Set("disk_id", 0) d.SetId("") - return resourceDiskCreate(ctx, d, m) + return diag.Errorf("The resource cannot be updated because it has been destroyed") + //return resourceDiskCreate(ctx, d, m) case status.Deleted: hasChangeState = true - req := disks.RestoreRequest{ - DiskID: disk.ID, - } - - if reason, ok := d.GetOk("reason"); ok { - req.Reason = reason.(string) - } else { - req.Reason = "Terraform automatic restore" - } - - _, err := c.CloudBroker().Disks().Restore(ctx, req) - if err != nil { + if err := resourceDiskRestore(ctx, d, m); err != nil { return diag.FromErr(err) } case status.Assigned: @@ -239,75 +201,32 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface if d.HasChange("size_max") { oldSize, newSize := d.GetChange("size_max") - if oldSize.(int) < newSize.(int) { - log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB", - d.Id(), oldSize.(int), newSize.(int)) - - _, err := c.CloudBroker().Disks().Resize(ctx, disks.ResizeRequest{ - DiskID: disk.ID, - Size: uint64(newSize.(int)), - }) - if err != nil { - w.Add(err) - } - } else if oldSize.(int) > newSize.(int) { + if oldSize.(int) > newSize.(int) { return diag.FromErr(fmt.Errorf("resourceDiskUpdate: Disk ID %s - reducing disk size is not allowed", d.Id())) } + + log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB", + d.Id(), oldSize.(int), newSize.(int)) + if err := resourceDiskChangeSize(ctx, d, m); err != nil { + w.Add(err) + } } if d.HasChange("disk_name") { - _, err := c.CloudBroker().Disks().Rename(ctx, disks.RenameRequest{ - DiskID: disk.ID, - Name: d.Get("disk_name").(string), - }) - if err != nil { + if err := resourceDiskChangeDiskName(ctx, d, m); err != nil { w.Add(err) } } if d.HasChange("iotune") { - iot := d.Get("iotune").([]interface{})[0] - iotune := iot.(map[string]interface{}) - req := disks.LimitIORequest{ - IOPS: uint64(iotune["total_iops_sec"].(int)), - ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)), - ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)), - ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)), - ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)), - SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)), - TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)), - TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)), - TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)), - TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)), - WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)), - WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)), - WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)), - WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)), - } - - _, err := c.CloudBroker().Disks().LimitIO(ctx, req) - if err != nil { + if err := resourceDiskChangeIotune(ctx, d, m); err != nil { w.Add(err) } } if d.HasChange("shareable") { - old, new := d.GetChange("shareable") - if !old.(bool) && new.(bool) && !disk.Shareable { - _, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{ - DiskID: disk.ID, - }) - if err != nil { - w.Add(err) - } - } - if old.(bool) && !new.(bool) && disk.Shareable { - _, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{ - DiskID: disk.ID, - }) - if 
err != nil { - w.Add(err) - } + if err := resourceDiskChangeShareable(ctx, d, m); err != nil { + w.Add(err) } } @@ -315,6 +234,7 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface } func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceDiskDelete: called for disk_id %d", d.Get("disk_id").(int)) c := m.(*controller.ControllerCfg) disk, err := utilityDiskCheckPresence(ctx, d, m) @@ -340,349 +260,91 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface return nil } -func resourceDiskSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "gid": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "disk_name": { - Type: schema.TypeString, - Required: true, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false), - Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", - }, - "desc": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size_max": { - Type: schema.TypeInt, - Required: true, - }, - "ssd_size": { - Type: schema.TypeInt, - Optional: true, - }, - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "pool": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "detach": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "detach disk from machine first", - }, - "permanently": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "whether to completely delete the disk, works only with non attached disks", - }, - "reason": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "reason for an action", - }, - "shareable": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "restore": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "restore deleting disk", - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "acl": { - Type: schema.TypeString, - Computed: true, - }, - "boot_partition": { - Type: schema.TypeInt, - Computed: true, - }, - "computes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "compute_id": { - Type: schema.TypeString, - Computed: true, - }, - "compute_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "destruction_time": { - Type: schema.TypeInt, - Computed: true, - }, - "devicename": { - Type: schema.TypeString, - Computed: true, - }, - "disk_path": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_id": { - Type: schema.TypeInt, - Computed: true, - }, - "image_id": { - Type: schema.TypeInt, - Computed: true, - }, - "images": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "iotune": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "read_bytes_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "read_bytes_sec_max": { - Type: schema.TypeInt, - 
Optional: true, - Computed: true, - }, - "read_iops_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "read_iops_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "size_iops_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "total_bytes_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "total_bytes_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "total_iops_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "total_iops_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "write_bytes_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "write_bytes_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "write_iops_sec": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "write_iops_sec_max": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - }, - }, - }, - "iqn": { - Type: schema.TypeString, - Computed: true, - }, - "login": { - Type: schema.TypeString, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, +func resourceDiskChangeIotune(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) - "order": { - Type: schema.TypeInt, - Computed: true, - }, - "params": { - Type: schema.TypeString, - Computed: true, - }, - "parent_id": { - Type: schema.TypeInt, - Computed: true, - }, - "passwd": { - Type: schema.TypeString, - Computed: true, - }, - "pci_slot": { - Type: schema.TypeInt, - Computed: true, - }, - "present_to": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "purge_attempts": { - Type: schema.TypeInt, - Computed: true, - }, - "purge_time": { - Type: schema.TypeInt, - Computed: true, - }, - "reality_device_number": { - Type: schema.TypeInt, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_name": { - Type: schema.TypeString, - Computed: true, - }, - "role": { - Type: schema.TypeString, - Computed: true, - }, - "sep_type": { - Type: schema.TypeString, - Computed: true, - }, - "size_used": { - Type: schema.TypeFloat, - Computed: true, - }, - "snapshots": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "label": { - Type: schema.TypeString, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - }, - "snap_set_guid": { - Type: schema.TypeString, - Computed: true, - }, - "snap_set_time": { - Type: schema.TypeInt, - Computed: true, - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - }, - "vmid": { - Type: schema.TypeInt, - Computed: true, - }, + iotuneRaw := d.Get("iotune") + diskId := uint64(d.Get("disk_id").(int)) + + iot := iotuneRaw.([]interface{})[0] + iotune := iot.(map[string]interface{}) + req := disks.LimitIORequest{ + DiskID: diskId, + ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)), + ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)), + ReadIOPSSec: 
uint64(iotune["read_iops_sec"].(int)), + ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)), + SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)), + TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)), + TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)), + TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)), + TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)), + WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)), + WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)), + WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)), + WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)), + } + + if _, ok := iotune["total_iops_sec"]; ok { + req.IOPS = uint64(iotune["total_iops_sec"].(int)) + } else if _, ok := d.GetOk("iops"); ok { + req.IOPS = uint64(d.Get("iops").(int)) + } + + _, err := c.CloudBroker().Disks().LimitIO(ctx, req) + return err +} + +func resourceDiskChangeShareable(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + diskId := uint64(d.Get("disk_id").(int)) + shareable := d.Get("shareable").(bool) + + if shareable { + _, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId}) + return err } - return rets + _, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{DiskID: diskId}) + return err +} + +func resourceDiskRestore(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + req := disks.RestoreRequest{ + DiskID: uint64(d.Get("disk_id").(int)), + } + + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } else { + req.Reason = "Terraform automatic restore" + } + + _, err := c.CloudBroker().Disks().Restore(ctx, req) + return err +} + +func resourceDiskChangeDiskName(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + _, err := c.CloudBroker().Disks().Rename(ctx, disks.RenameRequest{ + DiskID: uint64(d.Get("disk_id").(int)), + Name: d.Get("disk_name").(string), + }) + return err +} + +func resourceDiskChangeSize(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + _, err := c.CloudBroker().Disks().Resize2(ctx, disks.ResizeRequest{ + DiskID: uint64(d.Get("disk_id").(int)), + Size: uint64(d.Get("size_max").(int)), + }) + return err } func ResourceDisk() *schema.Resource { diff --git a/internal/service/cloudbroker/disks/resource_disk_snapshot.go b/internal/service/cloudbroker/disks/resource_disk_snapshot.go index c550d88..0a48300 100644 --- a/internal/service/cloudbroker/disks/resource_disk_snapshot.go +++ b/internal/service/cloudbroker/disks/resource_disk_snapshot.go @@ -2,6 +2,7 @@ package disks import ( "context" + "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -12,35 +13,21 @@ import ( ) func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - c := m.(*controller.ControllerCfg) + log.Debugf("resourceDiskSnapshotCreate: call for disk_id %d, label %s", + d.Get("disk_id").(int), + d.Get("label").(string)) - disk, err := utilityDiskCheckPresence(ctx, d, m) + _, err := utilityDiskSnapshotCheckPresence(ctx, d, m) if err != nil { return diag.FromErr(err) } - snapshots := disk.Snapshots - snapshot := disks.ItemSnapshot{} + diskId := uint64(d.Get("disk_id").(int)) label := d.Get("label").(string) - for _, sn := range snapshots { - if label 
== sn.Label { - snapshot = sn - break - } - } - if label != snapshot.Label { - return diag.Errorf("Snapshot with label \"%v\" not found", label) - } + d.SetId(fmt.Sprintf("%d#%s", diskId, label)) if rollback := d.Get("rollback").(bool); rollback { - req := disks.SnapshotRollbackRequest{ - DiskID: disk.ID, - Label: label, - TimeStamp: uint64(d.Get("timestamp").(int)), - } - - log.Debugf("resourceDiskCreate: Snapshot rollback with label", label) - _, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req) + err := resourceDiskSnapshotChangeRollback(ctx, d, m) if err != nil { return diag.FromErr(err) } @@ -49,61 +36,30 @@ func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m i } func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - disk, err := utilityDiskCheckPresence(ctx, d, m) + log.Debugf("resourceDiskSnapshotRead: snapshot id %s", d.Id()) + + snapshot, err := utilityDiskSnapshotCheckPresence(ctx, d, m) if err != nil { d.SetId("") return diag.FromErr(err) } - snapshots := disk.Snapshots - snapshot := disks.ItemSnapshot{} - label := d.Get("label").(string) - for _, sn := range snapshots { - if label == sn.Label { - snapshot = sn - break - } - } - if label != snapshot.Label { - return diag.Errorf("Snapshot with label \"%v\" not found", label) - } - flattenDiskSnapshot(d, snapshot) return nil } func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - c := m.(*controller.ControllerCfg) - disk, err := utilityDiskCheckPresence(ctx, d, m) + log.Debugf("resourceDiskSnapshotUpdate: snapshot id %s", d.Id()) + + _, err := utilityDiskSnapshotCheckPresence(ctx, d, m) if err != nil { d.SetId("") return diag.FromErr(err) } - snapshots := disk.Snapshots - snapshot := disks.ItemSnapshot{} - label := d.Get("label").(string) - for _, sn := range snapshots { - if label == sn.Label { - snapshot = sn - break - } - } - - if label != snapshot.Label { - return diag.Errorf("Snapshot with label \"%v\" not found", label) - } - - if d.HasChange("rollback") && d.Get("rollback").(bool) == true { - req := disks.SnapshotRollbackRequest{ - DiskID: disk.ID, - Label: label, - TimeStamp: uint64(d.Get("timestamp").(int)), - } - - log.Debugf("resourceDiskUpdtae: Snapshot rollback with label", label) - _, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req) + if d.HasChange("rollback") { + err := resourceDiskSnapshotChangeRollback(ctx, d, m) if err != nil { return diag.FromErr(err) } @@ -113,16 +69,18 @@ func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m i } func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - c := m.(*controller.ControllerCfg) + log.Debugf("resourceDiskSnapshotDelete: snapshot id %s", d.Id()) - disk, err := utilityDiskCheckPresence(ctx, d, m) + _, err := utilityDiskSnapshotCheckPresence(ctx, d, m) if err != nil { d.SetId("") return diag.FromErr(err) } + c := m.(*controller.ControllerCfg) + req := disks.SnapshotDeleteRequest{ - DiskID: disk.ID, + DiskID: uint64(d.Get("disk_id").(int)), Label: d.Get("label").(string), } @@ -136,6 +94,31 @@ func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m i return nil } +func resourceDiskSnapshotChangeRollback(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + rollback := d.Get("rollback").(bool) + + if rollback { + label := d.Get("label").(string) + timestamp := 
uint64(d.Get("timestamp").(int)) + diskId := uint64(d.Get("disk_id").(int)) + + req := disks.SnapshotRollbackRequest{ + DiskID: diskId, + Label: label, + TimeStamp: timestamp, + } + + log.Debugf("resourceDiskUpdate: Snapshot rollback with label %s", label) + if _, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req); err != nil { + return err + } + } + + return nil +} + func ResourceDiskSnapshot() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, @@ -160,56 +143,3 @@ func ResourceDiskSnapshot() *schema.Resource { Schema: resourceDiskSnapshotSchemaMake(), } } - -func resourceDiskSnapshotSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "disk_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: "The unique ID of the subscriber-owner of the disk", - }, - "label": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the snapshot", - }, - "rollback": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Needed in order to make a snapshot rollback", - }, - "timestamp": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "Snapshot time", - }, - "guid": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the snapshot", - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - Description: "Reference to the snapshot", - }, - "snap_set_guid": { - Type: schema.TypeString, - Computed: true, - Description: "The set snapshot ID", - }, - "snap_set_time": { - Type: schema.TypeInt, - Computed: true, - Description: "The set time of the snapshot", - }, - } -} diff --git a/internal/service/cloudbroker/disks/utility_disk_list_deleted.go b/internal/service/cloudbroker/disks/utility_disk_list_deleted.go index 90acabb..40921a6 100644 --- a/internal/service/cloudbroker/disks/utility_disk_list_deleted.go +++ b/internal/service/cloudbroker/disks/utility_disk_list_deleted.go @@ -75,7 +75,7 @@ func utilityDiskListDeletedCheckPresence(ctx context.Context, d *schema.Resource req.Size = uint64(size.(int)) } - log.Debugf("utilityDiskListDeletedCheckPresence: load disk list") + log.Debugf("utilityDiskListDeletedCheckPresence: load disk list deleted") diskList, err := c.CloudBroker().Disks().ListDeleted(ctx, req) if err != nil { return nil, err diff --git a/internal/service/cloudbroker/disks/utility_disk_list_types.go b/internal/service/cloudbroker/disks/utility_disk_list_types.go index c4499cc..dafc4be 100644 --- a/internal/service/cloudbroker/disks/utility_disk_list_types.go +++ b/internal/service/cloudbroker/disks/utility_disk_list_types.go @@ -15,6 +15,13 @@ func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceDa Detailed: false, } + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed") typesList, err := c.CloudBroker().Disks().ListTypes(ctx, req) if err != nil { diff --git a/internal/service/cloudbroker/disks/utility_disk_list_types_detailed.go b/internal/service/cloudbroker/disks/utility_disk_list_types_detailed.go index de259f1..44ce0f3 100644 --- a/internal/service/cloudbroker/disks/utility_disk_list_types_detailed.go +++ b/internal/service/cloudbroker/disks/utility_disk_list_types_detailed.go @@ -12,10 +12,18 @@ import ( func 
utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListTypes, error) { c := m.(*controller.ControllerCfg) - log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed") - listTypesDetailed, err := c.CloudBroker().Disks().ListTypes(ctx, disks.ListTypesRequest{ + req := disks.ListTypesRequest{ Detailed: true, - }) + } + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + + log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed") + listTypesDetailed, err := c.CloudBroker().Disks().ListTypes(ctx, req) log.Debugf("%+v", listTypesDetailed.Data[0].(map[string]interface{})) diff --git a/internal/service/cloudbroker/disks/utility_disk_list_unattached.go b/internal/service/cloudbroker/disks/utility_disk_list_unattached.go index 60409e4..f3da6c9 100644 --- a/internal/service/cloudbroker/disks/utility_disk_list_unattached.go +++ b/internal/service/cloudbroker/disks/utility_disk_list_unattached.go @@ -28,7 +28,7 @@ func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.Resou if diskType, ok := d.GetOk("type"); ok { req.Type = diskType.(string) } - if accountId, ok := d.GetOk("accountId"); ok { + if accountId, ok := d.GetOk("account_id"); ok { req.AccountID = uint64(accountId.(int)) } if sepId, ok := d.GetOk("sep_id"); ok { diff --git a/internal/service/cloudbroker/extnet/data_source_extnet.go b/internal/service/cloudbroker/extnet/data_source_extnet.go index d5d8c45..2c40a19 100644 --- a/internal/service/cloudbroker/extnet/data_source_extnet.go +++ b/internal/service/cloudbroker/extnet/data_source_extnet.go @@ -45,6 +45,7 @@ import ( func dataSourceExtnetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { net, err := utilityExtnetCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -54,232 +55,6 @@ func dataSourceExtnetRead(ctx context.Context, d *schema.ResourceData, m interfa return nil } -func dataSourceExtnetSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "extnet_id": { - Type: schema.TypeInt, - Required: true, - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "meta", - }, - "default": { - Type: schema.TypeBool, - Computed: true, - }, - "default_qos": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "e_rate": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "in_burst": { - Type: schema.TypeInt, - Computed: true, - }, - "in_rate": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "free_ips": { - Type: schema.TypeInt, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "ipcidr": { - Type: schema.TypeString, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "network_id": { - Type: schema.TypeInt, - Computed: true, - }, - "ovs_bridge": { - Type: schema.TypeString, - Computed: true, - }, - "pre_reservations_num": { - Type: schema.TypeInt, - Computed: 
true, - }, - "pri_vnfdev_id": { - Type: schema.TypeInt, - Computed: true, - }, - "shared_with": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "vlan_id": { - Type: schema.TypeInt, - Computed: true, - }, - "vnfs": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dhcp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "check_ips": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "dns": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "excluded": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_type": { - Type: schema.TypeString, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "domain_name": { - Type: schema.TypeString, - Computed: true, - }, - "hostname": { - Type: schema.TypeString, - Computed: true, - }, - "ip": { - Type: schema.TypeString, - Computed: true, - }, - "mac": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "vm_id": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "gateway": { - Type: schema.TypeString, - Computed: true, - }, - "network": { - Type: schema.TypeString, - Computed: true, - }, - "prefix": { - Type: schema.TypeInt, - Computed: true, - }, - "reservations": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_type": { - Type: schema.TypeString, - Computed: true, - }, - "domain_name": { - Type: schema.TypeString, - Computed: true, - }, - "hostname": { - Type: schema.TypeString, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "ip": { - Type: schema.TypeString, - Computed: true, - }, - "mac": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "vm_id": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - } -} - func DataSourceExtnetCB() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/extnet/data_source_extnet_default.go b/internal/service/cloudbroker/extnet/data_source_extnet_default.go index 048b68c..35f88ab 100644 --- a/internal/service/cloudbroker/extnet/data_source_extnet_default.go +++ b/internal/service/cloudbroker/extnet/data_source_extnet_default.go @@ -44,26 +44,17 @@ import ( func dataSourceExtnetDefaultRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { extnetId, err := utilityExtnetDefaultCheckPresence(ctx, m) if err != nil { + d.SetId("") return diag.FromErr(err) } id := uuid.New() d.SetId(id.String()) - d.Set("extnet_id", extnetId) return nil } -func dataSourceExtnetDefaultSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "extnet_id": { - Type: schema.TypeInt, - Computed: true, - }, - } -} - func DataSourceExtnetDefaultCB() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/extnet/data_source_extnet_list.go b/internal/service/cloudbroker/extnet/data_source_extnet_list.go index 92dfd4a..cec89f4 100644 --- a/internal/service/cloudbroker/extnet/data_source_extnet_list.go +++ 
b/internal/service/cloudbroker/extnet/data_source_extnet_list.go @@ -45,6 +45,7 @@ import ( func dataSourceExtnetListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { netList, err := utilityExtnetListCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -56,189 +57,6 @@ func dataSourceExtnetListRead(ctx context.Context, d *schema.ResourceData, m int return nil } -func dataSourceExtnetListSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Find by account ID", - }, - "by_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Find by ID", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Find by name", - }, - "network": { - Type: schema.TypeString, - Optional: true, - }, - "vlan_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Find by VLAN ID", - }, - "vnfdev_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Find by VnfDEV ID", - }, - "status": { - Type: schema.TypeString, - Optional: true, - Description: "Find by status", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "meta", - }, - "default": { - Type: schema.TypeBool, - Computed: true, - }, - "default_qos": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "e_rate": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "in_burst": { - Type: schema.TypeInt, - Computed: true, - }, - "in_rate": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "free_ips": { - Type: schema.TypeInt, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "extnet_id": { - Type: schema.TypeInt, - Computed: true, - }, - "ipcidr": { - Type: schema.TypeString, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "network_id": { - Type: schema.TypeInt, - Computed: true, - }, - "ovs_bridge": { - Type: schema.TypeString, - Computed: true, - }, - "pre_reservations_num": { - Type: schema.TypeInt, - Computed: true, - }, - "pri_vnfdev_id": { - Type: schema.TypeInt, - Computed: true, - }, - "shared_with": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "vlan_id": { - Type: schema.TypeInt, - Computed: true, - }, - "vnfs": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dhcp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "check_ips": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } -} - func 
DataSourceExtnetListCB() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/extnet/data_source_extnet_static_route.go b/internal/service/cloudbroker/extnet/data_source_extnet_static_route.go index 123ae18..d707867 100644 --- a/internal/service/cloudbroker/extnet/data_source_extnet_static_route.go +++ b/internal/service/cloudbroker/extnet/data_source_extnet_static_route.go @@ -44,6 +44,7 @@ import ( func dataSourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { staticRoute, err := utilityDataStaticRouteCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -52,45 +53,6 @@ func dataSourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m in return nil } -func dataSourceStaticRouteSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "extnet_id": { - Type: schema.TypeInt, - Required: true, - Description: "Unique ID of the ExtNet", - }, - "route_id": { - Type: schema.TypeInt, - Required: true, - Description: "Unique ID of the static route", - }, - "compute_ids": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "destination": { - Type: schema.TypeString, - Computed: true, - }, - "gateway": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "netmask": { - Type: schema.TypeString, - Computed: true, - }, - } - return rets -} - func DataSourceStaticRoute() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/extnet/data_source_extnet_static_route_list.go b/internal/service/cloudbroker/extnet/data_source_extnet_static_route_list.go index 34b21e1..cd669d8 100644 --- a/internal/service/cloudbroker/extnet/data_source_extnet_static_route_list.go +++ b/internal/service/cloudbroker/extnet/data_source_extnet_static_route_list.go @@ -44,6 +44,7 @@ import ( func dataSourceStaticRouteListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { staticRouteList, err := utilityStaticRouteListCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -55,56 +56,6 @@ func dataSourceStaticRouteListRead(ctx context.Context, d *schema.ResourceData, return nil } -func dataSourceStaticRouteListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "extnet_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of ExtNet", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "compute_ids": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "destination": { - Type: schema.TypeString, - Computed: true, - }, - "gateway": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "netmask": { - Type: schema.TypeString, - Computed: true, - }, - "route_id": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - }, - } - return res -} - func DataSourceStaticRouteList() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/extnet/resource_extnet.go b/internal/service/cloudbroker/extnet/resource_extnet.go index b11cfef..5a84aff 100644 --- a/internal/service/cloudbroker/extnet/resource_extnet.go +++ 
b/internal/service/cloudbroker/extnet/resource_extnet.go @@ -43,6 +43,7 @@ import ( "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" ) @@ -130,6 +131,8 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa d.Set("extnet_id", netID) log.Debugf("cloudbroker: Extnet with id %d successfully created on platform", netID) + var w dc.Warnings + if d.Get("excluded_ips").(*schema.Set).Len() > 0 { ips := make([]string, 0) @@ -144,7 +147,36 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa _, err := c.CloudBroker().ExtNet().IPsExclude(ctx, req) if err != nil { - return diag.FromErr(err) + w.Add(err) + } + } + + if d.Get("shared_with").(*schema.Set).Len() > 0 { + for _, id := range d.Get("shared_with").(*schema.Set).List() { + req := extnet.AccessRemoveRequest{ + NetID: uint64(d.Get("extnet_id").(int)), + AccountID: uint64(id.(int)), + } + + _, err := c.CloudBroker().ExtNet().AccessRemove(ctx, req) + if err != nil { + w.Add(err) + } + } + } + + if d.Get("excluded_ips_range").(*schema.Set).Len() > 0 { + for _, ip := range d.Get("excluded_ips_range").(*schema.Set).List() { + req := extnet.IPsExcludeRangeRequest{ + NetID: uint64(d.Get("extnet_id").(int)), + IPStart: ip.(map[string]interface{})["ip_start"].(string), + IPEnd: ip.(map[string]interface{})["ip_end"].(string), + } + + _, err := c.CloudBroker().ExtNet().IPsExcludeRange(ctx, req) + if err != nil { + w.Add(err) + } } } @@ -155,7 +187,7 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa }) if err != nil { - return diag.FromErr(err) + w.Add(err) } } @@ -257,7 +289,7 @@ func resourceExtnetUpdate(ctx context.Context, d *schema.ResourceData, m interfa return diag.FromErr(err) } } - + return resourceExtnetRead(ctx, d, m) } @@ -277,360 +309,6 @@ func resourceExtnetDelete(ctx context.Context, d *schema.ResourceData, m interfa return nil } -func resourceExtnetSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "External network name", - }, - "gid": { - Type: schema.TypeInt, - Required: true, - Description: "Grid (platform) ID", - }, - "ipcidr": { - Type: schema.TypeString, - Required: true, - // ForceNew: true, - Description: "IP network CIDR", - }, - "vlan_id": { - Type: schema.TypeInt, - Required: true, - // ForceNew: true, - Description: "VLAN ID", - }, - "gateway": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "External network gateway IP address", - }, - "dns": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "List of DNS addresses", - }, - "ntp": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "List of NTP addresses", - }, - "check_ips": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "IPs to check network availability", - }, - "virtual": { - Type: schema.TypeBool, - Optional: true, - Description: "If true - platform DHCP server will not be created", - }, - 
"desc": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Optional description", - }, - "start_ip": { - Type: schema.TypeString, - Optional: true, - Description: "Start of IP range to be explicitly included", - }, - "end_ip": { - Type: schema.TypeString, - Optional: true, - Description: "End of IP range to be explicitly included", - }, - "vnfdev_ip": { - Type: schema.TypeString, - Optional: true, - Description: "IP to create VNFDev with", - }, - "pre_reservations_num": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "Number of pre created reservations", - }, - "ovs_bridge": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "OpenvSwith bridge name for ExtNet connection", - }, - "enable": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "Disable/Enable extnet", - }, - "set_default": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Set current extnet as default (can not be undone)", - }, - "excluded_ips": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "IPs to exclude in current extnet pool", - }, - "excluded_ips_range": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_start": { - Type: schema.TypeString, - Required: true, - }, - "ip_end": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - Description: "Range of IPs to exclude in current extnet pool", - }, - "default_qos": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "e_rate": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "in_burst": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "in_rate": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - }, - }, - }, - "restart":{ - Type: schema.TypeBool, - Optional: true, - Description: "restart extnet vnf device", - }, - "migrate":{ - Type: schema.TypeInt, - Optional: true, - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "meta", - }, - "default": { - Type: schema.TypeBool, - Computed: true, - }, - "free_ips": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "extnet_id": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "network_id": { - Type: schema.TypeInt, - Computed: true, - }, - "pri_vnfdev_id": { - Type: schema.TypeInt, - Computed: true, - }, - "shared_with": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "vnfs": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dhcp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "excluded": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_type": { - Type: schema.TypeString, - Computed: true, - }, - "domain_name": { - Type: schema.TypeString, - Computed: true, - }, - "hostname": { - Type: schema.TypeString, - Computed: true, - 
}, - "ip": { - Type: schema.TypeString, - Computed: true, - }, - "mac": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "vm_id": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "network": { - Type: schema.TypeString, - Computed: true, - }, - "prefix": { - Type: schema.TypeInt, - Computed: true, - }, - "routes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "compute_ids": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "destination": { - Type: schema.TypeString, - Computed: true, - }, - "gateway": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "netmask": { - Type: schema.TypeString, - Computed: true, - }, - "route_id": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "reservations": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_type": { - Type: schema.TypeString, - Computed: true, - }, - "domain_name": { - Type: schema.TypeString, - Computed: true, - }, - "hostname": { - Type: schema.TypeString, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "ip": { - Type: schema.TypeString, - Computed: true, - }, - "mac": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "vm_id": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - } -} - func ResourceExtnetCB() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/extnet/resource_extnet_static_route.go b/internal/service/cloudbroker/extnet/resource_extnet_static_route.go index fd1218f..b8ea4ea 100644 --- a/internal/service/cloudbroker/extnet/resource_extnet_static_route.go +++ b/internal/service/cloudbroker/extnet/resource_extnet_static_route.go @@ -65,7 +65,7 @@ func resourceStaticRouteCreate(ctx context.Context, d *schema.ResourceData, m in if computesIDS, ok := d.GetOk("compute_ids"); ok { ids := computesIDS.([]interface{}) - res := make([]uint64, 0, len (ids)) + res := make([]uint64, 0, len(ids)) for _, id := range ids { computeId := uint64(id.(int)) @@ -82,6 +82,7 @@ func resourceStaticRouteCreate(ctx context.Context, d *schema.ResourceData, m in staticRouteData, err := getStaticRouteData(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -91,7 +92,6 @@ func resourceStaticRouteCreate(ctx context.Context, d *schema.ResourceData, m in } func resourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - warnings := dc.Warnings{} staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m) if err != nil { @@ -101,7 +101,7 @@ func resourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m inte flattenStaticRouteData(d, staticRouteData) - return warnings.Get() + return nil } func resourceStaticRouteUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -112,58 +112,9 @@ func resourceStaticRouteUpdate(ctx context.Context, d *schema.ResourceData, m in return diag.FromErr(err) } - staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m) - if err != nil { - d.SetId("") - return diag.FromErr(err) - } - if d.HasChange("compute_ids") { - deletedIds := make([]uint64, 0) - addedIds := make([]uint64, 0) - - 
oldComputeIds, newComputeIds := d.GetChange("compute_ids") - oldComputeIdsSlice := oldComputeIds.([]interface{}) - newComputeIdsSlice := newComputeIds.([]interface{}) - - for _, el := range oldComputeIdsSlice { - if !isContainsIds(newComputeIdsSlice, el) { - convertedEl := uint64(el.(int)) - deletedIds = append(deletedIds, convertedEl) - } - } - - for _, el := range newComputeIdsSlice { - if !isContainsIds(oldComputeIdsSlice, el) { - convertedEl := uint64(el.(int)) - addedIds = append(addedIds, convertedEl) - } - } - - if len(deletedIds) > 0 { - req := extnet.StaticRouteAccessRevokeRequest{ - ExtNetID: uint64(d.Get("extnet_id").(int)), - RouteId: staticRouteData.ID, - ComputeIds: deletedIds, - } - - _, err := c.CloudBroker().ExtNet().StaticRouteAccessRevoke(ctx, req) - if err != nil { - warnings.Add(err) - } - } - - if len(addedIds) > 0 { - req := extnet.StaticRouteAccessGrantRequest{ - ExtNetID: uint64(d.Get("extnet_id").(int)), - RouteId: staticRouteData.ID, - ComputeIds: addedIds, - } - - _, err := c.CloudBroker().ExtNet().StaticRouteAccessGrant(ctx, req) - if err != nil { - warnings.Add(err) - } + if err := utilityStaticRouteComputeIDsUpdate(ctx, d, m); err != nil { + warnings.Add(err) } } @@ -192,47 +143,6 @@ func resourceStaticRouteDelete(ctx context.Context, d *schema.ResourceData, m in return nil } -func resourceStaticRouteSchemaMake() map[string]*schema.Schema { - rets := dataSourceStaticRouteSchemaMake() - rets["route_id"] = &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - Optional: true, - } - rets["compute_ids"] = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - } - rets["destination"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - } - rets["gateway"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - } - rets["netmask"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - } - - return rets -} - -func isContainsIds(els []interface{}, el interface{}) bool { - convEl := el.(int) - for _, elOld := range els { - if convEl == elOld.(int) { - return true - } - } - return false -} - func ResourceStaticRoute() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/extnet/utility_extnet_resource.go b/internal/service/cloudbroker/extnet/utility_extnet_resource.go index 80e15c5..c20930b 100644 --- a/internal/service/cloudbroker/extnet/utility_extnet_resource.go +++ b/internal/service/cloudbroker/extnet/utility_extnet_resource.go @@ -253,32 +253,14 @@ func handleExcludedIPsRangeUpdate(ctx context.Context, d *schema.ResourceData, c } func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) error { - deletedIds := make([]uint64, 0) - addedIds := make([]uint64, 0) + oldSet, newSet := d.GetChange("shared_with") - oldAccountIds, newAccountIds := d.GetChange("shared_with") - oldAccountIdsSlice := oldAccountIds.([]interface{}) - newAccountIdsSlice := newAccountIds.([]interface{}) - - for _, el := range oldAccountIdsSlice { - if !isContainsIds(newAccountIdsSlice, el) { - convertedEl := uint64(el.(int)) - deletedIds = append(deletedIds, convertedEl) - } - } - - for _, el := range newAccountIdsSlice { - if !isContainsIds(oldAccountIdsSlice, el) { - convertedEl := uint64(el.(int)) - addedIds = append(addedIds, convertedEl) - } - } - - if len(deletedIds) > 0 { - for _, accountId := range deletedIds { + deletedAccountIds := 
(oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedAccountIds) > 0 { + for _, accountIdInterface := range deletedAccountIds { req := extnet.AccessRemoveRequest{ NetID: uint64(d.Get("extnet_id").(int)), - AccountID: accountId, + AccountID: uint64(accountIdInterface.(int)), } _, err := c.CloudBroker().ExtNet().AccessRemove(ctx, req) @@ -288,11 +270,12 @@ func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *cont } } - if len(addedIds) > 0 { - for _, accountId := range addedIds { + addedAccountIds := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedAccountIds) > 0 { + for _, accountIdInterface := range addedAccountIds { req := extnet.AccessAddRequest{ NetID: uint64(d.Get("extnet_id").(int)), - AccountID: accountId, + AccountID: uint64(accountIdInterface.(int)), } _, err := c.CloudBroker().ExtNet().AccessAdd(ctx, req) @@ -300,7 +283,6 @@ func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *cont return err } } - } return nil @@ -309,14 +291,14 @@ func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *cont func handleVirtualUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { virtualOld, virtualNew := d.GetChange("virtual") - if virtualOld == false && virtualNew == true { + if !virtualOld.(bool) && virtualNew.(bool) { req := extnet.DeviceRemoveRequest{NetID: recNet.ID} _, err := c.CloudBroker().ExtNet().DeviceRemove(ctx, req) if err != nil { return err } - } else if virtualOld == true && virtualNew == false { + } else if virtualOld.(bool) && !virtualNew.(bool) { req := extnet.DeviceDeployRequest{NetID: recNet.ID} _, err := c.CloudBroker().ExtNet().DeviceDeploy(ctx, req) if err != nil { @@ -329,7 +311,7 @@ func handleVirtualUpdate(ctx context.Context, d *schema.ResourceData, c *control func handleRestartUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { restartOld, restartNew := d.GetChange("restart") - if restartOld == false && restartNew == true { + if !restartOld.(bool) && restartNew.(bool) { req := extnet.DeviceRestartRequest{NetID: recNet.ID} _, err := c.CloudBroker().ExtNet().DeviceRestart(ctx, req) if err != nil { diff --git a/internal/service/cloudbroker/extnet/utility_extnet_static_route.go b/internal/service/cloudbroker/extnet/utility_extnet_static_route.go index 47a0308..8f9f68e 100644 --- a/internal/service/cloudbroker/extnet/utility_extnet_static_route.go +++ b/internal/service/cloudbroker/extnet/utility_extnet_static_route.go @@ -41,7 +41,6 @@ import ( log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -106,3 +105,54 @@ func getStaticRouteData(ctx context.Context, d *schema.ResourceData, m interface return nil, fmt.Errorf("static route not found") } + +func utilityStaticRouteComputeIDsUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return err + } + + oldSet, newSet := d.GetChange("compute_ids") + + deletedComputeIDs := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + deletedIds := make([]uint64, 0, len(deletedComputeIDs)) + if len(deletedComputeIDs) > 
0 { + for _, computeIdInterface := range deletedComputeIDs { + deletedIds = append(deletedIds, uint64(computeIdInterface.(int))) + } + + req := extnet.StaticRouteAccessRevokeRequest{ + ExtNetID: uint64(d.Get("extnet_id").(int)), + RouteId: staticRouteData.ID, + ComputeIds: deletedIds, + } + + _, err := c.CloudBroker().ExtNet().StaticRouteAccessRevoke(ctx, req) + if err != nil { + return err + } + } + + addedComputeIDs := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + addedIds := make([]uint64, 0, len(addedComputeIDs)) + if len(addedComputeIDs) > 0 { + for _, computeIdInterface := range addedComputeIDs { + addedIds = append(addedIds, uint64(computeIdInterface.(int))) + } + req := extnet.StaticRouteAccessGrantRequest{ + ExtNetID: uint64(d.Get("extnet_id").(int)), + RouteId: staticRouteData.ID, + ComputeIds: addedIds, + } + + _, err := c.CloudBroker().ExtNet().StaticRouteAccessGrant(ctx, req) + if err != nil { + return err + } + } + + return nil +} diff --git a/internal/service/cloudbroker/flipgroup/data_source_flipgroup.go b/internal/service/cloudbroker/flipgroup/data_source_flipgroup.go index 51db9d8..63d73ab 100644 --- a/internal/service/cloudbroker/flipgroup/data_source_flipgroup.go +++ b/internal/service/cloudbroker/flipgroup/data_source_flipgroup.go @@ -56,153 +56,6 @@ func dataSourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m inte return nil } -func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "flipgroup_id": { - Type: schema.TypeInt, - Required: true, - Description: "flipgroup_id", - }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - Description: "account_id", - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "account_name", - }, - "client_ids": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "client_ids", - }, - "client_names": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "client_names", - }, - "client_type": { - Type: schema.TypeString, - Computed: true, - Description: "client_type", - }, - "conn_id": { - Type: schema.TypeInt, - Computed: true, - Description: "conn_id", - }, - "conn_type": { - Type: schema.TypeString, - Computed: true, - Description: "conn_type", - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - Description: "created_by", - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - Description: "created_time", - }, - "default_gw": { - Type: schema.TypeString, - Computed: true, - Description: "default_gw", - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - Description: "deleted_by", - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - Description: "deleted_time", - }, - "description": { - Type: schema.TypeString, - Computed: true, - Description: "description", - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - Description: "gid", - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - Description: "guid", - }, - "ip": { - Type: schema.TypeString, - Computed: true, - Description: "ip", - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - Description: "milestones", - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: "name", - }, - "net_id": { - Type: schema.TypeInt, - Computed: true, - Description: "net_id", - }, - "net_type": { - Type: schema.TypeString, - Computed: true, - 
Description: "net_type", - }, - "network": { - Type: schema.TypeString, - Computed: true, - Description: "network", - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - Description: "rg_id", - }, - "rg_name": { - Type: schema.TypeString, - Computed: true, - Description: "rg_name", - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: "status", - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - Description: "updated_by", - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - Description: "updated_time", - }, - } - return rets -} - func DataSourceFlipgroup() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/flipgroup/data_sourse_flipgroup_list.go b/internal/service/cloudbroker/flipgroup/data_sourse_flipgroup_list.go index 88693bf..ef31283 100644 --- a/internal/service/cloudbroker/flipgroup/data_sourse_flipgroup_list.go +++ b/internal/service/cloudbroker/flipgroup/data_sourse_flipgroup_list.go @@ -58,175 +58,6 @@ func dataSourceFlipgroupListRead(ctx context.Context, d *schema.ResourceData, m return nil } -func dataSourceFlipgroupItemSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "ckey": { - Type: schema.TypeString, - Computed: true, - Description: "ckey", - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - Description: "account_id", - }, - "client_ids": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "client_ids", - }, - "client_type": { - Type: schema.TypeString, - Computed: true, - Description: "client_type", - }, - "conn_id": { - Type: schema.TypeInt, - Computed: true, - Description: "conn_id", - }, - "conn_type": { - Type: schema.TypeString, - Computed: true, - Description: "conn_type", - }, - "default_gw": { - Type: schema.TypeString, - Computed: true, - Description: "default_gw", - }, - "description": { - Type: schema.TypeString, - Computed: true, - Description: "description", - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - Description: "gid", - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - Description: "guid", - }, - "flipgroup_id": { - Type: schema.TypeInt, - Computed: true, - Description: "flipgroup_id", - }, - "ip": { - Type: schema.TypeString, - Computed: true, - Description: "ip", - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - Description: "milestones", - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: "name", - }, - "net_id": { - Type: schema.TypeInt, - Computed: true, - Description: "net_id", - }, - "net_type": { - Type: schema.TypeString, - Computed: true, - Description: "net_type", - }, - "net_mask": { - Type: schema.TypeInt, - Computed: true, - Description: "net_mask", - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: "status", - }, - } - return rets -} - -func dataSourceFlipgroupsListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: "name", - }, - "vins_id": { - Type: schema.TypeInt, - Optional: true, - Description: "vins_id", - }, - "vins_name": { - Type: schema.TypeString, - Optional: true, - Description: "vins_name", - }, - "extnet_id": { - Type: schema.TypeInt, - Optional: true, - Description: "extnet_id", - }, - "by_ip": { - Type: 
schema.TypeString, - Optional: true, - Description: "by_ip", - }, - "rg_id": { - Type: schema.TypeInt, - Optional: true, - Description: "rg_id", - }, - "by_id": { - Type: schema.TypeInt, - Optional: true, - Description: "by_id", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: dataSourceFlipgroupItemSchemaMake(), - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - Description: "entry_count", - }, - } - return res -} - func DataSourceFlipgroupList() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/flipgroup/resource_check_input_values.go b/internal/service/cloudbroker/flipgroup/resource_check_input_values.go index a17bb5c..dff18a0 100644 --- a/internal/service/cloudbroker/flipgroup/resource_check_input_values.go +++ b/internal/service/cloudbroker/flipgroup/resource_check_input_values.go @@ -36,35 +36,35 @@ package flipgroup import ( "context" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" ) -func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) +func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics { + var errs []error + accountId := uint64(d.Get("account_id").(int)) - req := account.ListRequest{} + netType := d.Get("net_type").(string) + netId := uint64(d.Get("net_id").(int)) - accountList, err := c.CloudBroker().Account().List(ctx, req) - if err != nil { - return false, err + if err := ic.ExistAccount(ctx, accountId, c); err != nil { + errs = append(errs, err) } - return len(accountList.FilterByID(accountId).Data) != 0, nil -} - -func existNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - netID := uint64(d.Get("net_id").(int)) - req := vins.ListRequest {} - - vinsList, err := c.CloudBroker().VINS().List(ctx, req) - if err != nil { - return false, err + switch netType { + case "VINS": + if err := ic.ExistVins(ctx, netId, c); err != nil { + errs = append(errs, err) + } + case "EXTNET": + if err := ic.ExistExtNet(ctx, netId, c); err != nil { + errs = append(errs, err) + } } - return len(vinsList.FilterByID(netID).Data) != 0, nil + return dc.ErrorsToDiagnostics(errs) } diff --git a/internal/service/cloudbroker/flipgroup/resource_flipgroup.go b/internal/service/cloudbroker/flipgroup/resource_flipgroup.go index 203912c..efbaf10 100644 --- a/internal/service/cloudbroker/flipgroup/resource_flipgroup.go +++ b/internal/service/cloudbroker/flipgroup/resource_flipgroup.go @@ -37,14 +37,13 @@ package flipgroup import ( "context" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/flipgroup" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" log "github.com/sirupsen/logrus" ) @@ -53,43 +52,34 @@ func resourceFlipgroupCreate(ctx context.Context, d *schema.ResourceData, m inte log.Debugf("resourceFlipgroupCreate called with name: %s, accountID %v", d.Get("name").(string), d.Get("account_id").(int)) c := m.(*controller.ControllerCfg) - req := flipgroup.CreateRequest{ - Name: d.Get("name").(string), - NetType: d.Get("net_type").(string), - ClientType: d.Get("client_type").(string), - } - - haveAccount, err := existAccountID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveAccount { - return diag.Errorf("resourceFlipgroupCreate: can't create Flipgroup because AccountID %d is not allowed or does not exist", d.Get("account_id").(int)) - } - req.AccountID = uint64(d.Get("account_id").(int)) - haveVINS, err := existNetID(ctx, d, m) - if err != nil { - return diag.FromErr(err) + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags } - if !haveVINS { - return diag.Errorf("resourceFlipgroupCreate: can't create Flipgroup because VinsID %d is not allowed or does not exist", d.Get("net_id").(int)) + + req := flipgroup.CreateRequest{ + Name: d.Get("name").(string), + NetType: d.Get("net_type").(string), + ClientType: d.Get("client_type").(string), + AccountID: uint64(d.Get("account_id").(int)), + NetID: uint64(d.Get("net_id").(int)), } - req.NetID = uint64(d.Get("net_id").(int)) - + if IP, ok := d.GetOk("ip"); ok { req.IP = IP.(string) } if description, ok := d.GetOk("desc"); ok { req.Description = description.(string) } - + resp, err := c.CloudBroker().FLIPGroup().Create(ctx, req) if err != nil { + d.SetId("") return diag.FromErr(err) } d.SetId(fmt.Sprint(resp.ID)) + d.Set("flipgroup_id", resp.ID) var warnings dc.Warnings @@ -102,14 +92,26 @@ func resourceFlipgroupCreate(ctx context.Context, d *schema.ResourceData, m inte } func resourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceFlipgroupRead: called for flipgroup_id %s, name %s", + d.Id(), d.Get("name").(string)) + fg, err := utilityFlipgroupCheckPresence(ctx, d, m) if err != nil { d.SetId("") return diag.FromErr(err) } + switch fg.Status { + case status.Destroyed: + d.SetId("") + return diag.Errorf("The flipgroup status is destroyed and cannot be read.") + } + flattenFlipgroup(d, fg) + log.Debugf("resourceFlipgroupRead: after flattenFlipgroup: flipgroup_id %s, name %s", + d.Id(), d.Get("name").(string)) + return nil } @@ -117,19 +119,8 @@ func resourceFlipgroupUpdate(ctx context.Context, d *schema.ResourceData, m inte log.Debugf("resourceFlipgroupUpdate called with id: %v", d.Get("flipgroup_id").(int)) c := m.(*controller.ControllerCfg) - haveAccount, err := existAccountID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveAccount { - return diag.Errorf("resourceFlipgroupUpdate: can't update Flipgroup because AccountID %d is not allowed or does not exist", d.Get("account_id").(int)) - } - haveVINS, err := existNetID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveVINS { - return diag.Errorf("resourceFlipgroupUpdate: can't update Flipgroup because VinsID %d is not 
allowed or does not exist", d.Get("net_id").(int)) + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags } fg, err := utilityFlipgroupCheckPresence(ctx, d, m) @@ -138,6 +129,12 @@ func resourceFlipgroupUpdate(ctx context.Context, d *schema.ResourceData, m inte return diag.FromErr(err) } + switch fg.Status { + case status.Destroyed: + d.SetId("") + return diag.Errorf("The flipgroup status is destroyed and cannot be updated.") + } + var warnings dc.Warnings basicUpdate := false req := flipgroup.EditRequest{FLIPGroupID: fg.ID} @@ -172,6 +169,7 @@ func resourceFlipgroupDelete(ctx context.Context, d *schema.ResourceData, m inte fg, err := utilityFlipgroupCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -179,163 +177,16 @@ func resourceFlipgroupDelete(ctx context.Context, d *schema.ResourceData, m inte FLIPGroupID: fg.ID, } + // When FLIPGroup().Delete() is executed, flipgroup automatically is removed from the compute it has been attached to, if any. + // No need to specifically call for FlipGroup().ComputeRemove(). _, err = c.CloudBroker().FLIPGroup().Delete(ctx, req) if err != nil { return diag.FromErr(err) } - return nil -} + d.SetId("") -func resourceFlipgroupSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - Description: "Account ID", - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: "Flipgroup name", - }, - "net_id": { - Type: schema.TypeInt, - Required: true, - Description: "EXTNET or ViNS ID", - }, - "net_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS"}, true), - Description: "Network type, EXTNET or VINS", - }, - "client_type": { - Type: schema.TypeString, - Required: true, - Description: "Type of client, 'compute' ('vins' will be later)", - ValidateFunc: validation.StringInSlice([]string{"compute"}, true), - }, - "ip": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "IP address to associate with this group. 
If empty, the platform will autoselect IP address", - }, - "desc": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Text description of this Flipgroup instance", - }, - "client_ids": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "List of clients attached to this Flipgroup instance", - }, - "client_names": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "client_names", - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "account_name", - }, - "flipgroup_id": { - Type: schema.TypeInt, - Computed: true, - }, - "conn_id": { - Type: schema.TypeInt, - Computed: true, - }, - "conn_type": { - Type: schema.TypeString, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - Description: "created_by", - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - Description: "created_time", - }, - "default_gw": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - Description: "deleted_by", - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - Description: "deleted_time", - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "network": { - Type: schema.TypeString, - Computed: true, - Description: "network", - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - Description: "rg_id", - }, - "rg_name": { - Type: schema.TypeString, - Computed: true, - Description: "rg_name", - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - Description: "updated_by", - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - Description: "updated_time", - }, - "net_mask": { - Type: schema.TypeInt, - Computed: true, - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - } + return nil } func ResourceFlipgroup() *schema.Resource { diff --git a/internal/service/cloudbroker/flipgroup/utility_flipgroup.go b/internal/service/cloudbroker/flipgroup/utility_flipgroup.go index 0ffa4fc..da1f55a 100644 --- a/internal/service/cloudbroker/flipgroup/utility_flipgroup.go +++ b/internal/service/cloudbroker/flipgroup/utility_flipgroup.go @@ -46,6 +46,7 @@ import ( ) func utilityFlipgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*flipgroup.RecordFLIPGroup, error) { + log.Debugf("utilityFlipgroupCheckPresence") c := m.(*controller.ControllerCfg) req := flipgroup.GetRequest{} @@ -56,7 +57,6 @@ func utilityFlipgroupCheckPresence(ctx context.Context, d *schema.ResourceData, req.FLIPGroupID = uint64(d.Get("flipgroup_id").(int)) } - log.Debugf("utilityDiskCheckPresence: load disk") flipgroup, err := c.CloudBroker().FLIPGroup().Get(ctx, req) if err != nil { return nil, err diff --git a/internal/service/cloudbroker/grid/data_source_grid.go b/internal/service/cloudbroker/grid/data_source_grid.go index f66adbb..2bb9e9d 100644 --- a/internal/service/cloudbroker/grid/data_source_grid.go +++ b/internal/service/cloudbroker/grid/data_source_grid.go @@ -1,100 +1,68 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
-Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package grid - -import ( - "context" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceGridRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - grid, err := utilityGridCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - d.SetId(strconv.FormatUint(grid.ID, 10)) - flattenGrid(d, grid) - - return nil -} - -func dataSourceGetGridSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "grid_id": { - Type: schema.TypeInt, - Required: true, - }, - "flag": { - Type: schema.TypeString, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "id": { - Type: schema.TypeInt, - Computed: true, - }, - "location_code": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - } -} - -func DataSourceGrid() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceGridRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceGetGridSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceGridRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + grid, err := utilityGridCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + d.SetId(strconv.FormatUint(grid.ID, 10)) + flattenGrid(d, grid) + + return nil +} + +func DataSourceGrid() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceGridRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceGetGridSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/grid/data_source_grid_get_post_diagnosis.go b/internal/service/cloudbroker/grid/data_source_grid_get_post_diagnosis.go index 022d615..8327c4c 100644 --- a/internal/service/cloudbroker/grid/data_source_grid_get_post_diagnosis.go +++ b/internal/service/cloudbroker/grid/data_source_grid_get_post_diagnosis.go @@ -34,6 +34,7 @@ package grid import ( "context" + "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -43,26 +44,14 @@ import ( func dataSourceGridGetDiagnosisRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { diagnosis, err := utilityGridGetDiagnosisCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } - d.SetId(d.Id()) + d.SetId(strconv.Itoa(d.Get("gid").(int))) d.Set("diagnosis", diagnosis) return nil } -func dataSourceGridGetDiagnosisSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "gid": { - Type: schema.TypeInt, - Required: true, - }, - "diagnosis": { - Type: schema.TypeString, - Computed: true, - }, - } -} - func DataSourceGridGetDiagnosis() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, @@ -81,26 +70,14 @@ func DataSourceGridGetDiagnosis() *schema.Resource { func dataSourceGridPostDiagnosisRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { diagnosis, err := utilityGridPostDiagnosisCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } - d.SetId(d.Id()) + d.SetId(strconv.Itoa(d.Get("gid").(int))) d.Set("diagnosis", diagnosis) return nil } -func dataSourceGridPostDiagnosisSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "gid": { - Type: schema.TypeInt, - Required: true, - }, - "diagnosis": { - Type: schema.TypeString, - Computed: true, - }, - } -} - func DataSourceGridPostDiagnosis() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/grid/data_source_grid_get_post_status.go b/internal/service/cloudbroker/grid/data_source_grid_get_post_status.go index 4596a8c..ef4334e 100644 --- a/internal/service/cloudbroker/grid/data_source_grid_get_post_status.go +++ b/internal/service/cloudbroker/grid/data_source_grid_get_post_status.go @@ -44,6 +44,7 @@ import ( func dataSourceGridGetStatusRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { grid, err := utilityGridGetStatusCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } id := uuid.New() @@ -52,15 +53,6 
@@ func dataSourceGridGetStatusRead(ctx context.Context, d *schema.ResourceData, m return nil } -func dataSourceGridGetStatusSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "status": { - Type: schema.TypeBool, - Computed: true, - }, - } -} - func DataSourceGridGetStatus() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, @@ -79,6 +71,7 @@ func DataSourceGridGetStatus() *schema.Resource { func dataSourceGridPostStatusRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { grid, err := utilityGridPostStatusCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } id := uuid.New() @@ -87,15 +80,6 @@ func dataSourceGridPostStatusRead(ctx context.Context, d *schema.ResourceData, m return nil } -func dataSourceGridPostStatusSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "status": { - Type: schema.TypeBool, - Computed: true, - }, - } -} - func DataSourceGridPostStatus() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/grid/data_source_grid_list.go b/internal/service/cloudbroker/grid/data_source_grid_list.go index 2a9854b..03d23d6 100644 --- a/internal/service/cloudbroker/grid/data_source_grid_list.go +++ b/internal/service/cloudbroker/grid/data_source_grid_list.go @@ -1,265 +1,69 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package grid - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceGridListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - gridList, err := utilityGridListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenGridList(gridList)) - d.Set("entry_count", gridList.EntryCount) - return nil -} - -func dataSourceGridListSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "by_id": { - Type: schema.TypeInt, - Optional: true, - Description: "by id", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "name", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "grid list", - Elem: &schema.Resource{ - Schema: dataSourceGridSchemaMake(), - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - Description: "entry count", - }, - } - - return rets -} - -func dataSourceGridSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "resources": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "current": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_ips": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_traffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "reserved": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_ips": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_traffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: 
schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "flag": { - Type: schema.TypeString, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "id": { - Type: schema.TypeInt, - Computed: true, - }, - "location_code": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - } -} - -func DataSourceGridList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceGridListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceGridListSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceGridListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + gridList, err := utilityGridListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenGridList(gridList)) + d.Set("entry_count", gridList.EntryCount) + return nil +} + +func DataSourceGridList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceGridListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceGridListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/grid/data_source_grid_list_emails.go b/internal/service/cloudbroker/grid/data_source_grid_list_emails.go index 35424b7..f4106cb 100644 --- a/internal/service/cloudbroker/grid/data_source_grid_list_emails.go +++ b/internal/service/cloudbroker/grid/data_source_grid_list_emails.go @@ -44,6 +44,7 @@ import ( func dataSourceGridListEmailsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { gridListEmails, err := utilityGridListEmailsCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } id := uuid.New() @@ -53,36 +54,6 @@ func dataSourceGridListEmailsRead(ctx context.Context, d *schema.ResourceData, m return nil } -func dataSourceGridListEmailsSchemaMake() 
map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "grid list emails", - Elem: &schema.Schema { - Type: schema.TypeString, - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - Description: "entry count", - }, - } - - return rets -} - func DataSourceGridListEmails() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/grid/data_sourse_grid_get_consumption.go b/internal/service/cloudbroker/grid/data_sourse_grid_get_consumption.go index ab0600d..f041071 100644 --- a/internal/service/cloudbroker/grid/data_sourse_grid_get_consumption.go +++ b/internal/service/cloudbroker/grid/data_sourse_grid_get_consumption.go @@ -44,143 +44,15 @@ import ( func dataSourceGridGetConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { gridGetConsumption, err := utilityGridGetConsumptionCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } - d.SetId(strconv.FormatUint(gridGetConsumption.GID, 10)) + d.SetId(strconv.Itoa(d.Get("grid_id").(int))) d.Set("consumed", flattenGridRecordResource(gridGetConsumption.Consumed)) d.Set("reserved", flattenGridRecordResource(gridGetConsumption.Reserved)) return nil } -func dataSourceGridGetConsumptionSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "grid_id": { - Type: schema.TypeInt, - Required: true, - }, - "consumed": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_ips": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_traffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "reserved": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_ips": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_traffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: 
schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - } -} - func DataSourceGridGetConsumption() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/grid/data_sourse_grid_list_consumption.go b/internal/service/cloudbroker/grid/data_sourse_grid_list_consumption.go index 92fa364..52ce9da 100644 --- a/internal/service/cloudbroker/grid/data_sourse_grid_list_consumption.go +++ b/internal/service/cloudbroker/grid/data_sourse_grid_list_consumption.go @@ -44,6 +44,7 @@ import ( func dataSourceGridListConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { gridListConsumption, err := utilityGridListConsumptionCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } id := uuid.New() @@ -53,154 +54,6 @@ func dataSourceGridListConsumptionRead(ctx context.Context, d *schema.ResourceDa return nil } -func dataSourceGridListConsumptionSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "items": { - Type: schema.TypeList, - Computed: true, - Description: "grid list consumption", - Elem: &schema.Resource{ - Schema: dataSourceGridConsumptionSchemaMake(), - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - Description: "entry count", - }, - } - return rets -} - -func dataSourceGridConsumptionSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "consumed": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_ips": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_traffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "reserved": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_ips": { - Type: schema.TypeInt, - Computed: true, - }, - "ext_traffic": { - Type: schema.TypeInt, - Computed: true, - }, - "gpu": { - Type: schema.TypeInt, - Computed: true, - }, - "ram": { - Type: schema.TypeInt, - Computed: true, - }, - "seps": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeString, - Computed: true, - }, - "data_name": { - Type: schema.TypeString, - Computed: true, - }, - "disk_size": { - Type: schema.TypeFloat, - Computed: true, - }, - "disk_size_max": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "id": { - Type: schema.TypeInt, - Computed: true, - }, - } -} - func DataSourceGridListConsumption() *schema.Resource { return &schema.Resource{ 
SchemaVersion: 1, diff --git a/internal/service/cloudbroker/grid/flattens.go b/internal/service/cloudbroker/grid/flattens.go index c092516..cac0817 100644 --- a/internal/service/cloudbroker/grid/flattens.go +++ b/internal/service/cloudbroker/grid/flattens.go @@ -3,9 +3,11 @@ package grid import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" ) func flattenGrid(d *schema.ResourceData, grid *grid.RecordGrid) { + d.Set("auth_broker", flattens.FlattenMeta(grid.AuthBroker)) d.Set("name", grid.Name) d.Set("flag", grid.Flag) d.Set("gid", grid.GID) @@ -20,6 +22,7 @@ func flattenGridList(gl *grid.ListGrids) []map[string]interface{} { temp := map[string]interface{}{ "resources": flattenGridResources(item.Resources), "name": item.Name, + "auth_broker": flattens.FlattenMeta(item.AuthBroker), "flag": item.Flag, "gid": item.GID, "guid": item.GUID, @@ -37,7 +40,7 @@ func flattenGridListConsumption(gl *grid.ListResourceConsumption) []map[string]i temp := map[string]interface{}{ "consumed": flattenGridRecordResource(item.Consumed), "reserved": flattenGridRecordResource(item.Reserved), - "id": item.GID, + "id": item.GID, } res = append(res, temp) } diff --git a/internal/service/cloudbroker/grid/utility_grid.go b/internal/service/cloudbroker/grid/utility_grid.go index 1a8c1d4..e1647d5 100644 --- a/internal/service/cloudbroker/grid/utility_grid.go +++ b/internal/service/cloudbroker/grid/utility_grid.go @@ -1,63 +1,63 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package grid - -import ( - "context" - "strconv" - - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func utilityGridCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*grid.RecordGrid, error) { - c := m.(*controller.ControllerCfg) - req := grid.GetRequest{} - - if d.Id() != "" { - id, _ := strconv.ParseUint(d.Id(), 10, 64) - req.GID = id - } else { - req.GID = uint64(d.Get("grid_id").(int)) - } - - log.Debugf("utilityGridCheckPresence: load grid") - grid, err := c.CloudBroker().Grid().Get(ctx, req) - if err != nil { - return nil, err - } - - return grid, nil -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. 
All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package grid + +import ( + "context" + "strconv" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityGridCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*grid.RecordGrid, error) { + c := m.(*controller.ControllerCfg) + req := grid.GetRequest{} + + if d.Id() != "" { + id, _ := strconv.ParseUint(d.Id(), 10, 64) + req.GID = id + } else { + req.GID = uint64(d.Get("grid_id").(int)) + } + + log.Debugf("utilityGridCheckPresence: load grid") + gridRec, err := c.CloudBroker().Grid().Get(ctx, req) + if err != nil { + return nil, err + } + + return gridRec, nil +} diff --git a/internal/service/cloudbroker/grid/utility_grid_get_post_diagnosis.go b/internal/service/cloudbroker/grid/utility_grid_get_post_diagnosis.go index 1cd1d62..4c8e172 100644 --- a/internal/service/cloudbroker/grid/utility_grid_get_post_diagnosis.go +++ b/internal/service/cloudbroker/grid/utility_grid_get_post_diagnosis.go @@ -22,7 +22,7 @@ limitations under the License. Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud Orchestration Technology) with Terraform by Hashicorp. -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort Please see README.md to learn where to place source code so that it builds seamlessly. 
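The change repeated across the data sources in this patch follows one read pattern: a presence helper resolves the object ID (from state via d.Id() when available, otherwise from the required schema attribute), the read handler clears the stored ID with d.SetId("") before returning any error, and on success it sets the ID and flattens the record into state. A minimal, self-contained sketch of that pattern is shown below; the "thing" identifiers and the stub record type are illustrative assumptions, not part of the provider.

// Sketch of the presence-helper / read-handler pattern used by the grid and
// image data sources in this patch. "thing" identifiers are hypothetical.
package thing

import (
	"context"
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// thingRecord stands in for an SDK record type such as grid.RecordGrid.
type thingRecord struct {
	ID   uint64
	Name string
}

// utilityThingCheckPresence resolves the object ID from state (d.Id()) when
// refreshing, otherwise from the required "thing_id" attribute, and loads the
// record. A real helper would call the SDK here, e.g.
// c.CloudBroker().Grid().Get(ctx, req).
func utilityThingCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*thingRecord, error) {
	var id uint64
	if d.Id() != "" {
		id, _ = strconv.ParseUint(d.Id(), 10, 64)
	} else {
		id = uint64(d.Get("thing_id").(int))
	}
	return &thingRecord{ID: id, Name: "example"}, nil
}

func dataSourceThingRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	rec, err := utilityThingCheckPresence(ctx, d, m)
	if err != nil {
		// Clear any previously stored ID before surfacing the error; this is
		// the d.SetId("") change applied throughout this patch.
		d.SetId("")
		return diag.FromErr(err)
	}
	d.SetId(strconv.FormatUint(rec.ID, 10))
	d.Set("name", rec.Name)
	return nil
}

The list-style data sources in this patch use the same shape, except that they set a fresh uuid.New() string as the ID and additionally store entry_count alongside items.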
@@ -54,7 +54,7 @@ func utilityGridGetDiagnosisCheckPresence(ctx context.Context, d *schema.Resourc req.GID = uint64(d.Get("gid").(int)) } - log.Debugf("utilityGridListConsumptionCheckPresence: load grid list consumption") + log.Debugf("utilityGridGetDiagnosisCheckPresence: load grid get diagnosis") gridGetDiagnosis, err := c.CloudBroker().Grid().GetDiagnosisGET(ctx, req) if err != nil { return "", err @@ -73,8 +73,8 @@ func utilityGridPostDiagnosisCheckPresence(ctx context.Context, d *schema.Resour } else { req.GID = uint64(d.Get("gid").(int)) } - - log.Debugf("utilityGridListConsumptionCheckPresence: load grid list consumption") + + log.Debugf("utilityGridPostDiagnosisCheckPresence: load grid post diagnosis") gridPostDiagnosis, err := c.CloudBroker().Grid().GetDiagnosis(ctx, req) if err != nil { return "", err diff --git a/internal/service/cloudbroker/ic/input_checks.go b/internal/service/cloudbroker/ic/input_checks.go index 6fd78b8..dce6538 100644 --- a/internal/service/cloudbroker/ic/input_checks.go +++ b/internal/service/cloudbroker/ic/input_checks.go @@ -5,9 +5,14 @@ import ( "context" "fmt" + cb_account "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + cb_compute "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" cb_extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" cb_gid "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" cb_image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" + cb_k8ci "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8ci" + cb_k8s "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s" + cb_lb "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" cb_rg "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" cb_stack "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack" cb_vins "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" @@ -49,6 +54,24 @@ func ExistImage(ctx context.Context, imageId uint64, c *controller.ControllerCfg return nil } +func ExistVins(ctx context.Context, vinsId uint64, c *controller.ControllerCfg) error { + req := cb_vins.ListRequest{ + ByID: vinsId, + IncludeDeleted: false, + } + + vinsList, err := c.CloudBroker().VINS().List(ctx, req) + if err != nil { + return err + } + + if len(vinsList.Data) == 0 { + return fmt.Errorf("vins with ID %v not found", vinsId) + } + + return nil +} + func ExistVinses(ctx context.Context, vinsIds []uint64, c *controller.ControllerCfg) []error { var errs []error @@ -109,10 +132,69 @@ func ExistExtNets(ctx context.Context, extNetIds []uint64, c *controller.Control return errs } +func ExistExtNetInLb(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error { + if extNetId == 0 { + return nil + } + req := cb_extnet.ListRequest{ + ByID: extNetId, + } + + extNetList, err := c.CloudBroker().ExtNet().List(ctx, req) + if err != nil { + return err + } + + if len(extNetList.Data) == 0 { + return fmt.Errorf("EXTNET with ID %v not found", extNetId) + } + + return nil +} + +func ExistExtNetInRG(ctx context.Context, extNetId, accountId uint64, c *controller.ControllerCfg) error { + req := cb_extnet.ListRequest{ + AccountID: accountId, + ByID: extNetId, + } + + listExtNet, err := c.CloudBroker().ExtNet().List(ctx, req) + if err != nil { + return err + } + + if len(listExtNet.Data) == 0 { + return fmt.Errorf("EXTNET with ID %v not found for account with id %d", extNetId, accountId) + } + + return 
nil +} + +func ExistExtNetInVins(ctx context.Context, extNetId int, c *controller.ControllerCfg) error { + if extNetId == 0 || extNetId == -1 { + return nil + } + req := cb_extnet.ListRequest{ + ByID: uint64(extNetId), + } + + extNetList, err := c.CloudBroker().ExtNet().List(ctx, req) + if err != nil { + return err + } + + if len(extNetList.Data) == 0 { + return fmt.Errorf("EXTNET with ID %v not found", extNetId) + } + + return nil +} + func ExistExtNet(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error { req := cb_extnet.ListRequest{ - ByID: extNetId, + ByID: extNetId, + Status: "Enabled", } extNetList, err := c.CloudBroker().ExtNet().List(ctx, req) @@ -127,6 +209,27 @@ func ExistExtNet(ctx context.Context, extNetId uint64, c *controller.ControllerC return nil } +func ExistVinsInLb(ctx context.Context, vinsId uint64, c *controller.ControllerCfg) error { + if vinsId == 0 { + return nil + } + + req := cb_vins.ListRequest{ + ByID: vinsId, + } + + vinsList, err := c.CloudBroker().VINS().List(ctx, req) + if err != nil { + return err + } + + if len(vinsList.Data) == 0 { + return fmt.Errorf("VINS with ID %v not found", vinsId) + } + + return nil +} + func ExistGID(ctx context.Context, gid uint64, c *controller.ControllerCfg) error { req := cb_gid.ListRequest{} @@ -160,3 +263,144 @@ func ExistStack(ctx context.Context, stackId uint64, c *controller.ControllerCfg return nil } + +// ExistStackInPcidevice checks if compute exists with specified stackId and specified non-deleted rgId. +func ExistStackInPcidevice(ctx context.Context, stackId, rgId uint64, c *controller.ControllerCfg) error { + req := cb_rg.ListRequest{ + ByID: rgId, + IncludeDeleted: false, + } + + rgList, err := c.CloudBroker().RG().List(ctx, req) + if err != nil { + return err + } + + for _, v := range rgList.Data { + for _, idVM := range v.VMs { + req := cb_compute.GetRequest{ + ComputeID: idVM, + } + computeRec, err := c.CloudBroker().Compute().Get(ctx, req) + if err != nil { + return err + } + if computeRec.StackID == stackId { + return nil + } + } + } + + return fmt.Errorf("no compute found with stack_id %v and rg_id %v", stackId, rgId) +} + +func ExistLB(ctx context.Context, lbId uint64, c *controller.ControllerCfg) error { + + req := cb_lb.ListRequest{ + ByID: lbId, + } + + lbList, err := c.CloudBroker().LB().List(ctx, req) + if err != nil { + return err + } + + if len(lbList.Data) == 0 { + return fmt.Errorf("LB with ID %v not found", lbId) + } + + return nil + +} + +func ExistAccount(ctx context.Context, accountId uint64, c *controller.ControllerCfg) error { + req := cb_account.ListRequest{ + ByID: accountId, + } + + accountList, err := c.CloudBroker().Account().List(ctx, req) + if err != nil { + return err + } + + if len(accountList.Data) == 0 { + return fmt.Errorf("account with id %d not found", accountId) + } + + return nil +} + +func ExistK8CI(ctx context.Context, k8ciId uint64, c *controller.ControllerCfg) error { + req := cb_k8ci.ListRequest{ + ByID: k8ciId, + } + + k8ciList, err := c.CloudBroker().K8CI().List(ctx, req) + if err != nil { + return err + } + + if len(k8ciList.Data) == 0 { + return fmt.Errorf("k8ci with id %d not found", k8ciId) + } + + return nil +} + +func ExistExtNetInK8s(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error { + if extNetId == 0 { + return nil + } + req := cb_extnet.ListRequest{ + ByID: extNetId, + } + + extNetList, err := c.CloudBroker().ExtNet().List(ctx, req) + if err != nil { + return err + } + + if len(extNetList.Data) == 0 { + return 
fmt.Errorf("EXTNET with ID %v not found", extNetId) + } + + return nil +} + +func ExistVinsInK8s(ctx context.Context, vinsId uint64, c *controller.ControllerCfg) error { + if vinsId == 0 { + return nil + } + + req := cb_vins.ListRequest{ + ByID: vinsId, + } + + vinsList, err := c.CloudBroker().VINS().List(ctx, req) + if err != nil { + return err + } + + if len(vinsList.Data) == 0 { + return fmt.Errorf("VINS with ID %v not found", vinsId) + } + + return nil +} + +func ExistK8s(ctx context.Context, k8sId uint64, c *controller.ControllerCfg) error { + req := cb_k8s.ListRequest{ + ByID: k8sId, + } + + k8sList, err := c.CloudBroker().K8S().List(ctx, req) + if err != nil { + return err + } + + if len(k8sList.Data) == 0 { + return fmt.Errorf("k8s with id %d not found", k8sId) + } + + return nil +} diff --git a/internal/service/cloudbroker/image/data_source_image.go b/internal/service/cloudbroker/image/data_source_image.go index 98d58ea..c6b4aa7 100644 --- a/internal/service/cloudbroker/image/data_source_image.go +++ b/internal/service/cloudbroker/image/data_source_image.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -43,227 +43,14 @@ import ( func dataSourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { image, err := utilityImageCheckPresence(ctx, d, m) if err != nil { - + d.SetId("") return diag.FromErr(err) } d.SetId(strconv.FormatUint(image.ID, 10)) flattenImage(d, image) - return nil } -func dataSourceImageSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the rescue disk", - }, - "url": { - Type: schema.TypeString, - Computed: true, - Description: "URL where to download media from", - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - Description: "grid (platform) ID where this template should be create in", - }, - "boot_type": { - Type: schema.TypeString, - Computed: true, - Description: "Boot type of image bios or uefi", - }, - "image_type": { - Type: schema.TypeString, - Computed: true, - Description: "Image type linux, windows or other", - }, - "shared_with": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "history": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "id": { - Type: schema.TypeInt, - Computed: true, - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "drivers": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "List of types of compute suitable for image. 
Example: [ \"KVM_X86\" ]", - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "meta", - }, - "hot_resize": { - Type: schema.TypeBool, - Computed: true, - Description: "Does this machine supports hot resize", - }, - "username": { - Type: schema.TypeString, - Computed: true, - Description: "Optional username for the image", - }, - "password": { - Type: schema.TypeString, - Computed: true, - Description: "Optional password for the image", - }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - Description: "AccountId to make the image exclusive", - }, - "username_dl": { - Type: schema.TypeString, - Computed: true, - Description: "username for upload binary media", - }, - "password_dl": { - Type: schema.TypeString, - Computed: true, - Description: "password for upload binary media", - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - Description: "storage endpoint provider ID", - }, - "pool_name": { - Type: schema.TypeString, - Computed: true, - Description: "pool for image create", - }, - "architecture": { - Type: schema.TypeString, - Computed: true, - Description: "binary architecture of this image, one of X86_64 of PPC64_LE", - }, - "image_id": { - Type: schema.TypeInt, - Required: true, - Description: "image id", - }, - "permanently": { - Type: schema.TypeBool, - Computed: true, - Description: "Whether to completely delete the image", - }, - "bootable": { - Type: schema.TypeBool, - Computed: true, - Description: "Does this image boot OS", - }, - "unc_path": { - Type: schema.TypeString, - Computed: true, - Description: "unc path", - }, - "link_to": { - Type: schema.TypeInt, - Computed: true, - Description: "", - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: "status", - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - Description: "tech atatus", - }, - "version": { - Type: schema.TypeString, - Computed: true, - Description: "version", - }, - "size": { - Type: schema.TypeInt, - Computed: true, - Description: "image size", - }, - "enabled": { - Type: schema.TypeBool, - Computed: true, - }, - "computeci_id": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "provider_name": { - Type: schema.TypeString, - Computed: true, - }, - "purge_attempts": { - Type: schema.TypeInt, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_name": { - Type: schema.TypeString, - Computed: true, - }, - "rescuecd": { - Type: schema.TypeBool, - Computed: true, - }, - "last_modified": { - Type: schema.TypeInt, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - } -} - func DataSourceImage() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/image/data_source_image_list.go b/internal/service/cloudbroker/image/data_source_image_list.go index 379526b..fda82b1 100644 --- a/internal/service/cloudbroker/image/data_source_image_list.go +++ b/internal/service/cloudbroker/image/data_source_image_list.go @@ -1,98 +1,69 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
-Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package image - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceImageListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - imageList, err := utilityImageListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenImageList(imageList)) - - return nil -} - -func dataSourceImageListSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Description: "filter images by storage endpoint provider ID", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "image list", - Elem: &schema.Resource{ - Schema: dataSourceImageSchemaMake(), - }, - }, - } - - return rets -} - -func DataSourceImageList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceImageListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceImageListSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package image + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceImageListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + imageList, err := utilityImageListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenImageList(imageList)) + d.Set("entry_count", imageList.EntryCount) + return nil +} + +func DataSourceImageList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceImageListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceImageListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/image/data_source_image_list_stacks.go b/internal/service/cloudbroker/image/data_source_image_list_stacks.go index 0e39e01..8e536ba 100644 --- a/internal/service/cloudbroker/image/data_source_image_list_stacks.go +++ b/internal/service/cloudbroker/image/data_source_image_list_stacks.go @@ -1,167 +1,69 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package image - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceImageListStacksRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - imageListStacks, err := utilityImageListStacksCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenImageListStacks(d, imageListStacks)) - - return nil -} - -func dataSourceImageListStackSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "api_url": { - Type: schema.TypeString, - Computed: true, - }, - "api_key": { - Type: schema.TypeString, - Computed: true, - }, - "app_id": { - Type: schema.TypeString, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "drivers": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "error": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "id": { - Type: schema.TypeInt, - Computed: true, - }, - "images": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "login": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "passwd": { - Type: schema.TypeString, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - } -} - -func dataSourceImageListStacksSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "image_id": { - Type: schema.TypeInt, - Required: true, - Description: "image id", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: dataSourceImageListStackSchemaMake(), - }, - Description: "items of stacks list", - }, - } -} - -func DataSourceImageListStacks() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceImageListStacksRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceImageListStacksSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package image + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceImageListStacksRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + imageListStacks, err := utilityImageListStacksCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenImageListStacks(imageListStacks)) + d.Set("entry_count", imageListStacks.EntryCount) + return nil +} + +func DataSourceImageListStacks() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceImageListStacksRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceImageListStacksSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/image/flattens.go b/internal/service/cloudbroker/image/flattens.go index 1652240..8d7b705 100644 --- a/internal/service/cloudbroker/image/flattens.go +++ b/internal/service/cloudbroker/image/flattens.go @@ -4,154 +4,269 @@ import ( "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" ) func flattenImage(d *schema.ResourceData, img *image.RecordImage) { - d.Set("name", img.Name) - d.Set("drivers", img.Drivers) - d.Set("url", img.URL) - d.Set("gid", img.GID) + log.Debugf("flattenImageID %d", img.ID) d.Set("image_id", img.ID) + d.Set("unc_path", img.UNCPath) + d.Set("ckey", img.CKey) + d.Set("meta", flattens.FlattenMeta(img.Meta)) + d.Set("account_id", img.AccountID) + d.Set("acl", flattenAcl(img.ACL)) + d.Set("architecture", img.Architecture) d.Set("boot_type", img.BootType) - d.Set("image_type", img.Type) d.Set("bootable", img.Bootable) - d.Set("sep_id", img.SEPID) - d.Set("unc_path", img.UNCPath) - d.Set("link_to", img.LinkTo) - d.Set("status", img.Status) - d.Set("tech_status", img.TechStatus) - d.Set("version", img.Version) - d.Set("size", img.Size) - d.Set("enabled", img.Enabled) d.Set("computeci_id", img.ComputeCIID) - d.Set("pool_name", img.Pool) - d.Set("username", img.Username) - // d.Set("username_dl", img.UsernameDL) - d.Set("password", img.Password) - // d.Set("password_dl", img.PasswordDL) - d.Set("account_id", img.AccountID) + d.Set("deleted_time", img.DeletedTime) + d.Set("desc", img.Description) + d.Set("drivers", img.Drivers) + d.Set("enabled", img.Enabled) + d.Set("gid", img.GID) d.Set("guid", img.GUID) + d.Set("history", flattenHistory(img.History)) + d.Set("hot_resize", img.HotResize) + d.Set("last_modified", img.LastModified) + d.Set("link_to", img.LinkTo) d.Set("milestones", img.Milestones) + d.Set("name", img.Name) + d.Set("password", img.Password) + d.Set("pool_name", img.Pool) + d.Set("present_to", img.PresentTo) d.Set("provider_name", 
img.ProviderName) d.Set("purge_attempts", img.PurgeAttempts) d.Set("reference_id", img.ReferenceID) d.Set("res_id", img.ResID) d.Set("res_name", img.ResName) d.Set("rescuecd", img.RescueCD) - d.Set("architecture", img.Architecture) - d.Set("hot_resize", img.HotResize) - d.Set("history", flattenHistory(img.History)) - d.Set("last_modified", img.LastModified) - d.Set("meta", flattens.FlattenMeta(img.Meta)) - d.Set("desc", img.Description) + d.Set("sep_id", img.SEPID) d.Set("shared_with", img.SharedWith) + d.Set("size", img.Size) + d.Set("status", img.Status) + d.Set("tech_status", img.TechStatus) + d.Set("image_type", img.Type) + d.Set("url", img.URL) + d.Set("username", img.Username) + d.Set("version", img.Version) } -func flattenMeta(m []interface{}) []string { - output := []string{} - for _, item := range m { - switch d := item.(type) { - case string: - output = append(output, d) - case int: - output = append(output, strconv.Itoa(d)) - case int64: - output = append(output, strconv.FormatInt(d, 10)) - case float64: - output = append(output, strconv.FormatInt(int64(d), 10)) - default: - output = append(output, "") +func flattenAcl(acl image.ListACL) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(acl)) + for _, val := range acl { + temp := map[string]interface{}{ + "explicit": val.Explicit, + "guid": val.GUID, + "right": val.Right, + "status": val.Status, + "type": val.Type, + "user_group_id": val.UserGroupID, } + res = append(res, temp) } - return output + return res } -func flattenHistory(history []image.History) []map[string]interface{} { - temp := make([]map[string]interface{}, 0) +func flattenHistory(history image.ListHistory) []map[string]interface{} { + temp := make([]map[string]interface{}, 0, len(history)) for _, item := range history { t := map[string]interface{}{ "id": item.ID, "guid": item.GUID, "timestamp": item.Timestamp, } - temp = append(temp, t) } return temp } func flattenImageList(il *image.ListImages) []map[string]interface{} { - res := make([]map[string]interface{}, 0) + log.Debug("flattenImageList") + res := make([]map[string]interface{}, 0, len(il.Data)) for _, item := range il.Data { temp := map[string]interface{}{ - "name": item.Name, - "url": item.URL, - "gid": item.GID, - "guid": item.GUID, - "drivers": item.Drivers, - "image_id": item.ID, - "boot_type": item.BootType, - "bootable": item.Bootable, - "image_type": item.Type, - "status": item.Status, - "tech_status": item.TechStatus, - "version": item.Version, - "username": item.Username, - // "username_dl": item.UsernameDL, - "password": item.Password, - // "password_dl": item.PasswordDL, - "purge_attempts": item.PurgeAttempts, - "architecture": item.Architecture, + "image_id": item.ID, + "unc_path": item.UNCPath, + "ckey": item.CKey, + "meta": flattens.FlattenMeta(item.Meta), "account_id": item.AccountID, + "acl": flattenAcl(item.ACL), + "architecture": item.Architecture, + "boot_type": item.BootType, + "bootable": item.Bootable, "computeci_id": item.ComputeCIID, + "deleted_time": item.DeletedTime, + "desc": item.Description, + "drivers": item.Drivers, "enabled": item.Enabled, + "gid": item.GID, + "guid": item.GUID, + "history": flattenHistory(item.History), + "hot_resize": item.HotResize, + "last_modified": item.LastModified, + "link_to": item.LinkTo, + "milestones": item.Milestones, + "name": item.Name, + "password": item.Password, + "pool_name": item.Pool, + "present_to": item.PresentTo, + "provider_name": item.ProviderName, + "purge_attempts": item.PurgeAttempts, "reference_id": 
item.ReferenceID, "res_id": item.ResID, "res_name": item.ResName, "rescuecd": item.RescueCD, - "provider_name": item.ProviderName, - "milestones": item.Milestones, - "size": item.Size, "sep_id": item.SEPID, - "link_to": item.LinkTo, - "unc_path": item.UNCPath, - "pool_name": item.Pool, - "hot_resize": item.HotResize, - "history": flattenHistory(item.History), - "last_modified": item.LastModified, - "meta": flattenMeta(item.Meta), - "desc": item.Description, "shared_with": item.SharedWith, + "size": item.Size, + "status": item.Status, + "tech_status": item.TechStatus, + "image_type": item.Type, + "url": item.URL, + "username": item.Username, + "version": item.Version, + "virtual": item.Virtual, } res = append(res, temp) } return res } -func flattenImageListStacks(_ *schema.ResourceData, stack *image.ListStacks) []map[string]interface{} { - temp := make([]map[string]interface{}, 0) +func flattenEco(m interface{}) string { + log.Debug("flattenEco") + output := "" + switch d := m.(type) { + case string: + output = d + case int: + output = strconv.Itoa(d) + case int64: + output = strconv.FormatInt(d, 10) + case float64: + output = strconv.FormatInt(int64(d), 10) + default: + } + return output +} + +func flattenImageListStacks(stack *image.ListStacks) []map[string]interface{} { + log.Debug("flattenImageListStacks") + temp := make([]map[string]interface{}, 0, len(stack.Data)) for _, item := range stack.Data { t := map[string]interface{}{ - "api_url": item.APIURL, - "api_key": item.APIKey, - "app_id": item.AppID, - "desc": item.Description, - "drivers": item.Drivers, - "error": item.Error, - "guid": item.GUID, - "id": item.ID, - "images": item.Images, - "login": item.Login, - "name": item.Name, - "passwd": item.Password, - "reference_id": item.ReferenceID, - "status": item.Status, - "type": item.Type, + "ckey": item.CKey, + "meta": flattens.FlattenMeta(item.Meta), + "api_url": item.APIURL, + "api_key": item.APIKey, + "app_id": item.AppID, + "cpu_allocation_ratio": item.CPUAllocationRatio, + "desc": item.Description, + "descr": item.Descr, + "drivers": item.Drivers, + "eco": flattenEco(item.Eco), + "error": item.Error, + "gid": item.GID, + "guid": item.GUID, + "id": item.ID, + "images": item.Images, + "login": item.Login, + "mem_allocation_ratio": item.MemAllocationRatio, + "name": item.Name, + "packages": flattenPackages(item.Packages), + "passwd": item.Password, + "reference_id": item.ReferenceID, + "status": item.Status, + "type": item.Type, } - temp = append(temp, t) } return temp } + +func flattenPackages(pg image.Packages) []map[string]interface{} { + log.Debug("flattenPackages") + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "libvirt_bin": flattenLibvirtBin(pg), + "libvirt_daemon": flattenLibvirtDaemon(pg), + "lvm2_lockd": flattenLvm2Lockd(pg), + "openvswitch_common": flattenOpenvswitchCommon(pg), + "openvswitch_switch": flattenOpenvswitchSwitch(pg), + "qemu_system_x86": flattenQemuSystemX86(pg), + "sanlock": flattenSanlock(pg), + } + res = append(res, temp) + return res +} + +func flattenLibvirtBin(lb image.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": lb.LibvirtBin.InstalledSize, + "ver": lb.LibvirtBin.Ver, + } + res = append(res, temp) + return res +} + +func flattenLibvirtDaemon(ld image.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": ld.LibvirtDaemon.InstalledSize, + "ver": 
ld.LibvirtDaemon.Ver, + } + res = append(res, temp) + return res +} + +func flattenLvm2Lockd(ll image.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": ll.Lvm2Lockd.InstalledSize, + "ver": ll.Lvm2Lockd.Ver, + } + res = append(res, temp) + return res +} + +func flattenOpenvswitchCommon(oc image.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": oc.OpenvswitchCommon.InstalledSize, + "ver": oc.OpenvswitchCommon.Ver, + } + res = append(res, temp) + return res +} + +func flattenOpenvswitchSwitch(os image.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": os.OpenvswitchSwitch.InstalledSize, + "ver": os.OpenvswitchSwitch.Ver, + } + res = append(res, temp) + return res +} + +func flattenQemuSystemX86(qs image.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": qs.QemuSystemX86.InstalledSize, + "ver": qs.QemuSystemX86.Ver, + } + res = append(res, temp) + return res +} + +func flattenSanlock(sl image.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": sl.Sanlock.InstalledSize, + "ver": sl.Sanlock.Ver, + } + res = append(res, temp) + return res +} diff --git a/internal/service/cloudbroker/image/resource_cdrom_image.go b/internal/service/cloudbroker/image/resource_cdrom_image.go index a78a5ca..75f1143 100644 --- a/internal/service/cloudbroker/image/resource_cdrom_image.go +++ b/internal/service/cloudbroker/image/resource_cdrom_image.go @@ -1,382 +1,257 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package image - -import ( - "context" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" -) - -func resourceCDROMImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceCDROMImageCreate: called for image %s", d.Get("name").(string)) - c := m.(*controller.ControllerCfg) - req := image.CreateCDROMImageRequest{ - Name: d.Get("name").(string), - URL: d.Get("url").(string), - GID: uint64(d.Get("gid").(int)), - } - - drivers := []string{} - for _, driver := range d.Get("drivers").([]interface{}) { - drivers = append(drivers, driver.(string)) - } - - req.Drivers = drivers - - if username, ok := d.GetOk("username_dl"); ok { - req.UsernameDL = username.(string) - } - if password, ok := d.GetOk("password_dl"); ok { - req.PasswordDl = password.(string) - } - if accountId, ok := d.GetOk("account_id"); ok { - req.AccountID = uint64(accountId.(int)) - } - if sepId, ok := d.GetOk("sep_id"); ok { - req.SEPID = uint64(sepId.(int)) - } - if poolName, ok := d.GetOk("pool_name"); ok { - req.PoolName = poolName.(string) - } - if architecture, ok := d.GetOk("architecture"); ok { - req.Architecture = architecture.(string) - } - - imageId, err := c.CloudBroker().Image().CreateCDROMImage(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId(strconv.FormatUint(imageId, 10)) - d.Set("image_id", imageId) - - image, err := utilityImageCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - d.Set("bootable", image.Bootable) - - diagnostics := resourceImageRead(ctx, d, m) - if diagnostics != nil { - return diagnostics - } - - return nil -} - -func resourceCDROMImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceCDROMImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id()) - - imageData, err := utilityImageCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - c := m.(*controller.ControllerCfg) - req := image.DeleteCDROMImageRequest{ - ImageID: imageData.ID, - } - - if permanently, ok := d.GetOk("permanently"); ok { - req.Permanently = permanently.(bool) - } - - _, err = c.CloudBroker().Image().DeleteCDROMImage(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId("") - - return nil -} - -func resourceCDROMImageSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Name of the rescue disk", - }, - "url": { - Type: schema.TypeString, - Required: true, - Description: "URL where to download ISO from", - }, - "gid": { - Type: schema.TypeInt, - Required: true, - Description: "grid (platform) ID where this template should be create in", - }, - "boot_type": { - Type: schema.TypeString, - Computed: true, - Description: "Boot type of image bios or uefi", - }, - "image_type": { - Type: schema.TypeString, - Computed: true, - Description: "Image type linux, windows or other", - }, - "drivers": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - 
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]", - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "meta", - }, - "hot_resize": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Does this machine supports hot resize", - }, - // "username": { - // Type: schema.TypeString, - // Optional: true, - // Computed: true, - // Description: "Optional username for the image", - // }, - // "password": { - // Type: schema.TypeString, - // Optional: true, - // Computed: true, - // Description: "Optional password for the image", - // }, - "account_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "AccountId to make the image exclusive", - }, - "username_dl": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "username for upload binary media", - }, - "password_dl": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "password for upload binary media", - }, - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "storage endpoint provider ID", - }, - "pool_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "pool for image create", - }, - "architecture": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "binary architecture of this image, one of X86_64 of PPC64_LE", - }, - "image_id": { - Type: schema.TypeInt, - Computed: true, - Description: "image id", - }, - "permanently": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Whether to completely delete the image", - }, - "bootable": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Does this image boot OS", - }, - "unc_path": { - Type: schema.TypeString, - Computed: true, - Description: "unc path", - }, - "link_to": { - Type: schema.TypeInt, - Computed: true, - Description: "", - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: "status", - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - Description: "tech atatus", - }, - "version": { - Type: schema.TypeString, - Computed: true, - Description: "version", - }, - "size": { - Type: schema.TypeInt, - Computed: true, - Description: "image size", - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "computeci_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "provider_name": { - Type: schema.TypeString, - Computed: true, - }, - "purge_attempts": { - Type: schema.TypeInt, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_name": { - Type: schema.TypeString, - Computed: true, - }, - "rescuecd": { - Type: schema.TypeBool, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "shared_with": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "enabled_stacks": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "history": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "guid": 
{ - Type: schema.TypeString, - Computed: true, - }, - "id": { - Type: schema.TypeInt, - Computed: true, - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - } -} - -func ResourceCDROMImage() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - CreateContext: resourceCDROMImageCreate, - ReadContext: resourceImageRead, - UpdateContext: resourceImageUpdate, - DeleteContext: resourceCDROMImageDelete, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, - }, - - Schema: resourceCDROMImageSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package image + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" +) + +func resourceCDROMImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceCDROMImageCreate: called for image %s", d.Get("name").(string)) + c := m.(*controller.ControllerCfg) + req := image.CreateCDROMImageRequest{ + Name: d.Get("name").(string), + URL: d.Get("url").(string), + } + + if err := ic.ExistGID(ctx, uint64(d.Get("gid").(int)), c); err != nil { + return diag.FromErr(err) + } + + req.GID = uint64(d.Get("gid").(int)) + + drivers := []string{} + for _, driver := range d.Get("drivers").([]interface{}) { + drivers = append(drivers, driver.(string)) + } + req.Drivers = drivers + + if username, ok := d.GetOk("username_dl"); ok { + req.UsernameDL = username.(string) + } + if password, ok := d.GetOk("password_dl"); ok { + req.PasswordDl = password.(string) + } + if accountId, ok := d.GetOk("account_id"); ok { + req.AccountID = uint64(accountId.(int)) + } + if sepId, ok := d.GetOk("sep_id"); ok { + req.SEPID = uint64(sepId.(int)) + } + if poolName, ok := d.GetOk("pool_name"); ok { + req.PoolName = poolName.(string) + } + if architecture, ok := d.GetOk("architecture"); ok { + req.Architecture = architecture.(string) + } + + imageId, err := c.CloudBroker().Image().CreateCDROMImage(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.FormatUint(imageId, 10)) + + return resourceImageRead(ctx, d, m) +} + +func resourceCDROMImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceCDROMImageRead: called for %s id: %s", d.Get("name").(string), d.Id()) + + img, err := utilityImageCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + switch img.Status { + case status.Modeled: + return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status) + case status.Destroyed, status.Purged: + d.SetId("") + return diag.Errorf("The resource cannot be updated because it has been destroyed") + } + + flattenImage(d, img) + + return nil +} + +func resourceCDROMImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceCDROMImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id()) + + imageData, err := utilityImageCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + c := m.(*controller.ControllerCfg) + req := image.DeleteCDROMImageRequest{ + ImageID: imageData.ID, + } + + if permanently, ok := d.GetOk("permanently"); ok { + req.Permanently = permanently.(bool) + } + + _, err = c.CloudBroker().Image().DeleteCDROMImage(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceCDROMImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) 
diag.Diagnostics { + log.Debugf("resourceCDROMImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id()) + + img, err := utilityImageCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + switch img.Status { + case status.Modeled: + return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status) + case status.Destroyed, status.Purged: + d.SetId("") + return diag.Errorf("The resource cannot be updated because it has been destroyed") + } + + if d.HasChange("enabled") { + err := resourceImageChangeEnabled(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("shared_with") { + err := resourceImageShare(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("computeci_id") { + err := resourceImageChangeComputeci(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("enabled_stacks") { + err := resourceImageUpdateNodes(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChanges("name", "password_dl", "username_dl", "account_id", "bootable", "hot_resize") { + err := resourceImageCDROMEdit(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + return nil +} + +func resourceImageCDROMEdit(ctx context.Context, d *schema.ResourceData, m interface{}) error { + log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id()) + c := m.(*controller.ControllerCfg) + req := image.EditRequest{} + + req.ImageID = uint64(d.Get("image_id").(int)) + if d.HasChange("name") { + req.Name = d.Get("name").(string) + } + if d.HasChange("username_dl") { + req.Username = d.Get("username_dl").(string) + } + if d.HasChange("password_dl") { + req.Password = d.Get("password_dl").(string) + } + if d.HasChange("account_id") { + req.AccountID = uint64(d.Get("account_id").(int)) + } + if d.HasChange("bootable") { + req.Bootable = d.Get("bootable").(bool) + } + if d.HasChange("hot_resize") { + req.HotResize = d.Get("hot_resize").(bool) + } + + _, err := c.CloudBroker().Image().Edit(ctx, req) + if err != nil { + return err + } + return nil +} + +func ResourceCDROMImage() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceCDROMImageCreate, + ReadContext: resourceCDROMImageRead, + UpdateContext: resourceCDROMImageUpdate, + DeleteContext: resourceCDROMImageDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout60s, + Read: &constants.Timeout30s, + Update: &constants.Timeout60s, + Delete: &constants.Timeout60s, + Default: &constants.Timeout60s, + }, + + Schema: resourceCDROMImageSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/image/resource_image.go b/internal/service/cloudbroker/image/resource_image.go index 04011b6..8dce95a 100644 --- a/internal/service/cloudbroker/image/resource_image.go +++ b/internal/service/cloudbroker/image/resource_image.go @@ -1,611 +1,341 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
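
The Modeled/Destroyed/Purged guard in resourceCDROMImageRead and resourceCDROMImageUpdate above is repeated verbatim in the read and update paths of the regular and virtual image resources later in this patch. A shared helper could remove the duplication; this is a hypothetical refactoring sketch using the same imports as the file above, not something the patch itself introduces:

func checkImageStatus(d *schema.ResourceData, img *image.RecordImage) diag.Diagnostics {
    switch img.Status {
    case status.Modeled:
        return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
    case status.Destroyed, status.Purged:
        // Drop the resource from state once the platform reports it gone.
        d.SetId("")
        return diag.Errorf("The resource cannot be updated because it has been destroyed")
    }
    return nil
}

// Callers would then begin with:
// if diags := checkImageStatus(d, img); diags != nil {
//     return diags
// }
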
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package image - -import ( - "context" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" -) - -func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string)) - - c := m.(*controller.ControllerCfg) - req := image.CreateRequest{ - Name: d.Get("name").(string), - URL: d.Get("url").(string), - GID: uint64(d.Get("gid").(int)), - BootType: d.Get("boot_type").(string), - ImageType: d.Get("image_type").(string), - } - - drivers := []string{} - for _, driver := range d.Get("drivers").([]interface{}) { - drivers = append(drivers, driver.(string)) - } - - req.Drivers = drivers - - if hotresize, ok := d.GetOk("hot_resize"); ok { - req.HotResize = hotresize.(bool) - } - if username, ok := d.GetOk("username"); ok { - req.Username = username.(string) - } - if password, ok := d.GetOk("password"); ok { - req.Password = password.(string) - } - if accountId, ok := d.GetOk("account_id"); ok { - req.AccountID = uint64(accountId.(int)) - } - if usernameDL, ok := d.GetOk("username_dl"); ok { - req.UsernameDL = usernameDL.(string) - } - if passwordDL, ok := d.GetOk("password_dl"); ok { - req.PasswordDL = passwordDL.(string) - } - if sepId, ok := d.GetOk("sep_id"); ok { - req.SEPID = uint64(sepId.(int)) - } - if poolName, ok := d.GetOk("pool_name"); ok { - req.PoolName = poolName.(string) - } - if architecture, ok := d.GetOk("architecture"); ok { - req.Architecture = architecture.(string) - } - - imageId, err := c.CloudBroker().Image().CreateImage(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId(strconv.FormatUint(imageId, 10)) - d.Set("image_id", imageId) - - return resourceImageRead(ctx, d, m) -} - -func resourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceImageRead: called for %s id: %s", d.Get("name").(string), d.Id()) - - image, err := utilityImageCheckPresence(ctx, d, m) - if err != nil { - d.SetId("") - return diag.FromErr(err) - } - - flattenImage(d, image) - - return nil -} - -func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id()) - - _, err := utilityImageCheckPresence(ctx, d, m) - if err != nil { - return 
diag.FromErr(err) - } - - c := m.(*controller.ControllerCfg) - req := image.DeleteRequest{ - ImageID: uint64(d.Get("image_id").(int)), - } - - if reason, ok := d.GetOk("reason"); ok { - req.Reason = reason.(string) - } - if permanently, ok := d.GetOk("permanently"); ok { - req.Permanently = permanently.(bool) - } - - _, err = c.CloudBroker().Image().Delete(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId("") - - return nil -} - -func resourceImageEditName(ctx context.Context, d *schema.ResourceData, m interface{}) error { - log.Debugf("resourceImageEditName: called for %s, id: %s", d.Get("name").(string), d.Id()) - c := m.(*controller.ControllerCfg) - req := image.RenameRequest{ - ImageID: uint64(d.Get("image_id").(int)), - Name: d.Get("name").(string), - } - - _, err := c.CloudBroker().Image().Rename(ctx, req) - if err != nil { - return err - } - - return nil -} - -func resourceImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id()) - c := m.(*controller.ControllerCfg) - - if d.HasChange("enabled") { - err := resourceImageChangeEnabled(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("name") { - err := resourceImageEditName(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("shared_with") { - err := resourceImageShare(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("computeci_id") { - err := resourceImageChangeComputeci(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("enabled_stacks") { - err := resourceImageUpdateNodes(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("link_to") { - err := resourceImageLink(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChanges("name", "username", "password", "account_id", "bootable", "hot_resize") { - req := image.EditRequest{} - - req.ImageID = uint64(d.Get("image_id").(int)) - req.Name = d.Get("name").(string) - - req.Username = d.Get("username").(string) - req.Password = d.Get("password").(string) - req.AccountID = uint64(d.Get("account_id").(int)) - req.Bootable = d.Get("bootable").(bool) - req.HotResize = d.Get("hot_resize").(bool) - - _, err := c.CloudBroker().Image().Edit(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - - return nil -} - -func resourceImageChangeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error { - c := m.(*controller.ControllerCfg) - imageId := uint64(d.Get("image_id").(int)) - - if d.Get("enabled").(bool) { - req := image.EnableRequest{ - ImageID: imageId, - } - - _, err := c.CloudBroker().Image().Enable(ctx, req) - if err != nil { - return err - } - } else { - req := image.DisableRequest{ - ImageID: imageId, - } - - _, err := c.CloudBroker().Image().Disable(ctx, req) - if err != nil { - return err - } - } - - return nil -} - -func resourceImageLink(ctx context.Context, d *schema.ResourceData, m interface{}) error { - log.Debugf("resourceVirtualImageLink: called for %s, id: %s", d.Get("name").(string), d.Id()) - c := m.(*controller.ControllerCfg) - req := image.LinkRequest{ - ImageID: uint64(d.Get("image_id").(int)), - TargetID: uint64(d.Get("link_to").(int)), - } - - _, err := c.CloudBroker().Image().Link(ctx, req) - if err != nil { - return err - } - - return nil -} - -func resourceImageShare(ctx context.Context, d *schema.ResourceData, m 
interface{}) error { - log.Debugf("resourceImageShare: called for %s, id: %s", d.Get("name").(string), d.Id()) - c := m.(*controller.ControllerCfg) - req := image.ShareRequest{ - ImageId: uint64(d.Get("image_id").(int)), - } - accIds := []uint64{} - for _, accId := range d.Get("shared_with").([]interface{}) { - accIds = append(accIds, uint64(accId.(int))) - } - req.AccountIDs = accIds - - _, err := c.CloudBroker().Image().Share(ctx, req) - if err != nil { - return err - } - - return nil -} - -func resourceImageChangeComputeci(ctx context.Context, d *schema.ResourceData, m interface{}) error { - c := m.(*controller.ControllerCfg) - - imageId := uint64(d.Get("image_id").(int)) - computeci := uint64(d.Get("computeci_id").(int)) - - if computeci == 0 { - req := image.ComputeCIUnsetRequest{ - ImageID: imageId, - } - - _, err := c.CloudBroker().Image().ComputeCIUnset(ctx, req) - if err != nil { - return err - } - } else { - req := image.ComputeCISetRequest{ - ImageID: imageId, - ComputeCIID: computeci, - } - - _, err := c.CloudBroker().Image().ComputeCISet(ctx, req) - if err != nil { - return err - } - } - - return nil -} - -func resourceImageUpdateNodes(ctx context.Context, d *schema.ResourceData, m interface{}) error { - log.Debugf("resourceImageUpdateNodes: called for %s, id: %s", d.Get("name").(string), d.Id()) - c := m.(*controller.ControllerCfg) - req := image.UpdateNodesRequest{ - ImageID: uint64(d.Get("image_id").(int)), - } - enabledStacks := []uint64{} - for _, stack := range d.Get("enabled_stacks").([]interface{}) { - enabledStacks = append(enabledStacks, uint64(stack.(int))) - } - - req.EnabledStacks = enabledStacks - - _, err := c.CloudBroker().Image().UpdateNodes(ctx, req) - if err != nil { - return err - } - - return nil -} - -func resourceImageSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Name of the rescue disk", - }, - "url": { - Type: schema.TypeString, - Required: true, - Description: "URL where to download media from", - }, - "gid": { - Type: schema.TypeInt, - Required: true, - Description: "grid (platform) ID where this template should be create in", - }, - "boot_type": { - Type: schema.TypeString, - Required: true, - Description: "Boot type of image bios or uefi", - }, - "image_type": { - Type: schema.TypeString, - Required: true, - Description: "Image type linux, windows or other", - }, - "drivers": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "List of types of compute suitable for image. 
Example: [ \"KVM_X86\" ]", - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "meta", - }, - "hot_resize": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Does this machine supports hot resize", - }, - "username": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Optional username for the image", - }, - "password": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Optional password for the image", - }, - "account_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "AccountId to make the image exclusive", - }, - "username_dl": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "username for upload binary media", - }, - "password_dl": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "password for upload binary media", - }, - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "storage endpoint provider ID", - }, - "pool_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "pool for image create", - }, - "architecture": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "binary architecture of this image, one of X86_64 of PPC64_LE", - }, - "image_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "image id", - }, - "permanently": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Whether to completely delete the image", - }, - "bootable": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Does this image boot OS", - }, - "unc_path": { - Type: schema.TypeString, - Computed: true, - Description: "unc path", - }, - "link_to": { - Type: schema.TypeInt, - Computed: true, - Description: "", - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: "status", - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - Description: "tech atatus", - }, - "version": { - Type: schema.TypeString, - Computed: true, - Description: "version", - }, - "size": { - Type: schema.TypeInt, - Computed: true, - Description: "image size", - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "computeci_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "provider_name": { - Type: schema.TypeString, - Computed: true, - }, - "purge_attempts": { - Type: schema.TypeInt, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_name": { - Type: schema.TypeString, - Computed: true, - }, - "rescuecd": { - Type: schema.TypeBool, - Computed: true, - }, - "reason": { - Type: schema.TypeString, - Optional: true, - }, - "last_modified": { - Type: schema.TypeInt, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "shared_with": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "sync": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Create image from a media identified by URL (in synchronous mode)", - }, - "enabled_stacks": { - Type: 
schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "history": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "id": { - Type: schema.TypeInt, - Computed: true, - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - } -} - -func ResourceImage() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - CreateContext: resourceImageCreate, - ReadContext: resourceImageRead, - UpdateContext: resourceImageUpdate, - DeleteContext: resourceImageDelete, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, - }, - - Schema: resourceImageSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package image + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" +) + +func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string)) + + c := m.(*controller.ControllerCfg) + + syncMode := d.Get("sync_mode").(bool) + var imageId uint64 + + if syncMode { + req, err := SyncCreateRequest(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + imageId, err = c.CloudBroker().Image().SyncCreate(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } else { + req, err := CreateRequest(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + imageId, err = c.CloudBroker().Image().CreateImage(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + + d.SetId(strconv.FormatUint(imageId, 10)) + + return resourceImageRead(ctx, d, m) +} + +func resourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceImageRead: called for %s id: %s", d.Get("name").(string), d.Id()) + + img, err := utilityImageCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + switch img.Status { + case status.Modeled: + return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status) + case status.Destroyed, status.Purged: + d.SetId("") + return diag.Errorf("The resource cannot be updated because it has been destroyed") + } + + flattenImage(d, img) + + return nil +} + +func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id()) + + _, err := utilityImageCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + c := m.(*controller.ControllerCfg) + req := image.DeleteRequest{ + ImageID: uint64(d.Get("image_id").(int)), + } + + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + if permanently, ok := d.GetOk("permanently"); ok { + req.Permanently = permanently.(bool) + } + + _, err = c.CloudBroker().Image().Delete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id()) + + img, err := utilityImageCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + switch img.Status { + case status.Modeled: + return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status) + case status.Destroyed, status.Purged: + d.SetId("") + return diag.Errorf("The resource cannot be updated because it has been destroyed") + } + + if d.HasChange("enabled") { + err := resourceImageChangeEnabled(ctx, d, m) + if err != nil { + return diag.FromErr(err) + 
} + } + + if d.HasChange("shared_with") { + err := resourceImageShare(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("computeci_id") { + err := resourceImageChangeComputeci(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("enabled_stacks") { + err := resourceImageUpdateNodes(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChanges("name", "username", "password", "account_id", "bootable", "hot_resize") { + err := resourceImageEdit(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + return nil +} + +func resourceImageChangeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + imageId := uint64(d.Get("image_id").(int)) + + if d.Get("enabled").(bool) { + req := image.EnableRequest{ + ImageID: imageId, + } + + _, err := c.CloudBroker().Image().Enable(ctx, req) + if err != nil { + return err + } + } else { + req := image.DisableRequest{ + ImageID: imageId, + } + + _, err := c.CloudBroker().Image().Disable(ctx, req) + if err != nil { + return err + } + } + + return nil +} + +func resourceImageShare(ctx context.Context, d *schema.ResourceData, m interface{}) error { + log.Debugf("resourceImageShare: called for %s, id: %s", d.Get("name").(string), d.Id()) + c := m.(*controller.ControllerCfg) + req := image.ShareRequest{ + ImageId: uint64(d.Get("image_id").(int)), + } + accIds := []uint64{} + for _, accId := range d.Get("shared_with").([]interface{}) { + accIds = append(accIds, uint64(accId.(int))) + } + req.AccountIDs = accIds + + _, err := c.CloudBroker().Image().Share(ctx, req) + if err != nil { + return err + } + + return nil +} + +func resourceImageChangeComputeci(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + imageId := uint64(d.Get("image_id").(int)) + computeci := uint64(d.Get("computeci_id").(int)) + + if computeci == 0 { + req := image.ComputeCIUnsetRequest{ + ImageID: imageId, + } + + _, err := c.CloudBroker().Image().ComputeCIUnset(ctx, req) + if err != nil { + return err + } + } else { + req := image.ComputeCISetRequest{ + ImageID: imageId, + ComputeCIID: computeci, + } + + _, err := c.CloudBroker().Image().ComputeCISet(ctx, req) + if err != nil { + return err + } + } + + return nil +} + +func resourceImageUpdateNodes(ctx context.Context, d *schema.ResourceData, m interface{}) error { + log.Debugf("resourceImageUpdateNodes: called for %s, id: %s", d.Get("name").(string), d.Id()) + c := m.(*controller.ControllerCfg) + req := image.UpdateNodesRequest{ + ImageID: uint64(d.Get("image_id").(int)), + } + enabledStacks := []uint64{} + for _, stack := range d.Get("enabled_stacks").([]interface{}) { + enabledStacks = append(enabledStacks, uint64(stack.(int))) + } + + req.EnabledStacks = enabledStacks + + _, err := c.CloudBroker().Image().UpdateNodes(ctx, req) + if err != nil { + return err + } + + return nil +} + +func resourceImageEdit(ctx context.Context, d *schema.ResourceData, m interface{}) error { + log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id()) + c := m.(*controller.ControllerCfg) + req := image.EditRequest{} + + req.ImageID = uint64(d.Get("image_id").(int)) + if d.HasChange("name") { + req.Name = d.Get("name").(string) + } + if d.HasChange("username") { + req.Username = d.Get("username").(string) + } + if d.HasChange("password") { + req.Password = d.Get("password").(string) + } + if d.HasChange("account_id") { + 
req.AccountID = uint64(d.Get("account_id").(int)) + } + if d.HasChange("bootable") { + req.Bootable = d.Get("bootable").(bool) + } + if d.HasChange("hot_resize") { + req.HotResize = d.Get("hot_resize").(bool) + } + + _, err := c.CloudBroker().Image().Edit(ctx, req) + if err != nil { + return err + } + return nil +} + +func ResourceImage() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceImageCreate, + ReadContext: resourceImageRead, + UpdateContext: resourceImageUpdate, + DeleteContext: resourceImageDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout60s, + Read: &constants.Timeout30s, + Update: &constants.Timeout60s, + Delete: &constants.Timeout60s, + Default: &constants.Timeout60s, + }, + + Schema: resourceImageSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/image/resource_virtual_image.go b/internal/service/cloudbroker/image/resource_virtual_image.go index fc965f0..0a3cfde 100644 --- a/internal/service/cloudbroker/image/resource_virtual_image.go +++ b/internal/service/cloudbroker/image/resource_virtual_image.go @@ -1,331 +1,220 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
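
resourceImageCreate above now branches on sync_mode and delegates request building to CreateRequest and SyncCreateRequest, which this patch defines in the image package's utility code rather than in this hunk. For orientation only, CreateRequest presumably mirrors the inline construction the old create function used; the sketch below is an assumption about its shape (the real helper may also run ic.* validations through ctx and m), not a copy of the actual implementation:

func CreateRequest(ctx context.Context, d *schema.ResourceData, m interface{}) (image.CreateRequest, error) {
    req := image.CreateRequest{
        Name:      d.Get("name").(string),
        URL:       d.Get("url").(string),
        GID:       uint64(d.Get("gid").(int)),
        BootType:  d.Get("boot_type").(string),
        ImageType: d.Get("image_type").(string),
    }

    // "drivers" is a required list of strings in the schema.
    for _, driver := range d.Get("drivers").([]interface{}) {
        req.Drivers = append(req.Drivers, driver.(string))
    }

    // Optional attributes are copied only when set in the configuration.
    if accountId, ok := d.GetOk("account_id"); ok {
        req.AccountID = uint64(accountId.(int))
    }
    if sepId, ok := d.GetOk("sep_id"); ok {
        req.SEPID = uint64(sepId.(int))
    }

    return req, nil
}
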
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package image - -import ( - "context" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" -) - -func resourceVirtualImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string)) - - c := m.(*controller.ControllerCfg) - req := image.CreateVirtualRequest{ - Name: d.Get("name").(string), - TargetID: uint64(d.Get("target_id").(int)), - } - - imageId, err := c.CloudBroker().Image().CreateVirtual(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId(strconv.FormatUint(imageId, 10)) - d.Set("image_id", imageId) - - return resourceImageRead(ctx, d, m) -} - -func resourceVirtualImageSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "name of the virtual image to create", - }, - "target_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of real image to link this virtual image to upon creation", - }, - "history": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "id": { - Type: schema.TypeInt, - Computed: true, - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "url": { - Type: schema.TypeString, - Computed: true, - Description: "URL where to download media from", - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - Description: "grid (platform) ID where this template should be create in", - }, - "boot_type": { - Type: schema.TypeString, - Computed: true, - Description: "Boot type of image bios or uefi", - }, - "image_type": { - Type: schema.TypeString, - Computed: true, - Description: "Image type linux, windows or other", - }, - "drivers": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "List of types of compute suitable for image. 
Example: [ \"KVM_X86\" ]", - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "meta", - }, - "hot_resize": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Does this machine supports hot resize", - }, - "username": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Optional username for the image", - }, - "password": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Optional password for the image", - }, - "account_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "AccountId to make the image exclusive", - }, - "username_dl": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "username for upload binary media", - }, - "password_dl": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "password for upload binary media", - }, - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "storage endpoint provider ID", - }, - "pool_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "pool for image create", - }, - "architecture": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "binary architecture of this image, one of X86_64 of PPC64_LE", - }, - "image_id": { - Type: schema.TypeInt, - Computed: true, - Description: "image id", - }, - "permanently": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Whether to completely delete the image", - }, - "bootable": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "Does this image boot OS", - }, - "unc_path": { - Type: schema.TypeString, - Computed: true, - Description: "unc path", - }, - "link_to": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "", - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: "status", - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - Description: "tech atatus", - }, - "version": { - Type: schema.TypeString, - Computed: true, - Description: "version", - }, - "size": { - Type: schema.TypeInt, - Computed: true, - Description: "image size", - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "computeci_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "provider_name": { - Type: schema.TypeString, - Computed: true, - }, - "purge_attempts": { - Type: schema.TypeInt, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_id": { - Type: schema.TypeString, - Computed: true, - }, - "res_name": { - Type: schema.TypeString, - Computed: true, - }, - "rescuecd": { - Type: schema.TypeBool, - Computed: true, - }, - "reason": { - Type: schema.TypeString, - Optional: true, - }, - "last_modified": { - Type: schema.TypeInt, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "enabled_stacks": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "shared_with": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - } -} - -func ResourceVirtualImage() *schema.Resource { - return 
&schema.Resource{ - SchemaVersion: 1, - - CreateContext: resourceVirtualImageCreate, - ReadContext: resourceImageRead, - UpdateContext: resourceImageUpdate, - DeleteContext: resourceImageDelete, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, - }, - - Schema: resourceVirtualImageSchemaMake(), - } -} +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package image + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" +) + +func resourceVirtualImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string)) + + c := m.(*controller.ControllerCfg) + req := image.CreateVirtualRequest{ + Name: d.Get("name").(string), + TargetID: uint64(d.Get("link_to").(int)), + } + + imageId, err := c.CloudBroker().Image().CreateVirtual(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(strconv.FormatUint(imageId, 10)) + + return resourceImageRead(ctx, d, m) +} + +func resourceVirtualImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceVirtualImageRead: called for %s id: %s", d.Get("name").(string), d.Id()) + + img, err := utilityImageCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + switch img.Status { + case status.Modeled: + return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status) + case status.Destroyed, status.Purged: + d.SetId("") + return diag.Errorf("The resource cannot be updated because it has been destroyed") + } + + flattenImage(d, img) + + return nil +} + +func resourceVirtualImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceVirtualImageDelete: called for %s, id: %s", 
d.Get("name").(string), d.Id()) + + _, err := utilityImageCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + c := m.(*controller.ControllerCfg) + req := image.DeleteRequest{ + ImageID: uint64(d.Get("image_id").(int)), + } + + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + if permanently, ok := d.GetOk("permanently"); ok { + req.Permanently = permanently.(bool) + } + + _, err = c.CloudBroker().Image().Delete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceVirtualImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceVirtualImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id()) + + img, err := utilityImageCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + switch img.Status { + case status.Modeled: + return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status) + case status.Destroyed, status.Purged: + d.SetId("") + return diag.Errorf("The resource cannot be updated because it has been destroyed") + } + + if d.HasChange("enabled") { + err := resourceImageChangeEnabled(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("shared_with") { + err := resourceImageShare(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("computeci_id") { + err := resourceImageChangeComputeci(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("enabled_stacks") { + err := resourceImageUpdateNodes(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChanges("name", "username", "password", "account_id", "bootable", "hot_resize") { + err := resourceImageEdit(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("link_to") { + err := resourceImageLink(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + return nil +} + +func resourceImageLink(ctx context.Context, d *schema.ResourceData, m interface{}) error { + log.Debugf("resourceVirtualImageLink: called for %s, id: %s", d.Get("name").(string), d.Id()) + c := m.(*controller.ControllerCfg) + req := image.LinkRequest{ + ImageID: uint64(d.Get("image_id").(int)), + TargetID: uint64(d.Get("link_to").(int)), + } + + _, err := c.CloudBroker().Image().Link(ctx, req) + if err != nil { + return err + } + + return nil +} + +func ResourceVirtualImage() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceVirtualImageCreate, + ReadContext: resourceVirtualImageRead, + UpdateContext: resourceVirtualImageUpdate, + DeleteContext: resourceVirtualImageDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout60s, + Read: &constants.Timeout30s, + Update: &constants.Timeout60s, + Delete: &constants.Timeout60s, + Default: &constants.Timeout60s, + }, + + Schema: resourceVirtualImageSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/image/utility_image.go b/internal/service/cloudbroker/image/utility_image.go index 2a210e5..53d3d44 100644 --- a/internal/service/cloudbroker/image/utility_image.go +++ b/internal/service/cloudbroker/image/utility_image.go @@ -1,60 +1,60 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
-Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package image - -import ( - "context" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" -) - -func utilityImageCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.RecordImage, error) { - c := m.(*controller.ControllerCfg) - req := image.GetRequest{} - - if (strconv.Itoa(d.Get("image_id").(int))) != "0" { - req.ImageID = uint64(d.Get("image_id").(int)) - } else { - id, _ := strconv.ParseUint(d.Id(), 10, 64) - req.ImageID = id - } - - image, err := c.CloudBroker().Image().Get(ctx, req) - if err != nil { - return nil, err - } - - return image, nil -} +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package image + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityImageCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.RecordImage, error) { + c := m.(*controller.ControllerCfg) + req := image.GetRequest{} + + if d.Id() != "" { + id, _ := strconv.ParseUint(d.Id(), 10, 64) + req.ImageID = id + } else { + req.ImageID = uint64(d.Get("image_id").(int)) + } + + image, err := c.CloudBroker().Image().Get(ctx, req) + if err != nil { + return nil, err + } + + return image, nil +} diff --git a/internal/service/cloudbroker/image/utility_image_list.go b/internal/service/cloudbroker/image/utility_image_list.go index 30e8551..5ef32be 100644 --- a/internal/service/cloudbroker/image/utility_image_list.go +++ b/internal/service/cloudbroker/image/utility_image_list.go @@ -1,66 +1,98 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package image - -import ( - "context" - - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.ListImages, error) { - c := m.(*controller.ControllerCfg) - req := image.ListRequest{} - - if sepId, ok := d.GetOk("sep_id"); ok { - req.SepID = uint64(sepId.(int)) - } - - if page, ok := d.GetOk("page"); ok { - req.Page = uint64(page.(int)) - } - if size, ok := d.GetOk("size"); ok { - req.Size = uint64(size.(int)) - } - - log.Debugf("utilityImageListCheckPresence: load image list") - imageList, err := c.CloudBroker().Image().List(ctx, req) - if err != nil { - return nil, err - } - - return imageList, nil -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
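
The new utilityImageCheckPresence resolves the image primarily from d.Id(), which is what Create sets and terraform import supplies, and only falls back to the image_id attribute; the old version did the opposite. Note that the ParseUint error is discarded, so a non-numeric ID would silently become a lookup for image 0. A slightly stricter variant is sketched below purely as an assumption about desirable behaviour (fmt would have to be added to the file's imports):

func imageIDFromState(d *schema.ResourceData) (uint64, error) {
    if d.Id() != "" {
        id, err := strconv.ParseUint(d.Id(), 10, 64)
        if err != nil {
            return 0, fmt.Errorf("cannot parse resource id %q: %w", d.Id(), err)
        }
        return id, nil
    }
    // Fall back to the explicitly configured attribute.
    return uint64(d.Get("image_id").(int)), nil
}
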
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package image + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.ListImages, error) { + c := m.(*controller.ControllerCfg) + req := image.ListRequest{} + + if sepId, ok := d.GetOk("sep_id"); ok { + req.SepID = uint64(sepId.(int)) + } + if byId, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(byId.(int)) + } + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + if status, ok := d.GetOk("status"); ok { + req.Status = status.(string) + } + if architecture, ok := d.GetOk("architecture"); ok { + req.Architecture = architecture.(string) + } + if typeImage, ok := d.GetOk("type_image"); ok { + req.TypeImage = typeImage.(string) + } + if imageSize, ok := d.GetOk("image_size"); ok { + req.ImageSize = uint64(imageSize.(int)) + } + if sepName, ok := d.GetOk("sep_name"); ok { + req.SEPName = sepName.(string) + } + if pool, ok := d.GetOk("pool"); ok { + req.Pool = pool.(string) + } + if public, ok := d.GetOk("public"); ok { + req.Public = public.(bool) + } + if hotResize, ok := d.GetOk("hot_resize"); ok { + req.HotResize = hotResize.(bool) + } + if bootable, ok := d.GetOk("bootable"); ok { + req.Bootable = bootable.(bool) + } + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + + log.Debugf("utilityImageListCheckPresence: load image list") + imageList, err := c.CloudBroker().Image().List(ctx, req) + if err != nil { + return nil, err + } + + return imageList, nil +} diff --git a/internal/service/cloudbroker/image/utility_image_list_stacks.go b/internal/service/cloudbroker/image/utility_image_list_stacks.go index 8ee11fe..0ae13e0 100644 --- a/internal/service/cloudbroker/image/utility_image_list_stacks.go +++ b/internal/service/cloudbroker/image/utility_image_list_stacks.go @@ -1,57 +1,73 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
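
utilityImageListCheckPresence above forwards every data-source filter through d.GetOk. For the boolean filters (public, hot_resize, bootable) that means only true can be expressed: GetOk reports ok = false for a zero value, so public = false in a configuration is indistinguishable from leaving the attribute unset. Should filtering on false ever be required, the deprecated but still available GetOkExists accessor is one option; shown here as an alternative pattern, not as what the patch does:

// Alternative handling of the "public" filter that also forwards an
// explicit false; the same pattern would apply to hot_resize and bootable.
if public, ok := d.GetOkExists("public"); ok {
    req.Public = public.(bool)
}
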
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package image - -import ( - "context" - - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func utilityImageListStacksCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.ListStacks, error) { - c := m.(*controller.ControllerCfg) - req := image.ListStacksRequest{ - ImageID: uint64(d.Get("image_id").(int)), - } - - log.Debugf("utilityImageListStacksCheckPresence: load image list") - imageListStacks, err := c.CloudBroker().Image().ListStacks(ctx, req) - if err != nil { - return nil, err - } - - return imageListStacks, nil -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
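Each optional filter consumed by utilityImageListCheckPresence above implies a matching Optional attribute in the data-source schema. A minimal sketch of a few such entries, for illustration only (the actual schema is defined elsewhere in this patch and the function name here is assumed):

// imageListFilterSchemaSketch is illustrative only.
func imageListFilterSchemaSketch() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"name": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Filter by image name.",
		},
		"status": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Filter by image status.",
		},
		"page": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Page number.",
		},
		"size": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "Page size.",
		},
	}
}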
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package image + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityImageListStacksCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.ListStacks, error) { + c := m.(*controller.ControllerCfg) + req := image.ListStacksRequest{ + ImageID: uint64(d.Get("image_id").(int)), + } + + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + if status, ok := d.GetOk("status"); ok { + req.Status = status.(string) + } + if typeImage, ok := d.GetOk("type_image"); ok { + req.Type = typeImage.(string) + } + + log.Debugf("utilityImageListStacksCheckPresence: load image list") + imageListStacks, err := c.CloudBroker().Image().ListStacks(ctx, req) + if err != nil { + return nil, err + } + + return imageListStacks, nil +} diff --git a/internal/service/cloudbroker/k8s/resource_k8s_wg.go b/internal/service/cloudbroker/k8s/resource_k8s_wg.go index 83d6db7..862e84d 100644 --- a/internal/service/cloudbroker/k8s/resource_k8s_wg.go +++ b/internal/service/cloudbroker/k8s/resource_k8s_wg.go @@ -33,26 +33,65 @@ package k8s import ( "context" + "fmt" + "strconv" + "strings" + "time" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/tasks" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" ) func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceK8sWgCreate: called with k8s id %d", d.Get("k8s_id").(int)) - c := m.(*controller.ControllerCfg) + + if err := ic.ExistK8s(ctx, uint64(d.Get("k8s_id").(int)), c); err != nil { + return diag.FromErr(err) + } + req := k8s.WorkersGroupAddRequest{ - K8SID: uint64(d.Get("k8s_id").(int)), - Name: d.Get("name").(string), - WorkerNum: uint64(d.Get("num").(int)), - WorkerCPU: uint64(d.Get("cpu").(int)), - WorkerRAM: uint64(d.Get("ram").(int)), - WorkerDisk: uint64(d.Get("disk").(int)), + K8SID: uint64(d.Get("k8s_id").(int)), + Name: d.Get("name").(string), + WorkerNum: uint64(d.Get("num").(int)), + WorkerCPU: uint64(d.Get("cpu").(int)), + WorkerRAM: uint64(d.Get("ram").(int)), + WorkerSEPID: uint64(d.Get("worker_sep_id").(int)), + WorkerSEPPool: d.Get("worker_sep_pool").(string), + } + + if d.Get("disk") == nil { + req.WorkerDisk = 0 + } else { + req.WorkerDisk = uint64(d.Get("disk").(int)) + } + + labels, _ := d.Get("labels").([]interface{}) + for _, label := range labels { + if !strings.HasPrefix(label.(string), "workersGroupName") { + req.Labels = append(req.Labels, label.(string)) + } + } + + annotations, _ := d.Get("annotations").([]interface{}) + for _, annotation := range annotations { + 
req.Annotations = append(req.Annotations, annotation.(string)) + } + + taints, _ := d.Get("taints").([]interface{}) + for _, taint := range taints { + req.Taints = append(req.Taints, taint.(string)) + } + + if cloudInit, ok := d.GetOk("cloud_init"); ok { + req.UserData = cloudInit.(string) } resp, err := c.CloudBroker().K8S().WorkersGroupAdd(ctx, req) @@ -60,9 +99,31 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac return diag.FromErr(err) } - d.SetId(resp) + taskReq := tasks.GetRequest{ + AuditID: strings.Trim(resp, `"`), + } - return nil + for { + task, err := c.CloudBroker().Tasks().Get(ctx, taskReq) + if err != nil { + return diag.FromErr(err) + } + + log.Debugf("resourceK8sWgCreate: instance creating - %s", task.Stage) + + if task.Completed { + if task.Error != "" { + return diag.FromErr(fmt.Errorf("cannot create k8sWg instance: %v", task.Error)) + } + + d.SetId(strconv.Itoa(int(task.Result))) + break + } + + time.Sleep(time.Second * 20) + } + + return resourceK8sWgRead(ctx, d, m) } func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -74,11 +135,30 @@ func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{ return diag.FromErr(err) } - d.Set("name", wg.Name) - d.Set("num", wg.Num) - d.Set("cpu", wg.CPU) - d.Set("ram", wg.RAM) - d.Set("disk", wg.Disk) + workersComputeList := make([]compute.RecordCompute, 0) + for _, info := range wg.DetailedInfo { + compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID) + if err != nil { + return diag.FromErr(err) + } + workersComputeList = append(workersComputeList, *compute) + } + + d.Set("wg_id", wg.ID) + if strings.Contains(d.Id(), "#") { + k8sId, err := strconv.Atoi(strings.Split(d.Id(), "#")[1]) + if err != nil { + return diag.FromErr(err) + } + + d.Set("k8s_id", k8sId) + } else { + d.Set("k8s_id", d.Get("k8s_id")) + } + + d.SetId(strings.Split(d.Id(), "#")[0]) + + flattenWg(d, wg, workersComputeList) return nil } @@ -88,35 +168,56 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac c := m.(*controller.ControllerCfg) + if err := ic.ExistK8s(ctx, uint64(d.Get("k8s_id").(int)), c); err != nil { + return diag.FromErr(err) + } + wg, err := utilityK8sWgCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } + wgId, _ := strconv.ParseUint(d.Id(), 10, 64) + + if d.HasChange("num") { + if newNum := d.Get("num").(int); uint64(newNum) > wg.Num { + req := k8s.WorkerAddRequest{ + K8SID: uint64(d.Get("k8s_id").(int)), + WorkersGroupID: wgId, + Num: uint64(newNum) - wg.Num, + } - addReq := k8s.WorkerAddRequest{ - K8SID: uint64(d.Get("k8s_id").(int)), - WorkersGroupID: wg.ID, + _, err := c.CloudBroker().K8S().WorkerAdd(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } else { + for i := int(wg.Num) - 1; i >= newNum; i-- { + req := k8s.DeleteWorkerFromGroupRequest{ + K8SID: uint64(d.Get("k8s_id").(int)), + WorkersGroupID: wgId, + WorkerID: wg.DetailedInfo[i].ID, + } + + _, err := c.CloudBroker().K8S().DeleteWorkerFromGroup(ctx, req) + if err != nil { + return diag.FromErr(err) + } + } + } } - delReq := k8s.DeleteWorkerFromGroupRequest{ - K8SID: uint64(d.Get("k8s_id").(int)), - WorkersGroupID: wg.ID, - } + if d.HasChange("cloud_init") { + req := k8s.UpdateWorkerNodesMetaDataRequest{ + K8SID: uint64(d.Get("k8s_id").(int)), + WorkersGroupID: wgId, + UserData: d.Get("cloud_init").(string), + } - if newNum := uint64(d.Get("num").(int)); newNum > wg.Num { - addReq.Num = newNum - 
wg.Num - _, err := c.CloudBroker().K8S().WorkerAdd(ctx, addReq) + _, err := c.CloudBroker().K8S().UpdateWorkerNodesMetaData(ctx, req) if err != nil { return diag.FromErr(err) } - } else { - for i := wg.Num - 1; i >= newNum; i-- { - delReq.WorkerID = wg.DetailedInfo[i].ID - _, err := c.CloudBroker().K8S().DeleteWorkerFromGroup(ctx, delReq) - if err != nil { - return diag.FromErr(err) - } - } } return nil @@ -146,55 +247,6 @@ func resourceK8sWgDelete(ctx context.Context, d *schema.ResourceData, m interfac return nil } -func resourceK8sWgSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "k8s_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: "ID of k8s instance.", - }, - - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the worker group.", - }, - - "num": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - Description: "Number of worker nodes to create.", - }, - - "cpu": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 1, - Description: "Worker node CPU count.", - }, - - "ram": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 1024, - Description: "Worker node RAM in MB.", - }, - - "disk": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 0, - Description: "Worker node boot disk size. If unspecified or 0, size is defined by OS image size.", - }, - } -} - func ResourceK8sWg() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/k8s/utility_k8s.go b/internal/service/cloudbroker/k8s/utility_k8s.go index c3ed84a..5070269 100644 --- a/internal/service/cloudbroker/k8s/utility_k8s.go +++ b/internal/service/cloudbroker/k8s/utility_k8s.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
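resourceK8sWgCreate above waits for the worker-group task by polling Tasks().Get in a bare loop with a fixed 20-second sleep. A hypothetical context-aware variant of the same polling (not part of this patch) that also honours Terraform operation timeouts could look like:

// waitForWgTask mirrors the loop in resourceK8sWgCreate but aborts when the
// context is cancelled; it returns the task result formatted as a resource ID.
func waitForWgTask(ctx context.Context, c *controller.ControllerCfg, auditID string) (string, error) {
	req := tasks.GetRequest{AuditID: strings.Trim(auditID, `"`)}
	for {
		task, err := c.CloudBroker().Tasks().Get(ctx, req)
		if err != nil {
			return "", err
		}
		if task.Completed {
			if task.Error != "" {
				return "", fmt.Errorf("cannot create k8sWg instance: %v", task.Error)
			}
			return strconv.Itoa(int(task.Result)), nil
		}
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-time.After(20 * time.Second):
		}
	}
}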
Authors: Petr Krutov, Stanislav Solovev, @@ -36,6 +36,7 @@ import ( "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" ) @@ -43,9 +44,13 @@ import ( func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8S, error) { c := m.(*controller.ControllerCfg) req := k8s.GetRequest{} - k8sId, _ := strconv.ParseUint(d.Id(), 10, 64) - req.K8SID = k8sId + if d.Id() != "" { + k8sId, _ := strconv.ParseUint(d.Id(), 10, 64) + req.K8SID = k8sId + } else { + req.K8SID = uint64(d.Get("k8s_id").(int)) + } k8s, err := c.CloudBroker().K8S().Get(ctx, req) if err != nil { @@ -54,3 +59,17 @@ func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m inte return k8s, nil } + +func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}, computeID uint64) (*compute.RecordCompute, error) { + c := m.(*controller.ControllerCfg) + req := compute.GetRequest{ + ComputeID: computeID, + } + + compute, err := c.CloudBroker().Compute().Get(ctx, req) + if err != nil { + return nil, err + } + + return compute, nil +} diff --git a/internal/service/cloudbroker/k8s/utility_k8s_wg.go b/internal/service/cloudbroker/k8s/utility_k8s_wg.go index 0ef6369..1bae6d5 100644 --- a/internal/service/cloudbroker/k8s/utility_k8s_wg.go +++ b/internal/service/cloudbroker/k8s/utility_k8s_wg.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -33,31 +33,136 @@ package k8s import ( "context" + "fmt" "strconv" + "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" ) func utilityK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8SGroup, error) { c := m.(*controller.ControllerCfg) + var wgId int + var k8sId int + var err error + + if strings.Contains(d.Id(), "#") { + wgId, err = strconv.Atoi(strings.Split(d.Id(), "#")[0]) + if err != nil { + return nil, err + } + k8sId, err = strconv.Atoi(strings.Split(d.Id(), "#")[1]) + if err != nil { + return nil, err + } + } else { + wgId, err = strconv.Atoi(d.Id()) + if err != nil { + return nil, err + } + k8sId = d.Get("k8s_id").(int) + } + req := k8s.GetRequest{ - K8SID: uint64(d.Get("k8s_id").(int)), + K8SID: uint64(k8sId), } - k8sData, err := c.CloudBroker().K8S().Get(ctx, req) + cluster, err := c.CloudBroker().K8S().Get(ctx, req) if err != nil { return nil, err } - k8sWgID, _ := strconv.ParseUint(d.Id(), 10, 64) - - for _, wg := range k8sData.K8SGroups.Workers { - if wg.ID == k8sWgID { + for _, wg := range cluster.K8SGroups.Workers { + if wg.ID == uint64(wgId) { return &wg, nil } } - return nil, nil + return nil, fmt.Errorf("not found wg with id: %v in k8s cluster: %v", wgId, cluster.ID) +} + +func utilityDataK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8SGroup, []compute.RecordCompute, error) { + c := m.(*controller.ControllerCfg) + + k8sId := 
uint64(d.Get("k8s_id").(int)) + wgId := uint64(d.Get("wg_id").(int)) + + k8sGetReq := k8s.GetRequest{ + K8SID: k8sId, + } + + cluster, err := c.CloudBroker().K8S().Get(ctx, k8sGetReq) + if err != nil { + return nil, nil, err + } + + curWg := k8s.RecordK8SGroup{} + for _, wg := range cluster.K8SGroups.Workers { + if wg.ID == wgId { + curWg = wg + break + } + } + if curWg.ID == 0 { + return nil, nil, fmt.Errorf("WG with id %v in k8s cluster %v not found", wgId, k8sId) + } + + workersComputeList := make([]compute.RecordCompute, 0) + for _, info := range curWg.DetailedInfo { + compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID) + if err != nil { + return nil, nil, err + } + + workersComputeList = append(workersComputeList, *compute) + } + + return &curWg, workersComputeList, nil +} + +func utilityK8sWgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.ListK8SGroup, error) { + c := m.(*controller.ControllerCfg) + req := k8s.GetRequest{} + + if d.Id() != "" { + k8sId, _ := strconv.ParseUint(d.Id(), 10, 64) + req.K8SID = k8sId + } else { + req.K8SID = uint64(d.Get("k8s_id").(int)) + } + + cluster, err := c.CloudBroker().K8S().Get(ctx, req) + if err != nil { + return nil, err + } + + return &cluster.K8SGroups.Workers, nil +} + +func utilityK8sWgCloudInitCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) { + c := m.(*controller.ControllerCfg) + req := k8s.GetWorkerNodesMetaDataRequest{ + K8SID: uint64(d.Get("k8s_id").(int)), + WorkersGroupID: uint64(d.Get("wg_id").(int)), + } + + cloudInit, err := c.CloudBroker().K8S().GetWorkerNodesMetaData(ctx, req) + if err != nil { + return "", err + } + + return cloudInit, nil +} + +func cloudInitDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool { + if newVal != "" && newVal != oldVal { + log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal) + return false + } + log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal) + return true // suppress difference } diff --git a/internal/service/cloudbroker/kvmvm/data_source_compute.go b/internal/service/cloudbroker/kvmvm/data_source_compute.go index 0d58e35..8f5eb41 100644 --- a/internal/service/cloudbroker/kvmvm/data_source_compute.go +++ b/internal/service/cloudbroker/kvmvm/data_source_compute.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -33,12 +33,8 @@ package kvmvm import ( "context" - "fmt" - // "net/url" - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -46,121 +42,6 @@ import ( // "github.com/hashicorp/terraform-plugin-sdk/helper/validation" ) -// Parse list of all disks from API compute/get into a list of "extra disks" attached to this compute -// Extra disks are all compute disks but a boot disk. 
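cloudInitDiffSupperss above suppresses a plan diff only when the new value is empty or matches the stored one. For illustration, this is roughly how such a function is attached to a schema attribute via DiffSuppressFunc (the wrapper function name is assumed; the real schema lives elsewhere in this patch):

// workerCloudInitSchemaSketch is illustrative only.
func workerCloudInitSchemaSketch() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"cloud_init": {
			Type:             schema.TypeString,
			Optional:         true,
			DiffSuppressFunc: cloudInitDiffSupperss,
			Description:      "Worker group cloud-init user data.",
		},
	}
}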
-func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} { - // this return value will be used to d.Set("extra_disks",) item of dataSourceCompute schema, - // which is a simple list of integer disk IDs excluding boot disk ID - length := len(disks) - log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", length) - - if length == 0 || (length == 1 && disks[0].Type == "B") { - // the disk list is empty (which is kind of strange - diskless compute?), or - // there is only one disk in the list and it is a boot disk; - // as we skip boot disks, the result will be of 0 length anyway - return make([]interface{}, 0) - } - - result := make([]interface{}, length-1) - idx := 0 - for _, value := range disks { - if value.Type == "B" { - // skip boot disk when iterating over the list of disks - continue - } - - result[idx] = value.ID - idx++ - } - - return result -} - -// Parse the list of interfaces from compute/get response into a list of networks -// attached to this compute -func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} { - // return value will be used to d.Set("network") item of dataSourceCompute schema - length := len(ifaces) - log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length) - - result := []interface{}{} - - for _, value := range ifaces { - elem := make(map[string]interface{}) - // Keys in this map should correspond to the Schema definition - // as returned by networkSubresourceSchemaMake() - elem["net_id"] = value.NetID - elem["net_type"] = value.NetType - elem["ip_address"] = value.IPAddress - elem["mac"] = value.MAC - - // log.Debugf(" element %d: net_id=%d, net_type=%s", i, value.NetID, value.NetType) - - result = append(result, elem) - } - - return result -} - -func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error { - // This function expects that compFacts string contains response from API compute/get, - // i.e. detailed information about compute instance. - // - // NOTE: this function modifies ResourceData argument - as such it should never be called - // from resourceComputeExists(...) 
method - - log.Debugf("flattenCompute: ID %d, RG ID %d", compFacts.ID, compFacts.RGID) - - d.SetId(fmt.Sprintf("%d", compFacts.ID)) - d.Set("name", compFacts.Name) - d.Set("rg_id", compFacts.RGID) - d.Set("rg_name", compFacts.RGName) - d.Set("account_id", compFacts.AccountID) - d.Set("account_name", compFacts.AccountName) - d.Set("driver", compFacts.Driver) - d.Set("cpu", compFacts.CPUs) - d.Set("ram", compFacts.RAM) - d.Set("image_id", compFacts.ImageID) - d.Set("description", compFacts.Description) - d.Set("cloud_init", "applied") - - if compFacts.TechStatus == "STARTED" { - d.Set("started", true) - } else { - d.Set("started", false) - } - - bootDisk := findBootDisk(compFacts.Disks) - - d.Set("boot_disk_size", bootDisk.SizeMax) - d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations - d.Set("sep_id", bootDisk.SEPID) - d.Set("pool", bootDisk.Pool) - - if len(compFacts.Disks) > 0 { - log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(compFacts.Disks)) - if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(compFacts.Disks)); err != nil { - return err - } - } - - if len(compFacts.Interfaces) > 0 { - log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(compFacts.Interfaces)) - if err := d.Set("network", parseComputeInterfacesToNetworks(compFacts.Interfaces)); err != nil { - return err - } - } - - if len(compFacts.OSUsers) > 0 { - log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(compFacts.OSUsers)) - if err := d.Set("os_users", parseOsUsers(compFacts.OSUsers)); err != nil { - return err - } - } - - return nil -} - func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { compFacts, err := utilityComputeCheckPresence(ctx, d, m) if compFacts == nil { @@ -186,143 +67,6 @@ func DataSourceCompute() *schema.Resource { Default: &constants.Timeout60s, }, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: "Name of this compute instance. NOTE: this parameter is case sensitive.", - }, - - // TODO: consider removing compute_id from the schema, as it not practical to call this data provider if - // corresponding compute ID is already known - "compute_id": { - Type: schema.TypeInt, - Optional: true, - Description: "ID of the compute instance. 
If ID is specified, name and resource group ID are ignored.", - }, - - "rg_id": { - Type: schema.TypeInt, - Optional: true, - Description: "ID of the resource group where this compute instance is located.", - }, - - "rg_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the resource group where this compute instance is located.", - }, - - "account_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the account this compute instance belongs to.", - }, - - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account this compute instance belongs to.", - }, - - "driver": { - Type: schema.TypeString, - Computed: true, - Description: "Hardware architecture of this compute instance.", - }, - - "cpu": { - Type: schema.TypeInt, - Computed: true, - Description: "Number of CPUs allocated for this compute instance.", - }, - - "ram": { - Type: schema.TypeInt, - Computed: true, - Description: "Amount of RAM in MB allocated for this compute instance.", - }, - - "image_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the OS image this compute instance is based on.", - }, - - "image_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the OS image this compute instance is based on.", - }, - "boot_disk_size": { - Type: schema.TypeInt, - Computed: true, - Description: "This compute instance boot disk size in GB.", - }, - - "boot_disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "This compute instance boot disk ID.", - }, - - "extra_disks": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "IDs of the extra disk(s) attached to this compute.", - }, - - /* - "disks": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: dataSourceDiskSchemaMake(), // ID, type, name, size, account ID, SEP ID, SEP type, pool, status, tech status, compute ID, image ID - }, - Description: "Detailed specification for all disks attached to this compute instance (including bood disk).", - }, - */ - - // "network": { - // Type: schema.TypeSet, - // Optional: true, - // MaxItems: constants.MaxNetworksPerCompute, - // Elem: &schema.Resource{ - // Schema: networkSubresourceSchemaMake(), - // }, - // Description: "Network connection(s) for this compute.", - // }, - - "os_users": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: osUsersSubresourceSchemaMake(), - }, - Description: "Guest OS users provisioned on this compute instance.", - }, - - "description": { - Type: schema.TypeString, - Computed: true, - Description: "User-defined text description of this compute instance.", - }, - - "cloud_init": { - Type: schema.TypeString, - Computed: true, - Description: "Placeholder for cloud_init parameters.", - }, - - "started": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "Is compute started.", - }, - }, + Schema: dataSourceComputeSchemaMake(), } } diff --git a/internal/service/cloudbroker/kvmvm/flattens.go b/internal/service/cloudbroker/kvmvm/flattens.go index fcf0ede..3ae9b68 100644 --- a/internal/service/cloudbroker/kvmvm/flattens.go +++ b/internal/service/cloudbroker/kvmvm/flattens.go @@ -2,11 +2,14 @@ package kvmvm import ( "encoding/json" + "fmt" "sort" + "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" + 
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" ) func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) error { @@ -14,18 +17,35 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e customFields, _ := json.Marshal(computeRec.CustomFields) devices, _ := json.Marshal(computeRec.Devices) - + userData, _ := json.Marshal(computeRec.Userdata) bootDisk := findBootDisk(computeRec.Disks) + //extra fields setting + if len(computeRec.Disks) > 0 { + log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(computeRec.Disks)) + if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(computeRec.Disks)); err != nil { + return err + } + } + + if len(computeRec.Interfaces) > 0 { + log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(computeRec.Interfaces)) + if err := d.Set("network", parseComputeInterfacesToNetworks(computeRec.Interfaces)); err != nil { + return err + } + } d.Set("account_id", computeRec.AccountID) d.Set("account_name", computeRec.AccountName) + d.Set("acl", flattenListACLInterface(computeRec.ACL)) d.Set("affinity_label", computeRec.AffinityLabel) d.Set("affinity_weight", computeRec.AffinityWeight) d.Set("affinity_rules", flattenAffinityRules(computeRec.AffinityRules)) d.Set("anti_affinity_rules", flattenAffinityRules(computeRec.AntiAffinityRules)) d.Set("arch", computeRec.Arch) d.Set("boot_order", computeRec.BootOrder) + d.Set("boot_disk_id", bootDisk.ID) d.Set("boot_disk_size", computeRec.BootDiskSize) + d.Set("cd_image_id", computeRec.CdImageId) d.Set("clone_reference", computeRec.CloneReference) d.Set("clones", computeRec.Clones) d.Set("computeci_id", computeRec.ComputeCIID) @@ -36,13 +56,11 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e d.Set("deleted_time", computeRec.DeletedTime) d.Set("description", computeRec.Description) d.Set("devices", string(devices)) - d.Set("disks", - flattenComputeDisks( - computeRec.Disks, - d.Get("extra_disks").(*schema.Set).List(), - bootDisk.ID, - ), - ) + err := d.Set("disks", flattenComputeDisks(computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID)) + if err != nil { + return err + } + d.Set("driver", computeRec.Driver) d.Set("gid", computeRec.GID) d.Set("guid", computeRec.GUID) d.Set("compute_id", computeRec.ID) @@ -53,6 +71,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e d.Set("manager_type", computeRec.ManagerType) d.Set("migrationjob", computeRec.MigrationJob) d.Set("milestones", computeRec.Milestones) + d.Set("need_reboot", computeRec.NeedReboot) d.Set("os_users", flattenOSUsers(computeRec.OSUsers)) d.Set("pinned", computeRec.Pinned) d.Set("reference_id", computeRec.ReferenceID) @@ -69,6 +88,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e d.Set("tech_status", computeRec.TechStatus) d.Set("updated_by", computeRec.UpdatedBy) d.Set("updated_time", computeRec.UpdatedTime) + d.Set("user_data", string(userData)) d.Set("user_managed", computeRec.UserManaged) d.Set("vgpus", computeRec.VGPUs) d.Set("virtual_image_id", computeRec.VirtualImageID) @@ -175,6 +195,7 @@ func flattenComputeDisks(disksList compute.ListDisks, extraDisks []interface{}, "disk_id": disk.ID, "shareable": disk.Shareable, "size_used": disk.SizeUsed, + "size_max": disk.SizeMax, } res = append(res, temp) } @@ -193,15 +214,6 @@ func findInExtraDisks(diskId uint, extraDisks []interface{}) bool { return 
false } -func findBootDisk(disks compute.ListDisks) *compute.ItemDisk { - for _, disk := range disks { - if disk.Type == "B" { - return &disk - } - } - return nil -} - func flattenAffinityRules(rules compute.ListRules) []map[string]interface{} { res := make([]map[string]interface{}, 0, len(rules)) @@ -217,3 +229,541 @@ func flattenAffinityRules(rules compute.ListRules) []map[string]interface{} { return res } + +func flattenComputeList(computes *compute.ListComputes) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(computes.Data)) + for _, computeItem := range computes.Data { + customFields, _ := json.Marshal(computeItem.CustomFields) + devices, _ := json.Marshal(computeItem.Devices) + userData, _ := json.Marshal(computeItem.Userdata) + temp := map[string]interface{}{ + "acl": flattenListACLInterface(computeItem.ACL), + "account_id": computeItem.AccountID, + "account_name": computeItem.AccountName, + "affinity_label": computeItem.AffinityLabel, + "affinity_rules": flattenListRules(computeItem.AffinityRules), + "affinity_weight": computeItem.AffinityWeight, + "anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules), + "arch": computeItem.Arch, + "cd_image_id": computeItem.CdImageId, + "boot_order": computeItem.BootOrder, + "bootdisk_size": computeItem.BootDiskSize, + "clone_reference": computeItem.CloneReference, + "clones": computeItem.Clones, + "computeci_id": computeItem.ComputeCIID, + "cpus": computeItem.CPUs, + "created_by": computeItem.CreatedBy, + "created_time": computeItem.CreatedTime, + "custom_fields": string(customFields), + "deleted_by": computeItem.DeletedBy, + "deleted_time": computeItem.DeletedTime, + "desc": computeItem.Description, + "devices": string(devices), + "disks": flattenDisks(computeItem.Disks), + "driver": computeItem.Driver, + "gid": computeItem.GID, + "guid": computeItem.GUID, + "compute_id": computeItem.ID, + "image_id": computeItem.ImageID, + "interfaces": flattenInterfaces(computeItem.Interfaces), + "lock_status": computeItem.LockStatus, + "manager_id": computeItem.ManagerID, + "manager_type": computeItem.ManagerType, + "migrationjob": computeItem.MigrationJob, + "milestones": computeItem.Milestones, + "name": computeItem.Name, + "need_reboot": computeItem.NeedReboot, + "os_users": flattenOSUsers(computeItem.OSUsers), + "pinned": computeItem.Pinned, + "ram": computeItem.RAM, + "reference_id": computeItem.ReferenceID, + "registered": computeItem.Registered, + "res_name": computeItem.ResName, + "rg_id": computeItem.RGID, + "rg_name": computeItem.RGName, + "snap_sets": flattenSnapSets(computeItem.SnapSets), + "stack_id": computeItem.StackID, + "stateless_sep_id": computeItem.StatelessSEPID, + "stateless_sep_type": computeItem.StatelessSEPType, + "status": computeItem.Status, + "tags": flattenTags(computeItem.Tags), + "tech_status": computeItem.TechStatus, + "total_disk_size": computeItem.TotalDiskSize, + "updated_by": computeItem.UpdatedBy, + "updated_time": computeItem.UpdatedTime, + "user_data": string(userData), + "user_managed": computeItem.UserManaged, + "vgpus": computeItem.VGPUs, + "vins_connected": computeItem.VINSConnected, + "virtual_image_id": computeItem.VirtualImageID, + } + res = append(res, temp) + } + return res +} + +func flattenListACLInterface(listAcl []interface{}) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(listAcl)) + for _, aclInterface := range listAcl { + acl := aclInterface.(map[string]interface{}) + temp := map[string]interface{}{ + "explicit": acl["explicit"], + 
"guid": acl["guid"], + "right": acl["right"], + "status": acl["status"], + "type": acl["type"], + "user_group_id": acl["user_group_id"], + } + res = append(res, temp) + } + return res +} + +func flattenListACL(listAcl compute.ListACL) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(listAcl)) + for _, acl := range listAcl { + temp := map[string]interface{}{ + "explicit": acl.Explicit, + "guid": acl.GUID, + "right": acl.Right, + "status": acl.Status, + "type": acl.Type, + "user_group_id": acl.UserGroupID, + } + res = append(res, temp) + } + return res +} + +func flattenListComputeACL(listAcl []compute.ItemComputeACL) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(listAcl)) + for _, acl := range listAcl { + temp := map[string]interface{}{ + "explicit": acl.Explicit, + "guid": acl.GUID, + "right": acl.Right, + "status": acl.Status, + "type": acl.Type, + "user_group_id": acl.UserGroupID, + } + res = append(res, temp) + } + return res +} + +func flattenListRules(listRules compute.ListRules) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(listRules)) + for _, rule := range listRules { + temp := map[string]interface{}{ + "guid": rule.GUID, + "key": rule.Key, + "mode": rule.Mode, + "policy": rule.Policy, + "topology": rule.Topology, + "value": rule.Value, + } + res = append(res, temp) + } + return res +} + +func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + for _, disk := range disks { + temp := map[string]interface{}{ + "disk_id": disk.ID, + "pci_slot": disk.PCISlot, + } + res = append(res, temp) + } + return res +} + +func flattenComputeAudits(computeAudits compute.ListDetailedAudits) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(computeAudits)) + for _, computeAudit := range computeAudits { + temp := map[string]interface{}{ + "call": computeAudit.Call, + "responsetime": computeAudit.ResponseTime, + "statuscode": computeAudit.StatusCode, + "timestamp": computeAudit.Timestamp, + "user": computeAudit.User, + } + res = append(res, temp) + } + return res +} + +func flattenComputeGetAudits(computeAudits compute.ListAudits) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(computeAudits)) + for _, computeAudit := range computeAudits { + temp := map[string]interface{}{ + "epoch": computeAudit.Epoch, + "message": computeAudit.Message, + } + res = append(res, temp) + } + return res +} + +func flattenPfwList(computePfws *compute.ListPFW) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(computePfws.Data)) + for _, computePfw := range computePfws.Data { + temp := map[string]interface{}{ + "pfw_id": computePfw.ID, + "local_ip": computePfw.LocalIP, + "local_port": computePfw.LocalPort, + "protocol": computePfw.Protocol, + "public_port_end": computePfw.PublicPortEnd, + "public_port_start": computePfw.PublicPortStart, + "vm_id": computePfw.VMID, + } + res = append(res, temp) + } + return res +} + +func flattenUserList(d *schema.ResourceData, userList *compute.ListUsers) { + d.Set("account_acl", flattenListACL(userList.Data.AccountACL)) + d.Set("compute_acl", flattenListComputeACL(userList.Data.ComputeACL)) + d.Set("rg_acl", flattenListACL(userList.Data.RGACL)) +} + +func flattenSnapshotList(computeSnapshots *compute.ListSnapShot) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(computeSnapshots.Data)) + for _, snp := range computeSnapshots.Data { + temp := 
map[string]interface{}{ + "disks": snp.Disks, + "guid": snp.GUID, + "label": snp.Label, + "timestamp": snp.Timestamp, + } + res = append(res, temp) + } + return res +} + +func flattenAffinityRelations(d *schema.ResourceData, ar *compute.RecordAffinityRelations) { + d.Set("other_node", flattenNodes(ar.OtherNode)) + d.Set("other_node_indirect", flattenNodes(ar.OtherNodeIndirect)) + d.Set("other_node_indirect_soft", flattenNodes(ar.OtherNodeIndirectSoft)) + d.Set("other_node_soft", flattenNodes(ar.OtherNodeSoft)) + d.Set("same_node", flattenNodes(ar.SameNode)) + d.Set("same_node_soft", flattenNodes(ar.SameNodeSoft)) +} + +func flattenSnapshotUsage(computeSnapshotUsages compute.ListSnapshotUsage) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(computeSnapshotUsages)) + for _, computeUsage := range computeSnapshotUsages { + temp := map[string]interface{}{ + "count": computeUsage.Count, + "stored": computeUsage.Stored, + "label": computeUsage.Label, + "timestamp": computeUsage.Timestamp, + } + res = append(res, temp) + } + return res +} + +func flattenPCIDevice(deviceList []compute.ItemPCIDevice) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(deviceList)) + for _, dev := range deviceList { + temp := map[string]interface{}{ + "ckey": dev.CKey, + "meta": flattens.FlattenMeta(dev.Meta), + "compute_id": dev.ComputeID, + "description": dev.Description, + "guid": dev.GUID, + "hwpath": dev.HwPath, + "device_id": dev.ID, + "name": dev.Name, + "rg_id": dev.RGID, + "stack_id": dev.StackID, + "status": dev.Status, + "system_name": dev.SystemName, + } + res = append(res, temp) + } + return res +} + +func flattenVGPU(m []interface{}) []string { + var output []string + for _, item := range m { + switch d := item.(type) { + case string: + output = append(output, d) + case int: + output = append(output, strconv.Itoa(d)) + case int64: + output = append(output, strconv.FormatInt(d, 10)) + case float64: + output = append(output, strconv.FormatInt(int64(d), 10)) + default: + output = append(output, "") + } + } + return output +} + +func flattenNodes(m []interface{}) []string { + var output []string + for _, item := range m { + switch d := item.(type) { + case string: + output = append(output, d) + case int: + output = append(output, strconv.Itoa(d)) + case int64: + output = append(output, strconv.FormatInt(d, 10)) + case float64: + output = append(output, strconv.FormatInt(int64(d), 10)) + default: + output = append(output, "") + } + } + return output +} + +func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error { + // This function expects that compFacts string contains response from API compute/get, + // i.e. detailed information about compute instance. + // + // NOTE: this function modifies ResourceData argument - as such it should never be called + // from resourceComputeExists(...) 
method + + log.Debugf("flattenCompute: ID %d, RG ID %d", compFacts.ID, compFacts.RGID) + + customFields, _ := json.Marshal(compFacts.CustomFields) + devices, _ := json.Marshal(compFacts.Devices) + userData, _ := json.Marshal(compFacts.Userdata) + // general fields setting + d.SetId(fmt.Sprintf("%d", compFacts.ID)) + d.Set("account_id", compFacts.AccountID) + d.Set("account_name", compFacts.AccountName) + d.Set("acl", flattenListACLInterface(compFacts.ACL)) + d.Set("affinity_label", compFacts.AffinityLabel) + d.Set("affinity_rules", flattenAffinityRules(compFacts.AffinityRules)) + d.Set("affinity_weight", compFacts.AffinityWeight) + d.Set("anti_affinity_rules", flattenAffinityRules(compFacts.AntiAffinityRules)) + d.Set("arch", compFacts.Arch) + d.Set("boot_order", compFacts.BootOrder) + d.Set("cd_image_id", compFacts.CdImageId) + d.Set("clone_reference", compFacts.CloneReference) + d.Set("clones", compFacts.Clones) + d.Set("computeci_id", compFacts.ComputeCIID) + d.Set("cpus", compFacts.CPUs) + d.Set("created_by", compFacts.CreatedBy) + d.Set("created_time", compFacts.CreatedTime) + d.Set("custom_fields", string(customFields)) + d.Set("deleted_by", compFacts.DeletedBy) + d.Set("deleted_time", compFacts.DeletedTime) + d.Set("desc", compFacts.Description) + d.Set("devices", string(devices)) + d.Set("disks", flattenDisk(compFacts.Disks)) + d.Set("driver", compFacts.Driver) + d.Set("gid", compFacts.GID) + d.Set("guid", compFacts.GUID) + d.Set("image_id", compFacts.ImageID) + d.Set("interfaces", flattenInterfaces(compFacts.Interfaces)) + d.Set("lock_status", compFacts.LockStatus) + d.Set("manager_id", compFacts.ManagerID) + d.Set("manager_type", compFacts.ManagerType) + d.Set("migrationjob", compFacts.MigrationJob) + d.Set("milestones", compFacts.Milestones) + d.Set("name", compFacts.Name) + d.Set("need_reboot", compFacts.NeedReboot) + d.Set("os_users", flattenOSUsers(compFacts.OSUsers)) + d.Set("pinned", compFacts.Pinned) + d.Set("ram", compFacts.RAM) + d.Set("reference_id", compFacts.ReferenceID) + d.Set("registered", compFacts.Registered) + d.Set("res_name", compFacts.ResName) + d.Set("rg_id", compFacts.RGID) + d.Set("rg_name", compFacts.RGName) + d.Set("snap_sets", flattenSnapSets(compFacts.SnapSets)) + d.Set("stack_id", compFacts.StackID) + d.Set("stack_name", compFacts.StackName) + d.Set("stateless_sep_id", compFacts.StatelessSEPID) + d.Set("stateless_sep_type", compFacts.StatelessSEPType) + d.Set("status", compFacts.Status) + d.Set("tags", flattenTags(compFacts.Tags)) + d.Set("tech_status", compFacts.TechStatus) + d.Set("updated_by", compFacts.UpdatedBy) + d.Set("updated_time", compFacts.UpdatedTime) + d.Set("user_data", string(userData)) + d.Set("user_managed", compFacts.UserManaged) + d.Set("vgpus", compFacts.VGPUs) + d.Set("virtual_image_id", compFacts.VirtualImageID) + + //extra fields setting + bootDisk := findBootDisk(compFacts.Disks) + if bootDisk != nil { + d.Set("boot_disk_size", bootDisk.SizeMax) + d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations + d.Set("sep_id", bootDisk.SEPID) + d.Set("pool", bootDisk.Pool) + } + + if len(compFacts.Disks) > 0 { + log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(compFacts.Disks)) + if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(compFacts.Disks)); err != nil { + return err + } + } + + if len(compFacts.Interfaces) > 0 { + log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(compFacts.Interfaces)) + if err := 
d.Set("network", parseComputeInterfacesToNetworks(compFacts.Interfaces)); err != nil { + return err + } + } + + return nil +} + +// Parse list of all disks from API compute/get into a list of "extra disks" attached to this compute +// Extra disks are all compute disks but a boot disk. +func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} { + // this return value will be used to d.Set("extra_disks",) item of dataSourceCompute schema, + // which is a simple list of integer disk IDs excluding boot disk ID + length := len(disks) + log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", length) + + if length == 0 || (length == 1 && disks[0].Type == "B") { + // the disk list is empty (which is kind of strange - diskless compute?), or + // there is only one disk in the list and it is a boot disk; + // as we skip boot disks, the result will be of 0 length anyway + return make([]interface{}, 0) + } + + result := make([]interface{}, length-1) + idx := 0 + for _, value := range disks { + if value.Type == "B" { + // skip boot disk when iterating over the list of disks + continue + } + + result[idx] = value.ID + idx++ + } + + return result +} + +// Parse the list of interfaces from compute/get response into a list of networks +// attached to this compute +func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} { + // return value will be used to d.Set("network") item of dataSourceCompute schema + length := len(ifaces) + log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length) + + result := []interface{}{} + + for _, value := range ifaces { + elem := make(map[string]interface{}) + // Keys in this map should correspond to the Schema definition for "network" + elem["net_id"] = value.NetID + elem["net_type"] = value.NetType + elem["ip_address"] = value.IPAddress + elem["mac"] = value.MAC + + result = append(result, elem) + } + + return result +} + +func flattenDisk(diskList compute.ListDisks) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(diskList)) + for _, disk := range diskList { + temp := map[string]interface{}{ + "ckey": disk.CKey, + "meta": flattens.FlattenMeta(disk.Meta), + "account_id": disk.AccountID, + "boot_partition": disk.BootPartition, + "created_time": disk.CreatedTime, + "deleted_time": disk.DeletedTime, + "desc": disk.Description, + "destruction_time": disk.DestructionTime, + "disk_path": disk.DiskPath, + "gid": disk.GID, + "guid": disk.GUID, + "disk_id": disk.ID, + "image_id": disk.ImageID, + "images": disk.Images, + "iotune": flattenIOTune(disk.IOTune), + "iqn": disk.IQN, + "login": disk.Login, + "milestones": disk.Milestones, + "name": disk.Name, + "order": disk.Order, + "params": disk.Params, + "parent_id": disk.ParentID, + "passwd": disk.Password, + "pci_slot": disk.PCISlot, + "pool": disk.Pool, + "purge_attempts": disk.PurgeAttempts, + "present_to": disk.PresentTo, + "purge_time": disk.PurgeTime, + "reality_device_number": disk.RealityDeviceNumber, + "reference_id": disk.ReferenceID, + "res_id": disk.ResID, + "res_name": disk.ResName, + "role": disk.Role, + "sep_id": disk.SEPID, + "shareable": disk.Shareable, + "size_max": disk.SizeMax, + "size_used": disk.SizeUsed, + "snapshots": flattendDiskSnapshotList(disk.Snapshots), + "status": disk.Status, + "tech_status": disk.TechStatus, + "type": disk.Type, + "vmid": disk.VMID, + } + res = append(res, temp) + } + return res +} + +func flattenIOTune(iot compute.IOTune) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + 
temp := map[string]interface{}{ + "read_bytes_sec": iot.ReadBytesSec, + "read_bytes_sec_max": iot.ReadBytesSecMax, + "read_iops_sec": iot.ReadIOPSSec, + "read_iops_sec_max": iot.ReadIOPSSecMax, + "size_iops_sec": iot.SizeIOPSSec, + "total_bytes_sec": iot.TotalBytesSec, + "total_bytes_sec_max": iot.TotalBytesSecMax, + "total_iops_sec": iot.TotalIOPSSec, + "total_iops_sec_max": iot.TotalIOPSSecMax, + "write_bytes_sec": iot.WriteBytesSec, + "write_bytes_sec_max": iot.WriteBytesSecMax, + "write_iops_sec": iot.WriteIOPSSec, + "write_iops_sec_max": iot.WriteIOPSSecMax, + } + + res = append(res, temp) + return res +} + +func flattendDiskSnapshotList(sl compute.ListDetailedSnapshots) []interface{} { + res := make([]interface{}, 0) + for _, snapshot := range sl { + temp := map[string]interface{}{ + "guid": snapshot.GUID, + "label": snapshot.Label, + "res_id": snapshot.ResID, + "snap_set_guid": snapshot.SnapSetGUID, + "snap_set_time": snapshot.SnapSetTime, + "timestamp": snapshot.TimeStamp, + } + res = append(res, temp) + } + + return res + +} diff --git a/internal/service/cloudbroker/kvmvm/resource_compute.go b/internal/service/cloudbroker/kvmvm/resource_compute.go index 997697b..8048b0a 100644 --- a/internal/service/cloudbroker/kvmvm/resource_compute.go +++ b/internal/service/cloudbroker/kvmvm/resource_compute.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, @@ -39,7 +39,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/kvmppc" @@ -47,7 +46,6 @@ import ( "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" ) @@ -62,10 +60,9 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf return diags } - argVal, ok := d.GetOk("description") - if ok { - createReqPPC.Description = argVal.(string) - createReqX86.Description = argVal.(string) + if desc, ok := d.GetOk("description"); ok { + createReqPPC.Description = desc.(string) + createReqX86.Description = desc.(string) } if sepID, ok := d.GetOk("sep_id"); ok { @@ -78,6 +75,16 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf createReqX86.Pool = pool.(string) } + if stackID, ok := d.GetOk("stack_id"); ok { + createReqPPC.StackID = uint64(stackID.(int)) + createReqX86.StackID = uint64(stackID.(int)) + } + + if start, ok := d.GetOk("started"); ok { + createReqPPC.Start = start.(bool) + createReqX86.Start = start.(bool) + } + if ipaType, ok := d.GetOk("ipa_type"); ok { createReqPPC.IPAType = ipaType.(string) createReqX86.IPAType = ipaType.(string) @@ -93,15 +100,15 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf createReqX86.IS = IS.(string) } - if !d.Get("with_default_vins").(bool) { - createReqX86.Interfaces = make([]kvmx86.Interface, 0) - } + 
createReqX86.Interfaces = make([]kvmx86.Interface, 0) + createReqPPC.Interfaces = make([]kvmppc.Interface, 0) if networks, ok := d.GetOk("network"); ok { if networks.(*schema.Set).Len() > 0 { ns := networks.(*schema.Set).List() - interfaces := make([]kvmx86.Interface, 0) + interfacesX86 := make([]kvmx86.Interface, 0) + interfacesPPC := make([]kvmppc.Interface, 0) for _, elem := range ns { netInterfaceVal := elem.(map[string]interface{}) reqInterface := kvmx86.Interface{ @@ -114,16 +121,31 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf reqInterface.IPAddr = ipaddr.(string) } - interfaces = append(interfaces, reqInterface) + interfacesX86 = append(interfacesX86, reqInterface) } + createReqX86.Interfaces = interfacesX86 + + for _, elem := range ns { + netInterfaceVal := elem.(map[string]interface{}) + reqInterface := kvmppc.Interface{ + NetType: netInterfaceVal["net_type"].(string), + NetID: uint64(netInterfaceVal["net_id"].(int)), + } + + ipaddr, ipSet := netInterfaceVal["ip_address"] + if ipSet { + reqInterface.IPAddr = ipaddr.(string) + } + + interfacesPPC = append(interfacesPPC, reqInterface) + } + createReqPPC.Interfaces = interfacesPPC - createReqX86.Interfaces = interfaces } } - argVal, ok = d.GetOk("cloud_init") - if ok { - userdata := argVal.(string) + if cloudInit, ok := d.GetOk("cloud_init"); ok { + userdata := cloudInit.(string) if userdata != "" && userdata != "applied" { createReqPPC.Userdata = strings.TrimSpace(userdata) createReqX86.Userdata = strings.TrimSpace(userdata) @@ -154,7 +176,7 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf createReqX86.RAM = uint64(d.Get("ram").(int)) createReqX86.ImageID = uint64(d.Get("image_id").(int)) - // createReqX86.Driver = driver + createReqX86.Driver = driver if custom_fields, ok := d.GetOk("custom_fields"); ok { val := custom_fields.(string) @@ -186,7 +208,9 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf Permanently: true, DetachDisks: true, } - + if reason, ok := d.Get("reason").(string); ok { + req.Reason = reason + } if _, err := c.CloudBroker().Compute().Delete(ctx, req); err != nil { log.Errorf("resourceComputeCreate: could not delete compute after failed creation: %v", err) } @@ -197,9 +221,9 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf log.Debugf("resourceComputeCreate: new simple Compute ID %d, name %s created", computeId, d.Get("name").(string)) - argVal, ok = d.GetOk("extra_disks") - if ok && argVal.(*schema.Set).Len() > 0 { - log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", argVal.(*schema.Set).Len()) + extraDisks, ok := d.GetOk("extra_disks") + if ok && extraDisks.(*schema.Set).Len() > 0 { + log.Debugf("resourceComputeCreate: calling utilityComputeExtraDisksConfigure to attach %d extra disk(s)", extraDisks.(*schema.Set).Len()) err := utilityComputeExtraDisksConfigure(ctx, d, m, false) if err != nil { log.Errorf("resourceComputeCreate: error when attaching extra disk(s) to a new Compute ID %d: %v", computeId, err) @@ -208,31 +232,37 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf } } - if d.Get("started").(bool) { - req := compute.StartRequest{ComputeID: computeId} - log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", computeId) - if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil { - warnings.Add(err) + if !cleanup { + if 
enabled, ok := d.GetOk("enabled"); ok { + if enabled.(bool) { + req := compute.EnableRequest{ComputeID: computeId} + log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId) + if _, err := c.CloudBroker().Compute().Enable(ctx, req); err != nil { + warnings.Add(err) + } + } else { + req := compute.DisableRequest{ComputeID: computeId} + log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId) + if _, err := c.CloudBroker().Compute().Disable(ctx, req); err != nil { + warnings.Add(err) + } + } } - } - if enabled, ok := d.GetOk("enabled"); ok { - if enabled.(bool) { - req := compute.EnableRequest{ComputeID: computeId} - log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId) - if _, err := c.CloudBroker().Compute().Enable(ctx, req); err != nil { + if start, ok := d.GetOk("started"); ok && start.(bool) { + req := compute.StartRequest{ComputeID: computeId} + log.Debugf("resourceComputeCreate: starting Compute ID %d after completing its resource configuration", computeId) + if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil { warnings.Add(err) } - } else { - req := compute.DisableRequest{ComputeID: computeId} - log.Debugf("resourceComputeCreate: enable=%t Compute ID %d after completing its resource configuration", enabled, computeId) - if _, err := c.CloudBroker().Compute().Disable(ctx, req); err != nil { + } else if ok && !start.(bool) { + req := compute.StopRequest{ComputeID: computeId} + log.Debugf("resourceComputeCreate: stoping Compute ID %d after completing its resource configuration", computeId) + if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil { warnings.Add(err) } } - } - if !cleanup { if affinityLabel, ok := d.GetOk("affinity_label"); ok { req := compute.AffinityLabelSetRequest{ ComputeIDs: []uint64{ @@ -257,9 +287,11 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf ComputeID: computeId, DiskName: diskConv["disk_name"].(string), Size: uint64(diskConv["size"].(int)), - SepID: uint64(diskConv["sep_id"].(int)), } + if uint64(diskConv["sep_id"].(int)) != 0 { + req.SepID = uint64(diskConv["sep_id"].(int)) + } if diskConv["disk_type"].(string) != "" { req.DiskType = diskConv["disk_type"].(string) } @@ -327,123 +359,135 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf } } } - } + if tags, ok := d.GetOk("tags"); ok { + log.Debugf("resourceComputeCreate: Create tags on ComputeID: %d", computeId) + addedTags := tags.(*schema.Set).List() + if len(addedTags) > 0 { + for _, tagInterface := range addedTags { + tagItem := tagInterface.(map[string]interface{}) + req := compute.TagAddRequest{ + ComputeIDs: []uint64{computeId}, + Key: tagItem["key"].(string), + Value: tagItem["value"].(string), + } - if tags, ok := d.GetOk("tags"); ok { - log.Debugf("resourceComputeCreate: Create tags on ComputeID: %d", computeId) - addedTags := tags.(*schema.Set).List() - if len(addedTags) > 0 { - for _, tagInterface := range addedTags { - tagItem := tagInterface.(map[string]interface{}) - req := compute.TagAddRequest{ - ComputeIDs: []uint64{computeId}, - Key: tagItem["key"].(string), - Value: tagItem["value"].(string), + _, err := c.CloudBroker().Compute().TagAdd(ctx, req) + if err != nil { + warnings.Add(err) + } } + } + } - _, err := c.CloudBroker().Compute().TagAdd(ctx, req) - if err != nil { - warnings.Add(err) + 
if pfws, ok := d.GetOk("port_forwarding"); ok { + log.Debugf("resourceComputeCreate: Create port forwarding on ComputeID: %d", computeId) + addedPfws := pfws.(*schema.Set).List() + if len(addedPfws) > 0 { + for _, pfwInterface := range addedPfws { + pfwItem := pfwInterface.(map[string]interface{}) + req := compute.PFWAddRequest{ + ComputeID: computeId, + PublicPortStart: uint64(pfwItem["public_port_start"].(int)), + LocalBasePort: uint64(pfwItem["local_port"].(int)), + Proto: pfwItem["proto"].(string), + } + if int64(pfwItem["public_port_end"].(int)) != 0 { + req.PublicPortEnd = int64(pfwItem["public_port_end"].(int)) + } + if pfwItem["reason"].(string) != "" { + req.Reason = pfwItem["reason"].(string) + } + pwfId, err := c.CloudBroker().Compute().PFWAdd(ctx, req) + if err != nil { + warnings.Add(err) + } + d.Set("rule_id", pwfId) } } } - } - if pfws, ok := d.GetOk("port_forwarding"); ok { - log.Debugf("resourceComputeCreate: Create port farwarding on ComputeID: %d", computeId) - addedPfws := pfws.(*schema.Set).List() - if len(addedPfws) > 0 { - for _, pfwInterface := range addedPfws { - pfwItem := pfwInterface.(map[string]interface{}) - req := compute.PFWAddRequest{ - ComputeID: computeId, - PublicPortStart: uint64(pfwItem["public_port_start"].(int)), - PublicPortEnd: int64(pfwItem["public_port_end"].(int)), - LocalBasePort: uint64(pfwItem["local_port"].(int)), - Proto: pfwItem["proto"].(string), - } + if userAcess, ok := d.GetOk("user_access"); ok { + log.Debugf("resourceComputeCreate: Create user access on ComputeID: %d", computeId) + usersAcess := userAcess.(*schema.Set).List() + if len(usersAcess) > 0 { + for _, userAcessInterface := range usersAcess { + userAccessItem := userAcessInterface.(map[string]interface{}) + req := compute.UserGrantRequest{ + ComputeID: computeId, + Username: userAccessItem["username"].(string), + AccessType: userAccessItem["access_type"].(string), + } - _, err := c.CloudBroker().Compute().PFWAdd(ctx, req) - if err != nil { - warnings.Add(err) + _, err := c.CloudBroker().Compute().UserGrant(ctx, req) + if err != nil { + warnings.Add(err) + } } } } - } - if userAcess, ok := d.GetOk("user_access"); ok { - log.Debugf("resourceComputeCreate: Create user access on ComputeID: %d", computeId) - usersAcess := userAcess.(*schema.Set).List() - if len(usersAcess) > 0 { - for _, userAcessInterface := range usersAcess { - userAccessItem := userAcessInterface.(map[string]interface{}) - req := compute.UserGrantRequest{ - ComputeID: computeId, - Username: userAccessItem["username"].(string), - AccessType: userAccessItem["access_type"].(string), - } + if snapshotList, ok := d.GetOk("snapshot"); ok { + log.Debugf("resourceComputeCreate: Create snapshot on ComputeID: %d", computeId) + snapshots := snapshotList.(*schema.Set).List() + if len(snapshots) > 0 { + for _, snapshotInterface := range snapshots { + snapshotItem := snapshotInterface.(map[string]interface{}) + req := compute.SnapshotCreateRequest{ + ComputeID: computeId, + Label: snapshotItem["label"].(string), + } - _, err := c.CloudBroker().Compute().UserGrant(ctx, req) - if err != nil { - warnings.Add(err) + _, err := c.CloudBroker().Compute().SnapshotCreate(ctx, req) + if err != nil { + warnings.Add(err) + } } } } - } - if snapshotList, ok := d.GetOk("snapshot"); ok { - log.Debugf("resourceComputeCreate: Create snapshot on ComputeID: %d", computeId) - snapshots := snapshotList.(*schema.Set).List() - if len(snapshots) > 0 { - for _, snapshotInterface := range snapshots { - snapshotItem :=
snapshotInterface.(map[string]interface{}) - req := compute.SnapshotCreateRequest{ + if cdtList, ok := d.GetOk("cd"); ok { + log.Debugf("resourceComputeCreate: Create cd on ComputeID: %d", computeId) + cds := cdtList.(*schema.Set).List() + if len(cds) > 0 { + snapshotItem := cds[0].(map[string]interface{}) + req := compute.CDInsertRequest{ ComputeID: computeId, - Label: snapshotItem["label"].(string), + CDROMID: uint64(snapshotItem["cdrom_id"].(int)), } - - _, err := c.CloudBroker().Compute().SnapshotCreate(ctx, req) + if snapshotItem["reason"].(string) != "" { + req.Reason = snapshotItem["reason"].(string) + } + _, err := c.CloudBroker().Compute().CDInsert(ctx, req) if err != nil { warnings.Add(err) } } } - } - if cdtList, ok := d.GetOk("cd"); ok { - log.Debugf("resourceComputeCreate: Create cd on ComputeID: %d", computeId) - cds := cdtList.(*schema.Set).List() - if len(cds) > 0 { - snapshotItem := cds[0].(map[string]interface{}) - req := compute.CDInsertRequest{ - ComputeID: computeId, - CDROMID: uint64(snapshotItem["cdrom_id"].(int)), + if pin, ok := d.GetOk("pin_to_stack"); ok && pin.(bool) { + req := compute.PinToStackRequest{ + ComputeID: computeId, + TargetStackID: uint64(d.Get("target_stack_id").(int)), + } + + if force, ok := d.Get("force_pin").(bool); ok { + req.Force = force } - _, err := c.CloudBroker().Compute().CDInsert(ctx, req) + _, err := c.CloudBroker().Compute().PinToStack(ctx, req) if err != nil { warnings.Add(err) } } - } - if d.Get("pin_to_stack").(bool) { - req := compute.PinToStackRequest{ - ComputeID: computeId, - } - _, err := c.CloudBroker().Compute().PinToStack(ctx, req) - if err != nil { - warnings.Add(err) - } - } - - if d.Get("pause").(bool) { - req := compute.PauseRequest{ - ComputeID: computeId, - } - _, err := c.CloudBroker().Compute().Pause(ctx, req) - if err != nil { - warnings.Add(err) + if d.Get("pause").(bool) { + req := compute.PauseRequest{ + ComputeID: computeId, + } + _, err := c.CloudBroker().Compute().Pause(ctx, req) + if err != nil { + warnings.Add(err) + } } } @@ -453,19 +497,19 @@ func resourceComputeCreate(ctx context.Context, d *schema.ResourceData, m interf // between Compute resource and Compute data source schemas // Compute read function will also update resource ID on success, so that Terraform // will know the resource exists - diags := resourceComputeRead(ctx, d, m) - return append(diags, warnings.Get()...) + return append(resourceComputeRead(ctx, d, m), warnings.Get()...) 
} func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceComputeRead: called for Compute name %s, RG ID %d", d.Get("name").(string), d.Get("rg_id").(int)) - c := m.(*controller.ControllerCfg) + // c := m.(*controller.ControllerCfg) computeRec, err := utilityComputeCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -473,23 +517,24 @@ func resourceComputeRead(ctx context.Context, d *schema.ResourceData, m interfac switch computeRec.Status { case status.Deleted: - restoreReq := compute.RestoreRequest{ComputeID: computeRec.ID} - enableReq := compute.EnableRequest{ComputeID: computeRec.ID} + // restoreReq := compute.RestoreRequest{ComputeID: computeRec.ID} + // enableReq := compute.EnableRequest{ComputeID: computeRec.ID} - _, err := c.CloudBroker().Compute().Restore(ctx, restoreReq) - if err != nil { - return diag.FromErr(err) - } + // _, err := c.CloudBroker().Compute().Restore(ctx, restoreReq) + // if err != nil { + // return diag.FromErr(err) + // } - _, err = c.CloudBroker().Compute().Enable(ctx, enableReq) - if err != nil { - return diag.FromErr(err) - } + // _, err = c.CloudBroker().Compute().Enable(ctx, enableReq) + // if err != nil { + // return diag.FromErr(err) + // } - hasChanged = true + // hasChanged = true case status.Destroyed: d.SetId("") - return resourceComputeCreate(ctx, d, m) + return diag.Errorf("The resource cannot be read because it has been destroyed") + // return resourceComputeCreate(ctx, d, m) case status.Disabled: log.Debugf("The compute is in status: %s, troubles may occur with update. Please, enable compute first.", computeRec.Status) case status.Redeploying: @@ -535,44 +580,14 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf return diag.FromErr(err) } - if d.HasChange("enabled") { - enabled := d.Get("enabled").(bool) - if enabled { - req := compute.EnableRequest{ - ComputeID: computeRec.ID, - } - - if _, err := c.CloudBroker().Compute().Enable(ctx, req); err != nil { - return diag.FromErr(err) - } - } else { - req := compute.DisableRequest{ - ComputeID: computeRec.ID, - } - - if _, err := c.CloudBroker().Compute().Disable(ctx, req); err != nil { - return diag.FromErr(err) - } - } - log.Debugf("resourceComputeUpdate: enable=%s Compute ID %v after completing its resource configuration", d.Id(), enabled) - } - // check compute statuses switch computeRec.Status { case status.Deleted: - restoreReq := compute.RestoreRequest{ComputeID: computeRec.ID} - enableReq := compute.EnableRequest{ComputeID: computeRec.ID} - - _, err := c.CloudBroker().Compute().Restore(ctx, restoreReq) - if err != nil { - return diag.FromErr(err) - } - - _, err = c.CloudBroker().Compute().Enable(ctx, enableReq) - if err != nil { - return diag.FromErr(err) + if restore, ok := d.GetOk("restore"); ok && restore.(bool) { + if err := utilityComputeRestore(ctx, d, m); err != nil { + return diag.FromErr(err) + } } - case status.Destroyed: d.SetId("") return resourceComputeCreate(ctx, d, m) @@ -585,67 +600,30 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf case status.Modeled: return diag.Errorf("The compute is in status: %s, please, contant the support for more information", computeRec.Status) } - - doUpdate := false - resizeReq := compute.ResizeRequest{ - ComputeID: computeRec.ID, - Force: true, - } - warnings := dc.Warnings{} - oldCpu, newCpu := d.GetChange("cpu") - if oldCpu.(int) != newCpu.(int) { - resizeReq.CPU = 
uint64(newCpu.(int)) - doUpdate = true - } else { - resizeReq.CPU = 0 - } - - oldRam, newRam := d.GetChange("ram") - if oldRam.(int) != newRam.(int) { - resizeReq.RAM = uint64(newRam.(int)) - doUpdate = true - } else { - resizeReq.RAM = 0 + if d.HasChange("enabled") { + if err := utilityComputeEnabled(ctx, d, m); err != nil { + return diag.FromErr(err) + } } - if doUpdate { - log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d", - oldCpu.(int), newCpu.(int), - oldRam.(int), newRam.(int)) - _, err := c.CloudBroker().Compute().Resize(ctx, resizeReq) - if err != nil { + if d.HasChange("started") { + if err := utilityComputeStarted(ctx, d, m); err != nil { return diag.FromErr(err) } } - oldSize, newSize := d.GetChange("boot_disk_size") - if oldSize.(int) < newSize.(int) { - req := compute.DiskResizeRequest{ComputeID: computeRec.ID} - if diskId, ok := d.GetOk("boot_disk_id"); ok { - req.DiskID = uint64(diskId.(int)) - - } else { - bootDisk, err := utilityComputeBootDiskCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - req.DiskID = bootDisk.ID + if d.HasChanges("cpu", "ram") { + if err := utilityComputeResize(ctx, d, m); err != nil { + return diag.FromErr(err) } - req.Size = uint64(newSize.(int)) - - log.Debugf("resourceComputeUpdate: compute ID %s, boot disk ID %d resize %d -> %d", - d.Id(), d.Get("boot_disk_id").(int), oldSize.(int), newSize.(int)) + } - _, err := c.CloudBroker().Compute().DiskResize(ctx, req) - if err != nil { + if d.HasChange("boot_disk_size") { + if err := utilityComputeBootDiskResize(ctx, d, m); err != nil { return diag.FromErr(err) } - - } else if oldSize.(int) > newSize.(int) { - log.Warnf("resourceComputeUpdate: compute ID %s - shrinking boot disk is not allowed", d.Id()) } if d.HasChange("extra_disks") { @@ -662,584 +640,103 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf } } - if d.HasChange("description") || d.HasChange("name") { - req := compute.UpdateRequest{ - ComputeID: computeRec.ID, - Name: d.Get("name").(string), - } - - if desc, ok := d.GetOk("desc"); ok { - req.Description = desc.(string) - } - - if _, err := c.CloudBroker().Compute().Update(ctx, req); err != nil { + if d.HasChanges("description", "name") { + if err := utilityComputeUpdate(ctx, d, m); err != nil { return diag.FromErr(err) } } if d.HasChange("disks") { - deletedDisks := make([]interface{}, 0) - addedDisks := make([]interface{}, 0) - updatedDisks := make([]interface{}, 0) - - oldDisks, newDisks := d.GetChange("disks") - oldConv := oldDisks.([]interface{}) - newConv := newDisks.([]interface{}) - - for _, el := range oldConv { - if !isContainsDisk(newConv, el) { - deletedDisks = append(deletedDisks, el) - } - } - - for _, el := range newConv { - if !isContainsDisk(oldConv, el) { - addedDisks = append(addedDisks, el) - } else { - if isChangeDisk(oldConv, el) { - updatedDisks = append(updatedDisks, el) - } - } - } - - if len(deletedDisks) > 0 { - stopReq := compute.StopRequest{ - ComputeID: computeRec.ID, - Force: false, - } - - _, err := c.CloudBroker().Compute().Stop(ctx, stopReq) - if err != nil { - return diag.FromErr(err) - } - - for _, disk := range deletedDisks { - diskConv := disk.(map[string]interface{}) - if diskConv["disk_type"].(string) == "B" { - continue - } - - req := compute.DiskDelRequest{ - ComputeID: computeRec.ID, - DiskID: uint64(diskConv["disk_id"].(int)), - Permanently: diskConv["permanently"].(bool), - } - - _, err := c.CloudBroker().Compute().DiskDel(ctx, req) - if err != nil { - 
return diag.FromErr(err) - } - } - req := compute.StartRequest{ - ComputeID: computeRec.ID, - AltBootID: 0, - } - _, err = c.CloudBroker().Compute().Start(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - - if len(addedDisks) > 0 { - for _, disk := range addedDisks { - diskConv := disk.(map[string]interface{}) - if diskConv["disk_type"].(string) == "B" { - continue - } - req := compute.DiskAddRequest{ - ComputeID: computeRec.ID, - DiskName: diskConv["disk_name"].(string), - Size: uint64(diskConv["size"].(int)), - SepID: uint64(diskConv["sep_id"].(int)), - } - - if diskConv["disk_type"].(string) != "" { - req.DiskType = diskConv["disk_type"].(string) - } - if diskConv["pool"].(string) != "" { - req.Pool = diskConv["pool"].(string) - } - if diskConv["desc"].(string) != "" { - req.Description = diskConv["desc"].(string) - } - if diskConv["image_id"].(int) != 0 { - req.ImageID = uint64(diskConv["image_id"].(int)) - } - _, err := c.CloudBroker().Compute().DiskAdd(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - } - - if len(updatedDisks) > 0 { - for _, disk := range updatedDisks { - diskConv := disk.(map[string]interface{}) - if diskConv["disk_type"].(string) == "B" { - continue - } - req := compute.DiskResizeRequest{ - ComputeID: computeRec.ID, - DiskID: uint64(diskConv["disk_id"].(int)), - Size: uint64(diskConv["size"].(int)), - } - - _, err := c.CloudBroker().Compute().DiskResize(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - } - } - - if d.HasChange("started") { - if d.Get("started").(bool) { - req := compute.StartRequest{ - ComputeID: computeRec.ID, - } - if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil { - return diag.FromErr(err) - } - } else { - req := compute.StopRequest{ - ComputeID: computeRec.ID, - } - if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil { - return diag.FromErr(err) - } + if err := utilityComputeUpdateDisks(ctx, d, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("affinity_label") { - affinityLabel := d.Get("affinity_label").(string) - if affinityLabel == "" { - req := compute.AffinityLabelRemoveRequest{ - ComputeIDs: []uint64{computeRec.ID}, - } - - _, err := c.CloudBroker().Compute().AffinityLabelRemove(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - req := compute.AffinityLabelSetRequest{ - ComputeIDs: []uint64{computeRec.ID}, - AffinityLabel: affinityLabel, - } - - _, err := c.CloudBroker().Compute().AffinityLabelSet(ctx, req) - if err != nil { + if err := utilityComputeUpdateAffinityLabel(ctx, d, m); err != nil { return diag.FromErr(err) } } if d.HasChange("affinity_rules") { - deletedAR := make([]interface{}, 0) - addedAR := make([]interface{}, 0) - - oldAR, newAR := d.GetChange("affinity_rules") - oldConv := oldAR.([]interface{}) - newConv := newAR.([]interface{}) - - if len(newConv) == 0 { - req := compute.AffinityRulesClearRequest{ - ComputeIDs: []uint64{computeRec.ID}, - } - - _, err := c.CloudBroker().Compute().AffinityRulesClear(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } else { - for _, el := range oldConv { - if !isContainsAR(newConv, el) { - deletedAR = append(deletedAR, el) - } - } - for _, el := range newConv { - if !isContainsAR(oldConv, el) { - addedAR = append(addedAR, el) - } - } - - if len(deletedAR) > 0 { - for _, ar := range deletedAR { - arConv := ar.(map[string]interface{}) - req := compute.AffinityRuleRemoveRequest{ - ComputeIDs: []uint64{computeRec.ID}, - Topology: arConv["topology"].(string), - Policy: 
arConv["policy"].(string), - Mode: arConv["mode"].(string), - Key: arConv["key"].(string), - Value: arConv["value"].(string), - } - - _, err := c.CloudBroker().Compute().AffinityRuleRemove(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - } - if len(addedAR) > 0 { - for _, ar := range addedAR { - arConv := ar.(map[string]interface{}) - req := compute.AffinityRuleAddRequest{ - ComputeIDs: []uint64{computeRec.ID}, - Topology: arConv["topology"].(string), - Policy: arConv["policy"].(string), - Mode: arConv["mode"].(string), - Key: arConv["key"].(string), - Value: arConv["value"].(string), - } - - _, err := c.CloudBroker().Compute().AffinityRuleAdd(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - } + if err := utilityComputeUpdateAffinityRules(ctx, d, m); err != nil { + return diag.FromErr(err) } } - if d.HasChange("tags") { - oldSet, newSet := d.GetChange("tags") - deletedTags := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() - if len(deletedTags) > 0 { - for _, tagInterface := range deletedTags { - tagItem := tagInterface.(map[string]interface{}) - req := compute.TagRemoveRequest{ - ComputeIDs: []uint64{computeRec.ID}, - Key: tagItem["key"].(string), - } - - _, err := c.CloudBroker().Compute().TagRemove(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } + if d.HasChange("anti_affinity_rules") { + if err := utilityComputeUpdateAntiAffinityRules(ctx, d, m); err != nil { + return diag.FromErr(err) } + } - addedTags := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() - if len(addedTags) > 0 { - for _, tagInterface := range addedTags { - tagItem := tagInterface.(map[string]interface{}) - req := compute.TagAddRequest{ - ComputeIDs: []uint64{computeRec.ID}, - Key: tagItem["key"].(string), - Value: tagItem["value"].(string), - } - - _, err := c.CloudBroker().Compute().TagAdd(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } + if d.HasChange("tags") { + if err := utilityComputeUpdateTags(ctx, d, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("port_forwarding") { - oldSet, newSet := d.GetChange("port_forwarding") - deletedPfws := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() - if len(deletedPfws) > 0 { - for _, pfwInterface := range deletedPfws { - pfwItem := pfwInterface.(map[string]interface{}) - req := compute.PFWDelRequest{ - ComputeID: computeRec.ID, - PublicPortStart: uint64(pfwItem["public_port_start"].(int)), - LocalBasePort: uint64(pfwItem["local_port"].(int)), - Proto: pfwItem["proto"].(string), - } - - if pfwItem["public_port_end"].(int) == -1 { - req.PublicPortEnd = req.PublicPortStart - } else { - req.PublicPortEnd = uint64(pfwItem["public_port_end"].(int)) - } - - _, err := c.CloudBroker().Compute().PFWDel(ctx, req) - if err != nil { - warnings.Add(err) - } - } - } - - addedPfws := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() - if len(addedPfws) > 0 { - for _, pfwInterface := range addedPfws { - pfwItem := pfwInterface.(map[string]interface{}) - req := compute.PFWAddRequest{ - ComputeID: computeRec.ID, - PublicPortStart: uint64(pfwItem["public_port_start"].(int)), - PublicPortEnd: int64(pfwItem["public_port_end"].(int)), - LocalBasePort: uint64(pfwItem["local_port"].(int)), - Proto: pfwItem["proto"].(string), - } - - _, err := c.CloudBroker().Compute().PFWAdd(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } + if err := utilityComputeUpdatePFW(ctx, d, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("user_access") { - oldSet, 
newSet := d.GetChange("user_access") - deletedUserAcess := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() - if len(deletedUserAcess) > 0 { - for _, userAcessInterface := range deletedUserAcess { - userAccessItem := userAcessInterface.(map[string]interface{}) - req := compute.UserRevokeRequest{ - ComputeID: computeRec.ID, - Username: userAccessItem["username"].(string), - } - - _, err := c.CloudBroker().Compute().UserRevoke(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - } - - addedUserAccess := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() - if len(addedUserAccess) > 0 { - for _, userAccessInterface := range addedUserAccess { - userAccessItem := userAccessInterface.(map[string]interface{}) - req := compute.UserGrantRequest{ - ComputeID: computeRec.ID, - Username: userAccessItem["username"].(string), - AccessType: userAccessItem["access_type"].(string), - } - - _, err := c.CloudBroker().Compute().UserGrant(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } + if err := utilityComputeUpdateUserAccess(ctx, d, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("snapshot") { - oldSet, newSet := d.GetChange("snapshot") - deletedSnapshots := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() - if len(deletedSnapshots) > 0 { - for _, snapshotInterface := range deletedSnapshots { - snapshotItem := snapshotInterface.(map[string]interface{}) - req := compute.SnapshotDeleteRequest{ - ComputeID: computeRec.ID, - Label: snapshotItem["label"].(string), - } - - _, err := c.CloudBroker().Compute().SnapshotDelete(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - } - - addedSnapshots := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() - if len(addedSnapshots) > 0 { - for _, snapshotInterface := range addedSnapshots { - snapshotItem := snapshotInterface.(map[string]interface{}) - req := compute.SnapshotCreateRequest{ - ComputeID: computeRec.ID, - Label: snapshotItem["label"].(string), - } - - _, err := c.CloudBroker().Compute().SnapshotCreate(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } + if err := utilityComputeUpdateSnapshot(ctx, d, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("rollback") { - if rollback, ok := d.GetOk("rollback"); ok { - req := compute.StopRequest{ - ComputeID: computeRec.ID, - Force: false, - } - - _, err := c.CloudBroker().Compute().Stop(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - rollbackInterface := rollback.(*schema.Set).List()[0] - rollbackItem := rollbackInterface.(map[string]interface{}) - - rollbackReq := compute.SnapshotRollbackRequest{ - ComputeID: computeRec.ID, - Label: rollbackItem["label"].(string), - } - - _, err = c.CloudBroker().Compute().SnapshotRollback(ctx, rollbackReq) - if err != nil { - return diag.FromErr(err) - } + if err := utilityComputeRollback(ctx, d, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("cd") { - oldSet, newSet := d.GetChange("cd") - deletedCd := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() - if len(deletedCd) > 0 { - req := compute.CDEjectRequest{ - ComputeID: computeRec.ID, - } - - _, err := c.CloudBroker().Compute().CDEject(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - - addedCd := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() - if len(addedCd) > 0 { - cdItem := addedCd[0].(map[string]interface{}) - req := compute.CDInsertRequest{ - ComputeID: computeRec.ID, - CDROMID: uint64(cdItem["cdrom_id"].(int)), - } - - 
_, err := c.CloudBroker().Compute().CDInsert(ctx, req) - if err != nil { - return diag.FromErr(err) - } + if err := utilityComputeUpdateCD(ctx, d, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("pin_to_stack") { - oldPin, newPin := d.GetChange("pin_to_stack") - if oldPin.(bool) == true && newPin.(bool) == false { - req := compute.UnpinFromStackRequest{ - ComputeID: computeRec.ID, - } - - _, err := c.CloudBroker().Compute().UnpinFromStack(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - if oldPin.(bool) == false && newPin.(bool) == true { - req := compute.PinToStackRequest{ - ComputeID: computeRec.ID, - } - - _, err := c.CloudBroker().Compute().PinToStack(ctx, req) - if err != nil { - return diag.FromErr(err) - } + if err := utilityComputePinToStack(ctx, d, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("pause") { - oldPause, newPause := d.GetChange("pause") - if oldPause.(bool) == true && newPause.(bool) == false { - req := compute.ResumeRequest{ - ComputeID: computeRec.ID, - } - _, err := c.CloudBroker().Compute().Resume(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - if oldPause.(bool) == false && newPause.(bool) == true { - req := compute.PauseRequest{ - ComputeID: computeRec.ID, - } - - _, err := c.CloudBroker().Compute().Pause(ctx, req) - if err != nil { - return diag.FromErr(err) - } + if err := utilityComputePause(ctx, d, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("reset") { - oldReset, newReset := d.GetChange("reset") - if oldReset.(bool) == false && newReset.(bool) == true { - req := compute.ResetRequest{ - ComputeID: computeRec.ID, - } - _, err := c.CloudBroker().Compute().Reset(ctx, req) - if err != nil { - return diag.FromErr(err) - } + if err := utilityComputeReset(ctx, d, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("image_id") { - oldImage, newImage := d.GetChange("image_id") - stopReq := compute.StopRequest{ - ComputeID: computeRec.ID, - Force: false, - } - - _, err := c.CloudBroker().Compute().Stop(ctx, stopReq) - if err != nil { + if err := utilityComputeUpdateImage(ctx, d, m); err != nil { return diag.FromErr(err) } + } - if oldImage.(int) != newImage.(int) { - req := compute.RedeployRequest{ - ComputeID: computeRec.ID, - ImageID: uint64(newImage.(int)), - } - - if diskSize, ok := d.GetOk("boot_disk_size"); ok { - req.DiskSize = uint64(diskSize.(int)) - } - if dataDisks, ok := d.GetOk("data_disks"); ok { - req.DataDisks = dataDisks.(string) - } - if autoStart, ok := d.GetOk("auto_start"); ok { - req.AutoStart = autoStart.(bool) - } - if forceStop, ok := d.GetOk("force_stop"); ok { - req.ForceStop = forceStop.(bool) - } - - _, err := c.CloudBroker().Compute().Redeploy(ctx, req) - if err != nil { - return diag.FromErr(err) - } + if d.HasChange("custom_fields") { + if err := utilityComputeUpdateCustomFields(ctx, d, m); err != nil { + return diag.FromErr(err) } } - // if d.HasChange("custom_fields") { - // val := d.Get("custom_fields").(string) - // val = strings.ReplaceAll(val, "\\", "") - // val = strings.ReplaceAll(val, "\n", "") - // val = strings.ReplaceAll(val, "\t", "") - // val = strings.TrimSpace(val) - - // if len(val) > 0 { - // req := compute.SetCustomFieldsRequest{ - // ComputeID: computeRec.ID, - // CustomFields: val, - // } - - // _, err := c.CloudBroker().Compute().SetCustomFields(ctx, req) - // if err != nil { - // return diag.FromErr(err) - // } - // } else { - // req := compute.DeleteCustomFieldsRequest{ - // ComputeID: computeRec.ID, - // } - - // _, err 
:= c.CloudBroker().Compute().DeleteCustomFields(ctx, req) - // if err != nil { - // return diag.FromErr(err) - // } - // } - // } todo: uncomment when sdk updates - - diags := resourceComputeRead(ctx, d, m) - - return append(diags, warnings.Get()...) + return append(resourceComputeRead(ctx, d, m), warnings.Get()...) } func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -1262,6 +759,8 @@ func resourceComputeDelete(ctx context.Context, d *schema.ResourceData, m interf return diag.FromErr(err) } + d.SetId("") + return nil } @@ -1289,843 +788,3 @@ func ResourceCompute() *schema.Resource { Schema: resourceComputeSchemaMake(), } } - -func resourceComputeSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.", - }, - "rg_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: validation.IntAtLeast(1), - Description: "ID of the resource group where this compute should be deployed.", - }, - "driver": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: statefuncs.StateFuncToUpper, - ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating - Description: "Hardware architecture of this compute instance.", - }, - "cpu": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, constants.MaxCpusPerCompute), - Description: "Number of CPUs to allocate to this compute instance.", - }, - "ram": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(constants.MinRamPerCompute), - Description: "Amount of RAM in MB to allocate to this compute instance.", - }, - "image_id": { - Type: schema.TypeInt, - Required: true, - //ForceNew: true, //REDEPLOY - Description: "ID of the OS image to base this compute instance on.", - }, - "boot_disk_size": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "This compute instance boot disk size in GB. Make sure it is large enough to accomodate selected OS image.", - }, - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - Description: "ID of SEP to create bootDisk on. Uses image's sepId if not set.", - }, - "pool": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: "Pool to use if sepId is set, can be also empty if needed to be chosen by system.", - }, - "cloud_init": { - Type: schema.TypeString, - Optional: true, - Description: "Optional cloud_init parameters. 
Applied when creating new compute instance only, ignored in all other cases.", - }, - "description": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Optional text description of this compute instance.", - }, - "started": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "Is compute started.", - }, - "is": { - Type: schema.TypeString, - Optional: true, - Description: "system name", - }, - "ipa_type": { - Type: schema.TypeString, - Optional: true, - Description: "compute purpose", - }, - "custom_fields": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "network": { - Type: schema.TypeSet, - Optional: true, - MinItems: 1, - MaxItems: constants.MaxNetworksPerCompute, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "net_type": { - Type: schema.TypeString, - Required: true, - StateFunc: statefuncs.StateFuncToUpper, - ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS"}, false), // observe case while validating - Description: "Type of the network for this connection, either EXTNET or VINS.", - }, - - "net_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the network for this connection.", - }, - - "ip_address": { - Type: schema.TypeString, - Optional: true, - Computed: true, - DiffSuppressFunc: networkSubresIPAddreDiffSupperss, - Description: "Optional IP address to assign to this connection. This IP should belong to the selected network and free for use.", - }, - - "mac": { - Type: schema.TypeString, - Computed: true, - Description: "MAC address associated with this connection. MAC address is assigned automatically.", - }, - }, - }, - Description: "Optional network connection(s) for this compute. You may specify several network blocks, one for each connection.", - }, - "affinity_label": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "Set affinity label for compute", - }, - "affinity_rules": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "topology": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"node", "compute"}, false), - Description: "compute or node, for whom rule applies", - }, - "policy": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"RECOMMENDED", "REQUIRED"}, false), - Description: "RECOMMENDED or REQUIRED, the degree of 'strictness' of this rule", - }, - "mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"EQ", "NE", "ANY"}, false), - Description: "EQ or NE or ANY - the comparison mode is 'value', recorded by the specified 'key'", - }, - "key": { - Type: schema.TypeString, - Required: true, - Description: "key that are taken into account when analyzing this rule will be identified", - }, - "value": { - Type: schema.TypeString, - Required: true, - Description: "value that must match the key to be taken into account when analyzing this rule", - }, - }, - }, - }, - "anti_affinity_rules": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "topology": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"node", "compute"}, false), - Description: "compute or node, for whom rule applies", - }, - "policy": { - Type: schema.TypeString, - Required: true, - ValidateFunc: 
validation.StringInSlice([]string{"RECOMMENDED", "REQUIRED"}, false), - Description: "RECOMMENDED or REQUIRED, the degree of 'strictness' of this rule", - }, - "mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"EQ", "NE", "ANY"}, false), - Description: "EQ or NE or ANY - the comparison mode is 'value', recorded by the specified 'key'", - }, - "key": { - Type: schema.TypeString, - Required: true, - Description: "key that are taken into account when analyzing this rule will be identified", - }, - "value": { - Type: schema.TypeString, - Required: true, - Description: "value that must match the key to be taken into account when analyzing this rule", - }, - }, - }, - }, - "disks": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk_name": { - Type: schema.TypeString, - Required: true, - Description: "Name for disk", - }, - "size": { - Type: schema.TypeInt, - Required: true, - Description: "Disk size in GiB", - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: "Storage endpoint provider ID; by default the same with boot disk", - }, - "disk_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false), - Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'", - }, - "pool": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: "Pool name; by default will be chosen automatically", - }, - "desc": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: "Optional description", - }, - "image_id": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: "Specify image id for create disk from template", - }, - "permanently": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: "Disk deletion status", - }, - "disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "Disk ID", - }, - "shareable": { - Type: schema.TypeBool, - Computed: true, - }, - "size_max": { - Type: schema.TypeInt, - Computed: true, - }, - "size_used": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "with_default_vins": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "Create compute with default resgroup ViNS (true) or without any interfaces (false). 
This parameter is ignored if network block is specified", - }, - "boot_disk": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk_name": { - Type: schema.TypeString, - Required: true, - Description: "Name for disk", - }, - "size": { - Type: schema.TypeInt, - Required: true, - Description: "Disk size in GiB", - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: "Storage endpoint provider ID; by default the same with boot disk", - }, - "disk_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"B", "D"}, false), - Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data'", - }, - "pool": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: "Pool name; by default will be chosen automatically", - }, - "desc": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: "Optional description", - }, - "image_id": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: "Specify image id for create disk from template", - }, - "permanently": { - Type: schema.TypeBool, - Optional: true, - Description: "Disk deletion status", - }, - "disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "Disk ID", - }, - "shareable": { - Type: schema.TypeBool, - Computed: true, - }, - "size_used": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "extra_disks": { - Type: schema.TypeSet, - Optional: true, - MaxItems: constants.MaxExtraDisksPerCompute, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "Optional list of IDs of extra disks to attach to this compute. You may specify several extra disks.", - }, - "tags": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "port_forwarding": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "public_port_start": { - Type: schema.TypeInt, - Required: true, - }, - "public_port_end": { - Type: schema.TypeInt, - Optional: true, - Default: -1, - }, - "local_port": { - Type: schema.TypeInt, - Required: true, - }, - "proto": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"tcp", "udp"}, false), - }, - }, - }, - }, - "user_access": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "username": { - Type: schema.TypeString, - Required: true, - }, - "access_type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "snapshot": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "label": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "rollback": { - Type: schema.TypeSet, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "label": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "cd": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cdrom_id": { - Type: schema.TypeInt, - Required: true, - }, - }, - }, - }, - "pin_to_stack": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - 
"enabled": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: "If true - enable compute, else - disable", - }, - "pause": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "reset": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "auto_start": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Flag for redeploy compute", - }, - "force_stop": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Flag for redeploy compute", - }, - "data_disks": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"KEEP", "DETACH", "DESTROY"}, false), - Default: "DETACH", - Description: "Flag for redeploy compute", - }, - "detach_disks": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "permanently": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - // Computed properties - "account_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the account this compute instance belongs to.", - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account this compute instance belongs to.", - }, - "affinity_weight": { - Type: schema.TypeInt, - Computed: true, - }, - "arch": { - Type: schema.TypeString, - Computed: true, - }, - "boot_order": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "boot_disk_id": { - Type: schema.TypeInt, - Computed: true, - Description: "This compute instance boot disk ID.", - }, - "clone_reference": { - Type: schema.TypeInt, - Computed: true, - }, - "clones": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "computeci_id": { - Type: schema.TypeInt, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "devices": { - Type: schema.TypeString, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "compute_id": { - Type: schema.TypeInt, - Computed: true, - }, - "interfaces": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "conn_id": { - Type: schema.TypeInt, - Computed: true, - }, - "conn_type": { - Type: schema.TypeString, - Computed: true, - }, - "def_gw": { - Type: schema.TypeString, - Computed: true, - }, - "flip_group_id": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "ip_address": { - Type: schema.TypeString, - Computed: true, - }, - "listen_ssh": { - Type: schema.TypeBool, - Computed: true, - }, - "mac": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "net_id": { - Type: schema.TypeInt, - Computed: true, - }, - "netmask": { - Type: schema.TypeInt, - Computed: true, - }, - "net_type": { - Type: schema.TypeString, - Computed: true, - }, - "pci_slot": { - Type: schema.TypeInt, - Computed: true, - }, - "qos": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "e_rate": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: 
schema.TypeString, - Computed: true, - }, - "in_brust": { - Type: schema.TypeInt, - Computed: true, - }, - "in_rate": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "target": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "vnfs": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - }, - }, - }, - "lock_status": { - Type: schema.TypeString, - Computed: true, - }, - "manager_id": { - Type: schema.TypeInt, - Computed: true, - }, - "manager_type": { - Type: schema.TypeString, - Computed: true, - }, - "migrationjob": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "os_users": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "guid": { - Type: schema.TypeString, - Computed: true, - Description: "GUID of this guest OS user.", - }, - - "login": { - Type: schema.TypeString, - Computed: true, - Description: "Login name of this guest OS user.", - }, - - "password": { - Type: schema.TypeString, - Computed: true, - //Sensitive: true, - Description: "Password of this guest OS user.", - }, - - "public_key": { - Type: schema.TypeString, - Computed: true, - Description: "SSH public key of this guest OS user.", - }, - }, - }, - Description: "Guest OS users provisioned on this compute instance.", - }, - "pinned": { - Type: schema.TypeBool, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "registered": { - Type: schema.TypeBool, - Computed: true, - }, - "res_name": { - Type: schema.TypeString, - Computed: true, - }, - "rg_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the resource group where this compute instance is located.", - }, - "snap_sets": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disks": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "label": { - Type: schema.TypeString, - Computed: true, - }, - "timestamp": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "stack_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the stack, on which VM started", - }, - "stack_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the stack, on which VM started", - }, - "stateless_sep_id": { - Type: schema.TypeInt, - Computed: true, - }, - "stateless_sep_type": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "user_managed": { - Type: schema.TypeBool, - Computed: true, - }, - "vgpus": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "virtual_image_id": { - Type: schema.TypeInt, - Computed: true, - }, - } -} diff --git a/internal/service/cloudbroker/kvmvm/utility_compute.go b/internal/service/cloudbroker/kvmvm/utility_compute.go index a723877..256db47 100644 --- a/internal/service/cloudbroker/kvmvm/utility_compute.go +++ b/internal/service/cloudbroker/kvmvm/utility_compute.go @@ -34,6 +34,7 @@ package kvmvm import 
( "context" "strconv" + "strings" log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" @@ -42,6 +43,260 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) +func utilityComputeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + enabled := d.Get("enabled").(bool) + + if enabled { + req := compute.EnableRequest{ + ComputeID: computeId, + } + + if _, err := c.CloudBroker().Compute().Enable(ctx, req); err != nil { + return err + } + } else { + req := compute.DisableRequest{ + ComputeID: computeId, + } + + if _, err := c.CloudBroker().Compute().Disable(ctx, req); err != nil { + return err + } + } + log.Debugf("resourceComputeUpdate: enable=%v Compute ID %s after completing its resource configuration", enabled, d.Id()) + + return nil +} + +func utilityComputeStarted(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + if d.Get("started").(bool) { + req := compute.StartRequest{ + ComputeID: computeId, + } + if altBootId, ok := d.Get("alt_boot_id").(int); ok { + req.AltBootID = uint64(altBootId) + } + if stackId, ok := d.Get("stack_id").(int); ok { + req.StackID = uint64(stackId) + } + if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil { + return err + } + } else { + req := compute.StopRequest{ + ComputeID: computeId, + } + if force, ok := d.Get("force_stop").(bool); ok { + req.Force = force + } + if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil { + return err + } + } + return nil +} + +func utilityComputeResize(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + resizeReq := compute.ResizeRequest{ + ComputeID: computeId, + Force: true, + } + doUpdate := false + + oldCpu, newCpu := d.GetChange("cpu") + if oldCpu.(int) != newCpu.(int) { + resizeReq.CPU = uint64(newCpu.(int)) + doUpdate = true + } else { + resizeReq.CPU = 0 + } + + oldRam, newRam := d.GetChange("ram") + if oldRam.(int) != newRam.(int) { + resizeReq.RAM = uint64(newRam.(int)) + doUpdate = true + } else { + resizeReq.RAM = 0 + } + + if doUpdate { + log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d", + oldCpu.(int), newCpu.(int), + oldRam.(int), newRam.(int)) + _, err := c.CloudBroker().Compute().Resize(ctx, resizeReq) + if err != nil { + return err + } + } + + return nil +} + +func utilityComputeBootDiskResize(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + oldSize, newSize := d.GetChange("boot_disk_size") + if oldSize.(int) < newSize.(int) { + req := compute.DiskResizeRequest{ComputeID: computeId, Size: uint64(newSize.(int))} + if diskId, ok := d.GetOk("boot_disk_id"); ok { + req.DiskID = uint64(diskId.(int)) + + } else { + bootDisk, err := utilityComputeBootDiskCheckPresence(ctx, d, m) + if err != nil { + return err + } + + req.DiskID = bootDisk.ID + } + + log.Debugf("resourceComputeUpdate: compute ID %s, boot disk ID %d resize %d -> %d", + d.Id(), d.Get("boot_disk_id").(int), oldSize.(int), newSize.(int)) + + _, err := c.CloudBroker().Compute().DiskResize(ctx, req) + if err != nil { + return err + } + + } else if oldSize.(int) > newSize.(int) { + 
log.Warnf("resourceComputeUpdate: compute ID %s - shrinking boot disk is not allowed", d.Id()) + } + + return nil +} + +func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + deletedDisks := make([]interface{}, 0) + addedDisks := make([]interface{}, 0) + updatedDisks := make([]interface{}, 0) + + oldDisks, newDisks := d.GetChange("disks") + oldConv := oldDisks.([]interface{}) + newConv := newDisks.([]interface{}) + + for _, el := range oldConv { + if !isContainsDisk(newConv, el) { + deletedDisks = append(deletedDisks, el) + } + } + + for _, el := range newConv { + if !isContainsDisk(oldConv, el) { + addedDisks = append(addedDisks, el) + } else { + if isChangeDisk(oldConv, el) { + updatedDisks = append(updatedDisks, el) + } + } + } + + if len(deletedDisks) > 0 { + stopReq := compute.StopRequest{ + ComputeID: computeId, + Force: false, + } + + _, err := c.CloudBroker().Compute().Stop(ctx, stopReq) + if err != nil { + return err + } + + for _, disk := range deletedDisks { + diskConv := disk.(map[string]interface{}) + if diskConv["disk_type"].(string) == "B" { + continue + } + + req := compute.DiskDelRequest{ + ComputeID: computeId, + DiskID: uint64(diskConv["disk_id"].(int)), + Permanently: diskConv["permanently"].(bool), + } + + _, err := c.CloudBroker().Compute().DiskDel(ctx, req) + if err != nil { + return err + } + } + req := compute.StartRequest{ + ComputeID: computeId, + AltBootID: 0, + } + _, err = c.CloudBroker().Compute().Start(ctx, req) + if err != nil { + return err + } + } + + if len(addedDisks) > 0 { + for _, disk := range addedDisks { + diskConv := disk.(map[string]interface{}) + if diskConv["disk_type"].(string) == "B" { + continue + } + req := compute.DiskAddRequest{ + ComputeID: computeId, + DiskName: diskConv["disk_name"].(string), + Size: uint64(diskConv["size"].(int)), + } + if diskConv["sep_id"].(int) != 0 { + req.SepID = uint64(diskConv["sep_id"].(int)) + } + if diskConv["disk_type"].(string) != "" { + req.DiskType = diskConv["disk_type"].(string) + } + if diskConv["pool"].(string) != "" { + req.Pool = diskConv["pool"].(string) + } + if diskConv["desc"].(string) != "" { + req.Description = diskConv["desc"].(string) + } + if diskConv["image_id"].(int) != 0 { + req.ImageID = uint64(diskConv["image_id"].(int)) + } + _, err := c.CloudBroker().Compute().DiskAdd(ctx, req) + if err != nil { + return err + } + } + } + + if len(updatedDisks) > 0 { + for _, disk := range updatedDisks { + diskConv := disk.(map[string]interface{}) + if diskConv["disk_type"].(string) == "B" { + continue + } + req := compute.DiskResizeRequest{ + ComputeID: computeId, + DiskID: uint64(diskConv["disk_id"].(int)), + Size: uint64(diskConv["size"].(int)), + } + + _, err := c.CloudBroker().Compute().DiskResize(ctx, req) + if err != nil { + return err + } + } + } + + return nil +} + func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error { c := m.(*controller.ControllerCfg) @@ -154,6 +409,10 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m req.ComputeID = uint64(d.Get("compute_id").(int)) } + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + res, err := c.CloudBroker().Compute().Get(ctx, req) if err != nil { return nil, err @@ -162,6 +421,15 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m return res, nil } +func 
findBootDisk(disks compute.ListDisks) *compute.ItemDisk { + for _, disk := range disks { + if disk.Type == "B" { + return &disk + } + } + return nil +} + func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool { if newVal != "" && newVal != oldVal { log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal) @@ -288,6 +556,659 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData return nil } +func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + req := compute.UpdateRequest{ + ComputeID: computeId, + } + + if d.HasChange("name") { + req.Name = d.Get("name").(string) + } + if d.HasChange("desc") { + req.Description = d.Get("desc").(string) + } + + if _, err := c.CloudBroker().Compute().Update(ctx, req); err != nil { + return err + } + + return nil +} + +func utilityComputeUpdateAffinityLabel(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + affinityLabel := d.Get("affinity_label").(string) + if affinityLabel == "" { + req := compute.AffinityLabelRemoveRequest{ + ComputeIDs: []uint64{computeId}, + } + + _, err := c.CloudBroker().Compute().AffinityLabelRemove(ctx, req) + if err != nil { + return err + } + } + + req := compute.AffinityLabelSetRequest{ + ComputeIDs: []uint64{computeId}, + AffinityLabel: affinityLabel, + } + + _, err := c.CloudBroker().Compute().AffinityLabelSet(ctx, req) + if err != nil { + return err + } + + return nil +} + +func utilityComputeUpdateAffinityRules(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + deletedAR := make([]interface{}, 0) + addedAR := make([]interface{}, 0) + + oldAR, newAR := d.GetChange("affinity_rules") + oldConv := oldAR.([]interface{}) + newConv := newAR.([]interface{}) + + if len(newConv) == 0 { + req := compute.AffinityRulesClearRequest{ + ComputeIDs: []uint64{computeId}, + } + + _, err := c.CloudBroker().Compute().AffinityRulesClear(ctx, req) + if err != nil { + return err + } + } else { + for _, el := range oldConv { + if !isContainsAR(newConv, el) { + deletedAR = append(deletedAR, el) + } + } + for _, el := range newConv { + if !isContainsAR(oldConv, el) { + addedAR = append(addedAR, el) + } + } + + if len(deletedAR) > 0 { + for _, ar := range deletedAR { + arConv := ar.(map[string]interface{}) + req := compute.AffinityRuleRemoveRequest{ + ComputeIDs: []uint64{computeId}, + Topology: arConv["topology"].(string), + Policy: arConv["policy"].(string), + Mode: arConv["mode"].(string), + Key: arConv["key"].(string), + Value: arConv["value"].(string), + } + + _, err := c.CloudBroker().Compute().AffinityRuleRemove(ctx, req) + if err != nil { + return err + } + } + } + if len(addedAR) > 0 { + for _, ar := range addedAR { + arConv := ar.(map[string]interface{}) + req := compute.AffinityRuleAddRequest{ + ComputeIDs: []uint64{computeId}, + Topology: arConv["topology"].(string), + Policy: arConv["policy"].(string), + Mode: arConv["mode"].(string), + Key: arConv["key"].(string), + Value: arConv["value"].(string), + } + + _, err := c.CloudBroker().Compute().AffinityRuleAdd(ctx, req) + if err != nil { + return err + } + } + } + } + + return nil +} + +func 
utilityComputeUpdateAntiAffinityRules(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + deletedAR := make([]interface{}, 0) + addedAR := make([]interface{}, 0) + + oldAR, newAR := d.GetChange("anti_affinity_rules") + oldConv := oldAR.([]interface{}) + newConv := newAR.([]interface{}) + + if len(newConv) == 0 { + req := compute.AntiAffinityRulesClearRequest{ + ComputeIDs: []uint64{computeId}, + } + + _, err := c.CloudBroker().Compute().AntiAffinityRulesClear(ctx, req) + if err != nil { + return err + } + } else { + for _, el := range oldConv { + if !isContainsAR(newConv, el) { + deletedAR = append(deletedAR, el) + } + } + for _, el := range newConv { + if !isContainsAR(oldConv, el) { + addedAR = append(addedAR, el) + } + } + + if len(deletedAR) > 0 { + for _, ar := range deletedAR { + arConv := ar.(map[string]interface{}) + req := compute.AntiAffinityRuleRemoveRequest{ + ComputeIDs: []uint64{computeId}, + Topology: arConv["topology"].(string), + Policy: arConv["policy"].(string), + Mode: arConv["mode"].(string), + Key: arConv["key"].(string), + Value: arConv["value"].(string), + } + + _, err := c.CloudBroker().Compute().AntiAffinityRuleRemove(ctx, req) + if err != nil { + return err + } + } + } + if len(addedAR) > 0 { + for _, ar := range addedAR { + arConv := ar.(map[string]interface{}) + req := compute.AntiAffinityRuleAddRequest{ + ComputeIDs: []uint64{computeId}, + Topology: arConv["topology"].(string), + Policy: arConv["policy"].(string), + Mode: arConv["mode"].(string), + Key: arConv["key"].(string), + Value: arConv["value"].(string), + } + + _, err := c.CloudBroker().Compute().AntiAffinityRuleAdd(ctx, req) + if err != nil { + return err + } + } + } + } + + return nil +} + +func utilityComputeUpdateTags(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + oldSet, newSet := d.GetChange("tags") + deletedTags := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedTags) > 0 { + for _, tagInterface := range deletedTags { + tagItem := tagInterface.(map[string]interface{}) + req := compute.TagRemoveRequest{ + ComputeIDs: []uint64{computeId}, + Key: tagItem["key"].(string), + } + + _, err := c.CloudBroker().Compute().TagRemove(ctx, req) + if err != nil { + return err + } + } + } + + addedTags := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedTags) > 0 { + for _, tagInterface := range addedTags { + tagItem := tagInterface.(map[string]interface{}) + req := compute.TagAddRequest{ + ComputeIDs: []uint64{computeId}, + Key: tagItem["key"].(string), + Value: tagItem["value"].(string), + } + + _, err := c.CloudBroker().Compute().TagAdd(ctx, req) + if err != nil { + return err + } + } + } + + return nil +} + +func utilityComputeUpdatePFW(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + oldSet, newSet := d.GetChange("port_forwarding") + deletedPfws := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedPfws) > 0 { + for _, pfwInterface := range deletedPfws { + pfwItem := pfwInterface.(map[string]interface{}) + req := compute.PFWDelRequest{ + ComputeID: computeId, + PublicPortStart: uint64(pfwItem["public_port_start"].(int)), + LocalBasePort: uint64(pfwItem["local_port"].(int)), + Proto: 
pfwItem["proto"].(string), + RuleID: uint64(pfwItem["rule_id"].(int)), + } + + if pfwItem["public_port_end"].(int) == -1 { + req.PublicPortEnd = req.PublicPortStart + } else { + req.PublicPortEnd = uint64(pfwItem["public_port_end"].(int)) + } + if pfwItem["reason"].(string) != "" { + req.Reason = pfwItem["reason"].(string) + } + + _, err := c.CloudBroker().Compute().PFWDel(ctx, req) + if err != nil { + return err + } + } + } + + addedPfws := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedPfws) > 0 { + for _, pfwInterface := range addedPfws { + pfwItem := pfwInterface.(map[string]interface{}) + req := compute.PFWAddRequest{ + ComputeID: computeId, + PublicPortStart: uint64(pfwItem["public_port_start"].(int)), + PublicPortEnd: int64(pfwItem["public_port_end"].(int)), + LocalBasePort: uint64(pfwItem["local_port"].(int)), + Proto: pfwItem["proto"].(string), + } + + if pfwItem["reason"].(string) != "" { + req.Reason = pfwItem["reason"].(string) + } + + pwfId, err := c.CloudBroker().Compute().PFWAdd(ctx, req) + if err != nil { + return err + } + d.Set("rule_id", pwfId) + } + } + + return nil +} + +func utilityComputeRestore(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + restoreReq := compute.RestoreRequest{ComputeID: computeId} + + _, err := c.CloudBroker().Compute().Restore(ctx, restoreReq) + if err != nil { + return err + } + + if _, ok := d.GetOk("enabled"); ok { + if err := utilityComputeEnabled(ctx, d, m); err != nil { + return err + } + } + + if _, ok := d.GetOk("started"); ok { + if err := utilityComputeStarted(ctx, d, m); err != nil { + return err + } + } + + return nil +} + +func utilityComputeUpdateUserAccess(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + oldSet, newSet := d.GetChange("user_access") + deletedUserAcess := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedUserAcess) > 0 { + for _, userAcessInterface := range deletedUserAcess { + userAccessItem := userAcessInterface.(map[string]interface{}) + req := compute.UserRevokeRequest{ + ComputeID: computeId, + Username: userAccessItem["username"].(string), + } + + _, err := c.CloudBroker().Compute().UserRevoke(ctx, req) + if err != nil { + return err + } + } + } + + addedUserAccess := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedUserAccess) > 0 { + for _, userAccessInterface := range addedUserAccess { + userAccessItem := userAccessInterface.(map[string]interface{}) + req := compute.UserGrantRequest{ + ComputeID: computeId, + Username: userAccessItem["username"].(string), + AccessType: userAccessItem["access_type"].(string), + } + + _, err := c.CloudBroker().Compute().UserGrant(ctx, req) + if err != nil { + return err + } + } + } + + return nil +} + +func utilityComputeUpdateSnapshot(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + oldSet, newSet := d.GetChange("snapshot") + deletedSnapshots := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedSnapshots) > 0 { + for _, snapshotInterface := range deletedSnapshots { + snapshotItem := snapshotInterface.(map[string]interface{}) + req := compute.SnapshotDeleteRequest{ + ComputeID: computeId, + Label: 
snapshotItem["label"].(string), + } + + _, err := c.CloudBroker().Compute().SnapshotDelete(ctx, req) + if err != nil { + return err + } + } + } + + addedSnapshots := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedSnapshots) > 0 { + for _, snapshotInterface := range addedSnapshots { + snapshotItem := snapshotInterface.(map[string]interface{}) + req := compute.SnapshotCreateRequest{ + ComputeID: computeId, + Label: snapshotItem["label"].(string), + } + + _, err := c.CloudBroker().Compute().SnapshotCreate(ctx, req) + if err != nil { + return err + } + } + } + + return nil +} + +func utilityComputeRollback(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + if rollback, ok := d.GetOk("rollback"); ok { + req := compute.StopRequest{ + ComputeID: computeId, + Force: false, + } + + _, err := c.CloudBroker().Compute().Stop(ctx, req) + if err != nil { + return err + } + + rollbackInterface := rollback.(*schema.Set).List()[0] + rollbackItem := rollbackInterface.(map[string]interface{}) + + rollbackReq := compute.SnapshotRollbackRequest{ + ComputeID: computeId, + Label: rollbackItem["label"].(string), + } + + _, err = c.CloudBroker().Compute().SnapshotRollback(ctx, rollbackReq) + if err != nil { + return err + } + + startReq := compute.StartRequest{ComputeID: computeId} + + log.Debugf("utilityComputeRollback: starting compute %d", computeId) + + _, err = c.CloudBroker().Compute().Start(ctx, startReq) + if err != nil { + return err + } + } + + return nil +} + +func utilityComputeUpdateCD(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + oldSet, newSet := d.GetChange("cd") + deletedCd := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + if len(deletedCd) > 0 { + req := compute.CDEjectRequest{ + ComputeID: computeId, + } + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().Compute().CDEject(ctx, req) + if err != nil { + return err + } + } + + addedCd := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + if len(addedCd) > 0 { + cdItem := addedCd[0].(map[string]interface{}) + req := compute.CDInsertRequest{ + ComputeID: computeId, + CDROMID: uint64(cdItem["cdrom_id"].(int)), + } + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().Compute().CDInsert(ctx, req) + if err != nil { + return err + } + } + return nil +} + +func utilityComputePinToStack(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + oldPin, newPin := d.GetChange("pin_to_stack") + if oldPin.(bool) && !newPin.(bool) { + req := compute.UnpinFromStackRequest{ + ComputeID: computeId, + } + + _, err := c.CloudBroker().Compute().UnpinFromStack(ctx, req) + if err != nil { + return err + } + } + if !oldPin.(bool) && newPin.(bool) { + req := compute.PinToStackRequest{ + ComputeID: computeId, + TargetStackID: uint64(d.Get("target_stack_id").(int)), + } + + if force, ok := d.Get("force_pin").(bool); ok { + req.Force = force + } + _, err := c.CloudBroker().Compute().PinToStack(ctx, req) + if err != nil { + return err + } + } + return nil +} + +func utilityComputePause(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := 
m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + oldPause, newPause := d.GetChange("pause") + if oldPause.(bool) && !newPause.(bool) { + req := compute.ResumeRequest{ + ComputeID: computeId, + } + _, err := c.CloudBroker().Compute().Resume(ctx, req) + if err != nil { + return err + } + } + if !oldPause.(bool) && newPause.(bool) { + req := compute.PauseRequest{ + ComputeID: computeId, + } + + _, err := c.CloudBroker().Compute().Pause(ctx, req) + if err != nil { + return err + } + } + return nil +} + +func utilityComputeReset(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + oldReset, newReset := d.GetChange("reset") + if !oldReset.(bool) && newReset.(bool) { + req := compute.ResetRequest{ + ComputeID: computeId, + } + _, err := c.CloudBroker().Compute().Reset(ctx, req) + if err != nil { + return err + } + } + return nil +} + +func utilityComputeUpdateImage(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + oldImage, newImage := d.GetChange("image_id") + stopReq := compute.StopRequest{ + ComputeID: computeId, + Force: false, + } + + _, err := c.CloudBroker().Compute().Stop(ctx, stopReq) + if err != nil { + return err + } + + if oldImage.(int) != newImage.(int) { + req := compute.RedeployRequest{ + ComputeID: computeId, + ImageID: uint64(newImage.(int)), + } + + if diskSize, ok := d.GetOk("boot_disk_size"); ok { + req.DiskSize = uint64(diskSize.(int)) + } + if dataDisks, ok := d.GetOk("data_disks"); ok { + req.DataDisks = dataDisks.(string) + } + if autoStart, ok := d.GetOk("auto_start"); ok { + req.AutoStart = autoStart.(bool) + } + if forceStop, ok := d.GetOk("force_stop"); ok { + req.ForceStop = forceStop.(bool) + } + + _, err := c.CloudBroker().Compute().Redeploy(ctx, req) + if err != nil { + return err + } + } + return nil +} + +func utilityComputeUpdateCustomFields(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + computeId, _ := strconv.ParseUint(d.Id(), 10, 64) + + val := d.Get("custom_fields").(string) + val = strings.ReplaceAll(val, "\\", "") + val = strings.ReplaceAll(val, "\n", "") + val = strings.ReplaceAll(val, "\t", "") + val = strings.TrimSpace(val) + + if len(val) > 0 { + req := compute.SetCustomFieldsRequest{ + ComputeID: computeId, + CustomFields: val, + } + + _, err := c.CloudBroker().Compute().SetCustomFields(ctx, req) + if err != nil { + return err + } + } + // } else { + // // req := compute.DeleteCustomFieldsRequest{ + // // ComputeID: computeId, + // // } + + // // _, err := c.CloudBroker().Compute().DeleteCustomFields(ctx, req) + // // if err != nil { + // // return err + // // } + // } + return nil +} + func isChangeDisk(els []interface{}, el interface{}) bool { for _, elOld := range els { elOldConv := elOld.(map[string]interface{}) diff --git a/internal/service/cloudbroker/kvmvm/utility_compute_boot_disk.go b/internal/service/cloudbroker/kvmvm/utility_compute_boot_disk.go index 2248f73..999db72 100644 --- a/internal/service/cloudbroker/kvmvm/utility_compute_boot_disk.go +++ b/internal/service/cloudbroker/kvmvm/utility_compute_boot_disk.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
Authors: Petr Krutov, Stanislav Solovev, @@ -10,7 +10,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/internal/service/cloudbroker/lb/data_source_lb.go b/internal/service/cloudbroker/lb/data_source_lb.go index 5b8adab..85b9b07 100644 --- a/internal/service/cloudbroker/lb/data_source_lb.go +++ b/internal/service/cloudbroker/lb/data_source_lb.go @@ -9,7 +9,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -44,11 +44,11 @@ import ( func dataSourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { lb, err := utilityLBCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } d.SetId(strconv.FormatUint(lb.ID, 10)) - flattenLB(d, lb) return nil diff --git a/internal/service/cloudbroker/lb/data_source_lb_list.go b/internal/service/cloudbroker/lb/data_source_lb_list.go index bd49b94..cd6a44e 100644 --- a/internal/service/cloudbroker/lb/data_source_lb_list.go +++ b/internal/service/cloudbroker/lb/data_source_lb_list.go @@ -44,8 +44,10 @@ import ( func dataSourceLBListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { lbList, err := utilityLBListCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } + id := uuid.New() d.SetId(id.String()) d.Set("items", flattenLBList(lbList)) diff --git a/internal/service/cloudbroker/lb/data_source_lb_list_deleted.go b/internal/service/cloudbroker/lb/data_source_lb_list_deleted.go index 49ae8a0..a3169b4 100644 --- a/internal/service/cloudbroker/lb/data_source_lb_list_deleted.go +++ b/internal/service/cloudbroker/lb/data_source_lb_list_deleted.go @@ -44,8 +44,10 @@ import ( func dataSourceLBListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { lbList, err := utilityLBListDeletedCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } + id := uuid.New() d.SetId(id.String()) d.Set("items", flattenLBList(lbList)) diff --git a/internal/service/cloudbroker/lb/flattens.go b/internal/service/cloudbroker/lb/flattens.go index 520cc46..fae1386 100644 --- a/internal/service/cloudbroker/lb/flattens.go +++ b/internal/service/cloudbroker/lb/flattens.go @@ -96,11 +96,14 @@ func flattenLB(d *schema.ResourceData, lb *lb.RecordLB) { d.Set("ha_mode", lb.HAMode) d.Set("ckey", lb.CKey) d.Set("meta", flattens.FlattenMeta(lb.Meta)) + d.Set("acl", flattenACl(lb.ACL)) + d.Set("backend_haip", lb.BackendHAIP) d.Set("backends", flattenLBBackends(lb.Backends)) d.Set("desc", lb.Description) d.Set("dp_api_user", lb.DPAPIUser) d.Set("dp_api_password", lb.DPAPIPassword) d.Set("extnet_id", lb.ExtNetID) + d.Set("frontend_haip", lb.FrontendHAIP) d.Set("frontends", flattenFrontends(lb.Frontends)) d.Set("gid", lb.GID) d.Set("guid", lb.GUID) @@ -108,6 +111,7 @@ func flattenLB(d *schema.ResourceData, lb *lb.RecordLB) { 
d.Set("image_id", lb.ImageID) d.Set("milestones", lb.Milestones) d.Set("name", lb.Name) + d.Set("part_k8s", lb.PartK8s) d.Set("primary_node", flattenNode(lb.PrimaryNode)) d.Set("rg_id", lb.RGID) d.Set("secondary_node", flattenNode(lb.SecondaryNode)) @@ -216,6 +220,7 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} { temp := map[string]interface{}{ "ha_mode": lb.HAMode, "acl": flattenACl(lb.ACL), + "backend_haip": lb.BackendHAIP, "backends": flattenLBBackends(lb.Backends), "created_by": lb.CreatedBy, "created_time": lb.CreatedTime, @@ -225,6 +230,7 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} { "dp_api_user": lb.DPAPIUser, "dp_api_password": lb.DPAPIPassword, "extnet_id": lb.ExtNetID, + "frontend_haip": lb.FrontendHAIP, "frontends": flattenFrontends(lb.Frontends), "gid": lb.GID, "guid": lb.GUID, @@ -247,6 +253,7 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} { } func flattenACl(m interface{}) string { + switch d := m.(type) { case string: return d @@ -259,4 +266,5 @@ func flattenACl(m interface{}) string { default: return "" } + } diff --git a/internal/service/cloudbroker/lb/resource_check_input_values.go b/internal/service/cloudbroker/lb/resource_check_input_values.go index cbc4dc4..e294983 100644 --- a/internal/service/cloudbroker/lb/resource_check_input_values.go +++ b/internal/service/cloudbroker/lb/resource_check_input_values.go @@ -3,66 +3,32 @@ package lb import ( "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" ) -func existLBID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - lbId := uint64(d.Get("lb_id").(int)) +func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics { + errs := []error{} - req := lb.ListRequest{} - - lbList, err := c.CloudBroker().LB().List(ctx, req) - if err != nil { - return false, err + if err := ic.ExistRG(ctx, uint64(d.Get("rg_id").(int)), c); err != nil { + errs = append(errs, err) } - return len(lbList.FilterByID(lbId).Data) != 0, nil -} - -func existRGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - rgId := uint64(d.Get("rg_id").(int)) - - req := rg.ListRequest{} - - rgList, err := c.CloudBroker().RG().List(ctx, req) - if err != nil { - return false, err + if err := ic.ExistExtNetInLb(ctx, uint64(d.Get("extnet_id").(int)), c); err != nil { + errs = append(errs, err) } - return len(rgList.FilterByID(rgId).Data) != 0, nil -} - -func existExtNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - extNetID := uint64(d.Get("extnet_id").(int)) - - req := extnet.ListRequest{} - - extNetList, err := c.CloudBroker().ExtNet().List(ctx, req) - if err != nil { - return false, err + if err := ic.ExistVinsInLb(ctx, uint64(d.Get("vins_id").(int)), c); err != nil { + errs 
= append(errs, err) } - return len(extNetList.FilterByID(extNetID).Data) != 0, nil + return dc.ErrorsToDiagnostics(errs) } -func existViNSID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - vinsID := uint64(d.Get("vins_id").(int)) - - req := vins.ListRequest{} - - vinsList, err := c.CloudBroker().VINS().List(ctx, req) - if err != nil { - return false, err - } - - return len(vinsList.FilterByID(vinsID).Data) != 0, nil +func checkParamsExistenceLb(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics { + err := ic.ExistLB(ctx, uint64(d.Get("lb_id").(int)), c) + return diag.FromErr(err) } diff --git a/internal/service/cloudbroker/lb/resource_lb.go b/internal/service/cloudbroker/lb/resource_lb.go index 4196b5d..4f979ed 100644 --- a/internal/service/cloudbroker/lb/resource_lb.go +++ b/internal/service/cloudbroker/lb/resource_lb.go @@ -34,6 +34,7 @@ package lb import ( "context" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -46,36 +47,13 @@ import ( ) func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBCreate") - - haveRGID, err := existRGID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - if !haveRGID { - return diag.Errorf("resourceLBCreate: can't create LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int)) - } - - haveExtNetID, err := existExtNetID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - if !haveExtNetID { - return diag.Errorf("resourceLBCreate: can't create LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int)) - } - - haveVins, err := existViNSID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } + log.Debugf("resourceLBCreate called with name: %s", d.Get("name").(string)) + c := m.(*controller.ControllerCfg) - if !haveVins { - return diag.Errorf("resourceLBCreate: can't create LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int)) + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags } - c := m.(*controller.ControllerCfg) req := lb.CreateRequest{ Name: d.Get("name").(string), RGID: uint64(d.Get("rg_id").(int)), @@ -88,49 +66,40 @@ func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{} if desc, ok := d.GetOk("desc"); ok { req.Description = desc.(string) } + if haMode, ok := d.GetOk("ha_mode"); ok { + req.HighlyAvailable = haMode.(bool) + } lbId, err := c.CloudBroker().LB().Create(ctx, req) if err != nil { + d.SetId("") return diag.FromErr(err) } d.SetId(strconv.FormatUint(lbId, 10)) d.Set("lb_id", lbId) - _, err = utilityLBCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } + var warnings dc.Warnings if enable, ok := d.GetOk("enable"); ok { - lbId := uint64(d.Get("lb_id").(int)) - if enable.(bool) { - req := lb.EnableRequest{ - LBID: lbId, - } - _, err := c.CloudBroker().LB().Enable(ctx, req) - if err != nil { - return diag.FromErr(err) + if err := resourceLbEnable(ctx, lbId, m); err != nil { + warnings.Add(err) } } else { - req := lb.DisableRequest{ - LBID: lbId, - } - _, err := c.CloudBroker().LB().Disable(ctx, req) - if err != nil { - return diag.FromErr(err) + if err := resourceLbDisable(ctx, lbId, m); err != nil { + warnings.Add(err) } } } - return resourceLBRead(ctx, d, m) + return append(warnings.Get(), resourceLBRead(ctx, d, 
m)...) } func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBRead") + log.Debugf("resourceLBRead called for lb_id %s", d.Id()) - c := m.(*controller.ControllerCfg) + // c := m.(*controller.ControllerCfg) lbRec, err := utilityLBCheckPresence(ctx, d, m) if lbRec == nil { @@ -147,43 +116,44 @@ func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) case status.Created: case status.Deleting: case status.Deleted: - lbId, _ := strconv.ParseUint(d.Id(), 10, 64) - restoreReq := lb.RestoreRequest{LBID: lbId} - - _, err := c.CloudBroker().LB().Restore(ctx, restoreReq) - if err != nil { - return diag.FromErr(err) - } - - if enable := d.Get("enable"); enable.(bool) { - req := lb.EnableRequest{ - LBID: lbId, - } - _, err := c.CloudBroker().LB().Enable(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - if start := d.Get("start"); start.(bool) { - if enable := d.Get("enable"); enable.(bool) { - req := lb.StartRequest{ - LBID: lbId, - } - _, err := c.CloudBroker().LB().Start(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } else { - return diag.Errorf("To start the LB, please, enable LB first.") - } - } - - hasChanged = true + // lbId, _ := strconv.ParseUint(d.Id(), 10, 64) + // restoreReq := lb.RestoreRequest{LBID: lbId} + + // _, err := c.CloudBroker().LB().Restore(ctx, restoreReq) + // if err != nil { + // return diag.FromErr(err) + // } + + // if enable := d.Get("enable"); enable.(bool) { + // req := lb.EnableRequest{ + // LBID: lbId, + // } + // _, err := c.CloudBroker().LB().Enable(ctx, req) + // if err != nil { + // return diag.FromErr(err) + // } + // } + // if start := d.Get("start"); start.(bool) { + // if enable := d.Get("enable"); enable.(bool) { + // req := lb.StartRequest{ + // LBID: lbId, + // } + // _, err := c.CloudBroker().LB().Start(ctx, req) + // if err != nil { + // return diag.FromErr(err) + // } + // } else { + // return diag.Errorf("To start the LB, please, enable LB first.") + // } + // } + + // hasChanged = true case status.Destroying: return diag.Errorf("The LB is in progress with status: %s", lbRec.Status) case status.Destroyed: d.SetId("") - return resourceLBCreate(ctx, d, m) + return diag.Errorf("The resource cannot be read because it has been destroyed") + // return resourceLBCreate(ctx, d, m) case status.Enabled: case status.Enabling: case status.Disabling: @@ -206,10 +176,11 @@ func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) } func resourceLBDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBDelete") + log.Debugf("resourceLBDelete called with lb id: %v", d.Get("lb_id").(int)) _, err := utilityLBCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -233,34 +204,11 @@ func resourceLBDelete(ctx context.Context, d *schema.ResourceData, m interface{} } func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBUpdate") + log.Debugf("resourceLBUpdate called for lb_id %s", d.Id()) c := m.(*controller.ControllerCfg) - haveRGID, err := existRGID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - if !haveRGID { - return diag.Errorf("resourceLBUpdate: can't update LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int)) - } - - haveExtNetID, err := existExtNetID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - if !haveExtNetID { - 
return diag.Errorf("resourceLBUpdate: can't update LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int)) - } - - haveVins, err := existViNSID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - if !haveVins { - return diag.Errorf("resourceLBUpdate: can't update LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int)) + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags } lbRec, err := utilityLBCheckPresence(ctx, d, m) @@ -278,34 +226,28 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{} case status.Created: case status.Deleting: case status.Deleted: - lbId, _ := strconv.ParseUint(d.Id(), 10, 64) - restoreReq := lb.RestoreRequest{LBID: lbId} - - _, err := c.CloudBroker().LB().Restore(ctx, restoreReq) - if err != nil { - return diag.FromErr(err) + restore, ok := d.GetOk("restore") + if ok && restore.(bool) { + if err := resourceLbRestore(ctx, lbRec.ID, m); err != nil { + return diag.FromErr(err) + } } - if enable := d.Get("enable"); enable.(bool) { - req := lb.EnableRequest{ - LBID: lbId, - } - _, err := c.CloudBroker().LB().Enable(ctx, req) - if err != nil { + enable, ok := d.GetOk("enable") + if ok && enable.(bool) { + if err := resourceLbEnable(ctx, lbRec.ID, m); err != nil { return diag.FromErr(err) } } - if start := d.Get("start"); start.(bool) { - if enable := d.Get("enable"); enable.(bool) { - req := lb.StartRequest{ - LBID: lbId, - } - _, err := c.CloudBroker().LB().Start(ctx, req) - if err != nil { + + start, ok := d.GetOk("start") + if ok && start.(bool) { + if enable.(bool) { + if err := resourceLbStart(ctx, lbRec.ID, m); err != nil { return diag.FromErr(err) } } else { - return diag.Errorf("To start the LB, please, enable LB first.") + return diag.Errorf("to start the LB, please, enable LB first.") } } @@ -314,7 +256,8 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{} return diag.Errorf("The LB is in progress with status: %s", lbRec.Status) case status.Destroyed: d.SetId("") - return resourceLBCreate(ctx, d, m) + return diag.Errorf("The resource cannot be updated because it has been destroyed") + // return resourceLBCreate(ctx, d, m) case status.Enabled: case status.Enabling: case status.Disabling: @@ -332,100 +275,201 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{} } if d.HasChange("enable") { - enable := d.Get("enable").(bool) - if enable { - req := lb.EnableRequest{ - LBID: uint64(d.Get("lb_id").(int)), - } - _, err := c.CloudBroker().LB().Enable(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } else { - req := lb.DisableRequest{ - LBID: uint64(d.Get("lb_id").(int)), - } - _, err := c.CloudBroker().LB().Disable(ctx, req) - if err != nil { - return diag.FromErr(err) - } + if err := resourceLbChangeEnable(ctx, d, lbRec.ID, m); err != nil { + return diag.FromErr(err) + } + } + if d.HasChange("ha_mode") { + if err := resourceLbChangeHaMode(ctx, d, lbRec.ID, m); err != nil { + return diag.FromErr(err) } } if d.HasChange("start") { - start := d.Get("start").(bool) - lbId := uint64(d.Get("lb_id").(int)) - if start { - req := lb.StartRequest{LBID: lbId} - _, err := c.CloudBroker().LB().Start(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } else { - req := lb.StopRequest{LBID: lbId} - _, err := c.CloudBroker().LB().Stop(ctx, req) - if err != nil { - return diag.FromErr(err) - } + if err := resourceLbChangeStart(ctx, d, lbRec.ID, m); err != nil { + return 
diag.FromErr(err) } } if d.HasChange("desc") { - req := lb.UpdateRequest{ - LBID: uint64(d.Get("lb_id").(int)), - Description: d.Get("desc").(string), + if err := resourceLbChangeDesc(ctx, d, lbRec.ID, m); err != nil { + return diag.FromErr(err) } + } - _, err := c.CloudBroker().LB().Update(ctx, req) - if err != nil { + if d.HasChange("restart") { + if err := resourceLbChangeRestart(ctx, d, lbRec.ID, m); err != nil { return diag.FromErr(err) } } - if d.HasChange("restart") { - restart := d.Get("restart").(bool) - if restart { - req := lb.RestartRequest{ - LBID: uint64(d.Get("lb_id").(int)), - } + if d.HasChange("config_reset") { + if err := resourceLbChangeConfigReset(ctx, d, lbRec.ID, m); err != nil { + return diag.FromErr(err) + } + } - _, err := c.CloudBroker().LB().Restart(ctx, req) - if err != nil { - return diag.FromErr(err) - } + return resourceLBRead(ctx, d, m) +} + +func resourceLbEnable(ctx context.Context, lbId uint64, m interface{}) error { + c := m.(*controller.ControllerCfg) + + req := lb.EnableRequest{ + LBID: lbId, + } + _, err := c.CloudBroker().LB().Enable(ctx, req) + return err +} + +func resourceLbDisable(ctx context.Context, lbId uint64, m interface{}) error { + c := m.(*controller.ControllerCfg) + + req := lb.DisableRequest{ + LBID: lbId, + } + _, err := c.CloudBroker().LB().Disable(ctx, req) + return err +} + +func resourceLbRestore(ctx context.Context, lbId uint64, m interface{}) error { + c := m.(*controller.ControllerCfg) + + restoreReq := lb.RestoreRequest{ + LBID: lbId, + } + _, err := c.CloudBroker().LB().Restore(ctx, restoreReq) + return err +} + +func resourceLbStart(ctx context.Context, lbId uint64, m interface{}) error { + c := m.(*controller.ControllerCfg) + + req := lb.StartRequest{ + LBID: lbId, + } + _, err := c.CloudBroker().LB().Start(ctx, req) + return err +} + +func resourceLbChangeEnable(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error { + enable := d.Get("enable").(bool) + + if enable { + if err := resourceLbEnable(ctx, lbId, m); err != nil { + return err + } + } else { + if err := resourceLbDisable(ctx, lbId, m); err != nil { + return err } } - if d.HasChange("restore") { - restore := d.Get("restore").(bool) - if restore { - req := lb.RestoreRequest{ - LBID: uint64(d.Get("lb_id").(int)), - } + return nil +} - _, err := c.CloudBroker().LB().Restore(ctx, req) - if err != nil { - return diag.FromErr(err) - } +func resourceLbChangeHaMode(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error { + c := m.(*controller.ControllerCfg) + haModeOn := d.Get("ha_mode").(bool) + + if haModeOn { + req := lb.HighlyAvailableRequest{ + LBID: lbId, + } + + if _, err := c.CloudBroker().LB().HighlyAvailable(ctx, req); err != nil { + return err } } - if d.HasChange("config_reset") { - cfgReset := d.Get("config_reset").(bool) - if cfgReset { - req := lb.ConfigResetRequest{ - LBID: uint64(d.Get("lb_id").(int)), - } + return nil +} - _, err := c.CloudBroker().LB().ConfigReset(ctx, req) - if err != nil { - return diag.FromErr(err) - } +func resourceLbChangeStart(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error { + c := m.(*controller.ControllerCfg) + start := d.Get("start").(bool) + + if start { + req := lb.StartRequest{LBID: lbId} + if _, err := c.CloudBroker().LB().Start(ctx, req); err != nil { + return err + } + } else { + req := lb.StopRequest{LBID: lbId} + if _, err := c.CloudBroker().LB().Stop(ctx, req); err != nil { + return err } } - return resourceLBRead(ctx, d, m) + return nil +} 
+ +func resourceLbChangeDesc(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error { + c := m.(*controller.ControllerCfg) + desc := d.Get("desc").(string) + + req := lb.UpdateRequest{ + LBID: lbId, + Description: desc, + } + + if _, err := c.CloudBroker().LB().Update(ctx, req); err != nil { + return err + } + + return nil +} + +func resourceLbChangeRestart(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error { + c := m.(*controller.ControllerCfg) + + restart := d.Get("restart").(bool) + if restart { + req := lb.RestartRequest{ + LBID: lbId, + } + + if _, err := c.CloudBroker().LB().Restart(ctx, req); err != nil { + return err + } + } + + return nil +} + +func resourceLbChangeRestore(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error { + c := m.(*controller.ControllerCfg) + + restore := d.Get("restore").(bool) + if restore { + req := lb.RestoreRequest{ + LBID: lbId, + } + + if _, err := c.CloudBroker().LB().Restore(ctx, req); err != nil { + return err + } + } + + return nil +} + +func resourceLbChangeConfigReset(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error { + c := m.(*controller.ControllerCfg) + + cfgReset := d.Get("config_reset").(bool) + if cfgReset { + req := lb.ConfigResetRequest{ + LBID: lbId, + } + + if _, err := c.CloudBroker().LB().ConfigReset(ctx, req); err != nil { + return err + } + } + + return nil } func ResourceLB() *schema.Resource { diff --git a/internal/service/cloudbroker/lb/resource_lb_backend.go b/internal/service/cloudbroker/lb/resource_lb_backend.go index 5d8385d..171c2ea 100644 --- a/internal/service/cloudbroker/lb/resource_lb_backend.go +++ b/internal/service/cloudbroker/lb/resource_lb_backend.go @@ -39,7 +39,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" @@ -47,23 +46,18 @@ import ( ) func resourceLBBackendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBBackendCreate") + log.Debugf("resourceLBBackendCreate: call for lb_backend id %s", d.Id()) + c := m.(*controller.ControllerCfg) - haveLBID, err := existLBID(ctx, d, m) - if err != nil { - return diag.FromErr(err) + if diags := checkParamsExistenceLb(ctx, d, c); diags != nil { + return diags } - if !haveLBID { - return diag.Errorf("resourceLBBackendCreate: can't create LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + req := lb.BackendCreateRequest{ + LBID: uint64(d.Get("lb_id").(int)), + BackendName: d.Get("name").(string), } - c := m.(*controller.ControllerCfg) - req := lb.BackendCreateRequest{} - - req.BackendName = d.Get("name").(string) - req.LBID = uint64(d.Get("lb_id").(int)) - if algorithm, ok := d.GetOk("algorithm"); ok { req.Algorithm = algorithm.(string) } @@ -92,23 +86,18 @@ func resourceLBBackendCreate(ctx context.Context, d *schema.ResourceData, m inte req.Weight = uint64(weight.(int)) } - _, err = c.CloudBroker().LB().BackendCreate(ctx, req) + _, err := c.CloudBroker().LB().BackendCreate(ctx, req) if err != nil { return diag.FromErr(err) } d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string)) - _, err = utilityLBBackendCheckPresence(ctx, d, m) - if err != 
nil { - return diag.FromErr(err) - } - return resourceLBBackendRead(ctx, d, m) } func resourceLBBackendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBBackendRead") + log.Debugf("resourceLBBackendRead: call for lb_backend id %s", d.Id()) b, err := utilityLBBackendCheckPresence(ctx, d, m) if b == nil { @@ -124,10 +113,11 @@ func resourceLBBackendRead(ctx context.Context, d *schema.ResourceData, m interf } func resourceLBBackendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBBackendDelete") + log.Debugf("resourceLBBackendDelete: call for lb_backend id %s", d.Id()) _, err := utilityLBBackendCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -148,16 +138,11 @@ func resourceLBBackendDelete(ctx context.Context, d *schema.ResourceData, m inte } func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBBackendEdit") + log.Debugf("resourceLBBackendUpdate: call for lb_backend id %s", d.Id()) c := m.(*controller.ControllerCfg) - haveLBID, err := existLBID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - if !haveLBID { - return diag.Errorf("resourceLBBackendUpdate: can't update LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + if diags := checkParamsExistenceLb(ctx, d, c); diags != nil { + return diags } req := lb.BackendUpdateRequest{ @@ -193,7 +178,7 @@ func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m inte req.Weight = uint64(d.Get("weight").(int)) } - _, err = c.CloudBroker().LB().BackendUpdate(ctx, req) + _, err := c.CloudBroker().LB().BackendUpdate(ctx, req) if err != nil { return diag.FromErr(err) } @@ -222,153 +207,6 @@ func ResourceLBBackend() *schema.Resource { Default: &constants.Timeout300s, }, - Schema: map[string]*schema.Schema{ - "lb_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the LB instance to backendCreate", - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: "Must be unique among all backends of this LB - name of the new backend to create", - }, - "algorithm": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{"roundrobin", "static-rr", "leastconn"}, false), - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "downinter": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "fall": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "inter": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "maxconn": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "maxqueue": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "rise": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "slowstart": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "weight": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "servers": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "address": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "check": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - 
Computed: true, - }, - "port": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "server_settings": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "downinter": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "fall": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "inter": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "maxconn": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "maxqueue": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "rise": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "slowstart": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "weight": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, + Schema: resourceLbBackendSchemaMake(), } } diff --git a/internal/service/cloudbroker/lb/resource_lb_backend_server.go b/internal/service/cloudbroker/lb/resource_lb_backend_server.go index 5bd14cb..9d133df 100644 --- a/internal/service/cloudbroker/lb/resource_lb_backend_server.go +++ b/internal/service/cloudbroker/lb/resource_lb_backend_server.go @@ -39,7 +39,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" log "github.com/sirupsen/logrus" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" @@ -47,18 +46,16 @@ import ( ) func resourceLBBackendServerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBBackendServerCreate") - - haveLBID, err := existLBID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } + log.Debugf("resourceLBBackendServerCreate: call for lb_id %d, backend_name %s, server_name %s", + d.Get("lb_id").(int), + d.Get("backend_name").(string), + d.Get("name").(string)) + c := m.(*controller.ControllerCfg) - if !haveLBID { - return diag.Errorf("resourceLBBackendServerCreate: can't create LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + if diags := checkParamsExistenceLb(ctx, d, c); diags != nil { + return diags } - c := m.(*controller.ControllerCfg) req := lb.BackendServerAddRequest{ BackendName: d.Get("backend_name").(string), ServerName: d.Get("name").(string), @@ -95,23 +92,19 @@ func resourceLBBackendServerCreate(ctx context.Context, d *schema.ResourceData, req.Weight = uint64(weight.(int)) } - _, err = c.CloudBroker().LB().BackendServerAdd(ctx, req) + _, err := c.CloudBroker().LB().BackendServerAdd(ctx, req) if err != nil { + d.SetId("") return diag.FromErr(err) } d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("backend_name").(string) + "#" + d.Get("name").(string)) - _, err = utilityLBBackendServerCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - return resourceLBBackendServerRead(ctx, d, m) } func resourceLBBackendServerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBBackendServerRead") + log.Debugf("resourceLBBackendServerRead: call for id %s", d.Id()) s, err := utilityLBBackendServerCheckPresence(ctx, d, m) if err != nil { @@ -128,10 +121,11 @@ func 
resourceLBBackendServerRead(ctx context.Context, d *schema.ResourceData, m } func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBBackendServerDelete") + log.Debugf("resourceLBBackendServerDelete: call for id %s", d.Id()) _, err := utilityLBBackendServerCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -152,16 +146,11 @@ func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData, } func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBBackendServerEdit") + log.Debugf("resourceLBBackendServerEdit: call for id %s", d.Id()) c := m.(*controller.ControllerCfg) - haveLBID, err := existLBID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - if !haveLBID { - return diag.Errorf("resourceLBBackendServerUpdate: can't update LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + if diags := checkParamsExistenceLb(ctx, d, c); diags != nil { + return diags } req := lb.BackendServerUpdateRequest{ @@ -200,7 +189,7 @@ func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData, req.Weight = uint64(d.Get("weight").(int)) } - _, err = c.CloudBroker().LB().BackendServerUpdate(ctx, req) + _, err := c.CloudBroker().LB().BackendServerUpdate(ctx, req) if err != nil { return diag.FromErr(err) } @@ -229,83 +218,6 @@ func ResourceLBBackendServer() *schema.Resource { Default: &constants.Timeout300s, }, - Schema: map[string]*schema.Schema{ - "lb_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the LB instance to backendCreate", - }, - "backend_name": { - Type: schema.TypeString, - Required: true, - Description: "Must be unique among all backends of this LB - name of the new backend to create", - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: "Must be unique among all servers defined for this backend - name of the server definition to add.", - }, - "address": { - Type: schema.TypeString, - Required: true, - Description: "IP address of the server.", - }, - "port": { - Type: schema.TypeInt, - Required: true, - Description: "Port number on the server", - }, - "check": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{"disabled", "enabled"}, false), - Description: "set to disabled if this server should be used regardless of its state.", - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "downinter": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "fall": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "inter": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "maxconn": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "maxqueue": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "rise": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "slowstart": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "weight": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - }, + Schema: resourceLbBackendServerSchemaMake(), } } diff --git a/internal/service/cloudbroker/lb/resource_lb_frontend.go b/internal/service/cloudbroker/lb/resource_lb_frontend.go index 20cd94b..962bdf7 100644 --- a/internal/service/cloudbroker/lb/resource_lb_frontend.go +++ 
b/internal/service/cloudbroker/lb/resource_lb_frontend.go @@ -46,41 +46,36 @@ import ( ) func resourceLBFrontendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBFrontendCreate") + log.Debugf("resourceLBFrontendCreate: call for lb_id %d, backend %s to create frontend %s", + d.Get("lb_id").(int), + d.Get("backend_name").(string), + d.Get("name").(string)) - haveLBID, err := existLBID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } + c := m.(*controller.ControllerCfg) - if !haveLBID { - return diag.Errorf("resourceLBFrontendCreate: can't create LB frontend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + if diags := checkParamsExistenceLb(ctx, d, c); diags != nil { + return diags } - c := m.(*controller.ControllerCfg) req := lb.FrontendCreateRequest{ BackendName: d.Get("backend_name").(string), LBID: uint64(d.Get("lb_id").(int)), FrontendName: d.Get("name").(string), } - _, err = c.CloudBroker().LB().FrontendCreate(ctx, req) + _, err := c.CloudBroker().LB().FrontendCreate(ctx, req) if err != nil { + d.SetId("") return diag.FromErr(err) } d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string)) - _, err = utilityLBFrontendCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - return resourceLBFrontendRead(ctx, d, m) } func resourceLBFrontendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBFrontendRead") + log.Debugf("resourceLBFrontendRead: call for id %s", d.Id()) f, err := utilityLBFrontendCheckPresence(ctx, d, m) if err != nil { @@ -96,10 +91,11 @@ func resourceLBFrontendRead(ctx context.Context, d *schema.ResourceData, m inter } func resourceLBFrontendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBFrontendDelete") + log.Debugf("resourceLBFrontendDelete: call for id %s", d.Id()) _, err := utilityLBFrontendCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -144,48 +140,6 @@ func ResourceLBFrontend() *schema.Resource { Default: &constants.Timeout300s, }, - Schema: map[string]*schema.Schema{ - "lb_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the LB instance to backendCreate", - }, - "backend_name": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "bindings": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "address": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - }, + Schema: resourceLbFrontendSchemaMake(), } } diff --git a/internal/service/cloudbroker/lb/resource_lb_frontend_bind.go b/internal/service/cloudbroker/lb/resource_lb_frontend_bind.go index 58ac1a9..9cc5b9f 100644 --- a/internal/service/cloudbroker/lb/resource_lb_frontend_bind.go +++ b/internal/service/cloudbroker/lb/resource_lb_frontend_bind.go @@ -46,18 +46,17 @@ import ( ) func resourceLBFrontendBindCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBFrontendBindCreate") + log.Debugf("resourceLBFrontendBindCreate: call for lb_id %d, frontend %s to create bind %s", + 
d.Get("lb_id").(int), + d.Get("frontend_name").(string), + d.Get("name").(string)) - haveLBID, err := existLBID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } + c := m.(*controller.ControllerCfg) - if !haveLBID { - return diag.Errorf("resourceLBFrontendBindCreate: can't create LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + if diags := checkParamsExistenceLb(ctx, d, c); diags != nil { + return diags } - c := m.(*controller.ControllerCfg) req := lb.FrontendBindRequest{ LBID: uint64(d.Get("lb_id").(int)), FrontendName: d.Get("frontend_name").(string), @@ -66,23 +65,18 @@ func resourceLBFrontendBindCreate(ctx context.Context, d *schema.ResourceData, m BindingPort: uint64(d.Get("port").(int)), } - _, err = c.CloudBroker().LB().FrontendBind(ctx, req) + _, err := c.CloudBroker().LB().FrontendBind(ctx, req) if err != nil { return diag.FromErr(err) } d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("frontend_name").(string) + "#" + d.Get("name").(string)) - _, err = utilityLBFrontendBindCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - return resourceLBFrontendBindRead(ctx, d, m) } func resourceLBFrontendBindRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBFrontendBindRead") + log.Debugf("resourceLBFrontendBindRead: call for %s", d.Id()) b, err := utilityLBFrontendBindCheckPresence(ctx, d, m) if err != nil { @@ -99,10 +93,11 @@ func resourceLBFrontendBindRead(ctx context.Context, d *schema.ResourceData, m i } func resourceLBFrontendBindDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBFrontendBindDelete") + log.Debugf("resourceLBFrontendBindDelete: call for %s", d.Id()) _, err := utilityLBFrontendBindCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -124,16 +119,11 @@ func resourceLBFrontendBindDelete(ctx context.Context, d *schema.ResourceData, m } func resourceLBFrontendBindUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceLBFrontendBindEdit") + log.Debugf("resourceLBFrontendBindEdit: call for %s", d.Id()) c := m.(*controller.ControllerCfg) - haveLBID, err := existLBID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - if !haveLBID { - return diag.Errorf("resourceLBFrontendBindUpdate: can't update LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) + if diags := checkParamsExistenceLb(ctx, d, c); diags != nil { + return diags } req := lb.FrontendBindUpdateRequest{ @@ -142,12 +132,12 @@ func resourceLBFrontendBindUpdate(ctx context.Context, d *schema.ResourceData, m LBID: uint64(d.Get("lb_id").(int)), } - if d.HasChange("address") || d.HasChange("port") { + if d.HasChange("address") || d.HasChange("port") { req.BindingAddress = d.Get("address").(string) req.BindingPort = uint64(d.Get("port").(int)) } - _, err = c.CloudBroker().LB().FrontendBindUpdate(ctx, req) + _, err := c.CloudBroker().LB().FrontendBindUpdate(ctx, req) if err != nil { return diag.FromErr(err) } @@ -176,33 +166,6 @@ func ResourceLBFrontendBind() *schema.Resource { Default: &constants.Timeout300s, }, - Schema: map[string]*schema.Schema{ - "lb_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the LB instance to backendCreate", - }, - "frontend_name": { - Type: schema.TypeString, - Required: true, - Description: "Must be unique among all backends of this LB - name 
of the new backend to create", - }, - "address": { - Type: schema.TypeString, - Required: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "port": { - Type: schema.TypeInt, - Required: true, - }, - }, + Schema: resourceLbFrontendBindSchemaMake(), } } diff --git a/internal/service/cloudbroker/lb/utility_lb.go b/internal/service/cloudbroker/lb/utility_lb.go index 21ad177..0e13132 100644 --- a/internal/service/cloudbroker/lb/utility_lb.go +++ b/internal/service/cloudbroker/lb/utility_lb.go @@ -45,14 +45,13 @@ func utilityLBCheckPresence(ctx context.Context, d *schema.ResourceData, m inter c := m.(*controller.ControllerCfg) req := lb.GetRequest{} - if d.Id() != "" { rgId, _ := strconv.ParseUint(d.Id(), 10, 64) req.LBID = rgId } else { req.LBID = uint64(d.Get("lb_id").(int)) } - + lb, err := c.CloudBroker().LB().Get(ctx, req) if err != nil { return nil, err diff --git a/internal/service/cloudbroker/lb/utility_lb_backend.go b/internal/service/cloudbroker/lb/utility_lb_backend.go index 80ec270..9798de4 100644 --- a/internal/service/cloudbroker/lb/utility_lb_backend.go +++ b/internal/service/cloudbroker/lb/utility_lb_backend.go @@ -57,17 +57,17 @@ func utilityLBBackendCheckPresence(ctx context.Context, d *schema.ResourceData, req.LBID = uint64(d.Get("lb_id").(int)) } - lb, err := c.CloudBroker().LB().Get(ctx, req) + lbRec, err := c.CloudBroker().LB().Get(ctx, req) if err != nil { return nil, err } - backends := lb.Backends + backends := lbRec.Backends for _, b := range backends { if b.Name == bName { return &b, nil } } - return nil, fmt.Errorf("can not find backend with name: %s for lb: %d", bName, lb.ID) + return nil, fmt.Errorf("can not find backend with name: %s for lb: %d", bName, lbRec.ID) } diff --git a/internal/service/cloudbroker/pcidevice/data_source_pcidevice.go b/internal/service/cloudbroker/pcidevice/data_source_pcidevice.go index d144eeb..dfc1f05 100644 --- a/internal/service/cloudbroker/pcidevice/data_source_pcidevice.go +++ b/internal/service/cloudbroker/pcidevice/data_source_pcidevice.go @@ -1,140 +1,69 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package pcidevice - -import ( - "context" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" -) - -func dataSourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - pcidevice, err := utilityPcideviceCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - d.Set("ckey", pcidevice.CKey) - d.Set("meta", flattens.FlattenMeta(pcidevice.Meta)) - d.Set("compute_id", pcidevice.ComputeID) - d.Set("description", pcidevice.Description) - d.Set("guid", pcidevice.GUID) - d.Set("hw_path", pcidevice.HwPath) - d.Set("device_id",pcidevice.ID) - d.Set("name", pcidevice.Name) - d.Set("rg_id", pcidevice.RGID) - d.Set("stack_id", pcidevice.StackID) - d.Set("status", pcidevice.Status) - d.Set("system_name", pcidevice.SystemName) - - d.SetId(strconv.Itoa(d.Get("device_id").(int))) - - return nil -} - -func dataSourcePcideviceSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "device_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "compute_id": { - Type: schema.TypeInt, - Computed: true, - }, - "description": { - Type: schema.TypeString, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "hw_path": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "stack_id": { - Type: schema.TypeInt, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "system_name": { - Type: schema.TypeString, - Computed: true, - }, - } - - return rets -} - -func DataSourcePcidevice() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourcePcideviceRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourcePcideviceSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package pcidevice + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + pcidevice, err := utilityPcideviceCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + flattenPcidevice(d, pcidevice) + d.SetId(strconv.Itoa(d.Get("device_id").(int))) + + return nil +} + +func DataSourcePcidevice() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourcePcideviceRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourcePcideviceSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/pcidevice/data_source_pcidevice_list.go b/internal/service/cloudbroker/pcidevice/data_source_pcidevice_list.go index d831356..b661a2f 100644 --- a/internal/service/cloudbroker/pcidevice/data_source_pcidevice_list.go +++ b/internal/service/cloudbroker/pcidevice/data_source_pcidevice_list.go @@ -1,149 +1,72 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package pcidevice - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" -) - -func flattenPcideviceList(pl *pcidevice.ListPCIDevices) []map[string]interface{} { - res := make([]map[string]interface{}, 0, len(pl.Data)) - for _, item := range pl.Data { - temp := map[string]interface{}{ - "ckey": item.CKey, - "meta": flattens.FlattenMeta(item.Meta), - "compute_id": item.ComputeID, - "description": item.Description, - "guid": item.GUID, - "hw_path": item.HwPath, - "device_id": item.ID, - "rg_id": item.RGID, - "name": item.Name, - "stack_id": item.StackID, - "status": item.Status, - "system_name": item.SystemName, - } - res = append(res, temp) - } - return res -} - -func dataSourcePcideviceListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - pcideviceList, err := utilityPcideviceListCheckPresence(ctx, m) - if err != nil { - return diag.FromErr(err) - } - - d.Set("items", flattenPcideviceList(pcideviceList)) - d.Set("entry_count", pcideviceList.EntryCount) - - id := uuid.New() - d.SetId(id.String()) - - return nil -} - -func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "by_id": { - Type: schema.TypeInt, - Optional: true, - Description: "by_id", - }, - "compute_id": { - Type: schema.TypeInt, - Optional: true, - Description: "compute_id", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "name", - }, - "rg_id": { - Type: schema.TypeInt, - Optional: true, - Description: "rg_id", - }, - "status": { - Type: schema.TypeString, - Optional: true, - Description: "status", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "pcidevice list", - Elem: &schema.Resource{ - Schema: dataSourcePcideviceSchemaMake(), - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - Description: "entry count", - }, - } - return rets -} - -func DataSourcePcideviceList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourcePcideviceListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourcePcideviceListSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package pcidevice + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourcePcideviceListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + pcideviceList, err := utilityPcideviceListCheckPresence(ctx, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + d.Set("items", flattenPcideviceList(pcideviceList)) + d.Set("entry_count", pcideviceList.EntryCount) + + id := uuid.New() + d.SetId(id.String()) + + return nil +} + +func DataSourcePcideviceList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourcePcideviceListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourcePcideviceListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/pcidevice/resource_check_input_vales.go b/internal/service/cloudbroker/pcidevice/resource_check_input_vales.go index 80f92c8..6392c00 100644 --- a/internal/service/cloudbroker/pcidevice/resource_check_input_vales.go +++ b/internal/service/cloudbroker/pcidevice/resource_check_input_vales.go @@ -34,55 +34,26 @@ package pcidevice import ( "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" ) -func existStackID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - StackID := uint64(d.Get("stack_id").(int)) - RGID := uint64(d.Get("rg_id").(int)) +func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics { + var errs []error - req := rg.ListRequest{ - IncludeDeleted: false, - } - - rgList, err := c.CloudBroker().RG().List(ctx, req) - if err != nil { - return false, err - } - - for _, v := range rgList.FilterByID(RGID).Data { - for _, idVM := range v.VMs { - req := compute.GetRequest{ - ComputeID: idVM, - } - checkStackID, err := c.CloudBroker().Compute().Get(ctx, req) - if err != nil { - return false, err - } - if checkStackID.StackID == StackID { - return true, nil - } - } - } - - return false, err -} + stackId := uint64(d.Get("stack_id").(int)) + rgId := uint64(d.Get("rg_id").(int)) -func existRGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - RGID := uint64(d.Get("rg_id").(int)) - req := rg.ListRequest{ - IncludeDeleted: false, + if err := ic.ExistRG(ctx, rgId, c); err != nil { + errs = append(errs, err) } - rgList, err :=
c.CloudBroker().RG().List(ctx, req) - if err != nil { - return false, err + if err := ic.ExistStackInPcidevice(ctx, stackId, rgId, c); err != nil { + errs = append(errs, err) } - return len(rgList.FilterByID(RGID).Data) != 0, nil + return dc.ErrorsToDiagnostics(errs) } diff --git a/internal/service/cloudbroker/pcidevice/resource_pcidevice.go b/internal/service/cloudbroker/pcidevice/resource_pcidevice.go index aafd171..2720048 100644 --- a/internal/service/cloudbroker/pcidevice/resource_pcidevice.go +++ b/internal/service/cloudbroker/pcidevice/resource_pcidevice.go @@ -1,292 +1,229 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package pcidevice - -import ( - "context" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" - -) - -func resourcePcideviceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourcePcideviceCreate: called for pcidevice %s", d.Get("name").(string)) - - c := m.(*controller.ControllerCfg) - req := pcidevice.CreateRequest{} - - haveRGID, err := existRGID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveRGID { - return diag.Errorf("resourcePcideviceCreate: can't create Pcidevice because RGID %d is not allowed or does not exist", d.Get("rg_id").(int)) - } - - haveStackID, err := existStackID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveStackID { - return diag.Errorf("resourcePcideviceCreate: can't create Pcidevice because StackID %d is not allowed or does not exist", d.Get("stack_id").(int)) - } - - req.StackID = uint64(d.Get("stack_id").(int)) - req.RGID = uint64(d.Get("rg_id").(int)) - req.Name = d.Get("name").(string) - req.HWPath = d.Get("hw_path").(string) - - if description, ok := d.GetOk("description"); ok { - req.Description = description.(string) - } - - pcideviceId, err := c.CloudBroker().PCIDevice().Create(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId(strconv.FormatUint(pcideviceId, 10)) - d.Set("device_id", pcideviceId) - - 
return resourcePcideviceRead(ctx, d, m) -} - -func resourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - pcidevice, err := utilityPcideviceCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - d.Set("ckey", pcidevice.CKey) - d.Set("meta", flattens.FlattenMeta(pcidevice.Meta)) - d.Set("compute_id", pcidevice.ComputeID) - d.Set("description", pcidevice.Description) - d.Set("guid", pcidevice.GUID) - d.Set("hw_path", pcidevice.HwPath) - d.Set("device_id", pcidevice.ID) - d.Set("rg_id", pcidevice.RGID) - d.Set("name", pcidevice.Name) - d.Set("stack_id", pcidevice.StackID) - d.Set("status", pcidevice.Status) - d.Set("system_name", pcidevice.SystemName) - - return nil -} - -func resourcePcideviceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - - haveRGID, err := existRGID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveRGID { - return diag.Errorf("resourcePcideviceCreate: can't update Pcidevice because RGID %d is not allowed or does not exist", d.Get("rg_id").(int)) - } - - haveStackID, err := existStackID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveStackID { - return diag.Errorf("resourcePcideviceCreate: can't update Pcidevice because StackID %d is not allowed or does not exist", d.Get("stack_id").(int)) - } - - if d.HasChange("enable") { - state := d.Get("enable").(bool) - c := m.(*controller.ControllerCfg) - - if state { - req := pcidevice.EnableRequest{ - DeviceID: uint64(d.Get("device_id").(int)), - } - - _, err := c.CloudBroker().PCIDevice().Enable(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } else { - req := pcidevice.DisableRequest{ - DeviceID: uint64(d.Get("device_id").(int)), - } - if force, ok := d.GetOk("force"); ok { - req.Force = force.(bool) - } - - _, err := c.CloudBroker().PCIDevice().Disable(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - } - - return resourcePcideviceRead(ctx, d, m) -} - -func resourcePcideviceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourcePcideviceDelete: called for %s, id: %s", d.Get("name").(string), d.Id()) - - pciDevice, err := utilityPcideviceCheckPresence(ctx, d, m) - if err != nil { - d.SetId("") - return diag.FromErr(err) - } - if pciDevice.Status == status.Destroyed || pciDevice.Status == status.Purged { - return nil - } - - c := m.(*controller.ControllerCfg) - - req := pcidevice.DeleteRequest{ - DeviceID: pciDevice.ID, - } - - if force, ok := d.GetOk("force"); ok { - req.Force = force.(bool) - } - - _, err = c.CloudBroker().PCIDevice().Delete(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId("") - - return nil -} - -func resourcePcideviceSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "compute_id": { - Type: schema.TypeInt, - Computed: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "description, just for information", - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "hw_path": { - Type: schema.TypeString, - Required: true, - Description: "PCI address of the device", - }, - "device_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "name": { - Type: schema.TypeString, - 
Required: true, - Description: "Name of Device", - }, - "rg_id": { - Type: schema.TypeInt, - Required: true, - Description: "Resource GROUP", - }, - "stack_id": { - Type: schema.TypeInt, - Required: true, - Description: "stackId", - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "system_name": { - Type: schema.TypeString, - Computed: true, - }, - "force": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Force delete", - }, - "enable": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Enable pci device", - }, - } -} - -func ResourcePcidevice() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - CreateContext: resourcePcideviceCreate, - ReadContext: resourcePcideviceRead, - UpdateContext: resourcePcideviceUpdate, - DeleteContext: resourcePcideviceDelete, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, - }, - - Schema: resourcePcideviceSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package pcidevice + +import ( + "context" + log "github.com/sirupsen/logrus" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" +) + +func resourcePcideviceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourcePcideviceCreate: called for pcidevice %s", d.Get("name").(string)) + + c := m.(*controller.ControllerCfg) + createReq := pcidevice.CreateRequest{} + + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags + } + + createReq.StackID = uint64(d.Get("stack_id").(int)) + createReq.RGID = uint64(d.Get("rg_id").(int)) + createReq.Name = d.Get("name").(string) + createReq.HWPath = d.Get("hw_path").(string) + + if description, ok := d.GetOk("description"); ok { + createReq.Description = description.(string) + } + + pcideviceId, err := c.CloudBroker().PCIDevice().Create(ctx, createReq) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + d.SetId(strconv.FormatUint(pcideviceId, 10)) + d.Set("device_id", pcideviceId) + + warnings := dc.Warnings{} + + if enable, ok := d.GetOk("enable"); ok { + log.Debugf("resourcePcideviceCreate: enable=%t device_id %d after completing its resource configuration", enable, pcideviceId) + + if enable.(bool) { + req := pcidevice.EnableRequest{DeviceID: pcideviceId} + if _, err := c.CloudBroker().PCIDevice().Enable(ctx, req); err != nil { + warnings.Add(err) + } + + } else { + req := pcidevice.DisableRequest{ + DeviceID: pcideviceId, + } + if force, ok := d.GetOk("force_disable"); ok { + req.Force = force.(bool) + log.Debugf("force_disable=%v", force) + } + if _, err := c.CloudBroker().PCIDevice().Disable(ctx, req); err != nil { + warnings.Add(err) + } + } + } + + return append(resourcePcideviceRead(ctx, d, m), warnings.Get()...) 
+} + +func resourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourcePcideviceRead: called for pci_device id %s, name %s", + d.Id(), d.Get("name").(string)) + + pcideviceRec, err := utilityPcideviceCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + flattenPcidevice(d, pcideviceRec) + + log.Debugf("resourcePcideviceRead: after flattenPcidevice: device_id %s, name %s", + d.Id(), d.Get("name").(string)) + + return nil +} + +func resourcePcideviceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourcePcideviceUpdate: called for pcidevice id %s, name %s", d.Id(), d.Get("name").(string)) + + c := m.(*controller.ControllerCfg) + + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags + } + + if d.HasChange("enable") { + err := resourcePcideviceChangeEnable(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + return resourcePcideviceRead(ctx, d, m) +} + +func resourcePcideviceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourcePcideviceDelete: called for %s, id: %s", d.Get("name").(string), d.Id()) + + c := m.(*controller.ControllerCfg) + + pciDevice, err := utilityPcideviceCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + if pciDevice.Status == status.Destroyed || pciDevice.Status == status.Purged { + return nil + } + + req := pcidevice.DeleteRequest{ + DeviceID: pciDevice.ID, + } + if force, ok := d.GetOk("force_delete"); ok { + req.Force = force.(bool) + } + + if _, err = c.CloudBroker().PCIDevice().Delete(ctx, req); err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func ResourcePcidevice() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourcePcideviceCreate, + ReadContext: resourcePcideviceRead, + UpdateContext: resourcePcideviceUpdate, + DeleteContext: resourcePcideviceDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout60s, + Read: &constants.Timeout30s, + Update: &constants.Timeout60s, + Delete: &constants.Timeout60s, + Default: &constants.Timeout60s, + }, + + Schema: resourcePcideviceSchemaMake(), + } +} + +func resourcePcideviceChangeEnable(ctx context.Context, d *schema.ResourceData, m interface{}) error { + enable := d.Get("enable").(bool) + log.Debugf("resourcePcideviceChangeEnable: enable=%t device_id %s after completing its resource configuration", enable, d.Id()) + + pcideviceRec, err := utilityPcideviceCheckPresence(ctx, d, m) + if err != nil { + return err + } + c := m.(*controller.ControllerCfg) + + if enable { + req := pcidevice.EnableRequest{ + DeviceID: pcideviceRec.ID, + } + + if _, err := c.CloudBroker().PCIDevice().Enable(ctx, req); err != nil { + return err + } + } else { + req := pcidevice.DisableRequest{ + DeviceID: pcideviceRec.ID, + } + if force, ok := d.GetOk("force_disable"); ok { + req.Force = force.(bool) + } + + if _, err := c.CloudBroker().PCIDevice().Disable(ctx, req); err != nil { + return err + } + } + + return nil +} diff --git a/internal/service/cloudbroker/rg/data_source_rg.go b/internal/service/cloudbroker/rg/data_source_rg.go index df8d29b..d439389 100644 --- a/internal/service/cloudbroker/rg/data_source_rg.go +++ b/internal/service/cloudbroker/rg/data_source_rg.go @@ -35,84
+35,12 @@ import ( "context" "fmt" - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func flattenResgroup(d *schema.ResourceData, rgData *rg.RecordRG) { - log.Debugf("flattenResgroup: decoded RG name %q / ID %d, account ID %d", - rgData.Name, rgData.ID, rgData.AccountID) - - d.Set("account_id", rgData.AccountID) - d.Set("account_name", rgData.AccountName) - d.Set("acl", flattenRgAcl(rgData.ACL)) - d.Set("cpu_allocation_parameter", rgData.CPUAllocationParameter) - d.Set("cpu_allocation_ratio", rgData.CPUAllocationRatio) - d.Set("created_by", rgData.CreatedBy) - d.Set("created_time", rgData.CreatedTime) - d.Set("def_net_id", rgData.DefNetID) - d.Set("def_net_type", rgData.DefNetType) - d.Set("deleted_by", rgData.DeletedBy) - d.Set("deleted_time", rgData.DeletedTime) - d.Set("desc", rgData.Description) - d.Set("dirty", rgData.Dirty) - d.Set("gid", rgData.GID) - d.Set("guid", rgData.GUID) - d.Set("rg_id", rgData.ID) - d.Set("lock_status", rgData.LockStatus) - d.Set("milestones", rgData.Milestones) - d.Set("name", rgData.Name) - d.Set("register_computes", rgData.RegisterComputes) - d.Set("resource_limits", flattenRgResourceLimits(rgData.ResourceLimits)) - d.Set("resource_types", rgData.ResTypes) - d.Set("secret", rgData.Secret) - d.Set("status", rgData.Status) - d.Set("uniq_pools", rgData.UniqPools) - d.Set("updated_by", rgData.UpdatedBy) - d.Set("updated_time", rgData.UpdatedTime) - d.Set("vins", rgData.VINS) - d.Set("computes", rgData.VMs) -} - -func flattenRgAcl(rgACLs rg.ListACL) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - for _, acl := range rgACLs { - temp := map[string]interface{}{ - "explicit": acl.Explicit, - "guid": acl.GUID, - "right": acl.Right, - "status": acl.Status, - "type": acl.Type, - "user_group_id": acl.UserGroupID, - } - - res = append(res, temp) - } - - return res -} - -func flattenRgResourceLimits(rl rg.ResourceLimits) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - temp := map[string]interface{}{ - "cu_c": rl.CUC, - "cu_d": rl.CuD, - "cu_dm": rl.CUDM, - "cu_i": rl.CUI, - "cu_m": rl.CUM, - "cu_np": rl.CUNP, - "gpu_units": rl.GPUUnits, - } - res = append(res, temp) - - return res - -} - func dataSourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { rg, err := utilityResgroupCheckPresence(ctx, d, m) if err != nil { @@ -141,200 +69,3 @@ func DataSourceResgroup() *schema.Resource { Schema: dataSourceRgSchemaMake(), } } - -func dataSourceRgSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "rg_id": { - Type: schema.TypeInt, - Required: true, - }, - "reason": { - Type: schema.TypeString, - Optional: true, - }, - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "acl": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "explicit": { - Type: schema.TypeBool, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "right": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "user_group_id": { - Type: 
schema.TypeString, - Computed: true, - }, - }, - }, - }, - "cpu_allocation_parameter": { - Type: schema.TypeString, - Computed: true, - }, - "cpu_allocation_ratio": { - Type: schema.TypeFloat, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "def_net_id": { - Type: schema.TypeInt, - Computed: true, - }, - "def_net_type": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "dirty": { - Type: schema.TypeBool, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "lock_status": { - Type: schema.TypeString, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "register_computes": { - Type: schema.TypeBool, - Computed: true, - }, - "resource_limits": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cu_c": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_d": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_dm": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_i": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_m": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_np": { - Type: schema.TypeFloat, - Computed: true, - }, - "gpu_units": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - "resource_types": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "secret": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "uniq_pools": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "vins": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "computes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - } -} diff --git a/internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go b/internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go index 248a9a4..f5977a3 100644 --- a/internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go +++ b/internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go @@ -6,28 +6,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" ) -func DataSourceRgAffinityGroupComputes() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceRgAffinityGroupComputesRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceRgAffinityGroupComputesSchemaMake(), - } -} - func dataSourceRgAffinityGroupComputesRead(ctx context.Context, d *schema.ResourceData, m interface{}) 
diag.Diagnostics { rgComputes, err := utilityRgAffinityGroupComputesCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -36,93 +21,17 @@ func dataSourceRgAffinityGroupComputesRead(ctx context.Context, d *schema.Resour return nil } -func flattenRgAffinityGroupComputes(list rg.ListAffinityGroupCompute) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - - for _, item := range list { - temp := map[string]interface{}{ - "compute_id": item.ComputeID, - "other_node": item.OtherNode, - "other_node_indirect": item.OtherNodeIndirect, - "other_node_indirect_soft": item.OtherNodeIndirectSoft, - "other_node_soft": item.OtherNodeSoft, - "same_node": item.SameNode, - "same_node_soft": item.SameNodeSoft, - } - res = append(res, temp) - } +func DataSourceRgAffinityGroupComputes() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, - return res -} + ReadContext: dataSourceRgAffinityGroupComputesRead, -func dataSourceRgAffinityGroupComputesSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "rg_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the RG", - }, - "affinity_group": { - Type: schema.TypeString, - Required: true, - Description: "Affinity group label", + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "compute_id": { - Type: schema.TypeInt, - Computed: true, - }, - "other_node": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "other_node_indirect": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "other_node_indirect_soft": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "other_node_soft": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "same_node": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "same_node_soft": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - }, - }, - }, + Schema: dataSourceRgAffinityGroupComputesSchemaMake(), } - - return res } diff --git a/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go index ec14f17..f119e54 100644 --- a/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go +++ b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go @@ -9,24 +9,10 @@ import ( "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" ) -func DataSourceRgAffinityGroupsGet() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceRgAffinityGroupsGetRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceRgAffinityGroupsGetSchemaMake(), - } -} - func dataSourceRgAffinityGroupsGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { computes, err := utilityRgAffinityGroupsGetCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } d.SetId(strconv.Itoa(d.Get("rg_id").(int))) @@ -34,27 +20,17 @@ func dataSourceRgAffinityGroupsGetRead(ctx context.Context, d 
*schema.ResourceDa return nil } -func dataSourceRgAffinityGroupsGetSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "rg_id": { - Type: schema.TypeInt, - Required: true, - Description: "ID of the RG", - }, - "affinity_group": { - Type: schema.TypeString, - Required: true, - Description: "Affinity group label", - }, +func DataSourceRgAffinityGroupsGet() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgAffinityGroupsGetRead, - "ids": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, }, - } - return res + Schema: dataSourceRgAffinityGroupsGetSchemaMake(), + } } diff --git a/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go index fba49e0..6f7d17f 100644 --- a/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go +++ b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go @@ -2,12 +2,26 @@ package rg import ( "context" + "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" ) +func dataSourceRgAffinityGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + list, err := utilityRgAffinityGroupsListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + d.SetId(strconv.Itoa(d.Get("rg_id").(int))) + d.Set("affinity_groups", flattenRgListGroups(list)) + d.Set("entry_count", list.EntryCount) + return nil +} + func DataSourceRgAffinityGroupsList() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, @@ -22,13 +36,3 @@ func DataSourceRgAffinityGroupsList() *schema.Resource { Schema: dataSourceRgAffinityGroupsListSchemaMake(), } } - -func dataSourceRgAffinityGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - return nil -} - -func dataSourceRgAffinityGroupsListSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - - } -} diff --git a/internal/service/cloudbroker/rg/data_source_rg_audits.go b/internal/service/cloudbroker/rg/data_source_rg_audits.go index 431d326..8cd285d 100644 --- a/internal/service/cloudbroker/rg/data_source_rg_audits.go +++ b/internal/service/cloudbroker/rg/data_source_rg_audits.go @@ -6,28 +6,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" ) -func DataSourceRgAudits() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceRgAuditsRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceRgAuditsSchemaMake(), - } -} - func dataSourceRgAuditsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { rgAudits, err := utilityRgAuditsCheckPresence(ctx, d, m) if err != nil { + d.SetId("") diag.FromErr(err) } @@ -36,56 +21,17 @@ func dataSourceRgAuditsRead(ctx context.Context, d *schema.ResourceData, m inter return nil } -func flattenRgAudits(rgAudits rg.ListAudits) 
[]map[string]interface{} { - res := make([]map[string]interface{}, 0) - for _, rgAudit := range rgAudits { - temp := map[string]interface{}{ - "call": rgAudit.Call, - "responsetime": rgAudit.ResponseTime, - "statuscode": rgAudit.StatusCode, - "timestamp": rgAudit.Timestamp, - "user": rgAudit.User, - } - - res = append(res, temp) - } +func DataSourceRgAudits() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, - return res -} + ReadContext: dataSourceRgAuditsRead, -func dataSourceRgAuditsSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "rg_id": { - Type: schema.TypeInt, - Required: true, - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "call": { - Type: schema.TypeString, - Computed: true, - }, - "responsetime": { - Type: schema.TypeFloat, - Computed: true, - }, - "statuscode": { - Type: schema.TypeInt, - Computed: true, - }, - "timestamp": { - Type: schema.TypeFloat, - Computed: true, - }, - "user": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, }, + + Schema: dataSourceRgAuditsSchemaMake(), } } diff --git a/internal/service/cloudbroker/rg/data_source_rg_list.go b/internal/service/cloudbroker/rg/data_source_rg_list.go index d73f0a5..5800bac 100644 --- a/internal/service/cloudbroker/rg/data_source_rg_list.go +++ b/internal/service/cloudbroker/rg/data_source_rg_list.go @@ -1,293 +1,71 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package rg - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func flattenRgList(rgl *rg.ListRG) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - for _, rg := range rgl.Data { - temp := map[string]interface{}{ - "account_id": rg.AccountID, - "account_name": rg.AccountName, - "acl": flattenRgAcl(rg.ACL), - "created_by": rg.CreatedBy, - "created_time": rg.CreatedTime, - "def_net_id": rg.DefNetID, - "def_net_type": rg.DefNetType, - "deleted_by": rg.DeletedBy, - "deleted_time": rg.DeletedTime, - "desc": rg.Description, - "gid": rg.GID, - "guid": rg.GUID, - "rg_id": rg.ID, - "lock_status": rg.LockStatus, - "milestones": rg.Milestones, - "name": rg.Name, - "register_computes": rg.RegisterComputes, - "resource_limits": flattenRgResourceLimits(rg.ResourceLimits), - "secret": rg.Secret, - "status": rg.Status, - "updated_by": rg.UpdatedBy, - "updated_time": rg.UpdatedTime, - "vins": rg.VINS, - "vms": rg.VMs, - } - res = append(res, temp) - } - return res - -} - -func dataSourceRgListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - rgList, err := utilityRgListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenRgList(rgList)) - - return nil -} - -func dataSourceRgListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "includedeleted": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "included deleted resource groups", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "acl": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "explicit": { - Type: schema.TypeBool, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "right": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "user_group_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "def_net_id": { - Type: schema.TypeInt, - Computed: true, - }, - "def_net_type": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "lock_status": { - Type: schema.TypeString, - 
Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "register_computes": { - Type: schema.TypeBool, - Computed: true, - }, - "resource_limits": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cu_c": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_d": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_i": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_m": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_np": { - Type: schema.TypeFloat, - Computed: true, - }, - "gpu_units": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - "secret": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "vins": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "vms": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - }, - }, - }, - } - return res -} - -func DataSourceRgList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceRgListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceRgListSchemaMake(), - } -} +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package rg + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceRgListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + rgList, err := utilityRgListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenRgList(rgList)) + d.Set("entry_count", rgList.EntryCount) + + return nil +} + +func DataSourceRgList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceRgListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceRgListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/rg/resource_check_input_values.go b/internal/service/cloudbroker/rg/resource_check_input_values.go index 17b3fd2..951740c 100644 --- a/internal/service/cloudbroker/rg/resource_check_input_values.go +++ b/internal/service/cloudbroker/rg/resource_check_input_values.go @@ -2,60 +2,31 @@ package rg import ( "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" ) -func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - accountId := uint64(d.Get("account_id").(int)) +func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, accountId, gid uint64) diag.Diagnostics { + var errs []error - req := account.ListRequest{} - - accountList, err := c.CloudBroker().Account().List(ctx, req) - if err != nil { - return false, err + if err := ic.ExistAccount(ctx, accountId, c); err != nil { + errs = append(errs, err) } - return len(accountList.FilterByID(accountId).Data) != 0, nil -} - -func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - - gid := uint64(d.Get("gid").(int)) - - gidList, err := c.CloudBroker().Grid().List(ctx, grid.ListRequest{}) - if err != nil { - return false, err + if err := ic.ExistGID(ctx, gid, c); err != nil { + errs = append(errs, err) } - for _, elem := range gidList.Data { - if elem.GID == gid { - return true, nil + extNetId, ok := d.GetOk("ext_net_id") + if ok { + if err := ic.ExistExtNetInRG(ctx, uint64(extNetId.(int)), accountId, c); err != nil { + errs = append(errs, err) } } - - return false, nil -} - -func existExtNet(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - extNetId := uint64(d.Get("ext_net_id").(int)) - - req := extnet.ListRequest{ - AccountID: uint64(d.Get("account_id").(int)), - } - - listExtNet, err := c.CloudBroker().ExtNet().List(ctx, req) - 
if err != nil { - return false, err - } - - return len(listExtNet.FilterByID(extNetId).Data) != 0, nil + return dc.ErrorsToDiagnostics(errs) } diff --git a/internal/service/cloudbroker/rg/resource_rg.go b/internal/service/cloudbroker/rg/resource_rg.go index 8f0db89..35cf9a0 100644 --- a/internal/service/cloudbroker/rg/resource_rg.go +++ b/internal/service/cloudbroker/rg/resource_rg.go @@ -45,45 +45,23 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - c := m.(*controller.ControllerCfg) - - req := rg.CreateRequest{} - - req.Name = d.Get("rg_name").(string) - log.Debugf("resourceResgroupCreate: called for RG name %s, account ID %d", - req.Name, d.Get("account_id").(int)) - - haveAccount, err := existAccountID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveAccount { - return diag.Errorf("resourceResgroupCreate: can't create RG because AccountID %d is not allowed or does not exist", d.Get("account_id").(int)) - } - req.AccountID = uint64(d.Get("account_id").(int)) + d.Get("rg_name").(string), d.Get("account_id").(int)) + c := m.(*controller.ControllerCfg) - haveGID, err := existGID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveGID { - return diag.Errorf("resourceResgroupCreate: can't create RG because GID %d is not allowed or does not exist", d.Get("gid").(int)) + accountId := uint64(d.Get("account_id").(int)) + gid := uint64(d.Get("gid").(int)) + req := rg.CreateRequest{ + AccountID: accountId, + GID: gid, + Name: d.Get("rg_name").(string), } - req.GID = uint64(d.Get("gid").(int)) - if _, ok := d.GetOk("ext_net_id"); ok { - haveExtNet, err := existExtNet(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveExtNet { - return diag.Errorf("resourceResgroupCreate: can't create RG because ExtNetID %d is not allowed or does not exist", d.Get("ext_net_id").(int)) - } + if diags := checkParamsExistence(ctx, d, c, accountId, gid); diags != nil { + return diags } if resLimits, ok := d.GetOk("resource_limits"); ok { @@ -131,6 +109,10 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter } } + if owner, ok := d.GetOk("owner"); ok { + req.Owner = owner.(string) + } + if defNetType, ok := d.GetOk("def_net_type"); ok { req.DefNet = defNetType.(string) } @@ -169,120 +151,57 @@ func resourceResgroupCreate(ctx context.Context, d *schema.ResourceData, m inter rgID, err := c.CloudBroker().RG().Create(ctx, req) if err != nil { + d.SetId("") return diag.FromErr(err) } d.SetId(strconv.FormatUint(rgID, 10)) + d.Set("rg_id", rgID) w := dc.Warnings{} - if access, ok := d.GetOk("access"); ok { - var user, right string - - if access.(*schema.Set).Len() > 0 { - accessList := access.(*schema.Set).List() - for _, accessIface := range accessList { - access := accessIface.(map[string]interface{}) - user = access["user"].(string) - right = access["right"].(string) - - req := rg.AccessGrantRequest{ - RGID: rgID, - User: user, - Right: right, - } - - if reason, ok := access["reason"]; ok { - req.Reason = reason.(string) - } - - _, err := c.CloudBroker().RG().AccessGrant(ctx, req) - if err != nil { - w.Add(err) - } + if _, ok := d.GetOk("access"); ok { + if errs := resourceRGAccessGrant(ctx, d, m); len(errs) != 0 { + for _, err := range errs { + w.Add(err) } } } - if defNet, ok := 
d.GetOk("def_net"); ok { - if defNet.(*schema.Set).Len() > 0 { - defNetList := defNet.(*schema.Set).List() - defNetItem := defNetList[0].(map[string]interface{}) - - netType := defNetItem["net_type"].(string) - - req := rg.SetDefNetRequest{ - RGID: rgID, - NetType: netType, - } - - if netID, ok := defNetItem["net_id"]; ok { - req.NetID = uint64(netID.(int)) - } - if reason, ok := defNetItem["reason"]; ok { - req.Reason = reason.(string) - } - - _, err := c.CloudBroker().RG().SetDefNet(ctx, req) - if err != nil { - w.Add(err) - } - - d.Set("def_net_type", netType) + if _, ok := d.GetOk("def_net"); ok { + if err := resourceRGSetDefNet(ctx, d, m); err != nil { + w.Add(err) } + } - if cpuAllocationParameter, ok := d.GetOk("cpu_allocation_parameter"); ok { - cpuAllocationParameter := cpuAllocationParameter.(string) - - req := rg.SetCPUAllocationParameterRequest{ - RGID: rgID, - StrictLoose: cpuAllocationParameter, - } - - log.Debugf("setting account cpu allocation parameter") - _, err := c.CloudBroker().RG().SetCPUAllocationParameter(ctx, req) - if err != nil { - w.Add(err) - } + if _, ok := d.GetOk("cpu_allocation_parameter"); ok { + if err := resourceRGSetCPUAllocationParameter(ctx, d, m); err != nil { + w.Add(err) } + } - if cpuAllocationRatio, ok := d.GetOk("cpu_allocation_ratio"); ok { - cpuAllocationRatio := cpuAllocationRatio.(float64) - - req := rg.SetCPUAllocationRatioRequest{ - RGID: rgID, - Ratio: cpuAllocationRatio, - } - - log.Debugf("setting account cpu allocation ratio") - _, err := c.CloudBroker().RG().SetCPUAllocationRatio(ctx, req) - if err != nil { - w.Add(err) - } + if _, ok := d.GetOk("cpu_allocation_ratio"); ok { + if err := resourceRGSetCPUAllocationRatio(ctx, d, m); err != nil { + w.Add(err) } + } - if !d.Get("enable").(bool) { - _, err := c.CloudBroker().RG().Disable(ctx, rg.DisableRequest{ - RGID: rgID, - }) - - if err != nil { - w.Add(err) - } + enable, ok := d.GetOk("enable") + if ok && !enable.(bool) { + _, err := c.CloudBroker().RG().Disable(ctx, rg.DisableRequest{RGID: rgID}) + if err != nil { + w.Add(err) } } - diags := resourceResgroupRead(ctx, d, m) - diags = append(diags, w.Get()...) - - return diags + return append(resourceResgroupRead(ctx, d, m), w.Get()...) 
} func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceResgroupRead: called for RG name %s, account ID %d", d.Get("rg_name").(string), d.Get("account_id").(int)) - c := m.(*controller.ControllerCfg) + //c := m.(*controller.ControllerCfg) rgData, err := utilityResgroupCheckPresence(ctx, d, m) if err != nil { @@ -298,26 +217,33 @@ func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interfa case status.Created: case status.Enabled: case status.Deleted: - restoreReq := rg.RestoreRequest{RGID: rgData.ID} - enableReq := rg.EnableRequest{RGID: rgData.ID} - - log.Debugf("restoring RG") - _, err := c.CloudBroker().RG().Restore(ctx, restoreReq) - if err != nil { - return diag.FromErr(err) - } - - log.Debugf("enabling RG") - _, err = c.CloudBroker().RG().Enable(context.Background(), enableReq) - if err != nil { - return diag.FromErr(err) - } - - hasChanged = true + //restoreReq := rg.RestoreRequest{ + // RGID: rgData.ID, + // Reason: "automatic restore of resource by terraform", + //} + //enableReq := rg.EnableRequest{ + // RGID: rgData.ID, + // Reason: "automatic enable of resource by terraform", + //} + // + //log.Debugf("restoring RG") + //_, err := c.CloudBroker().RG().Restore(ctx, restoreReq) + //if err != nil { + // return diag.FromErr(err) + //} + // + //log.Debugf("enabling RG") + //_, err = c.CloudBroker().RG().Enable(context.Background(), enableReq) + //if err != nil { + // return diag.FromErr(err) + //} + // + //hasChanged = true case status.Deleting: case status.Destroyed: d.SetId("") - return resourceResgroupCreate(ctx, d, m) + return diag.Errorf("The resource cannot be read because it has been destroyed") + //return resourceResgroupCreate(ctx, d, m) case status.Destroying: case status.Disabled: case status.Disabling: @@ -340,34 +266,14 @@ func resourceResgroupRead(ctx context.Context, d *schema.ResourceData, m interfa func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceResgroupUpdate: called for RG name %s, account ID %d", - d.Get("name").(string), d.Get("account_id").(int)) + d.Get("rg_name").(string), d.Get("account_id").(int)) c := m.(*controller.ControllerCfg) + accountId := uint64(d.Get("account_id").(int)) + gid := uint64(d.Get("gid").(int)) - haveAccount, err := existAccountID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveAccount { - return diag.Errorf("resourceResgroupUpdate: can't create RG bacause AccountID %d not allowed or does not exist", d.Get("account_id").(int)) - } - - haveGID, err := existGID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveGID { - return diag.Errorf("resourceResgroupUpdate: can't create RG because GID %d not allowed or does not exist", d.Get("gid").(int)) - } - - if _, ok := d.GetOk("ext_net_id"); ok { - haveExtNet, err := existExtNet(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveExtNet { - return diag.Errorf("resourceResgroupUpdate: can't create RG bacause ExtNetID %d not allowed or does not exist", d.Get("ext_net_id").(int)) - } + if diags := checkParamsExistence(ctx, d, c, accountId, gid); diags != nil { + return diags } rgData, err := utilityResgroupCheckPresence(ctx, d, m) @@ -383,24 +289,40 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter case status.Created: case status.Enabled: case status.Deleted: - restoreReq := rg.RestoreRequest{RGID: rgData.ID} - enableReq := 
rg.EnableRequest{RGID: rgData.ID} + restore, ok := d.GetOk("restore") + if ok && restore.(bool) { + restoreReq := rg.RestoreRequest{ + RGID: rgData.ID, + Reason: "automatic restore of resource by terraform", + } - _, err := c.CloudBroker().RG().Restore(ctx, restoreReq) - if err != nil { - return diag.FromErr(err) - } + _, err := c.CloudBroker().RG().Restore(ctx, restoreReq) + if err != nil { + return diag.FromErr(err) + } - _, err = c.CloudBroker().RG().Enable(ctx, enableReq) - if err != nil { - return diag.FromErr(err) + hasChanged = true } - hasChanged = true + enable, ok := d.GetOk("enable") + if ok && enable.(bool) { + enableReq := rg.EnableRequest{ + RGID: rgData.ID, + Reason: "automatic enable of resource by terraform", + } + + _, err = c.CloudBroker().RG().Enable(ctx, enableReq) + if err != nil { + return diag.FromErr(err) + } + + hasChanged = true + } case status.Deleting: case status.Destroyed: d.SetId("") - return resourceResgroupCreate(ctx, d, m) + return diag.Errorf("The resource cannot be updated because it has been destroyed") + //return resourceResgroupCreate(ctx, d, m) case status.Destroying: case status.Disabled: case status.Disabling: @@ -501,8 +423,8 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter doGeneralUpdate = true } - if d.HasChange("desc") { - req.Description = d.Get("desc").(string) + if d.HasChange("description") { + req.Description = d.Get("description").(string) doGeneralUpdate = true } @@ -530,24 +452,178 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter log.Debugf("resourceResgroupUpdate: no difference between old and new state - no update on the RG will be done") } + if d.HasChange("access") { + if err := resourceRGChangeAccess(ctx, d, m); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("def_net") { + if err := resourceRGSetDefNet(ctx, d, m); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("cpu_allocation_parameter") { + if err := resourceRGSetCPUAllocationParameter(ctx, d, m); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("cpu_allocation_ratio") { + if err := resourceRGSetCPUAllocationRatio(ctx, d, m); err != nil { + return diag.FromErr(err) + } + } + if d.HasChange("enable") { - enable := d.Get("enable").(bool) + if err := resourceRGChangeEnable(ctx, d, m); err != nil { + return diag.FromErr(err) + } + } - if enable && rgData.Status == status.Disabled { - _, err := c.CloudBroker().RG().Enable(ctx, rg.EnableRequest{RGID: rgData.ID}) - if err != nil { - return diag.FromErr(err) + return resourceResgroupRead(ctx, d, m) +} + +func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceResgroupDelete: called for RG name %s, account ID %d", + d.Get("rg_name").(string), d.Get("account_id").(int)) + + rg_facts, err := utilityResgroupCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + req := rg.DeleteRequest{ + RGID: rg_facts.ID, + } + + if force, ok := d.GetOk("force"); ok { + req.Force = force.(bool) + } + if permanently, ok := d.GetOk("permanently"); ok { + req.Permanently = permanently.(bool) + } + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + c := m.(*controller.ControllerCfg) + _, err = c.CloudBroker().RG().Delete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceRGAccessGrant(ctx context.Context, d *schema.ResourceData, m 
interface{}) []error { + var errs []error + + access := d.Get("access") + rgId := uint64(d.Get("rg_id").(int)) + log.Debugf("resourceRGAccessGrant: access %v for rg id %d", access, rgId) + + c := m.(*controller.ControllerCfg) + + var user, right string + + if access.(*schema.Set).Len() > 0 { + accessList := access.(*schema.Set).List() + for _, accessIface := range accessList { + access := accessIface.(map[string]interface{}) + user = access["user"].(string) + right = access["right"].(string) + + req := rg.AccessGrantRequest{ + RGID: rgId, + User: user, + Right: right, } - } else if !enable && rgData.Status == status.Enabled { - req := rg.DisableRequest{RGID: rgData.ID} - _, err := c.CloudBroker().RG().Disable(ctx, req) - if err != nil { - return diag.FromErr(err) + if reason, ok := access["reason"]; ok { + req.Reason = reason.(string) + } + + if _, err := c.CloudBroker().RG().AccessGrant(ctx, req); err != nil { + errs = append(errs, err) } } - } // убрать в конец + } + + return errs +} + +func resourceRGSetDefNet(ctx context.Context, d *schema.ResourceData, m interface{}) error { + defNet := d.Get("def_net") + rgId := uint64(d.Get("rg_id").(int)) + log.Debugf("resourceRGSetDefNet: def_net %v for rg id %d", defNet, rgId) + + c := m.(*controller.ControllerCfg) + + if defNet.(*schema.Set).Len() > 0 { + defNetList := defNet.(*schema.Set).List() + defNetItem := defNetList[0].(map[string]interface{}) + + netType := defNetItem["net_type"].(string) + + req := rg.SetDefNetRequest{ + RGID: rgId, + NetType: netType, + } + + if netID, ok := defNetItem["net_id"]; ok { + req.NetID = uint64(netID.(int)) + } + if reason, ok := defNetItem["reason"]; ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().RG().SetDefNet(ctx, req) + return err + } + return nil +} + +func resourceRGSetCPUAllocationParameter(ctx context.Context, d *schema.ResourceData, m interface{}) error { + cpuAllocationParameter := d.Get("cpu_allocation_parameter").(string) + log.Debugf("resourceRGSetCPUAllocationParameter: cpuAllocationParameter %s for rg id %d", cpuAllocationParameter, uint64(d.Get("rg_id").(int))) + + c := m.(*controller.ControllerCfg) + + req := rg.SetCPUAllocationParameterRequest{ + RGID: uint64(d.Get("rg_id").(int)), + StrictLoose: cpuAllocationParameter, + } + + _, err := c.CloudBroker().RG().SetCPUAllocationParameter(ctx, req) + return err +} + +func resourceRGSetCPUAllocationRatio(ctx context.Context, d *schema.ResourceData, m interface{}) error { + cpuAllocationRatio := d.Get("cpu_allocation_ratio").(float64) + log.Debugf("resourceRGSetCPUAllocationRatio: cpuAllocationRatio %s for rg id %d", cpuAllocationRatio, uint64(d.Get("rg_id").(int))) + + c := m.(*controller.ControllerCfg) + + req := rg.SetCPUAllocationRatioRequest{ + RGID: uint64(d.Get("rg_id").(int)), + Ratio: cpuAllocationRatio, + } + + _, err := c.CloudBroker().RG().SetCPUAllocationRatio(ctx, req) + return err +} + +func resourceRGChangeAccess(ctx context.Context, d *schema.ResourceData, m interface{}) error { + rgId := uint64(d.Get("rg_id").(int)) + log.Debugf("resourceRGChangeAccess: for rg id %d", rgId) + + c := m.(*controller.ControllerCfg) oldSet, newSet := d.GetChange("access") deletedAccess := oldSet.(*schema.Set).Difference(newSet.(*schema.Set)).List() @@ -555,12 +631,18 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter deleteItem := deletedIface.(map[string]interface{}) user := deleteItem["user"].(string) - _, err := c.CloudBroker().RG().AccessRevoke(ctx, rg.AccessRevokeRequest{ - RGID: rgData.ID, 
+ reqRevoke := rg.AccessRevokeRequest{ + RGID: rgId, User: user, - }) + } + + if reason, ok := deleteItem["reason"]; ok { + reqRevoke.Reason = reason.(string) + } + + _, err := c.CloudBroker().RG().AccessRevoke(ctx, reqRevoke) if err != nil { - return diag.FromErr(err) + return err } } @@ -570,92 +652,44 @@ func resourceResgroupUpdate(ctx context.Context, d *schema.ResourceData, m inter user := addedItem["user"].(string) right := addedItem["right"].(string) - _, err := c.CloudBroker().RG().AccessGrant(ctx, rg.AccessGrantRequest{ - RGID: rgData.ID, + reqGrant := rg.AccessGrantRequest{ + RGID: rgId, User: user, Right: right, - }) - if err != nil { - return diag.FromErr(err) - } - } - - if ok := d.HasChange("def_net"); ok { - oldDefNet, newDefNet := d.GetChange("def_net") - if newDefNet.(*schema.Set).Len() > 0 { - changedDefNet := (newDefNet.(*schema.Set).Difference(oldDefNet.(*schema.Set))).List() - for _, changedDefNetInterface := range changedDefNet { - defNetItem := changedDefNetInterface.(map[string]interface{}) - netType := defNetItem["net_type"].(string) - - req := rg.SetDefNetRequest{ - RGID: rgData.ID, - NetType: netType, - } - - if netID, ok := defNetItem["net_id"]; ok { - req.NetID = uint64(netID.(int)) - } - if reason, ok := defNetItem["reason"]; ok { - req.Reason = reason.(string) - } - - _, err := c.CloudBroker().RG().SetDefNet(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } } - } - if d.HasChange("cpu_allocation_parameter") { - cpuAllocationParameter := d.Get("cpu_allocation_parameter").(string) - _, err := c.CloudBroker().RG().SetCPUAllocationParameter(ctx, rg.SetCPUAllocationParameterRequest{ - RGID: rgData.ID, - StrictLoose: cpuAllocationParameter, - }) - if err != nil { - return diag.FromErr(err) + if reason, ok := addedItem["reason"]; ok { + reqGrant.Reason = reason.(string) } - } - if d.HasChange("cpu_allocation_ratio") { - cpuAllocationRatio := d.Get("cpu_allocation_ratio").(float64) - _, err := c.CloudBroker().RG().SetCPUAllocationRatio(ctx, rg.SetCPUAllocationRatioRequest{ - RGID: rgData.ID, - Ratio: cpuAllocationRatio, - }) + _, err := c.CloudBroker().RG().AccessGrant(ctx, reqGrant) if err != nil { - return diag.FromErr(err) + return err } } - return nil // убери + return nil } -func resourceResgroupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceResgroupDelete: called for RG name %s, account ID %d", - d.Get("rg_name").(string), d.Get("account_id").(int)) +func resourceRGChangeEnable(ctx context.Context, d *schema.ResourceData, m interface{}) error { + rgId := uint64(d.Get("rg_id").(int)) + rgStatus := d.Get("status").(string) + enable := d.Get("enable").(bool) + log.Debugf("resourceRGChangeEnable: enable %t for rg id %d with status %s", enable, rgId, rgStatus) - rg_facts, err := utilityResgroupCheckPresence(ctx, d, m) - if rg_facts == nil { - if err != nil { - return diag.FromErr(err) + c := m.(*controller.ControllerCfg) + + if enable && rgStatus == status.Disabled { + if _, err := c.CloudBroker().RG().Enable(ctx, rg.EnableRequest{RGID: rgId}); err != nil { + return err } - return nil - } - req := rg.DeleteRequest{ - RGID: rg_facts.ID, - Force: true, - Permanently: true, - Reason: "Destroyed by DECORT Terraform provider", - } + } else if !enable && (rgStatus == status.Enabled || rgStatus == status.Created) { + req := rg.DisableRequest{RGID: rgId} - c := m.(*controller.ControllerCfg) - _, err = c.CloudBroker().RG().Delete(ctx, req) - if err != nil { - return diag.FromErr(err) + if _, err := 
c.CloudBroker().RG().Disable(ctx, req); err != nil { + return err + } } return nil @@ -682,339 +716,6 @@ func ResourceResgroup() *schema.Resource { Default: &constants.Timeout60s, }, - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(1), - Description: "Unique ID of the account, which this resource group belongs to.", - }, - - "gid": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: "Unique ID of the grid, where this resource group is deployed.", - }, - - "rg_name": { - Type: schema.TypeString, - Required: true, - Description: "Name of this resource group. Names are case sensitive and unique within the context of a account.", - }, - - "resource_limits": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cu_c": { - Type: schema.TypeFloat, - Optional: true, - Computed: true, - }, - "cu_d": { - Type: schema.TypeFloat, - Computed: true, - }, - "cu_dm": { - Type: schema.TypeFloat, - Optional: true, - Computed: true, - }, - "cu_i": { - Type: schema.TypeFloat, - Optional: true, - Computed: true, - }, - "cu_m": { - Type: schema.TypeFloat, - Optional: true, - Computed: true, - }, - "cu_np": { - Type: schema.TypeFloat, - Optional: true, - Computed: true, - }, - "gpu_units": { - Type: schema.TypeFloat, - Computed: true, - }, - }, - }, - }, - - "def_net_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - // Default: "PRIVATE", - ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC", "NONE"}, false), - Description: "Type of the network, which this resource group will use as default for its computes - PRIVATE or PUBLIC or NONE.", - }, - - "ipcidr": { - Type: schema.TypeString, - Optional: true, - Description: "Address of the netowrk inside the private network segment (aka ViNS) if def_net_type=PRIVATE", - }, - - "description": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "User-defined text description of this resource group.", - }, - - "reason": { - Type: schema.TypeString, - Optional: true, - }, - - "ext_net_id": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - Description: "ID of the external network for default ViNS. 
Pass 0 if def_net_type=PUBLIC or no external connection required for the defult ViNS when def_net_type=PRIVATE", - }, - - "ext_ip": { - Type: schema.TypeString, - Optional: true, - Description: "IP address on the external netowrk to request when def_net_type=PRIVATE and ext_net_id is not 0", - }, - - "register_computes": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - // Default: false, - Description: "Register computes in registration system", - }, - - "uniq_pools": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "access": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "user": { - Type: schema.TypeString, - Required: true, - Description: "User or group name to grant access", - }, - "right": { - Type: schema.TypeString, - Required: true, - Description: "Access rights to set, one of 'R', 'RCX' or 'ARCXDU'", - }, - "reason": { - Type: schema.TypeString, - Optional: true, - Description: "Reason for action", - }, - }, - }, - }, - - "def_net": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "net_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"PRIVATE", "PUBLIC"}, false), - Description: "Network type to set. Must be on of 'PRIVATE' or 'PUBLIC'.", - }, - "net_id": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - Description: "Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS.", - }, - "reason": { - Type: schema.TypeString, - Optional: true, - Description: "Reason for action", - }, - }, - }, - }, - - "cpu_allocation_parameter": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "set cpu allocation parameter", - }, - - "cpu_allocation_ratio": { - Type: schema.TypeFloat, - Optional: true, - Computed: true, - Description: "set cpu allocation ratio", - }, - - "enable": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "enable/disable account", - }, - - "acl": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "explicit": { - Type: schema.TypeBool, - Computed: true, - }, - "guid": { - Type: schema.TypeString, - Computed: true, - }, - "right": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "user_group_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account, which this resource group belongs to.", - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - - "def_net_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the default network for this resource group (if any).", - }, - - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - 
}, - "lock_status": { - Type: schema.TypeString, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "resource_types": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "secret": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: "Current status of this resource group.", - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "vins": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "List of VINs deployed in this resource group.", - }, - }, + Schema: resourceRgSchemaMake(), } } - -func flattenResourceRG(d *schema.ResourceData, rgData *rg.RecordRG) { - d.Set("account_id", rgData.AccountID) - d.Set("gid", rgData.GID) - d.Set("rg_name", rgData.Name) - d.Set("resource_limits", flattenRgResourceLimits(rgData.ResourceLimits)) - d.Set("def_net_type", rgData.DefNetType) - d.Set("description", rgData.Description) - d.Set("register_computes", rgData.RegisterComputes) - d.Set("uniq_pools", rgData.UniqPools) - d.Set("cpu_allocation_parameter", rgData.CPUAllocationParameter) - d.Set("cpu_allocation_ratio", rgData.CPUAllocationRatio) - d.Set("acl", flattenRgAcl(rgData.ACL)) - d.Set("account_name", rgData.AccountName) - d.Set("created_by", rgData.CreatedBy) - d.Set("created_time", rgData.CreatedTime) - d.Set("def_net_id", rgData.DefNetID) - d.Set("deleted_by", rgData.DeletedBy) - d.Set("deleted_time", rgData.DeletedTime) - d.Set("guid", rgData.GUID) - d.Set("rg_id", rgData.ID) - d.Set("lock_status", rgData.LockStatus) - d.Set("milestones", rgData.Milestones) - d.Set("resource_types", rgData.ResTypes) - d.Set("secret", rgData.Secret) - d.Set("status", rgData.Status) - d.Set("updated_by", rgData.UpdatedBy) - d.Set("updated_time", rgData.UpdatedTime) - d.Set("vins", rgData.VINS) -} diff --git a/internal/service/cloudbroker/rg/utility_rg_affinity_groups_list.go b/internal/service/cloudbroker/rg/utility_rg_affinity_groups_list.go index df71ad9..2f54bea 100644 --- a/internal/service/cloudbroker/rg/utility_rg_affinity_groups_list.go +++ b/internal/service/cloudbroker/rg/utility_rg_affinity_groups_list.go @@ -1 +1,55 @@ -package rg \ No newline at end of file +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Kasim Baybikov, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func utilityRgAffinityGroupsListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*rg.ListAffinityGroup, error) { + c := m.(*controller.ControllerCfg) + req := rg.AffinityGroupsListRequest{ + RGID: uint64(d.Get("rg_id").(int)), + } + + groups, err := c.CloudBroker().RG().AffinityGroupsList(ctx, req) + if err != nil { + return nil, err + } + + return groups, nil +} diff --git a/internal/service/cloudbroker/rg/utility_rg_list.go b/internal/service/cloudbroker/rg/utility_rg_list.go index 249d8df..9437111 100644 --- a/internal/service/cloudbroker/rg/utility_rg_list.go +++ b/internal/service/cloudbroker/rg/utility_rg_list.go @@ -1,65 +1,90 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package rg - -import ( - "context" - - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func utilityRgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*rg.ListRG, error) { - c := m.(*controller.ControllerCfg) - req := rg.ListRequest{} - - if size, ok := d.GetOk("size"); ok { - req.Size = uint64(size.(int)) - } - if page, ok := d.GetOk("page"); ok { - req.Page = uint64(page.(int)) - } - if includedeleted, ok := d.GetOk("includedeleted"); ok { - req.IncludeDeleted = includedeleted.(bool) - } - - log.Debugf("utilityRgListCheckPresence: load rg list") - rgList, err := c.CloudBroker().RG().List(ctx, req) - if err != nil { - return nil, err - } - - return rgList, nil -} +/* +Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package rg + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityRgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*rg.ListRG, error) { + c := m.(*controller.ControllerCfg) + req := rg.ListRequest{} + + if byId, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(byId.(int)) + } + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + + if accountId, ok := d.GetOk("account_id"); ok { + req.AccountID = uint64(accountId.(int)) + } + if accountName, ok := d.GetOk("account_name"); ok { + req.AccountName = accountName.(string) + } + + if createdAfter, ok := d.GetOk("created_after"); ok { + req.CreatedAfter = uint64(createdAfter.(int)) + } + if createdBefore, ok := d.GetOk("created_before"); ok { + req.CreatedBefore = uint64(createdBefore.(int)) + } + + if status, ok := d.GetOk("status"); ok { + req.Status = status.(string) + } + if includedeleted, ok := d.GetOk("includedeleted"); ok { + req.IncludeDeleted = includedeleted.(bool) + } + + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + + log.Debugf("utilityRgListCheckPresence: load rg list") + rgList, err := c.CloudBroker().RG().List(ctx, req) + if err != nil { + return nil, err + } + + return rgList, nil +} diff --git a/internal/service/cloudbroker/sep/data_source_sep.go b/internal/service/cloudbroker/sep/data_source_sep.go index e3b53e4..c903679 100644 --- a/internal/service/cloudbroker/sep/data_source_sep.go +++ b/internal/service/cloudbroker/sep/data_source_sep.go @@ -1,165 +1,70 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, -Sergey Kisil, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. 
- -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -import ( - "context" - "encoding/json" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" -) - -func dataSourceSepRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - desSep, err := utilitySepCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - d.Set("ckey", desSep.CKey) - d.Set("meta", flattens.FlattenMeta(desSep.Meta)) - d.Set("consumed_by", desSep.ConsumedBy) - d.Set("desc", desSep.Description) - d.Set("gid", desSep.GID) - d.Set("guid", desSep.GUID) - d.Set("sep_id", desSep.ID) - d.Set("milestones", desSep.Milestones) - d.Set("name", desSep.Name) - d.Set("obj_status", desSep.ObjStatus) - d.Set("provided_by", desSep.ProvidedBy) - d.Set("shared_with", desSep.SharedWith) - d.Set("tech_status", desSep.TechStatus) - d.Set("type", desSep.Type) - data, _ := json.Marshal(desSep.Config) - d.Set("config", string(data)) - - d.SetId(strconv.Itoa(d.Get("sep_id").(int))) - - return nil -} - -func dataSourceSepCSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeInt, - Required: true, - Description: "sep type des id", - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "consumed_by": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "obj_status": { - Type: schema.TypeString, - Computed: true, - }, - "provided_by": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "shared_with": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "config": { - Type: schema.TypeString, - Computed: true, - }, - } -} - -func DataSourceSep() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceSepRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceSepCSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package sep + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceSepRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + desSep, err := utilitySepCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + flattenSep(d, desSep) + d.SetId(strconv.Itoa(d.Get("sep_id").(int))) + + return nil +} + +func DataSourceSep() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceSepRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceSepCSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/sep/data_source_sep_config.go b/internal/service/cloudbroker/sep/data_source_sep_config.go index 236ee81..f3c5708 100644 --- a/internal/service/cloudbroker/sep/data_source_sep_config.go +++ b/internal/service/cloudbroker/sep/data_source_sep_config.go @@ -1,87 +1,73 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -import ( - "context" - "encoding/json" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceSepConfigRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - sepConfig, err := utilitySepConfigCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - - data, _ := json.Marshal(sepConfig) - d.Set("config", string(data)) - - return nil -} - -func dataSourceSepConfigSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeInt, - Required: true, - Description: "storage endpoint provider ID", - }, - "config": { - Type: schema.TypeString, - Computed: true, - Description: "sep config json string", - }, - } -} - -func DataSourceSepConfig() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceSepConfigRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceSepConfigSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package sep + +import ( + "context" + "encoding/json" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceSepConfigRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + sepConfig, err := utilitySepConfigCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + + data, _ := json.Marshal(sepConfig) + d.Set("config", string(data)) + + return nil +} + +func DataSourceSepConfig() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceSepConfigRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceSepConfigSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/sep/data_source_sep_consumption.go b/internal/service/cloudbroker/sep/data_source_sep_consumption.go index ef1637c..a697893 100644 --- a/internal/service/cloudbroker/sep/data_source_sep_consumption.go +++ b/internal/service/cloudbroker/sep/data_source_sep_consumption.go @@ -1,174 +1,71 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, -Sergey Kisil, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceSepConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - sepCons, err := utilitySepConsumptionCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - id := uuid.New() - d.SetId(id.String()) - - d.Set("type", sepCons.Type) - d.Set("total", flattenSepConsumption(sepCons.Total)) - d.Set("by_pool", flattenSepConsumptionPools(sepCons)) - - return nil -} - -func dataSourceSepConsumptionSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeInt, - Required: true, - Description: "sep id", - }, - "by_pool": { - Type: schema.TypeList, - Computed: true, - Description: "consumption divided by pool", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, - Description: "pool name", - }, - "disk_count": { - Type: schema.TypeInt, - Computed: true, - Description: "number of disks", - }, - "disk_usage": { - Type: schema.TypeInt, - Computed: true, - Description: "disk usage", - }, - "snapshot_count": { - Type: schema.TypeInt, - Computed: true, - Description: "number of snapshots", - }, - "snapshot_usage": { - Type: schema.TypeInt, - Computed: true, - Description: "snapshot usage", - }, - "usage": { - Type: schema.TypeInt, - Computed: true, - Description: "usage", - }, - "usage_limit": { - Type: schema.TypeInt, - Computed: true, - Description: "usage limit", - }, - }, - }, - }, - "total": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "capacity_limit": { - Type: schema.TypeInt, - Computed: true, - }, - "disk_count": { - Type: schema.TypeInt, - Computed: true, - Description: "number of disks", - }, - "disk_usage": { - Type: schema.TypeInt, - Computed: true, - Description: "disk usage", - }, - "snapshot_count": { - Type: schema.TypeInt, - Computed: true, - Description: "number of snapshots", - }, - "snapshot_usage": { - Type: schema.TypeInt, - Computed: true, - Description: "snapshot usage", - }, - "usage": { - Type: schema.TypeInt, - Computed: true, - Description: "usage", - }, - "usage_limit": { - Type: schema.TypeInt, - Computed: true, - Description: "usage limit", - }, - }, - }, - Description: "total consumption", - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: "sep type", - }, - } -} - -func DataSourceSepConsumption() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceSepConsumptionRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceSepConsumptionSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package sep + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceSepConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + sepCons, err := utilitySepConsumptionCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + id := uuid.New() + d.SetId(id.String()) + + flattenSepConsumption(d, sepCons) + + return nil +} + +func DataSourceSepConsumption() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceSepConsumptionRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceSepConsumptionSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/sep/data_source_sep_disk_list.go b/internal/service/cloudbroker/sep/data_source_sep_disk_list.go index 1610bc4..7165be7 100644 --- a/internal/service/cloudbroker/sep/data_source_sep_disk_list.go +++ b/internal/service/cloudbroker/sep/data_source_sep_disk_list.go @@ -1,93 +1,70 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceSepDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - sepDiskList, err := utilitySepDiskListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - id := uuid.New() - d.SetId(id.String()) - d.Set("items", sepDiskList) - - return nil -} - -func dataSourceSepDiskListSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeInt, - Required: true, - Description: "storage endpoint provider ID", - }, - "pool_name": { - Type: schema.TypeString, - Optional: true, - Description: "pool name", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "sep disk list", - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - } - - return rets -} - -func DataSourceSepDiskList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceSepDiskListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceSepDiskListSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package sep + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceSepDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + sepDiskList, err := utilitySepDiskListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", sepDiskList) + + return nil +} + +func DataSourceSepDiskList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceSepDiskListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceSepDiskListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/sep/data_source_sep_list.go b/internal/service/cloudbroker/sep/data_source_sep_list.go index 81c8e15..82ce835 100644 --- a/internal/service/cloudbroker/sep/data_source_sep_list.go +++ b/internal/service/cloudbroker/sep/data_source_sep_list.go @@ -1,212 +1,73 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, -Sergey Kisil, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceSepListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - sepList, err := utilitySepListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenSepList(sepList)) - d.Set("entryCount", sepList.EntryCount) - - return nil -} - -func dataSourceSepListSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "by_id": { - Type: schema.TypeInt, - Optional: true, - Description: "find by id", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "find by name", - }, - "gid": { - Type: schema.TypeInt, - Optional: true, - Description: "find by gid", - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: "find by sep type", - }, - "provided_by": { - Type: schema.TypeInt, - Optional: true, - Description: "find by provided physical node id", - }, - "tech_status": { - Type: schema.TypeString, - Optional: true, - Description: "find by techStatus", - }, - "consumed_by": { - Type: schema.TypeInt, - Optional: true, - Description: "find by consumed physical node id", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Description: "sep list", - Elem: &schema.Resource{ - Schema: dataSourceSepShortSchemaMake(), - }, - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - Description: "entryCount", - }, - } - - return rets -} - -func dataSourceSepShortSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "consumed_by": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "desc": { - Type: schema.TypeString, - Computed: true, - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "sep_id": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "obj_status": { - Type: schema.TypeString, - Computed: true, - }, - "provided_by": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "shared_with": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Computed: true, - }, - "config": { - Type: schema.TypeString, - Computed: true, - }, - } -} - -func DataSourceSepList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceSepListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceSepListSchemaMake(), - } -} 
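
Editor's note on the hunk that follows: the rewritten dataSourceSepListRead delegates item flattening to flattenSepListItems and fixes the attribute name to entry_count, but the flatten helper itself lives in the package's flattens.go and is not part of this section. Below is a minimal sketch of the shape that helper presumably takes, assuming the SDK's ListSEP type exposes a Data slice whose records carry the same fields the removed per-item schema above declared (sep_id, gid, name, obj_status, tech_status, type); the import path and field names are assumptions by analogy with the rg/account SDK packages used earlier in this patch, not a copy of the actual code.

package sep

import (
	// assumed import path, by analogy with .../pkg/cloudbroker/rg and .../pkg/cloudbroker/account above
	sdk "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep"
)

// flattenSepListItemsSketch converts SDK SEP records into the schema-shaped maps
// that d.Set("items", ...) expects. Sketch only; the real helper is in flattens.go.
func flattenSepListItemsSketch(sepList *sdk.ListSEP) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(sepList.Data))
	for _, item := range sepList.Data {
		res = append(res, map[string]interface{}{
			"sep_id":      item.ID,
			"gid":         item.GID,
			"name":        item.Name,
			"obj_status":  item.ObjStatus,
			"tech_status": item.TechStatus,
			"type":        item.Type,
		})
	}
	return res
}
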
+/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package sep + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceSepListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + sepList, err := utilitySepListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + + d.Set("items", flattenSepListItems(sepList)) + d.Set("entry_count", sepList.EntryCount) + + return nil +} + +func DataSourceSepList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceSepListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceSepListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/sep/data_source_sep_pool.go b/internal/service/cloudbroker/sep/data_source_sep_pool.go index f9d13c2..2e56798 100644 --- a/internal/service/cloudbroker/sep/data_source_sep_pool.go +++ b/internal/service/cloudbroker/sep/data_source_sep_pool.go @@ -1,145 +1,71 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, -Sergey Kisil, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func dataSourceSepPoolRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - sepPool, err := utilitySepPoolCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - id := uuid.New() - d.SetId(id.String()) - d.Set("pool", flattenSepPool(sepPool)) - - return nil -} - -func dataSourceSepPoolSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeInt, - Required: true, - Description: "storage endpoint provider ID", - }, - "pool_name": { - Type: schema.TypeString, - Required: true, - Description: "pool name", - }, - "pool": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "access_account_ids": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "access_res_group_ids": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "pagecache_ratio": { - Type: schema.TypeInt, - Computed: true, - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - }, - "types": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "uris": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip": { - Type: schema.TypeString, - Computed: true, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - "usage_limit": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - } -} - -func DataSourceSepPool() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceSepPoolRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceSepPoolSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package sep + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceSepPoolRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + sepPool, err := utilitySepPoolCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("pool", flattenSepPool(sepPool)) + + return nil +} + +func DataSourceSepPool() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceSepPoolRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceSepPoolSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/sep/flattens.go b/internal/service/cloudbroker/sep/flattens.go index 3ecc192..f0d0df2 100644 --- a/internal/service/cloudbroker/sep/flattens.go +++ b/internal/service/cloudbroker/sep/flattens.go @@ -35,11 +35,31 @@ package sep import ( "encoding/json" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" ) -func flattenSepList(sl *sep.ListSEP) []map[string]interface{} { +func flattenSep(d *schema.ResourceData, desSep *sep.RecordSEP) { + d.Set("ckey", desSep.CKey) + d.Set("meta", flattens.FlattenMeta(desSep.Meta)) + d.Set("consumed_by", desSep.ConsumedBy) + d.Set("desc", desSep.Description) + d.Set("gid", desSep.GID) + d.Set("guid", desSep.GUID) + d.Set("sep_id", desSep.ID) + d.Set("milestones", desSep.Milestones) + d.Set("name", desSep.Name) + d.Set("obj_status", desSep.ObjStatus) + d.Set("provided_by", desSep.ProvidedBy) + d.Set("shared_with", desSep.SharedWith) + d.Set("tech_status", desSep.TechStatus) + d.Set("type", desSep.Type) + data, _ := json.Marshal(desSep.Config) + d.Set("config", string(data)) +} + +func flattenSepListItems(sl *sep.ListSEP) []map[string]interface{} { res := make([]map[string]interface{}, 0) for _, item := range sl.Data { data, _ := json.Marshal(item.Config) @@ -93,7 +113,13 @@ func flattenSepPoolUris(rp *sep.RecordPool) []map[string]interface{} { return res } -func flattenSepConsumption(sc sep.Total) []map[string]interface{} { +func flattenSepConsumption(d *schema.ResourceData, sepCons *sep.RecordConsumption) { + d.Set("type", sepCons.Type) + d.Set("total", flattenSepConsumptionTotal(sepCons.Total)) + d.Set("by_pool", flattenSepConsumptionPools(sepCons)) +} + +func flattenSepConsumptionTotal(sc sep.Total) []map[string]interface{} { sh := make([]map[string]interface{}, 0) temp := map[string]interface{}{ "capacity_limit": sc.CapacityLimit, @@ -112,7 +138,7 @@ func flattenSepConsumptionPools(bp *sep.RecordConsumption) []map[string]interfac sh := make([]map[string]interface{}, 0) for key, value := range bp.ByPool { temp := map[string]interface{}{ - "name": key, + "name": key, "disk_count": value.DiskCount, "disk_usage": value.DiskUsage, "snapshot_count": value.SnapshotCount, diff --git a/internal/service/cloudbroker/sep/resource_check_input_values.go b/internal/service/cloudbroker/sep/resource_check_input_values.go index 9f2f43b..0a5de37 100644 --- 
a/internal/service/cloudbroker/sep/resource_check_input_values.go +++ b/internal/service/cloudbroker/sep/resource_check_input_values.go @@ -34,36 +34,22 @@ package sep import ( "context" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" ) -func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - c := m.(*controller.ControllerCfg) - gid := uint64(d.Get("gid").(int)) - req := grid.ListRequest{} - - gridList, err := c.CloudBroker().Grid().List(ctx, req) - if err != nil { - return false, err - } +func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics { + var errs []error - return len(gridList.FilterByID(gid).Data) != 0, nil -} - -func resourceSepConfigExists(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { - log.Debugf("resourceSepConfigExists: called for sep id: %d", d.Get("sep_id").(int)) + gid := uint64(d.Get("gid").(int)) - sepDesConfig, err := utilitySepConfigCheckPresence(ctx, d, m) - if sepDesConfig == nil { - if err != nil { - return false, err - } - return false, nil + if err := ic.ExistGID(ctx, gid, c); err != nil { + errs = append(errs, err) } - return true, nil + return dc.ErrorsToDiagnostics(errs) } diff --git a/internal/service/cloudbroker/sep/resource_sep.go b/internal/service/cloudbroker/sep/resource_sep.go index 3e4afba..1c759c5 100644 --- a/internal/service/cloudbroker/sep/resource_sep.go +++ b/internal/service/cloudbroker/sep/resource_sep.go @@ -1,510 +1,534 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, -Sergey Kisil, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -import ( - "context" - "encoding/json" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" -) - -func resourceSepCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceSepCreate: called for sep %s", d.Get("name").(string)) - - c := m.(*controller.ControllerCfg) - req := sep.CreateRequest{} - - haveGID, err := existGID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveGID { - return diag.Errorf("resourceSepCreate: can't create Sep because GID %d is not allowed or does not exist", d.Get("gid").(int)) - } - req.GID = uint64(d.Get("gid").(int)) - - req.Name = d.Get("name").(string) - req.SEPType = d.Get("type").(string) - - if desc, ok := d.GetOk("desc"); ok { - req.Description = desc.(string) - } - if configString, ok := d.GetOk("config"); ok { - req.Config = configString.(string) - } - if enable, ok := d.GetOk("enable"); ok { - req.Enable = enable.(bool) - } - - var consumedNIDs []uint64 - for _, item := range d.Get("consumed_by").([]interface{}) { - consumedNIDs = append(consumedNIDs, uint64(item.(int))) - } - - req.ConsumerNIDs = consumedNIDs - - var providerNIDs []uint64 - for _, item := range d.Get("provided_by").([]interface{}) { - providerNIDs = append(providerNIDs, uint64(item.(int))) - } - - req.ProviderNIDs = providerNIDs - - sepId, err := c.CloudBroker().SEP().Create(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId(strconv.FormatUint(sepId, 10)) - d.Set("sep_id", sepId) - - return resourceSepRead(ctx, d, m) -} - -func resourceSepRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceSepRead: called for %s id: %d", d.Get("name").(string), d.Get("sep_id").(int)) - - sep, err := utilitySepCheckPresence(ctx, d, m) - if sep == nil { - d.SetId("") - return diag.FromErr(err) - } - - d.Set("ckey", sep.CKey) - d.Set("meta", flattens.FlattenMeta(sep.Meta)) - d.Set("consumed_by", sep.ConsumedBy) - d.Set("desc", sep.Description) - d.Set("gid", sep.GID) - d.Set("guid", sep.GUID) - d.Set("sep_id", sep.ID) - d.Set("milestones", sep.Milestones) - d.Set("name", sep.Name) - d.Set("obj_status", sep.ObjStatus) - d.Set("provided_by", sep.ProvidedBy) - d.Set("shared_with", sep.SharedWith) - d.Set("tech_status", sep.TechStatus) - d.Set("type", sep.Type) - data, _ := json.Marshal(sep.Config) - d.Set("config", string(data)) - - return nil -} - -func resourceSepDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceSepDelete: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) - - sepDes, err := utilitySepCheckPresence(ctx, d, m) - if sepDes == nil { - if err != nil { - return diag.FromErr(err) - } - return nil - } - - c := m.(*controller.ControllerCfg) - req := sep.DeleteRequest{ - SEPID: sepDes.ID, - } - - _, err = c.CloudBroker().SEP().Delete(ctx, req) - if err != nil { - return diag.FromErr(err) - } - - d.SetId("") - - return nil -} - -func resourceSepUpdate(ctx 
context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceSepEdit: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) - c := m.(*controller.ControllerCfg) - - haveGID, err := existGID(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - if !haveGID { - return diag.Errorf("resourceSepUpdate: can't update Sep because GID %d is not allowed or does not exist", d.Get("gid").(int)) - } - - if d.HasChange("decommission") { - decommission := d.Get("decommission").(bool) - if decommission { - req := sep.DecommissionRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - ClearPhisically: d.Get("clear_physically").(bool), - } - - _, err := c.CloudBroker().SEP().Decommission(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - } - - if d.HasChange("upd_capacity_limit") { - updCapacityLimit := d.Get("upd_capacity_limit").(bool) - if updCapacityLimit { - req := sep.UpdateCapacityLimitRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - } - - _, err := c.CloudBroker().SEP().UpdateCapacityLimit(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - } - - if d.HasChange("config") { - validateReq := sep.ConfigValidateRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - Config: d.Get("config").(string), - } - - insertReq := sep.ConfigInsertRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - Config: d.Get("config").(string), - } - - _, err := c.CloudBroker().SEP().ConfigValidate(ctx, validateReq) - if err != nil { - return diag.FromErr(err) - } - _, err = c.CloudBroker().SEP().ConfigInsert(ctx, insertReq) - if err != nil { - return diag.FromErr(err) - } - - } - - if d.HasChange("field_edit") { - fieldConfig := d.Get("field_edit").([]interface{}) - field := fieldConfig[0].(map[string]interface{}) - req := sep.ConfigFieldEditRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - FieldName: field["field_name"].(string), - FieldValue: field["field_value"].(string), - FieldType: field["field_type"].(string), - } - - _, err := c.CloudBroker().SEP().ConfigFieldEdit(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("enable") { - err := resourceSepChangeEnabled(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("consumed_by") { - err := resourceSepUpdateNodes(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - } - - if d.HasChange("provided_by") { - err := resourceSepUpdateProviders(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - } - - return resourceSepRead(ctx, d, m) -} - -func resourceSepChangeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error { - sepID := uint64(d.Get("sep_id").(int)) - - c := m.(*controller.ControllerCfg) - if d.Get("enable").(bool) { - req := sep.EnableRequest{ - SEPID: sepID, - } - - _, err := c.CloudBroker().SEP().Enable(ctx, req) - if err != nil { - return err - } - } else { - req := sep.DisableRequest{ - SEPID: sepID, - } - - _, err := c.CloudBroker().SEP().Disable(ctx, req) - if err != nil { - return err - } - } - - return nil -} - -func resourceSepUpdateNodes(ctx context.Context, d *schema.ResourceData, m interface{}) error { - log.Debugf("resourceSepUpdateNodes: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) - c := m.(*controller.ControllerCfg) - - oldSet, newSet := d.GetChange("consumed_by") - - deletedConsumed := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() - var consumerDelIds []uint64 - for _, deletedInterface := range deletedConsumed { - deletedItem := 
deletedInterface.(int) - consumerDelIds = append(consumerDelIds, uint64(deletedItem)) - } - if len(consumerDelIds) != 0 { - reqDel := sep.DelConsumerNodesRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - ConsumerNIDs: consumerDelIds, - } - _, err := c.CloudBroker().SEP().DelConsumerNodes(ctx, reqDel) - if err != nil { - return err - } - } - - addedConsumed := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() - var consumerAddIds []uint64 - for _, addedInterface := range addedConsumed { - AddedItem := addedInterface.(int) - consumerAddIds = append(consumerAddIds, uint64(AddedItem)) - } - if len(consumerAddIds) != 0 { - reqAdd := sep.AddConsumerNodesRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - ConsumerNIDs: consumerAddIds, - } - _, err := c.CloudBroker().SEP().AddConsumerNodes(ctx, reqAdd) - if err != nil { - return err - } - } - return nil -} - -func resourceSepUpdateProviders(ctx context.Context, d *schema.ResourceData, m interface{}) error { - log.Debugf("resourceSepUpdateProviders: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) - c := m.(*controller.ControllerCfg) - req := sep.AddProviderNodesRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - } - - var providerNIDs []uint64 - for _, item := range d.Get("provided_by").([]interface{}) { - providerNIDs = append(providerNIDs, uint64(item.(int))) - } - req.ProviderNIDs = providerNIDs - - _, err := c.CloudBroker().SEP().AddProviderNodes(ctx, req) - if err != nil { - return err - } - - return nil -} - -func resourceSepSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: "sep type des id", - }, - "upd_capacity_limit": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Update SEP capacity limit", - }, - "decommission": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "unlink everything that exists from SEP", - }, - "clear_physically": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "clear disks and images physically", - }, - "config": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: "sep config string", - }, - "ckey": { - Type: schema.TypeString, - Computed: true, - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "consumed_by": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "list of consumer nodes IDs", - }, - "desc": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: "sep description", - }, - "gid": { - Type: schema.TypeInt, - Required: true, - Description: "grid (platform) ID", - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - }, - "milestones": { - Type: schema.TypeInt, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: "SEP name", - }, - "obj_status": { - Type: schema.TypeString, - Computed: true, - }, - "provided_by": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "list of provider nodes IDs", - }, - "shared_with": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - "tech_status": { - Type: schema.TypeString, - Computed: true, - }, - "type": { - Type: schema.TypeString, - Required: 
true, - Description: "type of storage", - }, - "enable": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "enable SEP after creation", - }, - "field_edit": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_name": { - Type: schema.TypeString, - Required: true, - }, - "field_value": { - Type: schema.TypeString, - Required: true, - }, - "field_type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - } -} - -func ResourceSep() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - CreateContext: resourceSepCreate, - ReadContext: resourceSepRead, - UpdateContext: resourceSepUpdate, - DeleteContext: resourceSepDelete, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, - }, - - Schema: resourceSepSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package sep + +import ( + "context" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" +) + +func resourceSepCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceSepCreate: called for sep %s", d.Get("name").(string)) + + c := m.(*controller.ControllerCfg) + + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags + } + + req := sep.CreateRequest{ + GID: uint64(d.Get("gid").(int)), + Name: d.Get("name").(string), + SEPType: d.Get("type").(string), + } + + if desc, ok := d.GetOk("desc"); ok { + req.Description = desc.(string) + } + if configString, ok := d.GetOk("config"); ok { + req.Config = configString.(string) + } + if enable, ok := d.GetOk("enable"); ok { + req.Enable = enable.(bool) + } + + var consumedNIDs []uint64 + for _, item := range d.Get("consumed_by").([]interface{}) { + consumedNIDs = append(consumedNIDs, uint64(item.(int))) + } + + req.ConsumerNIDs = consumedNIDs + + var providerNIDs []uint64 + for _, item := range d.Get("provided_by").([]interface{}) { + providerNIDs = append(providerNIDs, uint64(item.(int))) + } + + req.ProviderNIDs = providerNIDs + + sepId, err := c.CloudBroker().SEP().Create(ctx, req) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + d.SetId(strconv.FormatUint(sepId, 10)) + d.Set("sep_id", sepId) + + warnings := dc.Warnings{} + + if enable, ok := d.GetOk("enable"); ok { + log.Debugf("resourceSepCreate, Enable: enable=%t sep_id %d after completing its resource configuration", enable, sepId) + err := resourceSepChangeEnabled(ctx, d, m) + if err != nil { + warnings.Add(err) + } + } + + if accountIds, ok := d.GetOk("account_ids"); ok { + log.Debugf("resourceSepCreate, accessGrant: accountIds=%v sep_id %d after completing its resource configuration", accountIds, sepId) + err := resourceSepChangeAccess(ctx, d, m) + if err != nil { + warnings.Add(err) + } + } + + if accessToPool, ok := d.GetOk("access_to_pool"); ok { + log.Debugf("resourceSepCreate, accessToPool: accessToPool=%v sep_id %d after completing its resource configuration", accessToPool, sepId) + err := resourceSepChangeAccessToPool(ctx, d, m) + if err != nil { + warnings.Add(err) + } + } + + return append(resourceSepRead(ctx, d, m), warnings.Get()...) 
+} + +func resourceSepRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceSepRead: called for %s id: %d", d.Get("name").(string), d.Get("sep_id").(int)) + + sepRec, err := utilitySepCheckPresence(ctx, d, m) + if sepRec == nil { + d.SetId("") + return diag.FromErr(err) + } + + flattenSep(d, sepRec) + + log.Debugf("resourceSepRead: after flattenSep: %s id: %d", d.Get("name").(string), d.Get("sep_id").(int)) + + return nil +} + +func resourceSepDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceSepDelete: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) + + sepDes, err := utilitySepCheckPresence(ctx, d, m) + if sepDes == nil { + d.SetId("") + if err != nil { + return diag.FromErr(err) + } + return nil + } + + c := m.(*controller.ControllerCfg) + req := sep.DeleteRequest{ + SEPID: sepDes.ID, + } + + _, err = c.CloudBroker().SEP().Delete(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceSepUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Debugf("resourceSepUpdate: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) + c := m.(*controller.ControllerCfg) + + if diags := checkParamsExistence(ctx, d, c); diags != nil { + return diags + } + + if d.HasChange("account_ids") { + err := resourceSepChangeAccess(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("access_to_pool") { + err := resourceSepChangeAccessToPool(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("decommission") { + err := resourceSepDecommission(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("upd_capacity_limit") { + err := resourceSepUpdateCapacityLimit(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("config") { + err := resourceSepUpdateConfig(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("field_edit") { + err := resourceSepFieldEdit(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("enable") { + err := resourceSepChangeEnabled(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("consumed_by") { + err := resourceSepUpdateNodes(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("provided_by") { + err := resourceSepUpdateProviders(ctx, d, m) + if err != nil { + return diag.FromErr(err) + } + } + + return resourceSepRead(ctx, d, m) +} + +func resourceSepChangeAccess(ctx context.Context, d *schema.ResourceData, m interface{}) error { + log.Debugf("resourceSepChangeAccess: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) + c := m.(*controller.ControllerCfg) + + oldSet, newSet := d.GetChange("account_ids") + + deletedAccounts := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + var accountDelIds []uint64 + for _, deletedInterface := range deletedAccounts { + deletedItem := deletedInterface.(int) + accountDelIds = append(accountDelIds, uint64(deletedItem)) + } + if len(accountDelIds) != 0 { + for _, acc := range accountDelIds { + reqDel := sep.AccessRevokeRequest{ + SEPID: uint64(d.Get("sep_id").(int)), + AccountID: acc, + } + _, err := c.CloudBroker().SEP().AccessRevoke(ctx, reqDel) + if err != nil { + return err + } + } + } + + addedAccounts := 
(newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
+	// accounts present only in the new set are granted access below
+	var accountAddIds []uint64
+	for _, addedInterface := range addedAccounts {
+		addedItem := addedInterface.(int)
+		accountAddIds = append(accountAddIds, uint64(addedItem))
+	}
+	if len(accountAddIds) != 0 {
+		for _, acc := range accountAddIds {
+			reqGrant := sep.AccessGrantRequest{
+				SEPID:     uint64(d.Get("sep_id").(int)),
+				AccountID: acc,
+			}
+			_, err := c.CloudBroker().SEP().AccessGrant(ctx, reqGrant)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// resourceSepChangeAccessToPool revokes the previously configured pool access (if any)
+// and grants access according to the new "access_to_pool" block (if present).
+func resourceSepChangeAccessToPool(ctx context.Context, d *schema.ResourceData, m interface{}) error {
+	log.Debugf("resourceSepChangeAccessToPool: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int))
+	c := m.(*controller.ControllerCfg)
+
+	oldAccessToPool, newAccessToPool := d.GetChange("access_to_pool")
+
+	oldList := oldAccessToPool.([]interface{})
+	if len(oldList) > 0 {
+		oldAccess := oldList[0].(map[string]interface{})
+		if oldAccess["pool_name"] != "" {
+			revokeReq := sep.AccessRevokeToPoolRequest{
+				SEPID:    uint64(d.Get("sep_id").(int)),
+				PoolName: oldAccess["pool_name"].(string),
+			}
+
+			if oldAccId, ok := oldAccess["account_id_pool"]; ok {
+				revokeReq.AccountID = uint64(oldAccId.(int))
+			}
+
+			if oldRgId, ok := oldAccess["rg_id"]; ok {
+				revokeReq.RGID = uint64(oldRgId.(int))
+			}
+
+			_, err := c.CloudBroker().SEP().AccessRevokeToPool(ctx, revokeReq)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	newList := newAccessToPool.([]interface{})
+	if len(newList) > 0 {
+		newAccess := newList[0].(map[string]interface{})
+		if newAccess["pool_name"] != "" {
+			grantReq := sep.AccessGrantToPoolRequest{
+				SEPID:    uint64(d.Get("sep_id").(int)),
+				PoolName: newAccess["pool_name"].(string),
+			}
+
+			if newAccId, ok := newAccess["account_id_pool"]; ok {
+				grantReq.AccountID = uint64(newAccId.(int))
+			}
+
+			if newRgId, ok := newAccess["rg_id"]; ok {
+				grantReq.RGID = uint64(newRgId.(int))
+			}
+
+			_, err := c.CloudBroker().SEP().AccessGrantToPool(ctx, grantReq)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func resourceSepDecommission(ctx context.Context, d *schema.ResourceData, m interface{}) error {
+	c := m.(*controller.ControllerCfg)
+
+	decommission := d.Get("decommission").(bool)
+	if decommission {
+		req := sep.DecommissionRequest{
+			SEPID:           uint64(d.Get("sep_id").(int)),
+			ClearPhisically: d.Get("clear_physically").(bool),
+		}
+
+		_, err := c.CloudBroker().SEP().Decommission(ctx, req)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func resourceSepUpdateCapacityLimit(ctx context.Context, d *schema.ResourceData, m interface{}) error {
+	c := m.(*controller.ControllerCfg)
+
+	updCapacityLimit := d.Get("upd_capacity_limit").(bool)
+	if updCapacityLimit {
+		req := sep.UpdateCapacityLimitRequest{
+			SEPID: uint64(d.Get("sep_id").(int)),
+		}
+
+		_, err := c.CloudBroker().SEP().UpdateCapacityLimit(ctx, req)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func resourceSepUpdateConfig(ctx context.Context, d *schema.ResourceData, m interface{}) error {
+	c := m.(*controller.ControllerCfg)
+
+	validateReq := sep.ConfigValidateRequest{
+		SEPID:  uint64(d.Get("sep_id").(int)),
+		Config: d.Get("config").(string),
+	}
+	_, err := c.CloudBroker().SEP().ConfigValidate(ctx, validateReq)
+	if err != nil {
+		return err
+	}
+
+	insertReq := sep.ConfigInsertRequest{
+		SEPID:  uint64(d.Get("sep_id").(int)),
+		Config: d.Get("config").(string),
+	}
+	_, err = c.CloudBroker().SEP().ConfigInsert(ctx, insertReq)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resourceSepFieldEdit(ctx context.Context, d *schema.ResourceData, m 
interface{}) error { + c := m.(*controller.ControllerCfg) + + fieldConfig := d.Get("field_edit").([]interface{}) + field := fieldConfig[0].(map[string]interface{}) + req := sep.ConfigFieldEditRequest{ + SEPID: uint64(d.Get("sep_id").(int)), + FieldName: field["field_name"].(string), + FieldValue: field["field_value"].(string), + FieldType: field["field_type"].(string), + } + + _, err := c.CloudBroker().SEP().ConfigFieldEdit(ctx, req) + if err != nil { + return err + } + + return nil +} + +func resourceSepChangeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + sepID := uint64(d.Get("sep_id").(int)) + + if d.Get("enable").(bool) { + req := sep.EnableRequest{ + SEPID: sepID, + } + + _, err := c.CloudBroker().SEP().Enable(ctx, req) + if err != nil { + return err + } + } else { + req := sep.DisableRequest{ + SEPID: sepID, + } + + _, err := c.CloudBroker().SEP().Disable(ctx, req) + if err != nil { + return err + } + } + + return nil +} + +func resourceSepUpdateNodes(ctx context.Context, d *schema.ResourceData, m interface{}) error { + log.Debugf("resourceSepUpdateNodes: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) + c := m.(*controller.ControllerCfg) + + oldSet, newSet := d.GetChange("consumed_by") + + deletedConsumed := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List() + var consumerDelIds []uint64 + for _, deletedInterface := range deletedConsumed { + deletedItem := deletedInterface.(int) + consumerDelIds = append(consumerDelIds, uint64(deletedItem)) + } + if len(consumerDelIds) != 0 { + reqDel := sep.DelConsumerNodesRequest{ + SEPID: uint64(d.Get("sep_id").(int)), + ConsumerNIDs: consumerDelIds, + } + _, err := c.CloudBroker().SEP().DelConsumerNodes(ctx, reqDel) + if err != nil { + return err + } + } + + addedConsumed := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List() + var consumerAddIds []uint64 + for _, addedInterface := range addedConsumed { + AddedItem := addedInterface.(int) + consumerAddIds = append(consumerAddIds, uint64(AddedItem)) + } + if len(consumerAddIds) != 0 { + reqAdd := sep.AddConsumerNodesRequest{ + SEPID: uint64(d.Get("sep_id").(int)), + ConsumerNIDs: consumerAddIds, + } + _, err := c.CloudBroker().SEP().AddConsumerNodes(ctx, reqAdd) + if err != nil { + return err + } + } + return nil +} + +func resourceSepUpdateProviders(ctx context.Context, d *schema.ResourceData, m interface{}) error { + log.Debugf("resourceSepUpdateProviders: called for %s, id: %d", d.Get("name").(string), d.Get("sep_id").(int)) + c := m.(*controller.ControllerCfg) + req := sep.AddProviderNodesRequest{ + SEPID: uint64(d.Get("sep_id").(int)), + } + + var providerNIDs []uint64 + for _, item := range d.Get("provided_by").([]interface{}) { + providerNIDs = append(providerNIDs, uint64(item.(int))) + } + req.ProviderNIDs = providerNIDs + + _, err := c.CloudBroker().SEP().AddProviderNodes(ctx, req) + if err != nil { + return err + } + + return nil +} + +func ResourceSep() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + CreateContext: resourceSepCreate, + ReadContext: resourceSepRead, + UpdateContext: resourceSepUpdate, + DeleteContext: resourceSepDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: &constants.Timeout60s, + Read: &constants.Timeout30s, + Update: &constants.Timeout60s, + Delete: &constants.Timeout60s, + Default: &constants.Timeout60s, + }, + + Schema: 
resourceSepSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/sep/resource_sep_config.go b/internal/service/cloudbroker/sep/resource_sep_config.go index 1dd62e2..0f6a698 100644 --- a/internal/service/cloudbroker/sep/resource_sep_config.go +++ b/internal/service/cloudbroker/sep/resource_sep_config.go @@ -1,197 +1,127 @@ -/* -Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -import ( - "context" - "encoding/json" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" -) - -func resourceSepConfigCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceSepConfigCreate: called for sep id %d", d.Get("sep_id").(int)) - - if _, ok := d.GetOk("sep_id"); ok { - if exists, err := resourceSepConfigExists(ctx, d, m); exists { - if err != nil { - return diag.FromErr(err) - } - id := uuid.New() - d.SetId(id.String()) - diagnostics := resourceSepConfigRead(ctx, d, m) - if diagnostics != nil { - return diagnostics - } - - return nil - } - return diag.Errorf("provided sep id config does not exist") - } - - return resourceSepConfigRead(ctx, d, m) -} - -func resourceSepConfigRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceSepConfigRead: called for sep id: %d", d.Get("sep_id").(int)) - - sepConfig, err := utilitySepConfigCheckPresence(ctx, d, m) - if sepConfig == nil { - d.SetId("") - return diag.FromErr(err) - } - data, _ := json.Marshal(sepConfig) - d.Set("config", string(data)) - return nil -} - -func resourceSepConfigDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - d.SetId("") - return nil -} - -func resourceSepConfigUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - log.Debugf("resourceSepConfigEdit: called for sep id: %d", d.Get("sep_id").(int)) - c := m.(*controller.ControllerCfg) - - if d.HasChange("config") { - validateReq := sep.ConfigValidateRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - Config: d.Get("config").(string), - } - insertReq := sep.ConfigInsertRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - Config: 
d.Get("config").(string), - } - - _, err := c.CloudBroker().SEP().ConfigValidate(ctx, validateReq) - if err != nil { - return diag.FromErr(err) - } - - _, err = c.CloudBroker().SEP().ConfigInsert(ctx, insertReq) - if err != nil { - return diag.FromErr(err) - } - - } - - if d.HasChange("field_edit") { - fieldConfig := d.Get("field_edit").([]interface{}) - field := fieldConfig[0].(map[string]interface{}) - req := sep.ConfigFieldEditRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - FieldName: field["field_name"].(string), - FieldValue: field["field_value"].(string), - FieldType: field["field_type"].(string), - } - - _, err := c.CloudBroker().SEP().ConfigFieldEdit(ctx, req) - if err != nil { - return diag.FromErr(err) - } - } - - diagnostics := resourceSepConfigRead(ctx, d, m) - if diagnostics != nil { - return diagnostics - } - - return nil -} - -func resourceSepConfigSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "sep_id": { - Type: schema.TypeInt, - Required: true, - }, - "config": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "field_edit": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_name": { - Type: schema.TypeString, - Required: true, - }, - "field_value": { - Type: schema.TypeString, - Required: true, - }, - "field_type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - } -} - -func ResourceSepConfig() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - CreateContext: resourceSepConfigCreate, - ReadContext: resourceSepConfigRead, - UpdateContext: resourceSepConfigUpdate, - DeleteContext: resourceSepConfigDelete, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: &constants.Timeout60s, - Read: &constants.Timeout30s, - Update: &constants.Timeout60s, - Delete: &constants.Timeout60s, - Default: &constants.Timeout60s, - }, - - Schema: resourceSepConfigSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+
+Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
+*/
+
+package sep
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/google/uuid"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	log "github.com/sirupsen/logrus"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
+)
+
+func resourceSepConfigCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Debugf("resourceSepConfigCreate: called for sep id %d", d.Get("sep_id").(int))
+
+	if _, ok := d.GetOk("sep_id"); ok {
+		sepConfig, err := utilitySepConfigCheckPresence(ctx, d, m)
+
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		if sepConfig == nil {
+			return diag.Errorf("provided sep id config does not exist")
+		}
+
+		id := uuid.New()
+		d.SetId(id.String())
+	}
+
+	return resourceSepConfigRead(ctx, d, m)
+}
+
+func resourceSepConfigRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Debugf("resourceSepConfigRead: called for sep id: %d", d.Get("sep_id").(int))
+
+	sepConfig, err := utilitySepConfigCheckPresence(ctx, d, m)
+	if sepConfig == nil {
+		d.SetId("")
+		return diag.FromErr(err)
+	}
+	data, _ := json.Marshal(sepConfig)
+	d.Set("config", string(data))
+	return nil
+}
+
+func resourceSepConfigDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	d.SetId("")
+	return nil
+}
+
+func resourceSepConfigUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+	log.Debugf("resourceSepConfigUpdate: called for sep id: %d", d.Get("sep_id").(int))
+
+	if d.HasChange("config") {
+		err := resourceSepUpdateConfig(ctx, d, m)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+	}
+
+	if d.HasChange("field_edit") {
+		err := resourceSepFieldEdit(ctx, d, m)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+	}
+
+	return resourceSepConfigRead(ctx, d, m)
+}
+
+func ResourceSepConfig() *schema.Resource {
+	return &schema.Resource{
+		SchemaVersion: 1,
+
+		CreateContext: resourceSepConfigCreate,
+		ReadContext:   resourceSepConfigRead,
+		UpdateContext: resourceSepConfigUpdate,
+		DeleteContext: resourceSepConfigDelete,
+
+		Importer: &schema.ResourceImporter{
+			StateContext: schema.ImportStatePassthroughContext,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create:  &constants.Timeout60s,
+			Read:    &constants.Timeout30s,
+			Update:  &constants.Timeout60s,
+			Delete:  &constants.Timeout60s,
+			Default: &constants.Timeout60s,
+		},
+
+		Schema: resourceSepConfigSchemaMake(),
+	}
+}
diff --git a/internal/service/cloudbroker/sep/utility_sep_list.go b/internal/service/cloudbroker/sep/utility_sep_list.go
index d6acd93..7798f32 100644
--- a/internal/service/cloudbroker/sep/utility_sep_list.go
+++ b/internal/service/cloudbroker/sep/utility_sep_list.go
@@ -1,84 +1,84 @@
-/*
-Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
-Authors:
-Petr Krutov,
-Stanislav Solovev,
-Sergey Kisil,
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -import ( - "context" - - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func utilitySepListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*sep.ListSEP, error) { - c := m.(*controller.ControllerCfg) - req := sep.ListRequest{} - - if by_id, ok := d.GetOk("by_id"); ok { - req.ByID = uint64(by_id.(int)) - } - if name, ok := d.GetOk("name"); ok { - req.Name = string(name.(string)) - } - if gid, ok := d.GetOk("gid"); ok { - req.GID = uint64(gid.(int)) - } - if type_, ok := d.GetOk("type"); ok { - req.Type = string(type_.(string)) - } - if provided_by, ok := d.GetOk("provided_by"); ok { - req.ProvidedBy = uint64(provided_by.(int)) - } - if tech_status, ok := d.GetOk("tech_status"); ok { - req.TechStatus = string(tech_status.(string)) - } - if consumed_by, ok := d.GetOk("consumed_by"); ok { - req.ConsumedBy = uint64(consumed_by.(int)) - } - if page, ok := d.GetOk("page"); ok { - req.Page = uint64(page.(int)) - } - if size, ok := d.GetOk("size"); ok { - req.Size = uint64(size.(int)) - } - - log.Debugf("utilitySepListCheckPresence: load image list") - sepList, err := c.CloudBroker().SEP().List(ctx, req) - if err != nil { - return nil, err - } - - return sepList, nil -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, +Sergey Kisil, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+
+Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
+*/
+
+package sep
+
+import (
+	"context"
+
+	log "github.com/sirupsen/logrus"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep"
+	"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+func utilitySepListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*sep.ListSEP, error) {
+	c := m.(*controller.ControllerCfg)
+	req := sep.ListRequest{}
+
+	if by_id, ok := d.GetOk("by_id"); ok {
+		req.ByID = uint64(by_id.(int))
+	}
+	if name, ok := d.GetOk("name"); ok {
+		req.Name = name.(string)
+	}
+	if gid, ok := d.GetOk("gid"); ok {
+		req.GID = uint64(gid.(int))
+	}
+	if type_, ok := d.GetOk("type"); ok {
+		req.Type = type_.(string)
+	}
+	if provided_by, ok := d.GetOk("provided_by"); ok {
+		req.ProvidedBy = uint64(provided_by.(int))
+	}
+	if tech_status, ok := d.GetOk("tech_status"); ok {
+		req.TechStatus = tech_status.(string)
+	}
+	if consumed_by, ok := d.GetOk("consumed_by"); ok {
+		req.ConsumedBy = uint64(consumed_by.(int))
+	}
+	if page, ok := d.GetOk("page"); ok {
+		req.Page = uint64(page.(int))
+	}
+	if size, ok := d.GetOk("size"); ok {
+		req.Size = uint64(size.(int))
+	}
+
+	log.Debugf("utilitySepListCheckPresence: load SEP list")
+	sepList, err := c.CloudBroker().SEP().List(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	return sepList, nil
+}
diff --git a/internal/service/cloudbroker/sep/utility_sep_pool.go b/internal/service/cloudbroker/sep/utility_sep_pool.go
index 8250ac8..c61937a 100644
--- a/internal/service/cloudbroker/sep/utility_sep_pool.go
+++ b/internal/service/cloudbroker/sep/utility_sep_pool.go
@@ -1,58 +1,58 @@
-/*
-Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
-Authors:
-Petr Krutov,
-Stanislav Solovev,
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
-Orchestration Technology) with Terraform by Hashicorp.
-
-Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
-
-Please see README.md to learn where to place source code so that it
-builds seamlessly.
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package sep - -import ( - "context" - - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func utilitySepPoolCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*sep.RecordPool, error) { - c := m.(*controller.ControllerCfg) - req := sep.GetPoolRequest{ - SEPID: uint64(d.Get("sep_id").(int)), - PoolName: d.Get("pool_name").(string), - } - - log.Debugf("utilitySepDesPoolCheckPresence: load sep") - sepPool, err := c.CloudBroker().SEP().GetPool(ctx, req) - if err != nil { - return nil, err - } - - return sepPool, nil -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package sep + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/sep" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilitySepPoolCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*sep.RecordPool, error) { + c := m.(*controller.ControllerCfg) + req := sep.GetPoolRequest{ + SEPID: uint64(d.Get("sep_id").(int)), + PoolName: d.Get("pool_name").(string), + } + + log.Debugf("utilitySepDesPoolCheckPresence: load pool") + sepPool, err := c.CloudBroker().SEP().GetPool(ctx, req) + if err != nil { + return nil, err + } + + return sepPool, nil +} diff --git a/internal/service/cloudbroker/stack/data_source_stack_list.go b/internal/service/cloudbroker/stack/data_source_stack_list.go index c7efdf9..21e87b2 100644 --- a/internal/service/cloudbroker/stack/data_source_stack_list.go +++ b/internal/service/cloudbroker/stack/data_source_stack_list.go @@ -44,6 +44,7 @@ import ( func dataSourceStackListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { stackList, err := utilityStackListCheckPresence(ctx, d, m) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -55,54 +56,6 @@ func dataSourceStackListRead(ctx context.Context, d *schema.ResourceData, m inte return nil } -func dataSourceStaksListSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "by_id": { - Type: schema.TypeInt, - Optional: true, - Description: "by_id", - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: "name", - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: "type", - }, - "status": { - Type: schema.TypeString, - Optional: true, - Description: "type", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: dataSourceStackSchemaMake(), - }, - Description: "items of stacks list", - }, - "entry_count": { - Type: schema.TypeInt, - Computed: true, - Description: "entry_count", - }, - } -} - func DataSourceStacksList() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, @@ -114,6 +67,6 @@ func DataSourceStacksList() *schema.Resource { Default: &constants.Timeout60s, }, - Schema: dataSourceStaksListSchemaMake(), + Schema: dataSourceStacksListSchemaMake(), } } diff --git a/internal/service/cloudbroker/stack/data_sourse_stack.go b/internal/service/cloudbroker/stack/data_sourse_stack.go index af52c13..5ca6ba9 100644 --- a/internal/service/cloudbroker/stack/data_sourse_stack.go +++ b/internal/service/cloudbroker/stack/data_sourse_stack.go @@ -43,6 +43,7 @@ import ( func dataSourceStackRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { stack, err := utilityStackCheckPresence(ctx, d, m) if err != nil { + d.SetId("") // ensure ID is empty in this case return diag.FromErr(err) } @@ -52,257 +53,6 @@ func dataSourceStackRead(ctx context.Context, d *schema.ResourceData, m interfac return nil } -func dataSourceStackSchemaMake() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "stack_id": { - Type: schema.TypeInt, - Required: true, - Description: "stack_id", - }, - "ckey": 
{ - Type: schema.TypeString, - Computed: true, - Description: "ckey", - }, - "meta": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "meta", - }, - "api_url": { - Type: schema.TypeString, - Computed: true, - Description: "api_url", - }, - "api_key": { - Type: schema.TypeString, - Computed: true, - Description: "api_key", - }, - "app_id": { - Type: schema.TypeString, - Computed: true, - Description: "api_id", - }, - "cpu_allocation_ratio": { - Type: schema.TypeFloat, - Computed: true, - Description: "cpu_allocation_ratio", - }, - "description": { - Type: schema.TypeString, - Computed: true, - Description: "description", - }, - "descr": { - Type: schema.TypeString, - Computed: true, - Description: "descr", - }, - "drivers": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "drivers", - }, - "eco": { - Type: schema.TypeString, - Computed: true, - Description: "eco", - }, - "error": { - Type: schema.TypeInt, - Computed: true, - Description: "error", - }, - "gid": { - Type: schema.TypeInt, - Computed: true, - Description: "gid", - }, - "guid": { - Type: schema.TypeInt, - Computed: true, - Description: "guid", - }, - "images": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - Description: "images", - }, - "login": { - Type: schema.TypeString, - Computed: true, - Description: "login", - }, - "mem_allocation_ratio": { - Type: schema.TypeFloat, - Computed: true, - Description: "mem_allocation_ratio", - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: "name", - }, - "packages": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: packagesSchemaMake(), - }, - }, - "passwd": { - Type: schema.TypeString, - Computed: true, - Description: "password", - }, - "reference_id": { - Type: schema.TypeString, - Computed: true, - Description: "reference_id", - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: "status", - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: "type", - }, - } -} - -func packagesSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "libvirt_bin": { - Type: schema.TypeList, - Computed: true, - Description: "libvirt_bin", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "installed_size": { - Type: schema.TypeString, - Computed: true, - Description: "installed_size", - }, - "ver": { - Type: schema.TypeString, - Computed: true, - Description: "ver", - }, - }, - }, - }, - "lvm2_lockd": { - Type: schema.TypeList, - Computed: true, - Description: "lvm2_lockd", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "installed_size": { - Type: schema.TypeString, - Computed: true, - Description: "installed_size", - }, - "ver": { - Type: schema.TypeString, - Computed: true, - Description: "ver", - }, - }, - }, - }, - "openvswitch_common": { - Type: schema.TypeList, - Computed: true, - Description: "openvswitch_common", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "installed_size": { - Type: schema.TypeString, - Computed: true, - Description: "installed_size", - }, - "ver": { - Type: schema.TypeString, - Computed: true, - Description: "ver", - }, - }, - }, - }, - "openvswitch_switch": { - Type: schema.TypeList, - Computed: true, - Description: "openvswitch_switch", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - 
"installed_size": { - Type: schema.TypeString, - Computed: true, - Description: "installed_size", - }, - "ver": { - Type: schema.TypeString, - Computed: true, - Description: "ver", - }, - }, - }, - }, - "qemu_system_x86": { - Type: schema.TypeList, - Computed: true, - Description: "qemu_system_x86", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "installed_size": { - Type: schema.TypeString, - Computed: true, - Description: "installed_size", - }, - "ver": { - Type: schema.TypeString, - Computed: true, - Description: "ver", - }, - }, - }, - }, - "sanlock": { - Type: schema.TypeList, - Computed: true, - Description: "sanlock", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "installed_size": { - Type: schema.TypeString, - Computed: true, - Description: "installed_size", - }, - "ver": { - Type: schema.TypeString, - Computed: true, - Description: "ver", - }, - }, - }, - }, - } - return res -} - func DataSourceStack() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, diff --git a/internal/service/cloudbroker/stack/flattens.go b/internal/service/cloudbroker/stack/flattens.go index 634e9c4..c489b58 100644 --- a/internal/service/cloudbroker/stack/flattens.go +++ b/internal/service/cloudbroker/stack/flattens.go @@ -33,6 +33,7 @@ Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/w package stack import ( + log "github.com/sirupsen/logrus" "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -41,6 +42,9 @@ import ( ) func flattenStack(d *schema.ResourceData, st *stack.InfoStack) { + log.Debugf("flattenStack: decoded Stack name %q / ID %d", + st.Name, st.ID) + d.Set("ckey", st.Ckey) d.Set("meta", flattens.FlattenMeta(st.Meta)) d.Set("api_url", st.APIURL) @@ -69,12 +73,13 @@ func flattenStack(d *schema.ResourceData, st *stack.InfoStack) { func flattenPackages(pg stack.Packages) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ - "libvirt_bin": flattenLibvirtBin (pg), - "lvm2_lockd": flattenLvm2Lockd (pg), - "openvswitch_common": flattenOpenvswitchCommon (pg), - "openvswitch_switch": flattenOpenvswitchSwitch (pg), - "qemu_system_x86": flattenQemuSystemX86 (pg), - "sanlock": flattenSanlock (pg), + "libvirt_bin": flattenLibvirtBin(pg), + "libvirt_daemon": flattenLibvirtDaemon(pg), + "lvm2_lockd": flattenLvm2Lockd(pg), + "openvswitch_common": flattenOpenvswitchCommon(pg), + "openvswitch_switch": flattenOpenvswitchSwitch(pg), + "qemu_system_x86": flattenQemuSystemX86(pg), + "sanlock": flattenSanlock(pg), } res = append(res, temp) return res @@ -83,8 +88,18 @@ func flattenPackages(pg stack.Packages) []map[string]interface{} { func flattenLibvirtBin(lb stack.Packages) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ - "installed_size": lb.LibvirtBin.InstalledSize, - "ver": lb.LibvirtBin.Ver, + "installed_size": lb.LibvirtBin.InstalledSize, + "ver": lb.LibvirtBin.Ver, + } + res = append(res, temp) + return res +} + +func flattenLibvirtDaemon(ld stack.Packages) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + temp := map[string]interface{}{ + "installed_size": ld.LibvirtDaemon.InstalledSize, + "ver": ld.LibvirtDaemon.Ver, } res = append(res, temp) return res @@ -93,8 +108,8 @@ func flattenLibvirtBin(lb stack.Packages) []map[string]interface{} { func flattenLvm2Lockd(ll stack.Packages) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ - 
"installed_size": ll.Lvm2Lockd.InstalledSize, - "ver": ll.Lvm2Lockd.Ver, + "installed_size": ll.Lvm2Lockd.InstalledSize, + "ver": ll.Lvm2Lockd.Ver, } res = append(res, temp) return res @@ -103,8 +118,8 @@ func flattenLvm2Lockd(ll stack.Packages) []map[string]interface{} { func flattenOpenvswitchCommon(oc stack.Packages) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ - "installed_size": oc.OpenvswitchCommon.InstalledSize, - "ver": oc.OpenvswitchCommon.Ver, + "installed_size": oc.OpenvswitchCommon.InstalledSize, + "ver": oc.OpenvswitchCommon.Ver, } res = append(res, temp) return res @@ -113,8 +128,8 @@ func flattenOpenvswitchCommon(oc stack.Packages) []map[string]interface{} { func flattenOpenvswitchSwitch(os stack.Packages) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ - "installed_size": os.OpenvswitchSwitch.InstalledSize, - "ver": os.OpenvswitchSwitch.Ver, + "installed_size": os.OpenvswitchSwitch.InstalledSize, + "ver": os.OpenvswitchSwitch.Ver, } res = append(res, temp) return res @@ -123,8 +138,8 @@ func flattenOpenvswitchSwitch(os stack.Packages) []map[string]interface{} { func flattenQemuSystemX86(qs stack.Packages) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ - "installed_size": qs.QemuSystemX86.InstalledSize, - "ver": qs.QemuSystemX86.Ver, + "installed_size": qs.QemuSystemX86.InstalledSize, + "ver": qs.QemuSystemX86.Ver, } res = append(res, temp) return res @@ -133,56 +148,55 @@ func flattenQemuSystemX86(qs stack.Packages) []map[string]interface{} { func flattenSanlock(sl stack.Packages) []map[string]interface{} { res := make([]map[string]interface{}, 0) temp := map[string]interface{}{ - "installed_size": sl.Sanlock.InstalledSize, - "ver": sl.Sanlock.Ver, + "installed_size": sl.Sanlock.InstalledSize, + "ver": sl.Sanlock.Ver, } res = append(res, temp) return res } - func flattenEco(m interface{}) string { - switch d := m.(type) { - case string: - return d - case int: - return strconv.Itoa(d) - case int64: - return strconv.FormatInt(d, 10) - case float64: - return strconv.FormatInt(int64(d), 10) - default: - return "" - } + switch d := m.(type) { + case string: + return d + case int: + return strconv.Itoa(d) + case int64: + return strconv.FormatInt(d, 10) + case float64: + return strconv.FormatInt(int64(d), 10) + default: + return "" + } } func flattenStacksList(sl *stack.ListStacks) []map[string]interface{} { res := make([]map[string]interface{}, 0, len(sl.Data)) for _, item := range sl.Data { temp := map[string]interface{}{ - "ckey": item.Ckey, - "meta": flattens.FlattenMeta(item.Meta), - "api_url": item.APIURL, - "api_key": item.Apikey, - "app_id": item.AppID, + "ckey": item.Ckey, + "meta": flattens.FlattenMeta(item.Meta), + "api_url": item.APIURL, + "api_key": item.Apikey, + "app_id": item.AppID, "cpu_allocation_ratio": item.CPUAllocationRatio, - "description": item.Description, - "descr": item.Descr, - "drivers": item.Drivers, - "eco": flattenEco(item.Eco), - "error": item.Error, - "gid": item.GID, - "guid": item.GUID, - "stack_id": item.ID, - "images": item.Images, - "login": item.Login, + "description": item.Description, + "descr": item.Descr, + "drivers": item.Drivers, + "eco": flattenEco(item.Eco), + "error": item.Error, + "gid": item.GID, + "guid": item.GUID, + "stack_id": item.ID, + "images": item.Images, + "login": item.Login, "mem_allocation_ratio": item.MemAllocationRatio, - "name": item.Name, - "packages": 
flattenPackages(item.Packages), - "passwd": item.Password, - "reference_id": item.ReferenceID, - "status": item.Status, - "type": item.Type, + "name": item.Name, + "packages": flattenPackages(item.Packages), + "passwd": item.Password, + "reference_id": item.ReferenceID, + "status": item.Status, + "type": item.Type, } res = append(res, temp) } diff --git a/internal/service/cloudbroker/stack/utility_stack.go b/internal/service/cloudbroker/stack/utility_stack.go index efeec61..e51b8d4 100644 --- a/internal/service/cloudbroker/stack/utility_stack.go +++ b/internal/service/cloudbroker/stack/utility_stack.go @@ -54,7 +54,7 @@ func utilityStackCheckPresence(ctx context.Context, d *schema.ResourceData, m in req.StackId = uint64(d.Get("stack_id").(int)) } - log.Debugf("utilityStackListCheckPresence: load stack list") + log.Debugf("utilityStackCheckPresence: load stack") stackInfo, err := c.CloudBroker().Stack().Get(ctx, req) if err != nil { return nil, err diff --git a/internal/service/cloudbroker/stack/utility_stack_list.go b/internal/service/cloudbroker/stack/utility_stack_list.go index 50294d8..29c2f43 100644 --- a/internal/service/cloudbroker/stack/utility_stack_list.go +++ b/internal/service/cloudbroker/stack/utility_stack_list.go @@ -50,20 +50,20 @@ func utilityStackListCheckPresence(ctx context.Context, d *schema.ResourceData, req.ByID = uint64(ByID.(int)) } if Name, ok := d.GetOk("name"); ok { - req.Name = string(Name.(string)) + req.Name = Name.(string) } if Type, ok := d.GetOk("type"); ok { - req.Type = string(Type.(string)) + req.Type = Type.(string) } if Status, ok := d.GetOk("status"); ok { - req.Status = string(Status.(string)) + req.Status = Status.(string) } if Page, ok := d.GetOk("page"); ok { req.Page = uint64(Page.(int)) } if Size, ok := d.GetOk("size"); ok { req.Size = uint64(Size.(int)) - } + } log.Debugf("utilityStackListCheckPresence: load stack list") stackList, err := c.CloudBroker().Stack().List(ctx, req) diff --git a/internal/service/cloudbroker/vins/data_source_vins.go b/internal/service/cloudbroker/vins/data_source_vins.go index b8929c6..77437ab 100644 --- a/internal/service/cloudbroker/vins/data_source_vins.go +++ b/internal/service/cloudbroker/vins/data_source_vins.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Sergey Kisil, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -33,66 +34,23 @@ package vins import ( "context" - "fmt" - "reflect" - - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" - - // "net/url" + "strconv" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - // "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" ) -func flattenVins(d *schema.ResourceData, vinsRecord *vins.RecordVINS) diag.Diagnostics { - log.Debugf("flattenVins: decoded ViNS name:ID %s:%d, account ID %d, RG ID %d", - vinsRecord.Name, vinsRecord.ID, vinsRecord.AccountID, vinsRecord.RGID) - - d.SetId(fmt.Sprintf("%d", vinsRecord.ID)) - d.Set("name", vinsRecord.Name) - d.Set("account_id", vinsRecord.AccountID) - // d.Set("account_name", vinsRecord.AccountName) - d.Set("rg_id", vinsRecord.RGID) - d.Set("description", vinsRecord.Description) - d.Set("ipcidr", vinsRecord.Network) - - noExtNetConnection := true - gw := vinsRecord.VNFs.GW - if !reflect.ValueOf(gw).IsZero() { - log.Debugf("flattenVins: discovered GW VNF ID %d in ViNS ID %d", gw.ID, vinsRecord.ID) - extNetID := gw.Config.ExtNetID - extNetIP := gw.Config.ExtNetIP - if extNetID != 0 && extNetIP != "" { - log.Debugf("flattenVins: ViNS ext_net_id=%d, ext_net_ip=%s", extNetID, extNetIP) - d.Set("ext_ip_addr", extNetIP) - d.Set("ext_net_id", extNetID) - } else { - return diag.Errorf("Failed to unmarshal VNF GW Config - structure is invalid.") - } - noExtNetConnection = false - } - - if noExtNetConnection { - d.Set("ext_ip_addr", "") - d.Set("ext_net_id", 0) - } - - log.Debugf("flattenVins: EXTRA CHECK - schema rg_id=%d, ext_net_id=%d", d.Get("rg_id").(int), d.Get("ext_net_id").(int)) - - return nil -} - func dataSourceVinsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - vinsFacts, err := utilityVinsCheckPresence(ctx, d, m) - if vinsFacts == nil { + vins, err := utilityVinsCheckPresence(ctx, d, m) + if err != nil { d.SetId("") return diag.FromErr(err) } - return flattenVins(d, vinsFacts) + flattenVinsData(d, vins) + d.SetId(strconv.FormatUint(vins.ID, 10)) + return nil } func DataSourceVins() *schema.Resource { @@ -105,64 +63,6 @@ func DataSourceVins() *schema.Resource { Read: &constants.Timeout30s, Default: &constants.Timeout60s, }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Name of the ViNS. Names are case sensitive and unique within the context of an account or resource group.", - }, - - /* - "vins_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Unique ID of the ViNS. 
If ViNS ID is specified, then ViNS name, rg_id and account_id are ignored.", - }, - */ - - "rg_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Unique ID of the resource group, where this ViNS is belongs to (for ViNS created at resource group level, 0 otherwise).", - }, - - "account_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Unique ID of the account, which this ViNS belongs to.", - }, - - // the rest of attributes are computed - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account, which this ViNS belongs to.", - }, - - "description": { - Type: schema.TypeString, - Computed: true, - Description: "User-defined text description of this ViNS.", - }, - - "ext_ip_addr": { - Type: schema.TypeString, - Computed: true, - Description: "IP address of the external connection (valid for ViNS connected to external network, empty string otherwise).", - }, - - "ext_net_id": { - Type: schema.TypeInt, - Computed: true, - Description: "ID of the external network this ViNS is connected to (-1 means no external connection).", - }, - - "ipcidr": { - Type: schema.TypeString, - Computed: true, - Description: "Network address used by this ViNS.", - }, - }, + Schema: dataSourceVinsSchemaMake(), } } diff --git a/internal/service/cloudbroker/vins/data_source_vins_list.go b/internal/service/cloudbroker/vins/data_source_vins_list.go index 806ec36..817dca7 100644 --- a/internal/service/cloudbroker/vins/data_source_vins_list.go +++ b/internal/service/cloudbroker/vins/data_source_vins_list.go @@ -1,190 +1,71 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. 
- -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package vins - -import ( - "context" - - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" -) - -func flattenVinsList(vl *vins.ListVINS) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - for _, v := range vl.Data { - temp := map[string]interface{}{ - "account_id": v.AccountID, - "account_name": v.AccountName, - "created_by": v.CreatedBy, - "created_time": v.CreatedTime, - "deleted_by": v.DeletedBy, - "deleted_time": v.DeletedTime, - "external_ip": v.ExternalIP, - "vins_id": v.ID, - "vins_name": v.Name, - "network": v.Network, - "rg_id": v.RGID, - "rg_name": v.RGName, - "status": v.Status, - "updated_by": v.UpdatedBy, - "updated_time": v.UpdatedTime, - "vxlan_id": v.VXLANID, - } - res = append(res, temp) - } - return res -} - -func dataSourceVinsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - vinsList, err := utilityVinsListCheckPresence(ctx, d, m) - if err != nil { - return diag.FromErr(err) - } - - id := uuid.New() - d.SetId(id.String()) - d.Set("items", flattenVinsList(vinsList)) - - return nil -} - -func dataSourceVinsListSchemaMake() map[string]*schema.Schema { - res := map[string]*schema.Schema{ - "include_deleted": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "include deleted computes", - }, - "page": { - Type: schema.TypeInt, - Optional: true, - Description: "Page number", - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Description: "Page size", - }, - "items": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeInt, - Computed: true, - }, - "account_name": { - Type: schema.TypeString, - Computed: true, - }, - "created_by": { - Type: schema.TypeString, - Computed: true, - }, - "created_time": { - Type: schema.TypeInt, - Computed: true, - }, - "deleted_by": { - Type: schema.TypeString, - Computed: true, - }, - "deleted_time": { - Type: schema.TypeInt, - Computed: true, - }, - "external_ip": { - Type: schema.TypeString, - Computed: true, - }, - "vins_id": { - Type: schema.TypeInt, - Computed: true, - }, - "vins_name": { - Type: schema.TypeString, - Computed: true, - }, - "network": { - Type: schema.TypeString, - Computed: true, - }, - "rg_id": { - Type: schema.TypeInt, - Computed: true, - }, - "rg_name": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "updated_by": { - Type: schema.TypeString, - Computed: true, - }, - "updated_time": { - Type: schema.TypeInt, - Computed: true, - }, - "vxlan_id": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, - }, - } - return res -} - -func DataSourceVinsList() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - ReadContext: dataSourceVinsListRead, - - Timeouts: &schema.ResourceTimeout{ - Read: &constants.Timeout30s, - Default: &constants.Timeout60s, - }, - - Schema: dataSourceVinsListSchemaMake(), - } -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. 
+Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. + +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package vins + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" +) + +func dataSourceVinsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + vinsList, err := utilityVinsListCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + + id := uuid.New() + d.SetId(id.String()) + d.Set("items", flattenVinsList(vinsList)) + d.Set("entry_count", vinsList.EntryCount) + + return nil +} + +func DataSourceVinsList() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + ReadContext: dataSourceVinsListRead, + + Timeouts: &schema.ResourceTimeout{ + Read: &constants.Timeout30s, + Default: &constants.Timeout60s, + }, + + Schema: dataSourceVinsListSchemaMake(), + } +} diff --git a/internal/service/cloudbroker/vins/resource_vins.go b/internal/service/cloudbroker/vins/resource_vins.go index 30b5b98..2e6fe92 100644 --- a/internal/service/cloudbroker/vins/resource_vins.go +++ b/internal/service/cloudbroker/vins/resource_vins.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Sergey Kisil, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -39,83 +40,53 @@ import ( "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -func ipcidrDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool { - if oldVal == "" && newVal != "" { - // if old value for "ipcidr" resource is empty string, it means that we are creating new ViNS - // and there is a chance that the user will want specific IP address range for this ViNS - - // check if "ipcidr" is explicitly set in TF file to a non-empty string. 
- log.Debugf("ipcidrDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal) - return false // there is a difference between stored and new value - } - log.Debugf("ipcidrDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal) - return true // suppress difference -} - func resourceVinsCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceVinsCreate: called for ViNS name %s, Account ID %d, RG ID %d", d.Get("name").(string), d.Get("account_id").(int), d.Get("rg_id").(int)) - createInAcc := true - c := m.(*controller.ControllerCfg) - inAccReq := vins.CreateInAccountRequest{ - Name: d.Get("name").(string), - } - inRGReq := vins.CreateInRGRequest{ - Name: d.Get("name").(string), - } - argVal, argSet := d.GetOk("rg_id") - if argSet && argVal.(int) > 0 { - createInAcc = false - inRGReq.RGID = uint64(argVal.(int)) - } else { - argVal, argSet = d.GetOk("account_id") - if !argSet || argVal.(int) <= 0 { - return diag.Errorf("resourceVinsCreate: ViNS name %s - no valid account and/or resource group ID specified", d.Id()) - } - inAccReq.AccountID = uint64(argVal.(int)) - } + RGID, rgOk := d.GetOk("rg_id") + AccountID, accountIdOk := d.GetOk("account_id") - argVal, argSet = d.GetOk("ext_net_id") // NB: even if ext_net_id value is explicitly set to 0, argSet = false anyway - if argSet { - if argVal.(int) > 0 { - inRGReq.ExtNetID = uint64(argVal.(int)) - } else { - inRGReq.ExtNetID = 0 - } + if !rgOk && !accountIdOk { + return diag.Errorf("resourceVinsCreate: no valid accountId or resource group ID specified") } - argVal, argSet = d.GetOk("ipcidr") - if argSet && argVal.(string) != "" { - log.Debugf("resourceVinsCreate: ipcidr is set to %s", argVal.(string)) - inAccReq.IPCIDR = argVal.(string) - inRGReq.IPCIDR = argVal.(string) - } - - argVal, argSet = d.GetOk("description") - if argSet { - inAccReq.Description = argVal.(string) - inRGReq.Description = argVal.(string) + if rgOk && accountIdOk { + return diag.Errorf("resourceVinsCreate: either accountId or resource group ID should be specified") } var vinsID uint64 - if createInAcc { - apiResp, err := c.CloudBroker().VINS().CreateInAccount(ctx, inAccReq) + if accountIdOk { + req, diags := createVinsInAcc(ctx, d, m, uint64(AccountID.(int))) + if diags != nil { + return diags + } + + apiResp, err := c.CloudBroker().VINS().CreateInAccount(ctx, req) if err != nil { + d.SetId("") return diag.FromErr(err) } vinsID = apiResp - } else { - apiResp, err := c.CloudBroker().VINS().CreateInRG(ctx, inRGReq) + } else if rgOk { + req, diags := createVinsInRG(ctx, d, m, uint64(RGID.(int))) + if diags != nil { + return diags + } + + apiResp, err := c.CloudBroker().VINS().CreateInRG(ctx, req) if err != nil { + d.SetId("") return diag.FromErr(err) } @@ -123,20 +94,103 @@ func resourceVinsCreate(ctx context.Context, d *schema.ResourceData, m interface } d.SetId(strconv.FormatUint(vinsID, 10)) + d.Set("vins_id", vinsID) log.Debugf("resourceVinsCreate: new ViNS ID / name %d / %s creation sequence complete", vinsID, d.Get("name").(string)) - return dataSourceVinsRead(ctx, d, m) + warnings := dc.Warnings{} + if _, ok := d.GetOk("ip"); ok { + if errs := resourceVinsIpReserve(ctx, d, m, vinsID); len(errs) != 0 { + for _, err := range errs { + warnings.Add(err) + } + } + } + + if _, ok := d.GetOk("nat_rule"); ok { + if errs := resourceVinsNatRuleAdd(ctx, d, m, vinsID); len(errs) != 0 { + for _, err := range errs { + warnings.Add(err) + } + } + } + + return append(warnings.Get(), 
resourceVinsRead(ctx, d, m)...) } func resourceVinsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - vinsFacts, err := utilityVinsCheckPresence(ctx, d, m) - if vinsFacts == nil { + log.Debugf("resourceVinsRead: called for vins id %s, name %s", + d.Id(), d.Get("name").(string)) + + warnings := dc.Warnings{} + + vinsData, err := utilityVinsCheckPresence(ctx, d, m) + if err != nil { d.SetId("") return diag.FromErr(err) } - return flattenVins(d, vinsFacts) + isEnabled := d.Get("enable").(bool) + + hasChangeState := false + + switch vinsData.Status { + case status.Destroyed: + d.Set("vins_id", 0) + d.SetId("") + return diag.Errorf("The resource cannot be read because it has been destroyed") + // return resourceVinsCreate(ctx, d, m) + case status.Deleted: + // hasChangeState = true + + // req := vins.RestoreRequest{ + // VINSID: vinsData.ID, + // } + // if reason, ok := d.GetOk("reason"); ok { + // req.Reason = reason.(string) + // } + + // _, err := c.CloudBroker().VINS().Restore(ctx, req) + // if err != nil { + // warnings.Add(err) + // } + case status.Modeled: + return diag.Errorf("ViNS are in status: %s, please, contact support for more information", vinsData.Status) + case status.Created: + case status.Enabled: + if !isEnabled { + hasChangeState = true + if err := resourceVinsDisable(ctx, d, m, vinsData.ID); err != nil { + warnings.Add(err) + } + } + case status.Enabling: + case status.Disabled: + if isEnabled { + hasChangeState = true + if err := resourceVinsEnable(ctx, d, m, vinsData.ID); err != nil { + warnings.Add(err) + } + } + case status.Disabling: + case status.Deleting: + return diag.Errorf("ViNS are in progress with status: %s", vinsData.Status) + } + + if hasChangeState { + vinsData, err = utilityVinsCheckPresence(ctx, d, m) + if vinsData == nil { + d.SetId("") + return diag.FromErr(err) + } + } + + flattenVins(d, vinsData) + + log.Debugf("resourceVinsRead: after flattenVins: vins_id %s, name %s", + d.Id(), d.Get("name").(string)) + + return warnings.Get() } func resourceVinsUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -144,137 +198,588 @@ func resourceVinsUpdate(ctx context.Context, d *schema.ResourceData, m interface d.Id(), d.Get("name").(string), d.Get("account_id").(int), d.Get("rg_id").(int)) c := m.(*controller.ControllerCfg) - vinsID, _ := strconv.ParseUint(d.Id(), 10, 64) - oldExtNetId, newExtNedId := d.GetChange("ext_net_id") - if oldExtNetId.(int) != newExtNedId.(int) { - log.Debugf("resourceVinsUpdate: changing ViNS ID %s - ext_net_id %d -> %d", d.Id(), oldExtNetId.(int), newExtNedId.(int)) + if diags := checkParamsExistenceUpdate(ctx, d, c); diags != nil { + return diags + } - if oldExtNetId.(int) > 0 { - // there was preexisting external net connection - disconnect ViNS - req := vins.ExtNetDisconnectRequest{VINSID: vinsID} + vinsData, err := utilityVinsCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } - _, err := c.CloudBroker().VINS().ExtNetDisconnect(ctx, req) - if err != nil { - return diag.FromErr(err) + isEnabled := d.Get("enable").(bool) + + hasChangeState := false + + warnings := dc.Warnings{} + switch vinsData.Status { + case status.Destroyed: + d.Set("vins_id", 0) + d.SetId("") + return diag.Errorf("The resource cannot be updated because it has been destroyed") + // return resourceVinsCreate(ctx, d, m) + case status.Deleted: + hasChangeState = true + + if err := resourceVinsRestore(ctx, d, m, vinsData.ID); err != nil { + warnings.Add(err) + 
} + case status.Modeled: + return diag.Errorf("ViNS are in status: %s, please, contact support for more information", vinsData.Status) + case status.Created: + case status.Enabled: + if !isEnabled { + hasChangeState = true + if err := resourceVinsDisable(ctx, d, m, vinsData.ID); err != nil { + warnings.Add(err) } } - - if newExtNedId.(int) > 0 { - req := vins.ExtNetConnectRequest{ - VINSID: vinsID, - NetID: uint64(newExtNedId.(int)), + case status.Enabling: + case status.Disabled: + if isEnabled { + hasChangeState = true + if err := resourceVinsEnable(ctx, d, m, vinsData.ID); err != nil { + warnings.Add(err) } + } + case status.Disabling: + case status.Deleting: + return diag.Errorf("ViNS are in progress with status: %s", vinsData.Status) + } - _, err := c.CloudBroker().VINS().ExtNetConnect(ctx, req) - if err != nil { - return diag.FromErr(err) + if hasChangeState { + vinsData, err = utilityVinsCheckPresence(ctx, d, m) + if err != nil { + d.SetId("") + return diag.FromErr(err) + } + } + + if d.HasChange("enable") { + if err := resourceVinsChangeEnabled(ctx, d, m); err != nil { + warnings.Add(err) + } + } + + if d.HasChange("ext_net_id") { + if err := resourceVinsChangeExtNetId(ctx, d, m); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("ip") { + if errs := resourceVinsChangeIp(ctx, d, m); len(errs) != 0 { + for _, err := range errs { + warnings.Add(err) } } } + if d.HasChange("nat_rule") { + if errs := resourceVinsChangeNatRule(ctx, d, m); len(errs) != 0 { + for _, err := range errs { + warnings.Add(err) + } + } + } + + if d.HasChange("default_qos") { + if err := resourceVinsChangeDefaultQos(ctx, d, m); err != nil { + warnings.Add(err) + } + } - return dataSourceVinsRead(ctx, d, m) + if d.HasChange("vnfdev_redeploy") { + if err := resourceVinsChangeVnfRedeploy(ctx, d, m); err != nil { + warnings.Add(err) + } + } + + if d.HasChange("vnfdev_restart") { + if err := resourceVinsChangeVnfRestart(ctx, d, m); err != nil { + warnings.Add(err) + } + } + + if d.HasChange("vnfdev_reset") { + if err := resourceVinsChangeVnfReset(ctx, d, m); err != nil { + warnings.Add(err) + } + } + + if d.HasChange("vnfdev_start") { + if err := resourceVinsChangeVnfStartStop(ctx, d, m); err != nil { + warnings.Add(err) + } + } + + return append(warnings.Get(), dataSourceVinsRead(ctx, d, m)...) 
} func resourceVinsDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Debugf("resourceVinsDelete: called for ViNS ID / name %s / %s, Account ID %d, RG ID %d", d.Id(), d.Get("name").(string), d.Get("account_id").(int), d.Get("rg_id").(int)) - vinsFacts, err := utilityVinsCheckPresence(ctx, d, m) - if vinsFacts == nil { + c := m.(*controller.ControllerCfg) + + vinsItem, err := utilityVinsCheckPresence(ctx, d, m) + if vinsItem == nil { + d.SetId("") + return diag.FromErr(err) + } + + req := vins.DeleteRequest{VINSID: vinsItem.ID} + + if force, ok := d.GetOk("force"); ok { + req.Force = force.(bool) + } + if permanently, ok := d.GetOk("permanently"); ok { + req.Permanently = permanently.(bool) + } + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + if _, err := c.CloudBroker().VINS().Delete(ctx, req); err != nil { + return diag.FromErr(err) + } + + d.SetId("") + + return nil +} + +func resourceVinsEnable(ctx context.Context, d *schema.ResourceData, m interface{}, vinsId uint64) error { + c := m.(*controller.ControllerCfg) + + req := vins.EnableRequest{ + VINSID: vinsId, + } + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().VINS().Enable(ctx, req) + return err +} + +func resourceVinsDisable(ctx context.Context, d *schema.ResourceData, m interface{}, vinsId uint64) error { + c := m.(*controller.ControllerCfg) + + req := vins.DisableRequest{ + VINSID: vinsId, + } + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().VINS().Disable(ctx, req) + return err +} + +func resourceVinsRestore(ctx context.Context, d *schema.ResourceData, m interface{}, vinsId uint64) error { + c := m.(*controller.ControllerCfg) + + req := vins.RestoreRequest{ + VINSID: vinsId, + } + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().VINS().Restore(ctx, req) + return err +} + +func resourceVinsIpReserve(ctx context.Context, d *schema.ResourceData, m interface{}, vinsId uint64) []error { + var errs []error + c := m.(*controller.ControllerCfg) + + ipRes := d.Get("ip") + + ipsSlice := ipRes.([]interface{}) + for _, ipInterfase := range ipsSlice { + ip := ipInterfase.(map[string]interface{}) + + req := vins.IPReserveRequest{ + VINSID: vinsId, + Type: ip["type"].(string), + } + if ipAddr, ok := ip["ip_addr"]; ok { + req.IPAddr = ipAddr.(string) + } + if macAddr, ok := ip["mac"]; ok { + req.MAC = macAddr.(string) + } + if computeId, ok := ip["compute_id"]; ok { + req.ComputeID = uint64(computeId.(int)) + } + if reason, ok := ip["reason"]; ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().VINS().IPReserve(ctx, req) if err != nil { - return diag.FromErr(err) + errs = append(errs, err) } - // the specified ViNS does not exist - in this case according to Terraform best practice - // we exit from Destroy method without error - return nil } + return errs +} +func resourceVinsNatRuleAdd(ctx context.Context, d *schema.ResourceData, m interface{}, vinsId uint64) []error { + var errs []error c := m.(*controller.ControllerCfg) - req := vins.DeleteRequest{ - VINSID: vinsFacts.ID, - Force: true, - Permanently: true, + + natRule := d.Get("nat_rule") + + addedNatRules := natRule.([]interface{}) + if len(addedNatRules) > 0 { + for _, natRuleInterface := range addedNatRules { + natRule := natRuleInterface.(map[string]interface{}) + + req := vins.NATRuleAddRequest{ + VINSID: vinsId, + IntIP: 
natRule["int_ip"].(string), + IntPort: uint64(natRule["int_port"].(int)), + ExtPortStart: uint64(natRule["ext_port_start"].(int)), + } + if extPortEnd, ok := natRule["ext_port_end"]; ok { + req.ExtPortEnd = uint64(extPortEnd.(int)) + } + if proto, ok := natRule["proto"]; ok { + req.Proto = proto.(string) + } + if reason, ok := natRule["reason"]; ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().VINS().NATRuleAdd(ctx, req) + if err != nil { + errs = append(errs, err) + } + } } + return errs +} - _, err = c.CloudBroker().VINS().Delete(ctx, req) - if err != nil { - return diag.FromErr(err) +func resourceVinsChangeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + vinsId := uint64(d.Get("vins_id").(int)) + + _, enableNew := d.GetChange("enable") + if enableNew.(bool) { + req := vins.EnableRequest{ + VINSID: vinsId, + } + + _, err := c.CloudBroker().VINS().Enable(ctx, req) + return err + } + + req := vins.DisableRequest{ + VINSID: vinsId, + } + + _, err := c.CloudBroker().VINS().Disable(ctx, req) + return err +} + +func resourceVinsChangeExtNetId(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + vinsId := uint64(d.Get("vins_id").(int)) + oldExtNetId, newExtNedId := d.GetChange("ext_net_id") + log.Debugf("resourceVinsUpdate - resourceVinsChangeExtNetId: changing ViNS ID %s - ext_net_id %d -> %d", d.Id(), oldExtNetId.(int), newExtNedId.(int)) + + if oldExtNetId.(int) > 0 { + // there was preexisting external net connection - disconnect ViNS + req := vins.ExtNetDisconnectRequest{VINSID: vinsId} + + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().VINS().ExtNetDisconnect(ctx, req) + return err + } + + if newExtNedId.(int) > 0 { + req := vins.ExtNetConnectRequest{ + VINSID: vinsId, + NetID: uint64(newExtNedId.(int)), + } + if ip, ok := d.GetOk("ext_ip"); ok && ip != "" { + req.IP = ip.(string) + } + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().VINS().ExtNetConnect(ctx, req) + return err } return nil } -func resourceVinsSchemaMake() map[string]*schema.Schema { - rets := map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringIsNotEmpty, - Description: "Name of the ViNS. Names are case sensitive and unique within the context of an account or resource group.", - }, +func resourceVinsChangeIp(ctx context.Context, d *schema.ResourceData, m interface{}) []error { + c := m.(*controller.ControllerCfg) - /* we do not need ViNS ID as an argument because if we already know this ID, it is not practical to call resource provider. - Resource Import will work anyway, as it obtains the ID of ViNS to be imported through another mechanism. - "vins_id": { - Type: schema.TypeInt, - Optional: true, - Description: "Unique ID of the ViNS. If ViNS ID is specified, then ViNS name, rg_id and account_id are ignored.", - }, - */ - - "rg_id": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Default: 0, - Description: "ID of the resource group, where this ViNS belongs to. Non-zero for ViNS created at resource group level, 0 otherwise.", - }, + var errs []error - "account_id": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: validation.IntAtLeast(1), - Description: "ID of the account, which this ViNS belongs to. 
For ViNS created at account level, resource group ID is 0.", - }, + vinsId := uint64(d.Get("vins_id").(int)) + deletedIps := make([]interface{}, 0) + addedIps := make([]interface{}, 0) - "ext_net_id": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(0), - Description: "ID of the external network this ViNS is connected to. Pass 0 if no external connection required.", - }, + oldIpInterface, newIpInterface := d.GetChange("ip") + oldIpSlice := oldIpInterface.([]interface{}) + newIpSlice := newIpInterface.([]interface{}) - "ipcidr": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: ipcidrDiffSupperss, - Description: "Network address to use by this ViNS. This parameter is only valid when creating new ViNS.", - }, + for _, el := range oldIpSlice { + if !isContainsIp(newIpSlice, el) { + deletedIps = append(deletedIps, el) + } + } - "description": { - Type: schema.TypeString, - Optional: true, - Default: "", - Description: "Optional user-defined text description of this ViNS.", - }, + for _, el := range newIpSlice { + if !isContainsIp(oldIpSlice, el) { + addedIps = append(addedIps, el) + } + } - // the rest of attributes are computed - "account_name": { - Type: schema.TypeString, - Computed: true, - Description: "Name of the account, which this ViNS belongs to.", - }, + if len(deletedIps) > 0 { + for _, ipInterface := range deletedIps { + ip := ipInterface.(map[string]interface{}) + req := vins.IPReleaseRequest{VINSID: vinsId} - "ext_ip_addr": { - Type: schema.TypeString, - Computed: true, - Description: "IP address of the external connection (valid for ViNS connected to external network, ignored otherwise).", - }, + if ip["ip_addr"].(string) != "" { + req.IPAddr = ip["ip_addr"].(string) + } + if ip["mac"].(string) != "" { + req.MAC = ip["mac"].(string) + } + + _, err := c.CloudBroker().VINS().IPRelease(ctx, req) + if err != nil { + errs = append(errs, err) + } + } + } + + if len(addedIps) > 0 { + for _, ipInterface := range addedIps { + ip := ipInterface.(map[string]interface{}) + req := vins.IPReserveRequest{ + VINSID: vinsId, + Type: ip["type"].(string), + } + + if ip["ip_addr"].(string) != "" { + req.IPAddr = ip["ip_addr"].(string) + } + if ip["mac"].(string) != "" { + req.MAC = ip["mac"].(string) + } + if ip["compute_id"].(int) != 0 { + req.ComputeID = uint64(ip["compute_id"].(int)) + } + if ip["reason"].(string) != "" { + req.Reason = ip["reason"].(string) + } + + _, err := c.CloudBroker().VINS().IPReserve(ctx, req) + if err != nil { + errs = append(errs, err) + } + } + } + + return errs +} + +func resourceVinsChangeNatRule(ctx context.Context, d *schema.ResourceData, m interface{}) []error { + c := m.(*controller.ControllerCfg) + + var errs []error + + vinsId := uint64(d.Get("vins_id").(int)) + + deletedNatRules := make([]interface{}, 0) + addedNatRules := make([]interface{}, 0) + + oldNatRulesInterface, newNatRulesInterface := d.GetChange("nat_rule") + oldNatRulesSlice := oldNatRulesInterface.([]interface{}) + newNatRulesSlice := newNatRulesInterface.([]interface{}) + + for _, el := range oldNatRulesSlice { + if !isContainsNatRule(newNatRulesSlice, el) { + deletedNatRules = append(deletedNatRules, el) + } + } + + for _, el := range newNatRulesSlice { + if !isContainsNatRule(oldNatRulesSlice, el) { + addedNatRules = append(addedNatRules, el) + } + } + + if len(deletedNatRules) > 0 { + for _, natRuleInterface := range deletedNatRules { + natRule := natRuleInterface.(map[string]interface{}) + req := vins.NATRuleDelRequest{ + VINSID: vinsId, + 
RuleID: int64(natRule["rule_id"].(int)), + } + if natRule["reason"].(string) != "" { + req.Reason = natRule["reason"].(string) + } + + _, err := c.CloudBroker().VINS().NATRuleDel(ctx, req) + errs = append(errs, err) + } + } + + if len(addedNatRules) > 0 { + for _, natRuleInterface := range addedNatRules { + natRule := natRuleInterface.(map[string]interface{}) + req := vins.NATRuleAddRequest{ + VINSID: vinsId, + IntIP: natRule["int_ip"].(string), + IntPort: uint64(natRule["int_port"].(int)), + ExtPortStart: uint64(natRule["ext_port_start"].(int)), + } + + if natRule["ext_port_end"].(int) != 0 { + req.ExtPortEnd = uint64(natRule["ext_port_end"].(int)) + } + if natRule["proto"].(string) != "" { + req.Proto = natRule["proto"].(string) + } + + _, err := c.CloudBroker().VINS().NATRuleAdd(ctx, req) + if err != nil { + errs = append(errs, err) + } + } + } + + return errs +} + +func resourceVinsChangeDefaultQos(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + vinsId := uint64(d.Get("vins_id").(int)) + + defaultQosInterface := d.Get("default_qos").([]interface{}) + + if len(defaultQosInterface) > 0 { + defaultQos := defaultQosInterface[0].(map[string]interface{}) + req := vins.DefaultQOSUpdateRequest{VINSID: vinsId} + if inRate, ok := defaultQos["in_rate"]; ok { + req.IngressRate = uint64(inRate.(int)) + } + if inBurst, ok := defaultQos["in_burst"]; ok { + req.IngressBirst = uint64(inBurst.(int)) + } + if eRate, ok := defaultQos["e_rate"]; ok { + req.EgressRate = uint64(eRate.(int)) + } + + _, err := c.CloudBroker().VINS().DefaultQOSUpdate(ctx, req) + return err + } + + return nil +} + +func resourceVinsChangeVnfRedeploy(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + vinsId := uint64(d.Get("vins_id").(int)) + + _, newRedeploy := d.GetChange("vnfdev_redeploy") + if newRedeploy.(bool) { + req := vins.VNFDevRedeployRequest{VINSID: vinsId} + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().VINS().VNFDevRedeploy(ctx, req) + return err + } + + return nil +} + +func resourceVinsChangeVnfRestart(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + vinsId := uint64(d.Get("vins_id").(int)) + + _, newRestart := d.GetChange("vnfdev_restart") + if newRestart.(bool) { + req := vins.VNFDevRestartRequest{VINSID: vinsId} + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().VINS().VNFDevRestart(ctx, req) + if err != nil { + return err + } + } + + return nil +} + +func resourceVinsChangeVnfReset(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + vinsId := uint64(d.Get("vins_id").(int)) + + _, newRestart := d.GetChange("vnfdev_reset") + if newRestart.(bool) { + req := vins.VNFDevResetRequest{VINSID: vinsId} + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + + _, err := c.CloudBroker().VINS().VNFDevReset(ctx, req) + if err != nil { + return err + } + } + + return nil +} + +func resourceVinsChangeVnfStartStop(ctx context.Context, d *schema.ResourceData, m interface{}) error { + c := m.(*controller.ControllerCfg) + + vinsId := uint64(d.Get("vins_id").(int)) + + _, newStart := d.GetChange("vnfdev_start") + if newStart.(bool) { + req := vins.VNFDevStartRequest{VINSID: vinsId} + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) + } + 
+ _, err := c.CloudBroker().VINS().VNFDevStart(ctx, req) + if err != nil { + return err + } + } + + req := vins.VNFDevStopRequest{VINSID: vinsId} + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) } - return rets + _, err := c.CloudBroker().VINS().VNFDevStop(ctx, req) + return err } func ResourceVins() *schema.Resource { diff --git a/internal/service/cloudbroker/vins/utility_vins.go b/internal/service/cloudbroker/vins/utility_vins.go index 898411c..a6dc803 100644 --- a/internal/service/cloudbroker/vins/utility_vins.go +++ b/internal/service/cloudbroker/vins/utility_vins.go @@ -1,8 +1,9 @@ /* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. Authors: Petr Krutov, Stanislav Solovev, +Sergey Kisil, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -33,7 +34,6 @@ package vins import ( "context" - "fmt" "strconv" log "github.com/sirupsen/logrus" @@ -46,78 +46,24 @@ import ( func utilityVinsCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*vins.RecordVINS, error) { c := m.(*controller.ControllerCfg) - idSet := false - vinsID, err := strconv.ParseUint(d.Id(), 10, 64) - if err != nil || vinsID <= 0 { - vinsId, argSet := d.GetOk("vins_id") // NB: vins_id is NOT present in vinsResource schema! - if argSet { - vinsID = uint64(vinsId.(int)) - idSet = true - } - } else { - idSet = true - } - - if idSet { - log.Debugf("utilityVinsCheckPresence: locating ViNS by its ID %d", vinsID) - req := vins.GetRequest{VINSID: vinsID} - - vinsFacts, err := c.CloudBroker().VINS().Get(ctx, req) - if err != nil { - return nil, err - } - - return vinsFacts, nil - } - - vinsName, argSet := d.GetOk("name") - if !argSet { - return nil, fmt.Errorf("Cannot check ViNS presence if ViNS name is empty") - } - req := vins.SearchRequest{ - Name: vinsName.(string), - ShowAll: false, - } - log.Debugf("utilityVinsCheckPresence: preparing to locate ViNS name %s", vinsName.(string)) + log.Debug("utilityVinsCheckPresence: locating ViNS by its ID") + req := vins.GetRequest{} - rgId, rgSet := d.GetOk("rg_id") - if rgSet { - log.Debugf("utilityVinsCheckPresence: limiting ViNS search to RG ID %d", rgId.(int)) - req.RGID = uint64(rgId.(int)) + if d.Id() != "" { + id, _ := strconv.ParseUint(d.Id(), 10, 64) + req.VINSID = id + } else { + req.VINSID = uint64(d.Get("vins_id").(int)) } - accountId, accountSet := d.GetOk("account_id") - if accountSet { - log.Debugf("utilityVinsCheckPresence: limiting ViNS search to Account ID %d", accountId.(int)) - req.AccountID = uint64(accountId.(int)) + if reason, ok := d.GetOk("reason"); ok { + req.Reason = reason.(string) } - vinsList, err := c.CloudBroker().VINS().Search(ctx, req) + vins, err := c.CloudBroker().VINS().Get(ctx, req) if err != nil { return nil, err } - log.Debugf("utilityVinsCheckPresence: traversing decoded Json of length %d", len(vinsList)) - for index, item := range vinsList { - if item.Name == vinsName.(string) { - if (accountSet && item.AccountID != uint64(accountId.(int))) || - (rgSet && item.RGID != uint64(rgId.(int))) { - continue - } - - log.Debugf("utilityVinsCheckPresence: match ViNS name %s / ID %d, account ID %d, RG ID %d at index %d", - item.Name, item.ID, item.AccountID, item.RGID, index) - - req := vins.GetRequest{VINSID: item.ID} - - vinsGetResp, err := c.CloudBroker().VINS().Get(ctx, req) - if err != nil { - return nil, err - } - - 
return vinsGetResp, nil - } - } - - return nil, fmt.Errorf("Cannot find ViNS name %s. Check name and/or RG ID & Account ID and your access rights", vinsName.(string)) + return vins, nil } diff --git a/internal/service/cloudbroker/vins/utility_vins_list.go b/internal/service/cloudbroker/vins/utility_vins_list.go index 3495931..65088bc 100644 --- a/internal/service/cloudbroker/vins/utility_vins_list.go +++ b/internal/service/cloudbroker/vins/utility_vins_list.go @@ -1,62 +1,80 @@ -/* -Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. -Authors: -Petr Krutov, -Stanislav Solovev, - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud -Orchestration Technology) with Terraform by Hashicorp. - -Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort - -Please see README.md to learn where to place source code so that it -builds seamlessly. - -Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki -*/ - -package vins - -import ( - "context" - - log "github.com/sirupsen/logrus" - "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" - "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func utilityVinsListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*vins.ListVINS, error) { - c := m.(*controller.ControllerCfg) - req := vins.ListRequest{} - - if page, ok := d.GetOk("page"); ok { - req.Page = uint64(page.(int)) - } - if size, ok := d.GetOk("size"); ok { - req.Size = uint64(size.(int)) - } - - log.Debugf("utilityVinsListCheckPresence") - vinsList, err := c.CloudBroker().VINS().List(ctx, req) - if err != nil { - return nil, err - } - - return vinsList, nil -} +/* +Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved. +Authors: +Petr Krutov, +Stanislav Solovev, + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud +Orchestration Technology) with Terraform by Hashicorp. + +Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort + +Please see README.md to learn where to place source code so that it +builds seamlessly. 
+ +Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki +*/ + +package vins + +import ( + "context" + + log "github.com/sirupsen/logrus" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func utilityVinsListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*vins.ListVINS, error) { + c := m.(*controller.ControllerCfg) + req := vins.ListRequest{} + + if byId, ok := d.GetOk("by_id"); ok { + req.ByID = uint64(byId.(int)) + } + if name, ok := d.GetOk("name"); ok { + req.Name = name.(string) + } + if accountId, ok := d.GetOk("account_id"); ok { + req.AccountID = uint64(accountId.(int)) + } + if rgId, ok := d.GetOk("rg_id"); ok { + req.RGID = uint64(rgId.(int)) + } + if extIp, ok := d.GetOk("ext_ip"); ok { + req.ExtIP = extIp.(string) + } + if page, ok := d.GetOk("page"); ok { + req.Page = uint64(page.(int)) + } + if size, ok := d.GetOk("size"); ok { + req.Size = uint64(size.(int)) + } + if includeDeleted, ok := d.GetOk("include_deleted"); ok { + req.IncludeDeleted = includeDeleted.(bool) + } + + log.Debugf("utilityVinsListCheckPresence") + vinsList, err := c.CloudBroker().VINS().List(ctx, req) + if err != nil { + return nil, err + } + + return vinsList, nil +} diff --git a/samples/cloudbroker/data_disk/main.tf b/samples/cloudbroker/data_disk/main.tf index 32ef443..5d7b892 100644 --- a/samples/cloudbroker/data_disk/main.tf +++ b/samples/cloudbroker/data_disk/main.tf @@ -1,6 +1,6 @@ /* Пример использования -Получение списка доступных образов +Получение данных диска */ #Расскомментируйте этот код, #и внесите необходимые правки в версию и путь, @@ -25,11 +25,13 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_disk" "acl" { +data "decort_cb_disk" "acl" { + #id диска + #обязательный параметр + #тип - число disk_id = 49304 - } output "test" { - value = data.decort_disk.acl + value = data.decort_cb_disk.acl } diff --git a/samples/cloudbroker/data_disk_list/main.tf b/samples/cloudbroker/data_disk_list/main.tf index 370ac7c..cda9e79 100644 --- a/samples/cloudbroker/data_disk_list/main.tf +++ b/samples/cloudbroker/data_disk_list/main.tf @@ -26,7 +26,37 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_disk_list" "dl" { +data "decort_cb_disk_list" "dl" { + #фильтр по id диска + #опциональный параметр + #тип - число + #by_id = 11111 + + #фильтр по имени диска + #опциональный параметр + #тип - строка + #name = "disk name" + + #фильтр по имени аккаунта + #опциональный параметр + #тип - строка + #account_name = "account name" + + #фильтр по максимальному размеру диска + #опциональный параметр + #тип - число + #disk_max_size = 3 + + #фильтр по статусу + #опциональный параметр + #тип - строка + #status = "started" + + #фильтр по полю shared + #опциональный параметр + #тип - булев + #shared = true + #id аккаунта для получения списка дисков #опциональный параметр #тип - число @@ -38,6 +68,16 @@ data "decort_disk_list" "dl" { #возможные типы: "b" - boot_disk, "d" - data_disk #type = "d" + #фильтр по sep id + #опциональный параметр + #тип - число + #sep_id = 1 + + #фильтр по названию pool + #опциональный параметр + #тип - строка + #pool = "pool name" + #кол-во страниц для вывода #опицональный параметр #тип - число @@ -50,5 +90,5 @@ data "decort_disk_list" "dl" { } output "test" { - value = data.decort_disk_list.dl + value = data.decort_cb_disk_list.dl } 
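Note on the cloudbroker ViNS list helper above: utilityVinsListCheckPresence now forwards the optional filters (by_id, name, account_id, rg_id, ext_ip, include_deleted, page, size) to vins.ListRequest only when the corresponding schema attribute is set. A minimal usage sketch follows; it assumes the data source is registered as decort_cb_vins_list, consistent with the other decort_cb_* data sources in these samples, and the attribute names are taken directly from the GetOk calls in the hunk above.

data "decort_cb_vins_list" "vl" {
  # all filters are optional; unset filters are simply omitted from the API request
  #by_id           = 1234
  #name            = "vins name"
  #account_id      = 100
  #rg_id           = 200
  #ext_ip          = "192.168.1.10"
  #include_deleted = true
  #page            = 1
  #size            = 50
}

output "vins_list" {
  value = data.decort_cb_vins_list.vl
}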
diff --git a/samples/cloudbroker/data_grid/main.tf b/samples/cloudbroker/data_grid/main.tf index 35156f2..75b36e1 100644 --- a/samples/cloudbroker/data_grid/main.tf +++ b/samples/cloudbroker/data_grid/main.tf @@ -26,13 +26,13 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_grid" "image" { +data "decort_cb_grid" "grid" { #id grid для получения информации #обязательный параметр #тип - число - grid_id = 1 + grid_id = 215 } output "test" { - value = data.decort_grid.image + value = data.decort_cb_grid.grid } diff --git a/samples/cloudbroker/data_grid_list/main.tf b/samples/cloudbroker/data_grid_list/main.tf index 74132bf..43260ba 100644 --- a/samples/cloudbroker/data_grid_list/main.tf +++ b/samples/cloudbroker/data_grid_list/main.tf @@ -1,7 +1,6 @@ /* Пример использования Получение списка grid - */ #Расскомментируйте этот код, @@ -27,8 +26,17 @@ provider "decort" { allow_unverified_ssl = true } +data "decort_cb_grid_list" "gl" { + #фильтр по id grid + #опциональный параметр + #тип - число + #by_id = 1 + + #название grid + #опциональный параметр + #тип - строка + #name = "grid name" -data "decort_grid_list" "gl" { #номер страницы для отображения #опциональный параметр, тип - число #если не задан - выводятся все доступные данные @@ -41,5 +49,5 @@ data "decort_grid_list" "gl" { } output "test" { - value = data.decort_grid_list.gl + value = data.decort_cb_grid_list.gl } diff --git a/samples/cloudbroker/data_image/main.tf b/samples/cloudbroker/data_image/main.tf index 1220bb0..09bd3b6 100644 --- a/samples/cloudbroker/data_image/main.tf +++ b/samples/cloudbroker/data_image/main.tf @@ -26,7 +26,7 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_image" "image" { +data "decort_cb_image" "image" { #id образа #обязательный параметр #тип - число @@ -34,5 +34,5 @@ data "decort_image" "image" { } output "test" { - value = data.decort_image.image + value = data.decort_cb_image.image } diff --git a/samples/cloudbroker/data_image_list/main.tf b/samples/cloudbroker/data_image_list/main.tf index c3efa0c..9f081b2 100644 --- a/samples/cloudbroker/data_image_list/main.tf +++ b/samples/cloudbroker/data_image_list/main.tf @@ -27,32 +27,80 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_image_list" "il" { - #номер страницы для отображения +data "decort_cb_image_list" "il" { + #фильтр по id sep #опциональный параметр #тип - число - #если не задан - выводятся все доступные данные - #page = 2 + #sep_id = 1 + + #фильтр по id образа + #опциональный параметр + #тип - число + #by_id = 100 - #размер страницы + #фильтр по имени образа + #опциональный параметр + #тип - строка + #name = "test" + + #фильтр по статусу + #опциональный параметр + #тип - строка + #status = "ENABLED" + + #фильтр по архитектуре + #опциональный параметр + #тип - строка + #architecture = "x86_64" + + #фильтр по типу образа + #опциональный параметр + #тип - строка + #type_image = "cdrom" + + #фильтр по размеру образа #опциональный параметр #тип - число - #если не задан - выводятся все доступные данные - #size = 3 + #image_size = 100 - #фильтрация образов по id хранилища + #фильтр по имени SEP + #опциональный параметр + #тип - строка + #sep_name = "test" + + #фильтр по имени Pool + #опциональный параметр + #тип - строка + #pool = "test" + + #фильтр по доступу + #опциональный параметр + #тип - булевый тип + #public = "true" + + #фильтр по hot_resize + #опциональный параметр + #тип - булевый тип + #hot_resize = "true" + + #фильтр по bootable + #опциональный параметр + #тип - булевый тип + #bootable = 
"true" + + #номер страницы для отображения #опциональный параметр #тип - число - #если не задан - выволятся все доступные элементы - #sep_id = 123 + #если не задан - выводятся все доступные данные + #page = 2 - #фильтрация образов по id доступных аккаунтов + #размер страницы #опциональный параметр #тип - число - #если не задан - выволятся все доступные элементы - #shared_with = 123 + #если не задан - выводятся все доступные данные + #size = 3 } output "test" { - value = data.decort_image_list.il + value = data.decort_cb_image_list.il } diff --git a/samples/cloudbroker/data_image_list_stacks/main.tf b/samples/cloudbroker/data_image_list_stacks/main.tf index acc41db..cd95536 100644 --- a/samples/cloudbroker/data_image_list_stacks/main.tf +++ b/samples/cloudbroker/data_image_list_stacks/main.tf @@ -25,12 +25,27 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_image_list_stacks" "im" { +data "decort_cb_image_list_stacks" "im" { #id образа #обязательный параметр #тип - число image_id = 6040 + #фильтр по имени образа + #опциональный параметр + #тип - строка + #name = "test" + + #фильтр по статусу + #опциональный параметр + #тип - строка + #status = "ENABLED" + + #фильтр по типу образа + #опциональный параметр + #тип - строка + #type_image = "cdrom" + #номер страницы для отображения информации #опциональный параметр #тип - число @@ -45,5 +60,5 @@ data "decort_image_list_stacks" "im" { } output "test" { - value = data.decort_image_list_stacks.im + value = data.decort_cb_image_list_stacks.im } diff --git a/samples/cloudbroker/data_pcidevice_list/main.tf b/samples/cloudbroker/data_pcidevice_list/main.tf index 23e1902..2e5ee60 100644 --- a/samples/cloudbroker/data_pcidevice_list/main.tf +++ b/samples/cloudbroker/data_pcidevice_list/main.tf @@ -27,7 +27,42 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_cb_pcidevice_list" "pdl" {} +data "decort_cb_pcidevice_list" "pdl" { + #фильтр по id устройства + #опциональный параметр + #тип - число + #by_id = 111 + + #фильтр по id Compute + #опциональный параметр + #тип - число + #compute_id = 123 + + #фильтр по имени устройства + #опциональный параметр + #тип - строка + #name = "name" + + #фильтр по id ресурсной группы + #опциональный параметр + #тип - число + #rg_id = 111 + + #фильтр по статусу устройства + #опциональный параметр + #тип - строка + #status = "status" + + #номер страницы для отображения + #опциональный параметр + #тип - число + #page = 2 + + #размер страницы + #опциональный параметр + #тип - число + #size = 3 +} output "test" { value = data.decort_cb_pcidevice_list.pdl.items diff --git a/samples/cloudbroker/data_sep/main.tf b/samples/cloudbroker/data_sep/main.tf index 22b2ddd..5cc4f4c 100644 --- a/samples/cloudbroker/data_sep/main.tf +++ b/samples/cloudbroker/data_sep/main.tf @@ -25,7 +25,7 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_sep" "sd" { +data "decort_cb_sep" "sd" { #id sep #обязательный параметр #тип - число @@ -34,9 +34,9 @@ data "decort_sep" "sd" { } output "test" { - value = data.decort_sep.sd + value = data.decort_cb_sep.sd } output "config" { - value = jsondecode(data.decort_sep.sd.config) + value = jsondecode(data.decort_cb_sep.sd.config) } diff --git a/samples/cloudbroker/data_sep_config/main.tf b/samples/cloudbroker/data_sep_config/main.tf index 8b5c15c..da3f737 100644 --- a/samples/cloudbroker/data_sep_config/main.tf +++ b/samples/cloudbroker/data_sep_config/main.tf @@ -25,7 +25,7 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_sep_config" "sc" { 
+data "decort_cb_sep_config" "sc" { #id sep #обязательный параметр #тип - число @@ -33,9 +33,9 @@ data "decort_sep_config" "sc" { } output "test" { - value = data.decort_sep_config.sc + value = data.decort_cb_sep_config.sc } output "config" { - value = jsondecode(data.decort_config.sc.config) + value = jsondecode(data.decort_cb_sep_config.sc.config) } diff --git a/samples/cloudbroker/data_sep_consumption/main.tf b/samples/cloudbroker/data_sep_consumption/main.tf index fe4e455..1a0e8ca 100644 --- a/samples/cloudbroker/data_sep_consumption/main.tf +++ b/samples/cloudbroker/data_sep_consumption/main.tf @@ -25,7 +25,7 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_sep_consumption" "scons" { +data "decort_cb_sep_consumption" "scons" { #id sep #обязательный параметр #тип - число @@ -33,5 +33,5 @@ data "decort_sep_consumption" "scons" { } output "test" { - value = data.decort_sep_consumption.scons + value = data.decort_cb_sep_consumption.scons } diff --git a/samples/cloudbroker/data_sep_disk_list/main.tf b/samples/cloudbroker/data_sep_disk_list/main.tf index 7749efe..5b39586 100644 --- a/samples/cloudbroker/data_sep_disk_list/main.tf +++ b/samples/cloudbroker/data_sep_disk_list/main.tf @@ -26,11 +26,12 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_sep_disk_list" "sdl" { +data "decort_cb_sep_disk_list" "sdl" { #id sep #обязательный параметр #тип - число sep_id = 1111 + #sep pool name #необязательный параметр #тип - строка @@ -38,5 +39,5 @@ data "decort_sep_disk_list" "sdl" { } output "test" { - value = data.decort_sep_disk_list.sdl + value = data.decort_cb_sep_disk_list.sdl } diff --git a/samples/cloudbroker/data_sep_list/main.tf b/samples/cloudbroker/data_sep_list/main.tf index f2bfa47..287ba5b 100644 --- a/samples/cloudbroker/data_sep_list/main.tf +++ b/samples/cloudbroker/data_sep_list/main.tf @@ -24,11 +24,47 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_sep_list" "sl" { +data "decort_cb_sep_list" "sl" { + #фильтр по sep id + #необязательный параметр + #тип - число + #by_id = 3 + + #фильтр по sep name + #необязательный параметр + #тип - строка + #name = "name" + + #фильтр по gid + #необязательный параметр + #тип - число + #gid = 1 + + #фильтр по sep type + #необязательный параметр + #тип - строка + #type = "type" + + #фильтр по provided physical node id + #необязательный параметр + #тип - число + #provided_by = 1 + + #фильтр по тех статусу + #необязательный параметр + #тип - строка + #tech_status = "status" + + #фильтр по consumed physical node id + #необязательный параметр + #тип - число + #consumed_by = 1 + #страница #необязательный параметр #тип - число #page = 3 + #размер страницы #необязательный параметр #тип - число @@ -36,5 +72,5 @@ data "decort_sep_list" "sl" { } output "test" { - value = data.decort_sep_list.sl + value = data.decort_cb_sep_list.sl } diff --git a/samples/cloudbroker/data_sep_pool/main.tf b/samples/cloudbroker/data_sep_pool/main.tf index 44dcf8d..dbfdd04 100644 --- a/samples/cloudbroker/data_sep_pool/main.tf +++ b/samples/cloudbroker/data_sep_pool/main.tf @@ -26,21 +26,20 @@ provider "decort" { allow_unverified_ssl = true } -data "decort_sep_pool" "sp" { +data "decort_cb_sep_pool" "sp" { #id sep #обязательный параметр #тип - число sep_id = 1111 + #sep pool name #обязательный параметр #тип - строка pool_name = "sep_pool" } -output "test" { - value = data.decort_sep_pool.sp -} - output "pool" { - value = jsondecode(data.decort_sep_pool.sp.pool) -} + value = { + for k, v in data.decort_cb_sep_pool.sp.pool : k => 
v + } +} \ No newline at end of file diff --git a/samples/cloudbroker/resource_account/main.tf b/samples/cloudbroker/resource_account/main.tf index e08fe7d..c80f7cf 100644 --- a/samples/cloudbroker/resource_account/main.tf +++ b/samples/cloudbroker/resource_account/main.tf @@ -60,10 +60,11 @@ resource "decort_cb_account" "acc" { #применяется при создании аккаунта и редактировании аккаунта send_access_emails = true - #имена пулов + #пары sep_id и sep_pool_name #необязательный параметр #тип - список строк #применяется при создании аккаунта и редактировании аккаунта + #каждая строка представляет собой пару, sepID_sepPoolName uniq_pools = ["sep1_poolName1", "sep2_poolName2"] #ограничение используемых ресурсов @@ -154,6 +155,11 @@ resource "decort_cb_account" "acc" { #используется при удалении аккаунта #по-умолчанию - false #permanently = true + + #флаг для восстановление аккаунта из корзины + #необязательный параметр + #тип - булев тип + #restore = true } output "test" { diff --git a/samples/cloudbroker/resource_disk/main.tf b/samples/cloudbroker/resource_disk/main.tf index b736b64..64dd85d 100644 --- a/samples/cloudbroker/resource_disk/main.tf +++ b/samples/cloudbroker/resource_disk/main.tf @@ -1,8 +1,11 @@ /* Пример использования -Получение списка доступных образов +Ресура диска: +1. Создание ресурса +2. Изменение ресурса +3. Удаление ресурса */ -#Расскомментируйте этот код, +#Раскомментируйте этот код, #и внесите необходимые правки в версию и путь, #чтобы работать с установленным вручную (не через hashicorp provider registry) провайдером @@ -15,7 +18,6 @@ terraform { } } - provider "decort" { authenticator = "decs3o" #controller_url = @@ -24,13 +26,91 @@ provider "decort" { oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } -resource "decort_disk" "acl" { + +resource "decort_cb_disk" "acl" { + #id аккаунта + #обязательный параметр + #тип - число account_id = 88366 + + #gid + #обязательный параметр + #тип - число gid = 212 + + #название диска диска + #обязательный параметр + #тип - строка disk_name = "super-disk-re" + + #тип диска + #обязательный параметр + #тип - строка + #допустимые значения: "D", "B", "T" + type = "d" + + #максимальный размер диска + #обязательный параметр + #тип - число + #значение по умолчанию 10 size_max = 20 - permanently = true - reason = "delete" + + #описание диска + #опциональный параметр + #тип - строка + #desc = "description" + + #размер ssd + #опциональный параметр + #тип - число + #значение по умолчанию 0 + #ssd_size = 1 + + #max IOPS disk can perform + #опциональный параметр + #тип - число + #значение по умолчанию 2000 + #iops = 1 + + #sep id + #опциональный параметр + #тип - число + #значение по умолчанию 0 + #sep_id = 1 + + #название pool + #опциональный параметр + #тип - строка + #pool = 1 + + #флаг для восстановления диска + #опциональный параметр + #тип - булев + #restore = true + + #флаг для удаления диска + #опциональный параметр + #тип - булев + #permanently = true + + #причина удаления диска + #опциональный параметр + #тип - строка + #reason = "delete" + + #флаг поделиться диском + #опциональный параметр + #тип - булев + #shareable = true + + #флаг отсоединения диска от машины перед удалением + #опциональный параметр + #тип - булев + #detach = true + + #настройки лимитов операций записи/чтения с диска + #опциональный параметр + #тип - блок, тип вложенных полей - число iotune { read_bytes_sec = 0 read_bytes_sec_max = 0 @@ -46,9 +126,8 @@ resource "decort_disk" "acl" { write_iops_sec = 0 write_iops_sec_max = 0 } - } output 
"test" { - value = decort_disk.acl + value = decort_cb_disk.acl } diff --git a/samples/cloudbroker/resource_extnet/main.tf b/samples/cloudbroker/resource_extnet/main.tf index cff2571..fc6cc82 100644 --- a/samples/cloudbroker/resource_extnet/main.tf +++ b/samples/cloudbroker/resource_extnet/main.tf @@ -95,7 +95,7 @@ resource "decort_cb_extnet" "new_extnet" { # Опциональный параметр # Тип - число # По-умолчанию значение 128 - pre_reservations_num = "128" + pre_reservations_num = 128 # Имя bridge Openvswitch для подключения к внешней сети # Опциональный параметр @@ -107,8 +107,66 @@ resource "decort_cb_extnet" "new_extnet" { # Тип - булев enable = true + # Сделать сеть сетью по умолчанию + # Опциональный параметр + # Тип - булев + set_default = true + # Список исключенных IP адресов # Опциональный параметр # Тип - массив строк excluded_ips = ["192.168.0.4", "192.168.0.5"] + + # Диапазон исключенных IP адресов + # Опциональный параметр + excluded_ips_range { + # Начало диапазона + # Обязательный параметр + # Тип - строка + ip_start = "192.168.0.4" + + # Окончание диапазона + # Обязательный параметр + # Тип - строка + ip_end = "192.168.0.50" + } + + # Список id аккаунтов, которым предоставлен доступ к extnet + # Опциональный параметр + # Тип - массив чисел + shared_with = [112, 123] + + # Изменение настроек ограничения трафика сети + # Значение по умолчанию 0, то есть без ограничений + # Опциональный параметр + default_qos { + # Ограничение внутреннего трафика в кбит + # Опциональный параметр + # Тип - число + in_rate = 0 + + # Ограничение внутреннего burst трафика в кбит + # Опциональный параметр + # Тип - число + in_burst = 0 + + # Ограничение внешнего трафика в кбит + # Опциональный параметр + # Тип - число + e_rate = 0 + } + + # ID stack на который происходит миграция + # Опциональный параметр + # Тип - число + migrate = 1 + + # Перезапустить сеть + # Опциональный параметр + # Тип - булев + restart = true } + +output "extnet" { + value = decort_cb_extnet.new_extnet +} \ No newline at end of file diff --git a/samples/cloudbroker/resource_image/main.tf b/samples/cloudbroker/resource_image/main.tf index a478bd1..594522d 100644 --- a/samples/cloudbroker/resource_image/main.tf +++ b/samples/cloudbroker/resource_image/main.tf @@ -31,7 +31,7 @@ provider "decort" { allow_unverified_ssl = true } -resource "decort_image" "my_image" { +resource "decort_cb_image" "my_image" { #имя образа #обязательный параметр #тип - строка @@ -66,6 +66,11 @@ resource "decort_image" "my_image" { #возможные варианты - ["KVM_X86", "SVA_KVM_X86"], ["KVM_X86"], ["SVA_KVM_X86"] drivers = ["KVM_X86", "SVA_KVM_X86"] + #поддержка hot resize + #опциональный параметр + #тип - булев тип + #hot_resize = true + #id аккаунта владельца образа #опциональный параметр #может быть использован как при создании, @@ -74,7 +79,7 @@ resource "decort_image" "my_image" { #account_id = 57252 #имя пользователя и пароль - #опциаональные параметры, + #опциональные параметры, #могут быть использованы как при создании образа, #так и при его редактировании #тип - строка @@ -82,7 +87,7 @@ resource "decort_image" "my_image" { #password = "123" #имя пользователя и пароль для загрузки бинарных данных - #опциаональные параметры, + #опциональные параметры, #могут быть использованы как при создании образа, #так и при его редактировании #тип - строка @@ -110,10 +115,20 @@ resource "decort_image" "my_image" { #тип - булево значение #enabled = true + #является ли образ загрузочным + #опциональный параметр, используется на уже созданном ресурсе + #тип - булево 
значение + #bootable = true + + #позволяет создавать образ в синхронном режиме + #опциональный параметр + #тип - булево значение + #sync_mode = true + #настройка доступа образа аккаунтам #опциональный параметр, используется на уже созданном ресурсе #тип - массив чисел - #пустой маасив - удаление всех доступов, если они были + #пустой массив - удаление всех доступов, если они были #shared_with = [28096, 57121] #установка computeci @@ -126,7 +141,7 @@ resource "decort_image" "my_image" { #опциональный параметр, используется на уже созданном ресурсе #тип - массив строк #для удаления всех доступных стаков, необходимо передать пустой массив - #enabled_stacks = ["9"] + #enabled_stacks = [9] #мгновенное удаление #опциональный параметр, можно использовать перед удалением @@ -137,9 +152,8 @@ resource "decort_image" "my_image" { #опциональный параметр, можно использовать перед удалением #тип - строка #reason = "test" - } output "test" { - value = decort_image.my_image + value = decort_cb_image.my_image } diff --git a/samples/cloudbroker/resource_image_cdrom/main.tf b/samples/cloudbroker/resource_image_cdrom/main.tf index 9f2a15c..47e3607 100644 --- a/samples/cloudbroker/resource_image_cdrom/main.tf +++ b/samples/cloudbroker/resource_image_cdrom/main.tf @@ -30,7 +30,7 @@ provider "decort" { allow_unverified_ssl = true } -resource "decort_cdrom_image" "my_image" { +resource "decort_cb_cdrom_image" "my_image" { #имя образа #обязательный параметр #тип - строка @@ -53,23 +53,20 @@ resource "decort_cdrom_image" "my_image" { #возможные варианты - ["KVM_X86", "SVA_KVM_X86"], ["KVM_X86"], ["SVA_KVM_X86"] drivers = ["KVM_X86", "SVA_KVM_X86"] + #поддержка hot resize + #опциональный параметр + #тип - булев тип + #hot_resize = true + #id аккаунта владельца образа #опциональный параметр #может быть использован как при создании, #так и при редактировании образа #тип данных - число - account_id = 57252 - - #имя пользователя и пароль - #опциаональные параметры, - #могут быть использованы как при создании образа, - #так и при его редактировании - #тип - строка - username = "Valera" - password = "123" + #account_id = 57252 #имя пользователя и пароль для загрузки бинарных данных - #опциаональные параметры, + #опциональные параметры, #могут быть использованы как при создании образа, #так и при его редактировании #тип - строка @@ -90,7 +87,7 @@ resource "decort_cdrom_image" "my_image" { #опциаональный параметр, используется при создании #тип - строка #доступные значения: "X86_64" - architecture = "X86_64" + #architecture = "X86_64" #доступность образа #опциональный параметр, используется на уже созданном ресурсе @@ -111,9 +108,14 @@ resource "decort_cdrom_image" "my_image" { #доступные стаки #опциональный параметр, используется на уже созданном ресурсе - #тип - массив строк + #тип - массив чисел #для удаления всех доступных стаков, необходимо передать пустой массив - #enabled_stacks = ["9"] + #enabled_stacks = [9] + + #является ли образ загрузочным + #опциональный параметр, используется на уже созданном ресурсе + #тип - булево значение + #bootable = true #мгновенное удаление #опциональный параметр, можно использовать перед удалением @@ -123,5 +125,5 @@ resource "decort_cdrom_image" "my_image" { } output "test" { - value = decort_cdrom_image.my_image + value = decort_cb_cdrom_image.my_image } diff --git a/samples/cloudbroker/resource_pcidevice/main.tf b/samples/cloudbroker/resource_pcidevice/main.tf index 083a91b..96171e1 100644 --- a/samples/cloudbroker/resource_pcidevice/main.tf +++ 
b/samples/cloudbroker/resource_pcidevice/main.tf @@ -67,22 +67,19 @@ resource "decort_cb_pcidevice" "pd" { #опциональный параметр #может использоваться на созданном ресурсе #тип - булево значение - #force = true + #force_disable = true #принудительное удаление устройства #опциональный параметр #используется при удалении ресурса #тип - булево значение - #force = true - + #force_delete = true #id устройства #опциональный параметр #позволяет "восстановить" состояние ресурса терраформа на локальной машине #тип - число #device_id = 86 - - } output "test" { diff --git a/samples/cloudbroker/resource_sep/main.tf b/samples/cloudbroker/resource_sep/main.tf index ea97c07..9a2f191 100644 --- a/samples/cloudbroker/resource_sep/main.tf +++ b/samples/cloudbroker/resource_sep/main.tf @@ -30,7 +30,7 @@ provider "decort" { allow_unverified_ssl = true } -resource "decort_sep" "s" { +resource "decort_cb_sep" "s" { #grid id #обязательный параметр #тип - число @@ -50,7 +50,7 @@ resource "decort_sep" "s" { #описание sep #необязательный параметр, используется при создании ресурса #тип - строка - desc = "rrrrr" + #desc = "rrrrr" #конфигурация sep #необязательный параметр, мб применен при создании или редактировании sep @@ -59,7 +59,7 @@ resource "decort_sep" "s" { #config = file("./config.json") #изменение поля в конфигурации - #необязательный параметр, мб применен на уже созданном sep + #необязательный параметр, может быть применен на уже созданном sep #тип - объект #внимание, во избежание конфликтов не использовать с полем config /* @@ -81,23 +81,51 @@ resource "decort_sep" "s" { } */ + #предоставление/отключение доступа указанных аккаунтов к sep + #необязательный параметр, используется при редактировании и создании ресурса + #тип - массив чисел + #account_ids = [12, 245] + + #предоставление/отключение доступа к пулу на sep + #необязательный параметр, может быть применен на уже созданном sep + #тип - объект + /* + access_to_pool { + #имя pool + #обязательный параметр + #тип - строка + pool_name = "pool name" + + #id аккаунта + #необязательный параметр + #тип - число + #account_id_pool = 123 + + #id ресурсной группы + #необязательный параметр + #тип - число + #rg_id = 1234 + } + */ + #доступность sep - #необязательный параметр, мб применен на уже созданном ресурсе + #необязательный параметр, может быть применен на уже созданном ресурсе #тип - булево значение + #дефолтное значение false #enable = false #использование нодами - #необязательный параметр, используется при редактировании ресурса + #необязательный параметр, используется при редактировании и создании ресурса #тип - массив чисел #consumed_by = [] #обновление лимита объема - #необязательный параметр, применяется на уж созданнном ресурсе + #необязательный параметр, применяется на уж созданном ресурсе #тип - булев тип #upd_capacity_limit = true #id provided nodes - #необязательный параметр, применяется на уже созданном ресурсе + #необязательный параметр, используется при редактировании и создании ресурса #тип - массив чисел #provided_by = [16, 14, 15] @@ -105,20 +133,20 @@ resource "decort_sep" "s" { #необязательный параметр, применяется на уже созданном ресурсе #тип - булев тип #используется в связке с clear_physically + #дефолтное значение true #decommission = true #физическое очищение nodes #необязательный параметр, используется при удалении ресурса #тип - булев тип + #внимание, во избежание конфликтов использовать только в связке с полем decommission = true #clear_physically = false - } output "test" { - value = decort_sep.s + value = decort_cb_sep.s } 
output "config" { - value = jsondecode(decort_sep.s.config) - + value = jsondecode(decort_cb_sep.s.config) } diff --git a/samples/cloudbroker/resource_sep_config/main.tf b/samples/cloudbroker/resource_sep_config/main.tf index c5a3a31..c1d1a1e 100644 --- a/samples/cloudbroker/resource_sep_config/main.tf +++ b/samples/cloudbroker/resource_sep_config/main.tf @@ -29,19 +29,19 @@ provider "decort" { allow_unverified_ssl = true } -resource "decort_sep_config" "sc" { +resource "decort_cb_sep_config" "sc" { #id sep #обязательный параметр #тип - число sep_id = 1111 #конфигурация - #необязательное поле, используется для изменения конфигурации + #необязательное поле, используется для создания и изменения конфигурации #тип - json-строка #config = file("./config.json") - #редактироваие поля - #неоябазательный параметр, используется при редактировании ресурса + #редактирование поля + #необязательный параметр, используется при редактировании ресурса #тип - объект /* field_edit { @@ -65,9 +65,9 @@ resource "decort_sep_config" "sc" { } output "sep_config" { - value = decort_sep_config.sc + value = decort_cb_sep_config.sc } output "sep_config_json" { - value = jsondecode(decort_sep_config.sc.config) + value = jsondecode(decort_cb_sep_config.sc.config) } diff --git a/samples/cloudbroker/resource_virtual_image/main.tf b/samples/cloudbroker/resource_virtual_image/main.tf index 0d07316..13e4bc6 100644 --- a/samples/cloudbroker/resource_virtual_image/main.tf +++ b/samples/cloudbroker/resource_virtual_image/main.tf @@ -30,7 +30,7 @@ provider "decort" { allow_unverified_ssl = true } -resource "decort_virtual_image" "my_image" { +resource "decort_cb_virtual_image" "my_image" { #имя виртуального образа #обязательный параметр #тип - строка @@ -38,13 +38,9 @@ resource "decort_virtual_image" "my_image" { #id образа, на основе котрого будет создан виртуальный #обязательный параметр + #может быть также использован при редактировании #тип - число - target_id = 6125 - - #изменение связи виртуального образа - #опциональный параметр, используется при редактировании ресурса - #тип - число - #link_to = 6062 + link_to = 6125 #id аккаунта владельца образа #опциональный параметр @@ -52,8 +48,18 @@ resource "decort_virtual_image" "my_image" { #тип данных - число #account_id = 57252 + #является ли образ загрузочным + #опциональный параметр, используется на уже созданном ресурсе + #тип - булево значение + #bootable = true + + #поддержка hot resize + #опциональный параметр + #тип - булев тип + #hot_resize = true + #имя пользователя и пароль - #опциаональные параметры, + #опциональные параметры, #могут быть использованы при редактировании образа #тип - строка #username = "Valera" @@ -80,7 +86,7 @@ resource "decort_virtual_image" "my_image" { #опциональный параметр, используется на уже созданном ресурсе #тип - массив строк #для удаления всех доступных стаков, необходимо передать пустой массив - #enabled_stacks = ["9"] + #enabled_stacks = [9] #мгновенное удаление #опциональный параметр, можно использовать перед удалением @@ -94,5 +100,5 @@ resource "decort_virtual_image" "my_image" { } output "test" { - value = decort_virtual_image.my_image + value = decort_cb_virtual_image.my_image }