4.7.4
loskutovanl 2 years ago
parent 294680282e
commit e2ee45ee14

@ -1,8 +1,24 @@
## Version 4.5.0 ## Version 4.5.1
## Feature ## Feature
- Added support for authorization using the Basis.Virtual Security system. Add client and config - Refactoring BVS config
- Added and updated data sources and resources for cloudbroker groups:
* account
* audit
* disks
* extnet
* flipgroup
* grid
* image
* k8ci
* k8s
* kvmvm (compute)
* lb (load balancer)
* pcidevice
* rg (resource group)
* sep
* stack
* vins
### Bugfix ### Bugfix
- Fixed bservice and rg schema and flatten in cloudapi - Fixed description update for compute in cloudapi/kvmvm
- Add stateUpgrader for k8s_cp in cloudapi/k8s

@ -7,7 +7,7 @@ ZIPDIR = ./zip
BINARY=${NAME} BINARY=${NAME}
WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH} WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
MAINPATH = ./cmd/decort/ MAINPATH = ./cmd/decort/
VERSION=4.5.0 VERSION=4.5.1
OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH) OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
FILES = ${BINARY}_${VERSION}_darwin_amd64\ FILES = ${BINARY}_${VERSION}_darwin_amd64\

@ -27,23 +27,42 @@ Terraform provider для платформы Digital Energy Cloud Orchestration
## Возможности провайдера ## Возможности провайдера
- Работа с Compute instances, - Режим пользователя:
- Работа с disks, - Работа с accounts,
- Работа с k8s, - Работа с bservice,
- Работа с image, - Работа с disks,
- Работа с flipgroups, - Работа с extnets,
- Работа с stacks, - Работа с flipgroups,
- Работа с reource groups, - Работа с image,
- Работа с VINS, - Работа с k8s,
- Работа с pfw, - Работа с Compute instances,
- Работа с accounts, - Работа с load balancer,
- Работа с snapshots, - Работа с locations,
- Работа с bservice, - Работа с pfw,
- Работа с extnets, - Работа с resource groups,
- Работа с locations, - Работа с snapshots,
- Работа с load balancer. - Работа с stacks,
- Работа с VINS.
Вики проекта: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
- Режим администратора:
- Работа с accounts,
- Работа с audits,
- Работа с disks,
- Работа с extnets,
- Работа с flipgroups,
- Работа с grids,
- Работа с images,
- Работа с k8ci,
- Работа с k8s,
- Работа с Compute instances,
- Работа с load balancer,
- Работа с pci device,
- Работа с resource groups,
- Работа с seps,
- Работа с stacks,
- Работа с VINS.
Со списком и описанием функционала всех групп можно ознакомиться на Вики проекта: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
## Установка ## Установка
Начиная с версии провайдера `4.3.0` в релизном архиве находятся скрипты-инсталляторы. Начиная с версии провайдера `4.3.0` в релизном архиве находятся скрипты-инсталляторы.

@ -58,7 +58,7 @@ Two ways for starting:
```terraform ```terraform
provider "decort" { provider "decort" {
authenticator = "oauth2" authenticator = "decs3o"
#controller_url = <DECORT_CONTROLLER_URL> #controller_url = <DECORT_CONTROLLER_URL>
controller_url = "https://ds1.digitalenergy.online" controller_url = "https://ds1.digitalenergy.online"
#oauth2_url = <DECORT_SSO_URL> #oauth2_url = <DECORT_SSO_URL>

@ -9,8 +9,7 @@ require (
github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1
github.com/sirupsen/logrus v1.9.0 github.com/sirupsen/logrus v1.9.0
golang.org/x/net v0.16.0 golang.org/x/net v0.16.0
golang.org/x/oauth2 v0.13.0 repository.basistech.ru/BASIS/decort-golang-sdk v1.7.2
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.0
) )
require ( require (
@ -68,9 +67,9 @@ require (
github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect
github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect
github.com/zclconf/go-cty v1.12.1 // indirect github.com/zclconf/go-cty v1.12.1 // indirect
golang.org/x/crypto v0.14.0 // indirect golang.org/x/crypto v0.15.0 // indirect
golang.org/x/sys v0.13.0 // indirect golang.org/x/sys v0.14.0 // indirect
golang.org/x/text v0.13.0 // indirect golang.org/x/text v0.14.0 // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect
google.golang.org/grpc v1.51.0 // indirect google.golang.org/grpc v1.51.0 // indirect

@ -249,8 +249,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -268,8 +268,6 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos=
golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -295,8 +293,8 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
@ -306,8 +304,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
@ -341,5 +339,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.0 h1:aGDg9hQXs70m4Llx8hw9Y50M1C2apDqSsNMsE8isyIM= repository.basistech.ru/BASIS/decort-golang-sdk v1.7.2 h1:Ll8MBcmDcElxxgxOUUaYbbafTSbIm4dcPEDLl4fdF8Q=
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.0/go.mod h1:mwcpnw0dT/PQf6AOJShjlbDNDfNitr0WM77LNKL1qjo= repository.basistech.ru/BASIS/decort-golang-sdk v1.7.2/go.mod h1:7fj8sgGZFiiExewQeqckCS4WxwOmU0oP6BO6mi1Lpkw=

@ -32,7 +32,6 @@ import (
"strings" "strings"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"golang.org/x/oauth2"
decort "repository.basistech.ru/BASIS/decort-golang-sdk" decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/config" "repository.basistech.ru/BASIS/decort-golang-sdk/config"
"repository.basistech.ru/BASIS/decort-golang-sdk/interfaces" "repository.basistech.ru/BASIS/decort-golang-sdk/interfaces"
@ -60,7 +59,10 @@ type ControllerCfg struct {
bvs_user string // required for bvs mode bvs_user string // required for bvs mode
bvs_password string // required for bvs mode bvs_password string // required for bvs mode
domain string // required for bvs mode domain string // required for bvs mode
token oauth2.Token // obtained from BVS provider on successful login in bvs mode token config.Token // obtained from BVS provider on successful login in bvs mode
path_cfg string // the path of the configuration file entry
path_token string // the path of the token file entry
time_to_refresh int64 // the number of minutes before the expiration of the token, a refresh will be made
legacy_user string // required for legacy mode legacy_user string // required for legacy mode
legacy_password string // required for legacy mode legacy_password string // required for legacy mode
legacy_sid string // obtained from DECORT controller on successful login in legacy mode legacy_sid string // obtained from DECORT controller on successful login in legacy mode
@ -99,7 +101,10 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
app_secret: d.Get("app_secret").(string), app_secret: d.Get("app_secret").(string),
oauth2_url: d.Get("oauth2_url").(string), oauth2_url: d.Get("oauth2_url").(string),
decort_username: "", decort_username: "",
token: oauth2.Token{}, token: config.Token{},
path_cfg: d.Get("path_cfg").(string),
path_token: d.Get("path_token").(string),
time_to_refresh: int64(d.Get("time_to_refresh").(int)),
} }
allow_unverified_ssl := d.Get("allow_unverified_ssl").(bool) allow_unverified_ssl := d.Get("allow_unverified_ssl").(bool)
@ -242,6 +247,9 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
Password: ret_config.bvs_password, Password: ret_config.bvs_password,
Domain: ret_config.domain, Domain: ret_config.domain,
Token: ret_config.token, Token: ret_config.token,
PathCfg: ret_config.path_cfg,
PathToken: ret_config.path_token,
TimeToRefresh: ret_config.time_to_refresh,
} }
ret_config.caller = decort.NewBVS(sdkConf) ret_config.caller = decort.NewBVS(sdkConf)

@ -38,17 +38,23 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins"
cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account" cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account"
// cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks" cb_audit "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/audit"
cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks"
cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet" cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet"
// cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup" cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup"
// cb_grid "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/grid" cb_grid "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/grid"
// cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image" cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image"
// cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb" cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm"
// cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice" cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb"
// cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg" cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice"
// cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep" cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg"
// cb_stack "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/stack" cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep"
cb_stack "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/stack"
cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins"
// cb_vgpu "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vgpu" // cb_vgpu "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vgpu"
cb_k8ci "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8ci"
cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s"
) )
func newDataSourcesMap() map[string]*schema.Resource { func newDataSourcesMap() map[string]*schema.Resource {
@ -144,7 +150,7 @@ func newDataSourcesMap() map[string]*schema.Resource {
"decort_cb_account": cb_account.DataSourceAccount(), "decort_cb_account": cb_account.DataSourceAccount(),
"decort_cb_account_list": cb_account.DataSourceAccountList(), "decort_cb_account_list": cb_account.DataSourceAccountList(),
"decort_cb_account_computes_list": cb_account.DataSourceAccountComputesList(), "decort_cb_account_computes_list": cb_account.DataSourceAccountComputesList(),
"decort_cb_account_deleted_list": cb_account.DataSourceAccountDeletedList(), "decort_cb_account_list_deleted": cb_account.DataSourceAccountDeletedList(),
"decort_cb_account_disks_list": cb_account.DataSourceAccountDisksList(), "decort_cb_account_disks_list": cb_account.DataSourceAccountDisksList(),
"decort_cb_account_flipgroups_list": cb_account.DataSourceAccountFlipGroupsList(), "decort_cb_account_flipgroups_list": cb_account.DataSourceAccountFlipGroupsList(),
"decort_cb_account_rg_list": cb_account.DataSourceAccountRGList(), "decort_cb_account_rg_list": cb_account.DataSourceAccountRGList(),
@ -152,10 +158,97 @@ func newDataSourcesMap() map[string]*schema.Resource {
"decort_cb_account_resource_consumption_get": cb_account.DataSourceAccountResourceConsumptionGet(), "decort_cb_account_resource_consumption_get": cb_account.DataSourceAccountResourceConsumptionGet(),
"decort_cb_account_resource_consumption_list": cb_account.DataSourceAccountResourceConsumptionList(), "decort_cb_account_resource_consumption_list": cb_account.DataSourceAccountResourceConsumptionList(),
"decort_cb_account_audits_list": cb_account.DataSourceAccountAuditsList(), "decort_cb_account_audits_list": cb_account.DataSourceAccountAuditsList(),
"decort_cb_audit": cb_audit.DataSourceAudit(),
"decort_cb_audit_list": cb_audit.DataSourceAuditList(),
"decort_cb_audit_linked_jobs": cb_audit.DataSourceAuditLinkedJobs(),
"decort_cb_extnet": cb_extnet.DataSourceExtnetCB(), "decort_cb_extnet": cb_extnet.DataSourceExtnetCB(),
"decort_cb_extnet_list": cb_extnet.DataSourceExtnetListCB(), "decort_cb_extnet_list": cb_extnet.DataSourceExtnetListCB(),
"decort_cb_extnet_default": cb_extnet.DataSourceExtnetDefaultCB(), "decort_cb_extnet_default": cb_extnet.DataSourceExtnetDefaultCB(),
"decort_cb_extnet_static_route_list": cb_extnet.DataSourceStaticRouteList(), "decort_cb_extnet_static_route_list": cb_extnet.DataSourceStaticRouteList(),
"decort_cb_extnet_static_route": cb_extnet.DataSourceStaticRoute(), "decort_cb_extnet_static_route": cb_extnet.DataSourceStaticRoute(),
"decort_cb_image": cb_image.DataSourceImage(),
"decort_cb_grid": cb_grid.DataSourceGrid(),
"decort_cb_grid_get_status": cb_grid.DataSourceGridGetStatus(),
"decort_cb_grid_post_status": cb_grid.DataSourceGridPostStatus(),
"decort_cb_grid_get_diagnosis": cb_grid.DataSourceGridGetDiagnosis(),
"decort_cb_grid_post_diagnosis": cb_grid.DataSourceGridPostDiagnosis(),
"decort_cb_grid_list": cb_grid.DataSourceGridList(),
"decort_cb_grid_list_emails": cb_grid.DataSourceGridListEmails(),
"decort_cb_grid_list_consumption": cb_grid.DataSourceGridListConsumption(),
"decort_cb_grid_get_consumption": cb_grid.DataSourceGridGetConsumption(),
"decort_cb_image_list": cb_image.DataSourceImageList(),
"decort_cb_image_list_stacks": cb_image.DataSourceImageListStacks(),
"decort_cb_kvmvm": cb_kvmvm.DataSourceCompute(),
"decort_cb_kvmvm_affinity_relations": cb_kvmvm.DataSourceComputeAffinityRelations(),
"decort_cb_kvmvm_audits": cb_kvmvm.DataSourceComputeAudits(),
"decort_cb_kvmvm_boot_order_get": cb_kvmvm.DataSourceComputeBootOrderGet(),
"decort_cb_kvmvm_get_audits": cb_kvmvm.DataSourceComputeGetAudits(),
"decort_cb_kvmvm_get_console_url": cb_kvmvm.DataSourceComputeGetConsoleUrl(),
"decort_cb_kvmvm_get_log": cb_kvmvm.DataSourceComputeGetLog(),
"decort_cb_kvmvm_list": cb_kvmvm.DataSourceComputeList(),
"decort_cb_kvmvm_list_deleted": cb_kvmvm.DataSourceComputeListDeleted(),
"decort_cb_kvmvm_migrate_storage_info": cb_kvmvm.DataSourceComputeMigrateStorageInfo(),
"decort_cb_kvmvm_pci_device_list": cb_kvmvm.DataSourceComputePCIDeviceList(),
"decort_cb_kvmvm_pfw_list": cb_kvmvm.DataSourceComputePfwList(),
"decort_cb_kvmvm_snapshot_list": cb_kvmvm.DataSourceComputeSnapshotList(),
"decort_cb_kvmvm_snapshot_usage": cb_kvmvm.DataSourceComputeSnapshotUsage(),
"decort_cb_kvmvm_user_list": cb_kvmvm.DataSourceComputeUserList(),
"decort_cb_kvmvm_vgpu_list": cb_kvmvm.DataSourceComputeVGPUList(),
"decort_cb_disk": cb_disks.DataSourceDisk(),
"decort_cb_disk_list": cb_disks.DataSourceDiskList(),
"decort_cb_disk_list_deleted": cb_disks.DataSourceDiskListDeleted(),
"decort_cb_disk_list_types": cb_disks.DataSourceDiskListTypes(),
"decort_cb_disk_list_types_detailed": cb_disks.DataSourceDiskListTypesDetailed(),
"decort_cb_disk_list_unattached": cb_disks.DataSourceDiskListUnattached(),
"decort_cb_disk_snapshot": cb_disks.DataSourceDiskSnapshot(),
"decort_cb_disk_snapshot_list": cb_disks.DataSourceDiskSnapshotList(),
"decort_cb_pcidevice": cb_pcidevice.DataSourcePcidevice(),
"decort_cb_pcidevice_list": cb_pcidevice.DataSourcePcideviceList(),
"decort_cb_rg": cb_rg.DataSourceResgroup(),
"decort_cb_rg_affinity_group_computes": cb_rg.DataSourceRgAffinityGroupComputes(),
"decort_cb_rg_affinity_groups_get": cb_rg.DataSourceRgAffinityGroupsGet(),
"decort_cb_rg_affinity_groups_list": cb_rg.DataSourceRgAffinityGroupsList(),
"decort_cb_rg_resource_consumption_get": cb_rg.DataSourceRGResourceConsumptionGet(),
"decort_cb_rg_resource_consumption_list": cb_rg.DataSourceRGResourceConsumptionList(),
"decort_cb_rg_audits": cb_rg.DataSourceRgAudits(),
"decort_cb_rg_list": cb_rg.DataSourceRgList(),
"decort_cb_rg_list_deleted": cb_rg.DataSourceRgListDeleted(),
"decort_cb_rg_list_computes": cb_rg.DataSourceRgListComputes(),
"decort_cb_rg_list_lb": cb_rg.DataSourceRgListLb(),
"decort_cb_rg_list_pfw": cb_rg.DataSourceRgListPfw(),
"decort_cb_rg_list_vins": cb_rg.DataSourceRgListVins(),
"decort_cb_rg_usage": cb_rg.DataSourceRgUsage(),
"decort_cb_sep_list": cb_sep.DataSourceSepList(),
"decort_cb_sep": cb_sep.DataSourceSep(),
"decort_cb_sep_consumption": cb_sep.DataSourceSepConsumption(),
"decort_cb_sep_disk_list": cb_sep.DataSourceSepDiskList(),
"decort_cb_sep_config": cb_sep.DataSourceSepConfig(),
"decort_cb_sep_pool": cb_sep.DataSourceSepPool(),
"decort_cb_lb": cb_lb.DataSourceLB(),
"decort_cb_lb_list": cb_lb.DataSourceLBList(),
"decort_cb_lb_list_deleted": cb_lb.DataSourceLBListDeleted(),
"decort_cb_flipgroup_list": cb_flipgroup.DataSourceFlipgroupList(),
"decort_cb_flipgroup": cb_flipgroup.DataSourceFlipgroup(),
"decort_cb_stack_list": cb_stack.DataSourceStacksList(),
"decort_cb_stack": cb_stack.DataSourceStack(),
"decort_cb_vins": cb_vins.DataSourceVins(),
"decort_cb_vins_list": cb_vins.DataSourceVinsList(),
"decort_cb_vins_audits": cb_vins.DataSourceVinsAudits(),
"decort_cb_vins_ip_list": cb_vins.DataSourceVinsIpList(),
"decort_cb_vins_list_deleted": cb_vins.DataSourceVinsListDeleted(),
"decort_cb_vins_ext_net_list": cb_vins.DataSourceVinsExtNetList(),
"decort_cb_vins_nat_rule_list": cb_vins.DataSourceVinsNatRuleList(),
"decort_cb_vins_static_route": cb_vins.DataSourceStaticRoute(),
"decort_cb_vins_static_route_list": cb_vins.DataSourceStaticRouteList(),
"decort_cb_k8ci": cb_k8ci.DataSourceK8CI(),
"decort_cb_k8ci_list": cb_k8ci.DataSourceK8CIList(),
"decort_cb_k8ci_list_deleted": cb_k8ci.DataSourceK8CIListDeleted(),
"decort_cb_k8s": cb_k8s.DataSourceK8s(),
"decort_cb_k8s_list": cb_k8s.DataSourceK8sList(),
"decort_cb_k8s_list_deleted": cb_k8s.DataSourceK8sListDeleted(),
"decort_cb_k8s_wg": cb_k8s.DataSourceK8sWg(),
"decort_cb_k8s_wg_cloud_init": cb_k8s.DataSourceK8sWgCloudInit(),
"decort_cb_k8s_wg_list": cb_k8s.DataSourceK8sWgList(),
"decort_cb_k8s_computes": cb_k8s.DataSourceK8sComputes(),
} }
} }

@ -122,6 +122,24 @@ func Provider() *schema.Provider {
Default: false, Default: false,
Description: "If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only!", Description: "If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only!",
}, },
"path_cfg": {
Type: schema.TypeString,
Optional: true,
Description: "The path of the configuration file entry",
},
"path_token": {
Type: schema.TypeString,
Optional: true,
Description: "The path of the token file entry",
},
"time_to_refresh": {
Type: schema.TypeInt,
Optional: true,
Description: "The number of minutes before the expiration of the token, a refresh will be made",
},
}, },
ResourcesMap: newResourcesMap(), ResourcesMap: newResourcesMap(),

@ -36,19 +36,18 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins"
cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account" cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account"
// cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks" cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks"
cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet" cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet"
// cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup" cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup"
// cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image" cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image"
// cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s" cb_k8ci "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8ci"
// cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm" cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s"
// cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb" cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm"
// cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice" cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb"
// cb_pfw "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pfw" cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice"
// cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg" cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg"
// cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep" cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep"
// cb_snapshot "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/snapshot" cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins"
// cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins"
) )
func newResourcesMap() map[string]*schema.Resource { func newResourcesMap() map[string]*schema.Resource {
@ -76,29 +75,29 @@ func newResourcesMap() map[string]*schema.Resource {
"decort_flipgroup": flipgroup.ResourceFlipgroup(), "decort_flipgroup": flipgroup.ResourceFlipgroup(),
"decort_vins_static_route": vins.ResourceStaticRoute(), "decort_vins_static_route": vins.ResourceStaticRoute(),
"decort_cb_account": cb_account.ResourceAccount(), "decort_cb_account": cb_account.ResourceAccount(),
"decort_cb_extnet": cb_extnet.ResourceExtnetCB(), "decort_cb_extnet": cb_extnet.ResourceExtnetCB(),
// "decort_cb_disk": cb_disks.ResourceDisk(),
// "decort_cb_image": cb_image.ResourceImage(),
// "decort_cb_virtual_image": cb_image.ResourceVirtualImage(),
// "decort_cb_cdrom_image": cb_image.ResourceCDROMImage(),
// "decort_cb_delete_images": cb_image.ResourceDeleteImages(),
// "decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(),
// "decort_cb_sep": cb_sep.ResourceSep(),
// "decort_cb_sep_config": cb_sep.ResourceSepConfig(),
// "decort_cb_resgroup": cb_rg.ResourceResgroup(),
// "decort_cb_kvmvm": cb_kvmvm.ResourceCompute(),
// "decort_cb_vins": cb_vins.ResourceVins(),
// "decort_cb_pfw": cb_pfw.ResourcePfw(),
// "decort_cb_k8s": cb_k8s.ResourceK8s(),
// "decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(),
// "decort_cb_snapshot": cb_snapshot.ResourceSnapshot(),
// "decort_cb_flipgroup": cb_flipgroup.ResourceFlipgroup(),
// "decort_cb_lb": cb_lb.ResourceLB(),
// "decort_cb_lb_backend": cb_lb.ResourceLBBackend(),
// "decort_cb_lb_backend_server": cb_lb.ResourceLBBackendServer(),
// "decort_cb_lb_frontend": cb_lb.ResourceLBFrontend(),
// "decort_cb_lb_frontend_bind": cb_lb.ResourceLBFrontendBind(),
"decort_cb_extnet_static_route": cb_extnet.ResourceStaticRoute(), "decort_cb_extnet_static_route": cb_extnet.ResourceStaticRoute(),
"decort_cb_disk": cb_disks.ResourceDisk(),
"decort_cb_disk_snapshot": cb_disks.ResourceDiskSnapshot(),
"decort_cb_image": cb_image.ResourceImage(),
"decort_cb_virtual_image": cb_image.ResourceVirtualImage(),
"decort_cb_cdrom_image": cb_image.ResourceCDROMImage(),
"decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(),
"decort_cb_sep": cb_sep.ResourceSep(),
"decort_cb_sep_config": cb_sep.ResourceSepConfig(),
"decort_cb_kvmvm": cb_kvmvm.ResourceCompute(),
"decort_cb_vins": cb_vins.ResourceVins(),
"decort_cb_k8ci": cb_k8ci.ResourceK8CI(),
"decort_cb_k8s_cp": cb_k8s.ResourceK8sCP(),
"decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(),
"decort_cb_vins_static_route": cb_vins.ResourceStaticRoute(),
"decort_cb_flipgroup": cb_flipgroup.ResourceFlipgroup(),
"decort_cb_lb": cb_lb.ResourceLB(),
"decort_cb_lb_backend": cb_lb.ResourceLBBackend(),
"decort_cb_lb_backend_server": cb_lb.ResourceLBBackendServer(),
"decort_cb_lb_frontend": cb_lb.ResourceLBFrontend(),
"decort_cb_lb_frontend_bind": cb_lb.ResourceLBFrontendBind(),
"decort_cb_rg": cb_rg.ResourceResgroup(),
} }
} }

@ -142,7 +142,7 @@ func utilityK8sWgListCheckPresence(ctx context.Context, d *schema.ResourceData,
func utilityK8sWgCloudInitCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) { func utilityK8sWgCloudInitCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := k8s.GetWorkerNodesMetaDataRequest{ req := k8s.GetWorkerNodesMetaDataRequest{
K8SID: uint64(d.Get("k8s_id").(int)), K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: uint64(d.Get("wg_id").(int)), WorkersGroupID: uint64(d.Get("wg_id").(int)),
} }

@ -735,7 +735,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
Name: d.Get("name").(string), Name: d.Get("name").(string),
} }
if desc, ok := d.GetOk("desc"); ok { if desc, ok := d.GetOk("description"); ok {
req.Description = desc.(string) req.Description = desc.(string)
} }
@ -1613,15 +1613,15 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.", Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.",
}, },
"rg_id": { "rg_id": {
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
// ForceNew: true, // ForceNew: true,
ValidateFunc: validation.IntAtLeast(1), ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the resource group where this compute should be deployed.", Description: "ID of the resource group where this compute should be deployed.",
}, },
"driver": { "driver": {
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
// ForceNew: true, // ForceNew: true,
StateFunc: statefuncs.StateFuncToUpper, StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating

@ -44,6 +44,7 @@ import (
func dataSourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
acc, err := utilityAccountCheckPresence(ctx, d, m) acc, err := utilityAccountCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -67,174 +68,3 @@ func DataSourceAccount() *schema.Resource {
Schema: dataSourceAccountSchemaMake(), Schema: dataSourceAccountSchemaMake(),
} }
} }
func dataSourceAccountSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
},
"dc_location": {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"explicit": {
Type: schema.TypeBool,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"right": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"user_group_id": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"company": {
Type: schema.TypeString,
Computed: true,
},
"companyurl": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_parameter": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deactivation_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"displayname": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"resource_limits": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cu_c": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_d": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_dm": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_i": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_m": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
"resource_types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"send_access_emails": {
Type: schema.TypeBool,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"uniq_pools": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"version": {
Type: schema.TypeInt,
Computed: true,
},
"vins": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
}
}

@ -43,6 +43,7 @@ import (
func dataSourceAccountAuditsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountAuditsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountAuditsList, err := utilityAccountAuditsListCheckPresence(ctx, d, m) accountAuditsList, err := utilityAccountAuditsListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -53,46 +54,6 @@ func dataSourceAccountAuditsListRead(ctx context.Context, d *schema.ResourceData
return nil return nil
} }
func dataSourceAccountAuditsListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"call": {
Type: schema.TypeString,
Computed: true,
},
"responsetime": {
Type: schema.TypeFloat,
Computed: true,
},
"statuscode": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeFloat,
Computed: true,
},
"user": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
}
return res
}
func DataSourceAccountAuditsList() *schema.Resource { func DataSourceAccountAuditsList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -43,6 +43,7 @@ import (
func dataSourceAccountComputesListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountComputesListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountComputesList, err := utilityAccountComputesListCheckPresence(ctx, d, m) accountComputesList, err := utilityAccountComputesListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -54,161 +55,6 @@ func dataSourceAccountComputesListRead(ctx context.Context, d *schema.ResourceDa
return nil return nil
} }
func dataSourceAccountComputesListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"compute_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by compute ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by compute name",
},
"rg_name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by RG name",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by RG ID",
},
"tech_status": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by tech. status",
},
"ip_address": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by IP address",
},
"extnet_name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by extnet name",
},
"extnet_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by extnet ID",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"cpus": {
Type: schema.TypeInt,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"registered": {
Type: schema.TypeBool,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"total_disks_size": {
Type: schema.TypeInt,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"user_managed": {
Type: schema.TypeBool,
Computed: true,
},
"vins_connected": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceAccountComputesList() *schema.Resource { func DataSourceAccountComputesList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -40,24 +40,10 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )
func DataSourceAccountDeletedList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountDeletedListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountListDeletedSchemaMake(),
}
}
func dataSourceAccountDeletedListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountDeletedListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountDeletedList, err := utilityAccountDeletedListCheckPresence(ctx, d, m) accountDeletedList, err := utilityAccountDeletedListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -69,216 +55,17 @@ func dataSourceAccountDeletedListRead(ctx context.Context, d *schema.ResourceDat
return nil return nil
} }
func dataSourceAccountListDeletedSchemaMake() map[string]*schema.Schema { func DataSourceAccountDeletedList() *schema.Resource {
return map[string]*schema.Schema{ return &schema.Resource{
"by_id": { SchemaVersion: 1,
Type: schema.TypeInt,
Optional: true, ReadContext: dataSourceAccountDeletedListRead,
Description: "Filter by ID",
}, Timeouts: &schema.ResourceTimeout{
"name": { Read: &constants.Timeout30s,
Type: schema.TypeString, Default: &constants.Timeout60s,
Optional: true,
Description: "Filter by name",
},
"acl": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by ACL",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dc_location": {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"acl": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"explicit": {
Type: schema.TypeBool,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"right": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"user_group_id": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"company": {
Type: schema.TypeString,
Computed: true,
},
"companyurl": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_parameter": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deactivation_time": {
Type: schema.TypeFloat,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"displayname": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"resource_limits": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cu_c": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_d": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_dm": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_i": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_m": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
"resource_types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"send_access_emails": {
Type: schema.TypeBool,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"uniq_pools": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"version": {
Type: schema.TypeInt,
Computed: true,
},
"vins": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
}, },
Schema: dataSourceAccountListDeletedSchemaMake(),
} }
} }

@ -42,6 +42,7 @@ import (
func dataSourceAccountDisksListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountDisksListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountDisksList, err := utilityAccountDisksListCheckPresence(ctx, d, m) accountDisksList, err := utilityAccountDisksListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -53,88 +54,6 @@ func dataSourceAccountDisksListRead(ctx context.Context, d *schema.ResourceData,
return nil return nil
} }
func dataSourceAccountDisksListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"disk_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by disk ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by disk name",
},
"disk_max_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by disk max size",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by disk type",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Computed: true,
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
},
"pool_name": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceAccountDisksList() *schema.Resource { func DataSourceAccountDisksList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -43,6 +43,7 @@ import (
func dataSourceAccountFlipGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountFlipGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountFlipGroupsList, err := utilityAccountFlipGroupsListCheckPresence(ctx, d, m) accountFlipGroupsList, err := utilityAccountFlipGroupsListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -54,154 +55,6 @@ func dataSourceAccountFlipGroupsListRead(ctx context.Context, d *schema.Resource
return nil return nil
} }
func dataSourceAccountFlipGroupsListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by name",
},
"vins_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by ViNS ID",
},
"vins_name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by ViNS name",
},
"extnet_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by extnet ID",
},
"by_ip": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by IP",
},
"flipgroup_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by flipgroup ID",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"client_type": {
Type: schema.TypeString,
Computed: true,
},
"conn_type": {
Type: schema.TypeString,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"default_gw": {
Type: schema.TypeString,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"fg_id": {
Type: schema.TypeInt,
Computed: true,
},
"ip": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"fg_name": {
Type: schema.TypeString,
Computed: true,
},
"net_id": {
Type: schema.TypeInt,
Computed: true,
},
"net_type": {
Type: schema.TypeString,
Computed: true,
},
"netmask": {
Type: schema.TypeInt,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceAccountFlipGroupsList() *schema.Resource { func DataSourceAccountFlipGroupsList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -44,6 +44,7 @@ import (
func dataSourceAccountResourceConsumptionGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountResourceConsumptionGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountResourceConsumptionRec, err := utilityAccountResourceConsumptionGetCheckPresence(ctx, d, m) accountResourceConsumptionRec, err := utilityAccountResourceConsumptionGetCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -53,173 +54,6 @@ func dataSourceAccountResourceConsumptionGetRead(ctx context.Context, d *schema.
return nil return nil
} }
func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
},
"consumed": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"disksizemax": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"disksizemax": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"resource_limits": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cu_c": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_d": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_dm": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_i": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_m": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
}
return res
}
func DataSourceAccountResourceConsumptionGet() *schema.Resource { func DataSourceAccountResourceConsumptionGet() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -41,24 +41,10 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )
func DataSourceAccountList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountListSchemaMake(),
}
}
func dataSourceAccountListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountList, err := utilityAccountListCheckPresence(ctx, d, m) accountList, err := utilityAccountListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -70,223 +56,17 @@ func dataSourceAccountListRead(ctx context.Context, d *schema.ResourceData, m in
return nil return nil
} }
func dataSourceAccountListSchemaMake() map[string]*schema.Schema { func DataSourceAccountList() *schema.Resource {
res := map[string]*schema.Schema{ return &schema.Resource{
"by_id": { SchemaVersion: 1,
Type: schema.TypeInt,
Optional: true, ReadContext: dataSourceAccountListRead,
Description: "Filter by ID",
}, Timeouts: &schema.ResourceTimeout{
"name": { Read: &constants.Timeout30s,
Type: schema.TypeString, Default: &constants.Timeout60s,
Optional: true,
Description: "Filter by name",
},
"acl": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by ACL",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by status",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dc_location": {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"acl": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"explicit": {
Type: schema.TypeBool,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"right": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"user_group_id": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"company": {
Type: schema.TypeString,
Computed: true,
},
"companyurl": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_parameter": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deactivation_time": {
Type: schema.TypeFloat,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"displayname": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"resource_limits": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cu_c": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_d": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_dm": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_i": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_m": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
"resource_types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"send_access_emails": {
Type: schema.TypeBool,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"uniq_pools": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"version": {
Type: schema.TypeInt,
Computed: true,
},
"vins": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
}, },
Schema: dataSourceAccountListSchemaMake(),
} }
return res
} }

@ -44,6 +44,7 @@ import (
func dataSourceAccountResourceConsumptionListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountResourceConsumptionListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountResourceConsumptionList, err := utilityAccountResourceConsumptionListCheckPresence(ctx, m) accountResourceConsumptionList, err := utilityAccountResourceConsumptionListCheckPresence(ctx, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -54,148 +55,6 @@ func dataSourceAccountResourceConsumptionListRead(ctx context.Context, d *schema
return nil return nil
} }
func dataSourceAccountResourceConsumptionListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"consumed": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"disksizemax": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"disksizemax": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceAccountResourceConsumptionList() *schema.Resource { func DataSourceAccountResourceConsumptionList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -40,24 +40,10 @@ import (
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
) )
func DataSourceAccountRGList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountRGListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountRGListSchemaMake(),
}
}
func dataSourceAccountRGListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountRGListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountRGList, err := utilityAccountRGListCheckPresence(ctx, d, m) accountRGList, err := utilityAccountRGListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -69,294 +55,17 @@ func dataSourceAccountRGListRead(ctx context.Context, d *schema.ResourceData, m
return nil return nil
} }
func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema { func DataSourceAccountRGList() *schema.Resource {
res := map[string]*schema.Schema{ return &schema.Resource{
"account_id": { SchemaVersion: 1,
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by RG ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by name",
},
"vins_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by ViNS ID",
},
"vm_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by VM ID",
},
"status": { ReadContext: dataSourceAccountRGListRead,
Type: schema.TypeString,
Optional: true,
Description: "Filter by status",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: dataSourceAccountRGSchemaMake(),
},
},
"entry_count": {
Type: schema.TypeInt,
Optional: true,
},
}
return res
}
func dataSourceAccountRGSchemaMake() map[string]*schema.Schema { Timeouts: &schema.ResourceTimeout{
return map[string]*schema.Schema{ Read: &constants.Timeout30s,
"computes": { Default: &constants.Timeout60s,
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"started": {
Type: schema.TypeInt,
Computed: true,
},
"stopped": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"resources": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"consumed": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"disksizemax": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"limits": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"disksizemax": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"disksizemax": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
},
},
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"vinses": {
Type: schema.TypeInt,
Computed: true,
}, },
Schema: dataSourceAccountRGListSchemaMake(),
} }
} }

@ -43,6 +43,7 @@ import (
func dataSourceAccountVinsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceAccountVinsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountVinsList, err := utilityAccountVinsListCheckPresence(ctx, d, m) accountVinsList, err := utilityAccountVinsListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -54,128 +55,6 @@ func dataSourceAccountVinsListRead(ctx context.Context, d *schema.ResourceData,
return nil return nil
} }
func dataSourceAccountVinsListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"vins_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by ViNS ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by name",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by RG ID",
},
"ext_ip": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by external IP",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"computes": {
Type: schema.TypeInt,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"external_ip": {
Type: schema.TypeString,
Computed: true,
},
"vin_id": {
Type: schema.TypeInt,
Computed: true,
},
"vin_name": {
Type: schema.TypeString,
Computed: true,
},
"network": {
Type: schema.TypeString,
Computed: true,
},
"pri_vnf_dev_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceAccountVinsList() *schema.Resource { func DataSourceAccountVinsList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -62,7 +62,7 @@ func flattenDataAccount(d *schema.ResourceData, acc *account.RecordAccount) {
} }
func flattenAccountRGList(argl *account.ListRG) []map[string]interface{} { func flattenAccountRGList(argl *account.ListRG) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(argl.Data))
for _, arg := range argl.Data { for _, arg := range argl.Data {
temp := map[string]interface{}{ temp := map[string]interface{}{
"computes": flattenAccRGComputes(arg.Computes), "computes": flattenAccRGComputes(arg.Computes),
@ -139,7 +139,7 @@ func flattenAccResource(r account.Resource) []map[string]interface{} {
} }
func flattenAccAcl(acls []account.ACL) []map[string]interface{} { func flattenAccAcl(acls []account.ACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(acls))
for _, acls := range acls { for _, acls := range acls {
temp := map[string]interface{}{ temp := map[string]interface{}{
"explicit": acls.Explicit, "explicit": acls.Explicit,
@ -187,7 +187,7 @@ func flattenRgAcl(rgAcls []account.ACL) []map[string]interface{} {
} }
func flattenListDeleted(al *account.ListAccounts) []map[string]interface{} { func flattenListDeleted(al *account.ListAccounts) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(al.Data))
for _, acc := range al.Data { for _, acc := range al.Data {
temp := map[string]interface{}{ temp := map[string]interface{}{
"dc_location": acc.DCLocation, "dc_location": acc.DCLocation,
@ -222,7 +222,7 @@ func flattenListDeleted(al *account.ListAccounts) []map[string]interface{} {
} }
func flattenAccountList(al *account.ListAccounts) []map[string]interface{} { func flattenAccountList(al *account.ListAccounts) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(al.Data))
for _, acc := range al.Data { for _, acc := range al.Data {
temp := map[string]interface{}{ temp := map[string]interface{}{
"dc_location": acc.DCLocation, "dc_location": acc.DCLocation,
@ -257,7 +257,7 @@ func flattenAccountList(al *account.ListAccounts) []map[string]interface{} {
} }
func flattenAccountAuditsList(aal account.ListAudits) []map[string]interface{} { func flattenAccountAuditsList(aal account.ListAudits) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(aal))
for _, aa := range aal { for _, aa := range aal {
temp := map[string]interface{}{ temp := map[string]interface{}{
"call": aa.Call, "call": aa.Call,
@ -272,7 +272,7 @@ func flattenAccountAuditsList(aal account.ListAudits) []map[string]interface{} {
} }
func flattenAccountComputesList(acl *account.ListComputes) []map[string]interface{} { func flattenAccountComputesList(acl *account.ListComputes) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(acl.Data))
for _, acc := range acl.Data { for _, acc := range acl.Data {
temp := map[string]interface{}{ temp := map[string]interface{}{
"account_id": acc.AccountID, "account_id": acc.AccountID,
@ -302,7 +302,7 @@ func flattenAccountComputesList(acl *account.ListComputes) []map[string]interfac
} }
func flattenAccountDisksList(adl *account.ListDisks) []map[string]interface{} { func flattenAccountDisksList(adl *account.ListDisks) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(adl.Data))
for _, ad := range adl.Data { for _, ad := range adl.Data {
temp := map[string]interface{}{ temp := map[string]interface{}{
"disk_id": ad.ID, "disk_id": ad.ID,
@ -319,7 +319,7 @@ func flattenAccountDisksList(adl *account.ListDisks) []map[string]interface{} {
} }
func flattenAccountFlipGroupsList(afgl *account.ListFLIPGroups) []map[string]interface{} { func flattenAccountFlipGroupsList(afgl *account.ListFLIPGroups) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(afgl.Data))
for _, afg := range afgl.Data { for _, afg := range afgl.Data {
temp := map[string]interface{}{ temp := map[string]interface{}{
"account_id": afg.AccountID, "account_id": afg.AccountID,
@ -350,7 +350,7 @@ func flattenAccountFlipGroupsList(afgl *account.ListFLIPGroups) []map[string]int
} }
func flattenAccountVinsList(avl *account.ListVINS) []map[string]interface{} { func flattenAccountVinsList(avl *account.ListVINS) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(avl.Data))
for _, av := range avl.Data { for _, av := range avl.Data {
temp := map[string]interface{}{ temp := map[string]interface{}{
"account_id": av.AccountID, "account_id": av.AccountID,
@ -384,7 +384,7 @@ func flattenResourceConsumption(d *schema.ResourceData, acc *account.RecordResou
} }
func flattenAccountSeps(seps map[string]map[string]account.DiskUsage) []map[string]interface{} { func flattenAccountSeps(seps map[string]map[string]account.DiskUsage) []map[string]interface{} {
res := make([]map[string]interface{}, 0) res := make([]map[string]interface{}, 0, len(seps))
for sepKey, sepVal := range seps { for sepKey, sepVal := range seps {
for dataKey, dataVal := range sepVal { for dataKey, dataVal := range sepVal {
temp := map[string]interface{}{ temp := map[string]interface{}{

@ -34,7 +34,6 @@ package account
import ( import (
"context" "context"
"strconv" "strconv"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@ -51,10 +50,10 @@ func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interf
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := account.CreateRequest{} req := account.CreateRequest{
Name: d.Get("account_name").(string),
req.Name = d.Get("account_name").(string) Username: d.Get("username").(string),
req.Username = d.Get("username").(string) }
if emailaddress, ok := d.GetOk("emailaddress"); ok { if emailaddress, ok := d.GetOk("emailaddress"); ok {
req.EmailAddress = emailaddress.(string) req.EmailAddress = emailaddress.(string)
@ -192,10 +191,7 @@ func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interf
} }
} }
diags := resourceAccountRead(ctx, d, m) return append(resourceAccountRead(ctx, d, m), w.Get()...)
diags = append(diags, w.Get()...)
return diags
} }
func resourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -217,6 +213,7 @@ func resourceAccountDelete(ctx context.Context, d *schema.ResourceData, m interf
accountData, err := utilityAccountCheckPresence(ctx, d, m) accountData, err := utilityAccountCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -258,15 +255,21 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf
case status.Destroying: case status.Destroying:
return diag.Errorf("The account is in progress with status: %s", acc.Status) return diag.Errorf("The account is in progress with status: %s", acc.Status)
case status.Deleted: case status.Deleted:
_, err := c.CloudBroker().Account().Restore(ctx, account.RestoreRequest{ if d.Get("restore").(bool) {
AccountID: accountId, _, err := c.CloudBroker().Account().Restore(ctx, account.RestoreRequest{
}) AccountID: accountId,
})
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
}
if _, ok := d.GetOk("enable"); ok {
if err := utilityAccountEnableUpdate(ctx, d, m, acc); err != nil {
return diag.FromErr(err)
}
}
hasChanged = true
} }
hasChanged = true
case status.Disabled: case status.Disabled:
log.Debugf("The account is in status: %s, troubles may occur with update. Please, enable account first.", acc.Status) log.Debugf("The account is in status: %s, troubles may occur with update. Please, enable account first.", acc.Status)
case status.Confirmed: case status.Confirmed:
@ -281,240 +284,38 @@ func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interf
} }
if d.HasChange("enable") { if d.HasChange("enable") {
enable := d.Get("enable").(bool) if err := utilityAccountEnableUpdate(ctx, d, m, acc); err != nil {
return diag.FromErr(err)
if enable && acc.Status == status.Disabled {
_, err := c.CloudBroker().Account().Enable(ctx, account.EnableRequest{
AccountID: accountId,
})
if err != nil {
return diag.FromErr(err)
}
} else if !enable && acc.Status == status.Enabled {
_, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{
AccountID: accountId,
})
if err != nil {
return diag.FromErr(err)
}
}
}
req := account.UpdateRequest{
AccountID: accountId,
}
updated := false
if d.HasChange("account_name") {
req.Name = d.Get("account_name").(string)
updated = true
}
if d.HasChange("send_access_emails") {
req.SendAccessEmails = d.Get("send_access_emails").(bool)
updated = true
}
if d.HasChange("uniq_pools") {
uniq_pools := d.Get("uniq_pools").([]interface{})
for _, pool := range uniq_pools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
updated = true
}
if d.HasChange("resource_limits") {
resLimit := d.Get("resource_limits").([]interface{})[0]
resLimitConv := resLimit.(map[string]interface{})
if resLimitConv["cu_m"] != nil {
maxMemCap := int(resLimitConv["cu_m"].(float64))
if maxMemCap == 0 {
req.MaxMemoryCapacity = -1
} else {
req.MaxMemoryCapacity = int64(maxMemCap)
}
}
if resLimitConv["cu_dm"] != nil {
maxDiskCap := int(resLimitConv["cu_dm"].(float64))
if maxDiskCap == 0 {
req.MaxVDiskCapacity = -1
} else {
req.MaxVDiskCapacity = int64(maxDiskCap)
}
}
if resLimitConv["cu_c"] != nil {
maxCPUCap := int(resLimitConv["cu_c"].(float64))
if maxCPUCap == 0 {
req.MaxCPUCapacity = -1
} else {
req.MaxCPUCapacity = int64(maxCPUCap)
}
}
if resLimitConv["cu_i"] != nil {
maxNumPublicIP := int(resLimitConv["cu_i"].(float64))
if maxNumPublicIP == 0 {
req.MaxNumPublicIP = -1
} else {
req.MaxNumPublicIP = int64(maxNumPublicIP)
}
}
if resLimitConv["cu_np"] != nil {
maxNP := int(resLimitConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = int64(maxNP)
}
}
if resLimitConv["gpu_units"] != nil {
gpuUnits := int(resLimitConv["gpu_units"].(float64))
if gpuUnits == 0 {
req.GPUUnits = -1
} else {
req.GPUUnits = int64(gpuUnits)
}
} }
updated = true
} }
if updated { if d.HasChanges("account_name", "send_access_emails", "uniq_pools", "resource_limits") {
_, err := c.CloudBroker().Account().Update(ctx, req) if err := utilityAccountUpdate(ctx, d, m); err != nil {
if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
} }
if d.HasChange("cpu_allocation_parameter") { if d.HasChange("cpu_allocation_parameter") {
cpuAllocationParameter := d.Get("cpu_allocation_parameter").(string) if err := utilityAccountCPUParameterUpdate(ctx, d, m); err != nil {
_, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, account.SetCPUAllocationParameterRequest{
AccountID: acc.ID,
StrictLoose: cpuAllocationParameter,
})
if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
} }
if d.HasChange("cpu_allocation_ratio") { if d.HasChange("cpu_allocation_ratio") {
cpuAllocacationRatio := d.Get("cpu_allocation_ratio").(float64) if err := utilityAccountCPURatioUpdate(ctx, d, m); err != nil {
_, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, account.SetCPUAllocationRatioRequest{
AccountID: acc.ID,
Ratio: cpuAllocacationRatio,
})
if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
} }
if d.HasChange("users") { if d.HasChange("users") {
deletedUsers := make([]interface{}, 0) if err := utilityAccountUsersUpdate(ctx, d, m, acc); err != nil {
addedUsers := make([]interface{}, 0) return diag.FromErr(err)
updatedUsers := make([]interface{}, 0)
old, new := d.GetChange("users")
oldConv := old.([]interface{})
newConv := new.([]interface{})
for _, el := range oldConv {
if !isContainsUser(newConv, el) {
deletedUsers = append(deletedUsers, el)
}
}
for _, el := range newConv {
if !isContainsUser(oldConv, el) {
duplicate := false
for _, user := range acc.ACL {
if user.UserGroupID == el.(map[string]interface{})["user_id"].(string) {
duplicate = true
}
}
if !duplicate {
addedUsers = append(addedUsers, el)
} else if isChangedUser(oldConv, el) {
updatedUsers = append(updatedUsers, el)
}
}
}
for _, user := range deletedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().DeleteUser(ctx, account.DeleteUserRequest{
AccountID: accountId,
UserName: userConv["user_id"].(string),
RecursiveDelete: userConv["recursive_delete"].(bool),
})
if err != nil {
return diag.FromErr(err)
}
}
for _, user := range addedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().AddUser(ctx, account.AddUserRequest{
AccountID: accountId,
Username: userConv["user_id"].(string),
AccessType: strings.ToUpper(userConv["access_type"].(string)),
})
if err != nil {
return diag.FromErr(err)
}
}
for _, user := range updatedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().UpdateUser(ctx, account.UpdateUserRequest{
AccountID: accountId,
UserID: userConv["user_id"].(string),
AccessType: strings.ToUpper(userConv["access_type"].(string)),
})
if err != nil {
return diag.FromErr(err)
}
} }
} }
return resourceAccountRead(ctx, d, m) return resourceAccountRead(ctx, d, m)
} }
func isContainsUser(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) {
return true
}
}
return false
}
func isChangedUser(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) &&
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string)) ||
elOldConv["recursive_delete"].(bool) != elConv["recursive_delete"].(bool)) {
return true
}
}
return false
}
func ResourceAccount() *schema.Resource { func ResourceAccount() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -34,10 +34,12 @@ package account
import ( import (
"context" "context"
"strconv" "strconv"
"strings"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
) )
@ -63,3 +65,252 @@ func utilityAccountCheckPresence(ctx context.Context, d *schema.ResourceData, m
return account, nil return account, nil
} }
func utilityAccountEnableUpdate(ctx context.Context, d *schema.ResourceData, m interface{}, acc *account.RecordAccount) error {
c := m.(*controller.ControllerCfg)
enable := d.Get("enable").(bool)
if enable && acc.Status == status.Disabled {
_, err := c.CloudBroker().Account().Enable(ctx, account.EnableRequest{
AccountID: acc.ID,
})
if err != nil {
return err
}
} else if !enable && acc.Status == status.Enabled {
_, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{
AccountID: acc.ID,
})
if err != nil {
return err
}
}
return nil
}
func utilityAccountCPUParameterUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
accountId, _ := strconv.ParseUint(d.Id(), 10, 64)
cpuAllocationParameter := d.Get("cpu_allocation_parameter").(string)
_, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, account.SetCPUAllocationParameterRequest{
AccountID: accountId,
StrictLoose: cpuAllocationParameter,
})
if err != nil {
return err
}
return nil
}
func utilityAccountCPURatioUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
accountId, _ := strconv.ParseUint(d.Id(), 10, 64)
cpuAllocacationRatio := d.Get("cpu_allocation_ratio").(float64)
_, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, account.SetCPUAllocationRatioRequest{
AccountID: accountId,
Ratio: cpuAllocacationRatio,
})
if err != nil {
return err
}
return nil
}
func utilityAccountUsersUpdate(ctx context.Context, d *schema.ResourceData, m interface{}, acc *account.RecordAccount) error {
c := m.(*controller.ControllerCfg)
deletedUsers := make([]interface{}, 0)
addedUsers := make([]interface{}, 0)
updatedUsers := make([]interface{}, 0)
old, new := d.GetChange("users")
oldConv := old.([]interface{})
newConv := new.([]interface{})
for _, el := range oldConv {
if !isContainsUser(newConv, el) {
deletedUsers = append(deletedUsers, el)
}
}
for _, el := range newConv {
if !isContainsUser(oldConv, el) {
duplicate := false
for _, user := range acc.ACL {
if user.UserGroupID == el.(map[string]interface{})["user_id"].(string) {
duplicate = true
}
}
if !duplicate {
addedUsers = append(addedUsers, el)
} else if isChangedUser(oldConv, el) {
updatedUsers = append(updatedUsers, el)
}
}
}
for _, user := range deletedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().DeleteUser(ctx, account.DeleteUserRequest{
AccountID: acc.ID,
UserName: userConv["user_id"].(string),
RecursiveDelete: userConv["recursive_delete"].(bool),
})
if err != nil {
return err
}
}
for _, user := range addedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().AddUser(ctx, account.AddUserRequest{
AccountID: acc.ID,
Username: userConv["user_id"].(string),
AccessType: strings.ToUpper(userConv["access_type"].(string)),
})
if err != nil {
return err
}
}
for _, user := range updatedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().UpdateUser(ctx, account.UpdateUserRequest{
AccountID: acc.ID,
UserID: userConv["user_id"].(string),
AccessType: strings.ToUpper(userConv["access_type"].(string)),
})
if err != nil {
return err
}
}
return nil
}
func utilityAccountUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
accountId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := account.UpdateRequest{
AccountID: accountId,
}
if d.HasChange("account_name") {
req.Name = d.Get("account_name").(string)
}
if d.HasChange("send_access_emails") {
req.SendAccessEmails = d.Get("send_access_emails").(bool)
}
if d.HasChange("uniq_pools") {
uniq_pools := d.Get("uniq_pools").([]interface{})
for _, pool := range uniq_pools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
}
if d.HasChange("resource_limits") {
resLimit := d.Get("resource_limits").([]interface{})[0]
resLimitConv := resLimit.(map[string]interface{})
if resLimitConv["cu_m"] != nil {
maxMemCap := int(resLimitConv["cu_m"].(float64))
if maxMemCap == 0 {
req.MaxMemoryCapacity = -1
} else {
req.MaxMemoryCapacity = int64(maxMemCap)
}
}
if resLimitConv["cu_dm"] != nil {
maxDiskCap := int(resLimitConv["cu_dm"].(float64))
if maxDiskCap == 0 {
req.MaxVDiskCapacity = -1
} else {
req.MaxVDiskCapacity = int64(maxDiskCap)
}
}
if resLimitConv["cu_c"] != nil {
maxCPUCap := int(resLimitConv["cu_c"].(float64))
if maxCPUCap == 0 {
req.MaxCPUCapacity = -1
} else {
req.MaxCPUCapacity = int64(maxCPUCap)
}
}
if resLimitConv["cu_i"] != nil {
maxNumPublicIP := int(resLimitConv["cu_i"].(float64))
if maxNumPublicIP == 0 {
req.MaxNumPublicIP = -1
} else {
req.MaxNumPublicIP = int64(maxNumPublicIP)
}
}
if resLimitConv["cu_np"] != nil {
maxNP := int(resLimitConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = int64(maxNP)
}
}
if resLimitConv["gpu_units"] != nil {
gpuUnits := int(resLimitConv["gpu_units"].(float64))
if gpuUnits == 0 {
req.GPUUnits = -1
} else {
req.GPUUnits = int64(gpuUnits)
}
}
}
_, err := c.CloudBroker().Account().Update(ctx, req)
if err != nil {
return err
}
return nil
}
func isContainsUser(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) {
return true
}
}
return false
}
func isChangedUser(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) &&
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string)) ||
elOldConv["recursive_delete"].(bool) != elConv["recursive_delete"].(bool)) {
return true
}
}
return false
}

@ -46,6 +46,7 @@ import (
func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m) disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -57,299 +58,6 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
return nil return nil
} }
func dataSourceDiskSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"iotune": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
},
"order": {
Type: schema.TypeInt,
Computed: true,
},
"params": {
Type: schema.TypeString,
Computed: true,
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
},
"shareable": {
Type: schema.TypeString,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"label": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
},
}
return rets
}
func DataSourceDisk() *schema.Resource { func DataSourceDisk() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -43,6 +43,7 @@ import (
func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskList, err := utilityDiskListCheckPresence(ctx, d, m) diskList, err := utilityDiskListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -54,378 +55,6 @@ func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m inter
return nil return nil
} }
func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by name",
},
"account_name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by account name",
},
"disk_max_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by max disk size",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"shared": {
Type: schema.TypeBool,
Optional: true,
Description: "Find by shared field",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the account the disks belong to",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "type of the disks",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by sep id",
},
"pool": {
Type: schema.TypeString,
Optional: true,
Description: "Find by pool name",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"iotune": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"machine_id": {
Type: schema.TypeInt,
Computed: true,
},
"machine_name": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
},
"order": {
Type: schema.TypeInt,
Computed: true,
},
"params": {
Type: schema.TypeString,
Computed: true,
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"label": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceDiskList() *schema.Resource { func DataSourceDiskList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -12,6 +12,7 @@ import (
func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m) listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -23,24 +24,6 @@ func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m
return nil return nil
} }
func dataSourceDiskListTypesSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceDiskListTypes() *schema.Resource { func DataSourceDiskListTypes() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -12,6 +12,7 @@ import (
func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m) listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -32,53 +33,6 @@ func DataSourceDiskListTypesDetailed() *schema.Resource {
Default: &constants.Timeout60s, Default: &constants.Timeout60s,
}, },
Schema: map[string]*schema.Schema{ Schema: dataSourceDiskListTypesDetailedSchemaMake(),
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"pools": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Pool name",
},
"system": {
Type: schema.TypeString,
Computed: true,
},
"types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
},
},
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Storage endpoint provider ID to create disk",
},
"sep_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
},
} }
} }

@ -45,6 +45,7 @@ import (
func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m) diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -70,401 +71,3 @@ func DataSourceDiskListUnattached() *schema.Resource {
Schema: dataSourceDiskListUnattachedSchemaMake(), Schema: dataSourceDiskListUnattachedSchemaMake(),
} }
} }
func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"account_name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by account name",
},
"disk_max_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by max disk size",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "type of the disks",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the account the disks belong to",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of SEP",
},
"pool": {
Type: schema.TypeString,
Optional: true,
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"_ckey": {
Type: schema.TypeString,
Computed: true,
Description: "CKey",
},
"_meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "Meta parameters",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the account the disks belong to",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "The name of the subscriber '(account') to whom this disk belongs",
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of disk partitions",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Deleted time",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Description: "Description of disk",
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of final deletion",
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
Description: "Disk path",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk ID on the storage side",
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Image ID",
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "IDs of images using the disk",
},
"iotune": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of bytes to read per second",
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of bytes to read",
},
"read_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of io read operations per second",
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of io read operations",
},
"size_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Size of io operations",
},
"total_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Total size bytes per second",
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum total size of bytes per second",
},
"total_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Total number of io operations per second",
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum total number of io operations per second",
},
"write_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of bytes to write per second",
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of bytes to write per second",
},
"write_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of write operations per second",
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of write operations per second",
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
Description: "Disk IQN",
},
"login": {
Type: schema.TypeString,
Computed: true,
Description: "Login to access the disk",
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
Description: "Milestones",
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of disk",
},
"order": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk order",
},
"params": {
Type: schema.TypeString,
Computed: true,
Description: "Disk params",
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the parent disk",
},
"passwd": {
Type: schema.TypeString,
Computed: true,
Description: "Password to access the disk",
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the pci slot to which the disk is connected",
},
"pool": {
Type: schema.TypeString,
Computed: true,
Description: "Pool for disk location",
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of deletion attempts",
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of the last deletion attempt",
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
Description: "Reality device number",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the reference to the disk",
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Resource ID",
},
"res_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the resource",
},
"role": {
Type: schema.TypeString,
Computed: true,
Description: "Disk role",
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Storage endpoint provider ID to create disk",
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Size in GB",
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
Description: "Number of used space, in GB",
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"label": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Disk status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "Technical status of the disk",
},
"type": {
Type: schema.TypeString,
Computed: true,
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
Description: "Virtual Machine ID (Deprecated)",
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}

@ -45,6 +45,7 @@ import (
func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m) disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -82,48 +83,3 @@ func DataSourceDiskSnapshot() *schema.Resource {
Schema: dataSourceDiskSnapshotSchemaMake(), Schema: dataSourceDiskSnapshotSchemaMake(),
} }
} }
func dataSourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": {
Type: schema.TypeString,
Required: true,
Description: "Name of the snapshot",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
}
return rets
}

@ -44,6 +44,7 @@ import (
func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m) disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -67,56 +68,3 @@ func DataSourceDiskSnapshotList() *schema.Resource {
Schema: dataSourceDiskSnapshotListSchemaMake(), Schema: dataSourceDiskSnapshotListSchemaMake(),
} }
} }
func dataSourceDiskSnapshotListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"label": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the snapshot",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
},
},
},
}
return rets
}

@ -106,12 +106,11 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
for _, disk := range dl.Data { for _, disk := range dl.Data {
diskAcl, _ := json.Marshal(disk.ACL) diskAcl, _ := json.Marshal(disk.ACL)
temp := map[string]interface{}{ temp := map[string]interface{}{
"account_id": disk.AccountID, "account_id": disk.AccountID,
"account_name": disk.AccountName, "account_name": disk.AccountName,
"acl": string(diskAcl), "acl": string(diskAcl),
"boot_partition": disk.BootPartition, "boot_partition": disk.BootPartition,
// "compute_id": disk.MachineID, "computes": flattenDiskComputes(disk.Computes),
// "compute_name": disk.MachineName,
"created_time": disk.CreatedTime, "created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime, "deleted_time": disk.DeletedTime,
"desc": disk.Description, "desc": disk.Description,

@ -2,54 +2,27 @@ package disks
import ( import (
"context" "context"
"fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
) )
func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) error { func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
c := m.(*controller.ControllerCfg) var errs []error
accountID := uint64(d.Get("account_id").(int))
accountList, err := c.CloudBroker().Account().List(ctx, account.ListRequest{})
if err != nil {
return err
}
if len(accountList.FilterByID(accountID).Data) == 0 {
return fmt.Errorf("resourceDiskCreate: can't create/update Disk because AccountID %d is not allowed or does not exist", accountID)
}
return nil
}
func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
accountID := uint64(d.Get("account_id").(int))
gid := uint64(d.Get("gid").(int)) gid := uint64(d.Get("gid").(int))
gidList, err := c.CloudBroker().Grid().List(ctx, grid.ListRequest{}) if err := ic.ExistAccount(ctx, accountID, c); err != nil {
if err != nil { errs = append(errs, err)
return err
}
for _, elem := range gidList.Data {
if elem.GID == gid {
return nil
}
} }
return fmt.Errorf("resourceDiskCreate: can't create/update Disk because GID %d is not allowed or does not exist", gid) if err := ic.ExistGID(ctx, gid, c); err != nil {
} errs = append(errs, err)
func checkParamsExists(ctx context.Context, d *schema.ResourceData, m interface{}) error {
err := existAccountID(ctx, d, m)
if err != nil {
return err
} }
return existGID(ctx, d, m) return dc.ErrorsToDiagnostics(errs)
} }

@ -44,16 +44,15 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDiskCreate: called for disk %s", d.Get("disk_name").(string))
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
err := checkParamsExists(ctx, d, m) if diags := checkParamsExistence(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
} }
req := disks.CreateRequest{ req := disks.CreateRequest{
@ -72,6 +71,10 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
req.SSDSize = uint64(ssdSize.(int)) req.SSDSize = uint64(ssdSize.(int))
} }
if iops, ok := d.GetOk("iops"); ok {
req.IOPS = uint64(iops.(int))
}
if sepID, ok := d.GetOk("sep_id"); ok { if sepID, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepID.(int)) req.SEPID = uint64(sepID.(int))
} }
@ -82,45 +85,23 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
diskID, err := c.CloudBroker().Disks().Create(ctx, req) diskID, err := c.CloudBroker().Disks().Create(ctx, req)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.FormatUint(diskID, 10)) d.SetId(strconv.FormatUint(diskID, 10))
d.Set("disk_id", diskID)
w := dc.Warnings{} w := dc.Warnings{}
if iotuneRaw, ok := d.GetOk("iotune"); ok { if _, ok := d.GetOk("iotune"); ok {
iot := iotuneRaw.([]interface{})[0] if err := resourceDiskChangeIotune(ctx, d, m); err != nil {
iotune := iot.(map[string]interface{})
req := disks.LimitIORequest{
DiskID: diskID,
IOPS: uint64(iotune["total_iops_sec"].(int)),
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
if err != nil {
w.Add(err) w.Add(err)
} }
} }
if shareable := d.Get("shareable"); shareable.(bool) { if shareable := d.Get("shareable"); shareable.(bool) {
_, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{ if err := resourceDiskChangeShareable(ctx, d, m); err != nil {
DiskID: diskID,
})
if err != nil {
w.Add(err) w.Add(err)
} }
} }
@ -129,7 +110,7 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
} }
func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg) log.Debugf("resourceDiskRead: called for disk_id %d", d.Get("disk_id").(int))
w := dc.Warnings{} w := dc.Warnings{}
disk, err := utilityDiskCheckPresence(ctx, d, m) disk, err := utilityDiskCheckPresence(ctx, d, m)
@ -144,23 +125,13 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
case status.Destroyed, status.Purged: case status.Destroyed, status.Purged:
d.Set("disk_id", 0) d.Set("disk_id", 0)
d.SetId("") d.SetId("")
return resourceDiskCreate(ctx, d, m) return diag.Errorf("The resource cannot be read because it has been destroyed")
//return resourceDiskCreate(ctx, d, m)
case status.Deleted: case status.Deleted:
hasChangeState = true //hasChangeState = true
req := disks.RestoreRequest{ //if err := resourceDiskRestore(ctx, d, m); err != nil {
DiskID: disk.ID, // w.Add(err)
} //}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
if err != nil {
w.Add(err)
}
case status.Assigned: case status.Assigned:
case status.Modeled: case status.Modeled:
return diag.Errorf("The disk is in status: %s, please, contact support for more information", disk.Status) return diag.Errorf("The disk is in status: %s, please, contact support for more information", disk.Status)
@ -173,6 +144,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
if hasChangeState { if hasChangeState {
disk, err = utilityDiskCheckPresence(ctx, d, m) disk, err = utilityDiskCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
} }
@ -183,12 +155,12 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
} }
func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDiskUpdate: called for disk_id %d", d.Get("disk_id").(int))
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
w := dc.Warnings{} w := dc.Warnings{}
err := checkParamsExists(ctx, d, m) if diags := checkParamsExistence(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
} }
disk, err := utilityDiskCheckPresence(ctx, d, m) disk, err := utilityDiskCheckPresence(ctx, d, m)
@ -203,21 +175,11 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
case status.Destroyed, status.Purged: case status.Destroyed, status.Purged:
d.Set("disk_id", 0) d.Set("disk_id", 0)
d.SetId("") d.SetId("")
return resourceDiskCreate(ctx, d, m) return diag.Errorf("The resource cannot be updated because it has been destroyed")
//return resourceDiskCreate(ctx, d, m)
case status.Deleted: case status.Deleted:
hasChangeState = true hasChangeState = true
req := disks.RestoreRequest{ if err := resourceDiskRestore(ctx, d, m); err != nil {
DiskID: disk.ID,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
case status.Assigned: case status.Assigned:
@ -239,75 +201,32 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
if d.HasChange("size_max") { if d.HasChange("size_max") {
oldSize, newSize := d.GetChange("size_max") oldSize, newSize := d.GetChange("size_max")
if oldSize.(int) < newSize.(int) { if oldSize.(int) > newSize.(int) {
log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB",
d.Id(), oldSize.(int), newSize.(int))
_, err := c.CloudBroker().Disks().Resize(ctx, disks.ResizeRequest{
DiskID: disk.ID,
Size: uint64(newSize.(int)),
})
if err != nil {
w.Add(err)
}
} else if oldSize.(int) > newSize.(int) {
return diag.FromErr(fmt.Errorf("resourceDiskUpdate: Disk ID %s - reducing disk size is not allowed", d.Id())) return diag.FromErr(fmt.Errorf("resourceDiskUpdate: Disk ID %s - reducing disk size is not allowed", d.Id()))
} }
log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB",
d.Id(), oldSize.(int), newSize.(int))
if err := resourceDiskChangeSize(ctx, d, m); err != nil {
w.Add(err)
}
} }
if d.HasChange("disk_name") { if d.HasChange("disk_name") {
_, err := c.CloudBroker().Disks().Rename(ctx, disks.RenameRequest{ if err := resourceDiskChangeDiskName(ctx, d, m); err != nil {
DiskID: disk.ID,
Name: d.Get("disk_name").(string),
})
if err != nil {
w.Add(err) w.Add(err)
} }
} }
if d.HasChange("iotune") { if d.HasChange("iotune") {
iot := d.Get("iotune").([]interface{})[0] if err := resourceDiskChangeIotune(ctx, d, m); err != nil {
iotune := iot.(map[string]interface{})
req := disks.LimitIORequest{
IOPS: uint64(iotune["total_iops_sec"].(int)),
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
if err != nil {
w.Add(err) w.Add(err)
} }
} }
if d.HasChange("shareable") { if d.HasChange("shareable") {
old, new := d.GetChange("shareable") if err := resourceDiskChangeShareable(ctx, d, m); err != nil {
if !old.(bool) && new.(bool) && !disk.Shareable { w.Add(err)
_, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{
DiskID: disk.ID,
})
if err != nil {
w.Add(err)
}
}
if old.(bool) && !new.(bool) && disk.Shareable {
_, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{
DiskID: disk.ID,
})
if err != nil {
w.Add(err)
}
} }
} }
@ -315,6 +234,7 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
} }
func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDiskDelete: called for disk_id %d", d.Get("disk_id").(int))
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m) disk, err := utilityDiskCheckPresence(ctx, d, m)
@ -340,349 +260,91 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
return nil return nil
} }
func resourceDiskSchemaMake() map[string]*schema.Schema { func resourceDiskChangeIotune(ctx context.Context, d *schema.ResourceData, m interface{}) error {
rets := map[string]*schema.Schema{ c := m.(*controller.ControllerCfg)
"account_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"gid": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"disk_name": {
Type: schema.TypeString,
Required: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false),
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Required: true,
},
"ssd_size": {
Type: schema.TypeInt,
Optional: true,
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"detach": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "detach disk from machine first",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the disk, works only with non attached disks",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: "reason for an action",
},
"shareable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"restore": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "restore deleting disk",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"iotune": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"order": { iotuneRaw := d.Get("iotune")
Type: schema.TypeInt, diskId := uint64(d.Get("disk_id").(int))
Computed: true,
}, iot := iotuneRaw.([]interface{})[0]
"params": { iotune := iot.(map[string]interface{})
Type: schema.TypeString, req := disks.LimitIORequest{
Computed: true, DiskID: diskId,
}, ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
"parent_id": { ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
Type: schema.TypeInt, ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
Computed: true, ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
}, SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
"passwd": { TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
Type: schema.TypeString, TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
Computed: true, TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
}, TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
"pci_slot": { WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
Type: schema.TypeInt, WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
Computed: true, WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
}, WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
"present_to": { }
Type: schema.TypeList,
Computed: true, if _, ok := iotune["total_iops_sec"]; ok {
Elem: &schema.Schema{ req.IOPS = uint64(iotune["total_iops_sec"].(int))
Type: schema.TypeInt, } else if _, ok := d.GetOk("iops"); ok {
}, req.IOPS = uint64(d.Get("iops").(int))
}, }
"purge_attempts": {
Type: schema.TypeInt, _, err := c.CloudBroker().Disks().LimitIO(ctx, req)
Computed: true, return err
}, }
"purge_time": {
Type: schema.TypeInt, func resourceDiskChangeShareable(ctx context.Context, d *schema.ResourceData, m interface{}) error {
Computed: true, c := m.(*controller.ControllerCfg)
},
"reality_device_number": { diskId := uint64(d.Get("disk_id").(int))
Type: schema.TypeInt, shareable := d.Get("shareable").(bool)
Computed: true,
}, if shareable {
"reference_id": { _, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
Type: schema.TypeString, return err
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Computed: true,
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"label": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
},
} }
return rets _, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{DiskID: diskId})
return err
}
func resourceDiskRestore(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := disks.RestoreRequest{
DiskID: uint64(d.Get("disk_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
return err
}
func resourceDiskChangeDiskName(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
_, err := c.CloudBroker().Disks().Rename(ctx, disks.RenameRequest{
DiskID: uint64(d.Get("disk_id").(int)),
Name: d.Get("disk_name").(string),
})
return err
}
func resourceDiskChangeSize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
_, err := c.CloudBroker().Disks().Resize2(ctx, disks.ResizeRequest{
DiskID: uint64(d.Get("disk_id").(int)),
Size: uint64(d.Get("size_max").(int)),
})
return err
} }
func ResourceDisk() *schema.Resource { func ResourceDisk() *schema.Resource {

@ -2,6 +2,7 @@ package disks
import ( import (
"context" "context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@ -12,35 +13,21 @@ import (
) )
func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg) log.Debugf("resourceDiskSnapshotCreate: call for disk_id %d, label %s",
d.Get("disk_id").(int),
d.Get("label").(string))
disk, err := utilityDiskCheckPresence(ctx, d, m) _, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
snapshots := disk.Snapshots diskId := uint64(d.Get("disk_id").(int))
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string) label := d.Get("label").(string)
for _, sn := range snapshots { d.SetId(fmt.Sprintf("%d#%s", diskId, label))
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
if rollback := d.Get("rollback").(bool); rollback { if rollback := d.Get("rollback").(bool); rollback {
req := disks.SnapshotRollbackRequest{ err := resourceDiskSnapshotChangeRollback(ctx, d, m)
DiskID: disk.ID,
Label: label,
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskCreate: Snapshot rollback with label", label)
_, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -49,61 +36,30 @@ func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m i
} }
func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m) log.Debugf("resourceDiskSnapshotRead: snapshot id %s", d.Id())
snapshot, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
snapshots := disk.Snapshots
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
flattenDiskSnapshot(d, snapshot) flattenDiskSnapshot(d, snapshot)
return nil return nil
} }
func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg) log.Debugf("resourceDiskSnapshotUpdate: snapshot id %s", d.Id())
disk, err := utilityDiskCheckPresence(ctx, d, m)
_, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
snapshots := disk.Snapshots if d.HasChange("rollback") {
snapshot := disks.ItemSnapshot{} err := resourceDiskSnapshotChangeRollback(ctx, d, m)
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
if d.HasChange("rollback") && d.Get("rollback").(bool) == true {
req := disks.SnapshotRollbackRequest{
DiskID: disk.ID,
Label: label,
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskUpdtae: Snapshot rollback with label", label)
_, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -113,16 +69,18 @@ func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m i
} }
func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg) log.Debugf("resourceDiskSnapshotDelete: snapshot id %s", d.Id())
disk, err := utilityDiskCheckPresence(ctx, d, m) _, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
c := m.(*controller.ControllerCfg)
req := disks.SnapshotDeleteRequest{ req := disks.SnapshotDeleteRequest{
DiskID: disk.ID, DiskID: uint64(d.Get("disk_id").(int)),
Label: d.Get("label").(string), Label: d.Get("label").(string),
} }
@ -136,6 +94,31 @@ func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m i
return nil return nil
} }
func resourceDiskSnapshotChangeRollback(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
rollback := d.Get("rollback").(bool)
if rollback {
label := d.Get("label").(string)
timestamp := uint64(d.Get("timestamp").(int))
diskId := uint64(d.Get("disk_id").(int))
req := disks.SnapshotRollbackRequest{
DiskID: diskId,
Label: label,
TimeStamp: timestamp,
}
log.Debugf("resourceDiskUpdate: Snapshot rollback with label %s", label)
if _, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req); err != nil {
return err
}
}
return nil
}
func ResourceDiskSnapshot() *schema.Resource { func ResourceDiskSnapshot() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,
@ -160,56 +143,3 @@ func ResourceDiskSnapshot() *schema.Resource {
Schema: resourceDiskSnapshotSchemaMake(), Schema: resourceDiskSnapshotSchemaMake(),
} }
} }
func resourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Name of the snapshot",
},
"rollback": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Needed in order to make a snapshot rollback",
},
"timestamp": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Snapshot time",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
}
}

@ -75,7 +75,7 @@ func utilityDiskListDeletedCheckPresence(ctx context.Context, d *schema.Resource
req.Size = uint64(size.(int)) req.Size = uint64(size.(int))
} }
log.Debugf("utilityDiskListDeletedCheckPresence: load disk list") log.Debugf("utilityDiskListDeletedCheckPresence: load disk list deleted")
diskList, err := c.CloudBroker().Disks().ListDeleted(ctx, req) diskList, err := c.CloudBroker().Disks().ListDeleted(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err

@ -15,6 +15,13 @@ func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceDa
Detailed: false, Detailed: false,
} }
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed") log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed")
typesList, err := c.CloudBroker().Disks().ListTypes(ctx, req) typesList, err := c.CloudBroker().Disks().ListTypes(ctx, req)
if err != nil { if err != nil {

@ -12,10 +12,18 @@ import (
func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListTypes, error) { func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListTypes, error) {
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed") req := disks.ListTypesRequest{
listTypesDetailed, err := c.CloudBroker().Disks().ListTypes(ctx, disks.ListTypesRequest{
Detailed: true, Detailed: true,
}) }
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed")
listTypesDetailed, err := c.CloudBroker().Disks().ListTypes(ctx, req)
log.Debugf("%+v", listTypesDetailed.Data[0].(map[string]interface{})) log.Debugf("%+v", listTypesDetailed.Data[0].(map[string]interface{}))

@ -28,7 +28,7 @@ func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.Resou
if diskType, ok := d.GetOk("type"); ok { if diskType, ok := d.GetOk("type"); ok {
req.Type = diskType.(string) req.Type = diskType.(string)
} }
if accountId, ok := d.GetOk("accountId"); ok { if accountId, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(accountId.(int)) req.AccountID = uint64(accountId.(int))
} }
if sepId, ok := d.GetOk("sep_id"); ok { if sepId, ok := d.GetOk("sep_id"); ok {

@ -45,6 +45,7 @@ import (
func dataSourceExtnetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceExtnetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
net, err := utilityExtnetCheckPresence(ctx, d, m) net, err := utilityExtnetCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -54,232 +55,6 @@ func dataSourceExtnetRead(ctx context.Context, d *schema.ResourceData, m interfa
return nil return nil
} }
func dataSourceExtnetSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"extnet_id": {
Type: schema.TypeInt,
Required: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"default": {
Type: schema.TypeBool,
Computed: true,
},
"default_qos": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"e_rate": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"in_burst": {
Type: schema.TypeInt,
Computed: true,
},
"in_rate": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"free_ips": {
Type: schema.TypeInt,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"ipcidr": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"network_id": {
Type: schema.TypeInt,
Computed: true,
},
"ovs_bridge": {
Type: schema.TypeString,
Computed: true,
},
"pre_reservations_num": {
Type: schema.TypeInt,
Computed: true,
},
"pri_vnfdev_id": {
Type: schema.TypeInt,
Computed: true,
},
"shared_with": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"vlan_id": {
Type: schema.TypeInt,
Computed: true,
},
"vnfs": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dhcp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"check_ips": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"dns": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"excluded": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"client_type": {
Type: schema.TypeString,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"domain_name": {
Type: schema.TypeString,
Computed: true,
},
"hostname": {
Type: schema.TypeString,
Computed: true,
},
"ip": {
Type: schema.TypeString,
Computed: true,
},
"mac": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vm_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"gateway": {
Type: schema.TypeString,
Computed: true,
},
"network": {
Type: schema.TypeString,
Computed: true,
},
"prefix": {
Type: schema.TypeInt,
Computed: true,
},
"reservations": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"client_type": {
Type: schema.TypeString,
Computed: true,
},
"domain_name": {
Type: schema.TypeString,
Computed: true,
},
"hostname": {
Type: schema.TypeString,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"ip": {
Type: schema.TypeString,
Computed: true,
},
"mac": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vm_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
}
}
func DataSourceExtnetCB() *schema.Resource { func DataSourceExtnetCB() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -44,26 +44,17 @@ import (
func dataSourceExtnetDefaultRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceExtnetDefaultRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
extnetId, err := utilityExtnetDefaultCheckPresence(ctx, m) extnetId, err := utilityExtnetDefaultCheckPresence(ctx, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
d.SetId(id.String()) d.SetId(id.String())
d.Set("extnet_id", extnetId) d.Set("extnet_id", extnetId)
return nil return nil
} }
func dataSourceExtnetDefaultSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"extnet_id": {
Type: schema.TypeInt,
Computed: true,
},
}
}
func DataSourceExtnetDefaultCB() *schema.Resource { func DataSourceExtnetDefaultCB() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -45,6 +45,7 @@ import (
func dataSourceExtnetListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceExtnetListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
netList, err := utilityExtnetListCheckPresence(ctx, d, m) netList, err := utilityExtnetListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -56,189 +57,6 @@ func dataSourceExtnetListRead(ctx context.Context, d *schema.ResourceData, m int
return nil return nil
} }
func dataSourceExtnetListSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by account ID",
},
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by name",
},
"network": {
Type: schema.TypeString,
Optional: true,
},
"vlan_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by VLAN ID",
},
"vnfdev_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by VnfDEV ID",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"default": {
Type: schema.TypeBool,
Computed: true,
},
"default_qos": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"e_rate": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"in_burst": {
Type: schema.TypeInt,
Computed: true,
},
"in_rate": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"free_ips": {
Type: schema.TypeInt,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"extnet_id": {
Type: schema.TypeInt,
Computed: true,
},
"ipcidr": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"network_id": {
Type: schema.TypeInt,
Computed: true,
},
"ovs_bridge": {
Type: schema.TypeString,
Computed: true,
},
"pre_reservations_num": {
Type: schema.TypeInt,
Computed: true,
},
"pri_vnfdev_id": {
Type: schema.TypeInt,
Computed: true,
},
"shared_with": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"vlan_id": {
Type: schema.TypeInt,
Computed: true,
},
"vnfs": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dhcp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"check_ips": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
}
func DataSourceExtnetListCB() *schema.Resource { func DataSourceExtnetListCB() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -44,6 +44,7 @@ import (
func dataSourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
staticRoute, err := utilityDataStaticRouteCheckPresence(ctx, d, m) staticRoute, err := utilityDataStaticRouteCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -52,45 +53,6 @@ func dataSourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m in
return nil return nil
} }
func dataSourceStaticRouteSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"extnet_id": {
Type: schema.TypeInt,
Required: true,
Description: "Unique ID of the ExtNet",
},
"route_id": {
Type: schema.TypeInt,
Required: true,
Description: "Unique ID of the static route",
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"destination": {
Type: schema.TypeString,
Computed: true,
},
"gateway": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"netmask": {
Type: schema.TypeString,
Computed: true,
},
}
return rets
}
func DataSourceStaticRoute() *schema.Resource { func DataSourceStaticRoute() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -44,6 +44,7 @@ import (
func dataSourceStaticRouteListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceStaticRouteListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
staticRouteList, err := utilityStaticRouteListCheckPresence(ctx, d, m) staticRouteList, err := utilityStaticRouteListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -55,56 +56,6 @@ func dataSourceStaticRouteListRead(ctx context.Context, d *schema.ResourceData,
return nil return nil
} }
// dataSourceStaticRouteListSchemaMake describes the schema of the static
// route list data source: extnet_id selects the network, items holds one
// element per route and entry_count reports the total.
func dataSourceStaticRouteListSchemaMake() map[string]*schema.Schema {
	// Schema of a single element of "items".
	itemSchema := map[string]*schema.Schema{
		"compute_ids": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeInt,
			},
		},
		"destination": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"gateway": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"guid": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"netmask": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"route_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
	}

	return map[string]*schema.Schema{
		"extnet_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "ID of ExtNet",
		},
		"items": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: itemSchema,
			},
		},
		"entry_count": {
			Type:     schema.TypeInt,
			Computed: true,
		},
	}
}
func DataSourceStaticRouteList() *schema.Resource { func DataSourceStaticRouteList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -43,6 +43,7 @@ import (
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
) )
@ -130,6 +131,8 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa
d.Set("extnet_id", netID) d.Set("extnet_id", netID)
log.Debugf("cloudbroker: Extnet with id %d successfully created on platform", netID) log.Debugf("cloudbroker: Extnet with id %d successfully created on platform", netID)
var w dc.Warnings
if d.Get("excluded_ips").(*schema.Set).Len() > 0 { if d.Get("excluded_ips").(*schema.Set).Len() > 0 {
ips := make([]string, 0) ips := make([]string, 0)
@ -144,7 +147,36 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa
_, err := c.CloudBroker().ExtNet().IPsExclude(ctx, req) _, err := c.CloudBroker().ExtNet().IPsExclude(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) w.Add(err)
}
}
if d.Get("shared_with").(*schema.Set).Len() > 0 {
for _, id := range d.Get("shared_with").(*schema.Set).List() {
req := extnet.AccessRemoveRequest{
NetID: uint64(d.Get("extnet_id").(int)),
AccountID: uint64(id.(int)),
}
_, err := c.CloudBroker().ExtNet().AccessRemove(ctx, req)
if err != nil {
w.Add(err)
}
}
}
if d.Get("excluded_ips_range").(*schema.Set).Len() > 0 {
for _, ip := range d.Get("excluded_ips_range").(*schema.Set).List() {
req := extnet.IPsExcludeRangeRequest{
NetID: uint64(d.Get("extnet_id").(int)),
IPStart: ip.(map[string]interface{})["ip_start"].(string),
IPEnd: ip.(map[string]interface{})["ip_end"].(string),
}
_, err := c.CloudBroker().ExtNet().IPsExcludeRange(ctx, req)
if err != nil {
w.Add(err)
}
} }
} }
@ -155,7 +187,7 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa
}) })
if err != nil { if err != nil {
return diag.FromErr(err) w.Add(err)
} }
} }
@ -277,360 +309,6 @@ func resourceExtnetDelete(ctx context.Context, d *schema.ResourceData, m interfa
return nil return nil
} }
// resourceExtnetSchemaMake describes the schema of the cloudbroker external
// network resource. name, gid, ipcidr and vlan_id are required to create the
// network; the other Optional attributes tune it (some, like restart and
// migrate, trigger actions on update), and the Computed attributes mirror
// the platform record after creation.
func resourceExtnetSchemaMake() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"name": {
			Type:        schema.TypeString,
			Required:    true,
			Description: "External network name",
		},
		"gid": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "Grid (platform) ID",
		},
		"ipcidr": {
			Type:     schema.TypeString,
			Required: true,
			// ForceNew: true,
			Description: "IP network CIDR",
		},
		"vlan_id": {
			Type:     schema.TypeInt,
			Required: true,
			// ForceNew: true,
			Description: "VLAN ID",
		},
		"gateway": {
			Type:        schema.TypeString,
			Optional:    true,
			Computed:    true,
			Description: "External network gateway IP address",
		},
		"dns": {
			Type:     schema.TypeList,
			Optional: true,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
			Description: "List of DNS addresses",
		},
		"ntp": {
			Type:     schema.TypeList,
			Optional: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
			Description: "List of NTP addresses",
		},
		"check_ips": {
			Type:     schema.TypeList,
			Optional: true,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
			Description: "IPs to check network availability",
		},
		"virtual": {
			Type:        schema.TypeBool,
			Optional:    true,
			Description: "If true - platform DHCP server will not be created",
		},
		"desc": {
			Type:        schema.TypeString,
			Optional:    true,
			Computed:    true,
			Description: "Optional description",
		},
		"start_ip": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "Start of IP range to be explicitly included",
		},
		"end_ip": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "End of IP range to be explicitly included",
		},
		"vnfdev_ip": {
			Type:        schema.TypeString,
			Optional:    true,
			Description: "IP to create VNFDev with",
		},
		"pre_reservations_num": {
			Type:        schema.TypeInt,
			Optional:    true,
			Computed:    true,
			Description: "Number of pre created reservations",
		},
		"ovs_bridge": {
			Type:     schema.TypeString,
			Optional: true,
			Computed: true,
			// typo fix: was "OpenvSwith"
			Description: "OpenvSwitch bridge name for ExtNet connection",
		},
		"enable": {
			Type:        schema.TypeBool,
			Optional:    true,
			Default:     true,
			Description: "Disable/Enable extnet",
		},
		"set_default": {
			Type:        schema.TypeBool,
			Optional:    true,
			Default:     false,
			Description: "Set current extnet as default (can not be undone)",
		},
		"excluded_ips": {
			Type:     schema.TypeSet,
			Optional: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
			Description: "IPs to exclude in current extnet pool",
		},
		"excluded_ips_range": {
			Type:     schema.TypeSet,
			Optional: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"ip_start": {
						Type:     schema.TypeString,
						Required: true,
					},
					"ip_end": {
						Type:     schema.TypeString,
						Required: true,
					},
				},
			},
			Description: "Range of IPs to exclude in current extnet pool",
		},
		"default_qos": {
			Type:     schema.TypeList,
			MaxItems: 1,
			Optional: true,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"e_rate": {
						Type:     schema.TypeInt,
						Optional: true,
						Computed: true,
					},
					"guid": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"in_burst": {
						Type:     schema.TypeInt,
						Optional: true,
						Computed: true,
					},
					"in_rate": {
						Type:     schema.TypeInt,
						Optional: true,
						Computed: true,
					},
				},
			},
		},
		"restart": {
			Type:        schema.TypeBool,
			Optional:    true,
			Description: "restart extnet vnf device",
		},
		"migrate": {
			Type:     schema.TypeInt,
			Optional: true,
		},
		"ckey": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"meta": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
			Description: "meta",
		},
		"default": {
			Type:     schema.TypeBool,
			Computed: true,
		},
		"free_ips": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"guid": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"extnet_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"milestones": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"network_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"pri_vnfdev_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"shared_with": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeInt,
			},
		},
		"status": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"vnfs": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"dhcp": {
						Type:     schema.TypeInt,
						Computed: true,
					},
				},
			},
		},
		"excluded": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"client_type": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"domain_name": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"hostname": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"ip": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"mac": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"type": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"vm_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
				},
			},
		},
		"network": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"prefix": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"routes": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"compute_ids": {
						Type:     schema.TypeList,
						Computed: true,
						Elem: &schema.Schema{
							Type: schema.TypeInt,
						},
					},
					"destination": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"gateway": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"guid": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"netmask": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"route_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
				},
			},
		},
		"reservations": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"client_type": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"domain_name": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"hostname": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"desc": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"ip": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"mac": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"type": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"vm_id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
				},
			},
		},
	}
}
func ResourceExtnetCB() *schema.Resource { func ResourceExtnetCB() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -65,7 +65,7 @@ func resourceStaticRouteCreate(ctx context.Context, d *schema.ResourceData, m in
if computesIDS, ok := d.GetOk("compute_ids"); ok { if computesIDS, ok := d.GetOk("compute_ids"); ok {
ids := computesIDS.([]interface{}) ids := computesIDS.([]interface{})
res := make([]uint64, 0, len (ids)) res := make([]uint64, 0, len(ids))
for _, id := range ids { for _, id := range ids {
computeId := uint64(id.(int)) computeId := uint64(id.(int))
@ -82,6 +82,7 @@ func resourceStaticRouteCreate(ctx context.Context, d *schema.ResourceData, m in
staticRouteData, err := getStaticRouteData(ctx, d, m) staticRouteData, err := getStaticRouteData(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -91,7 +92,6 @@ func resourceStaticRouteCreate(ctx context.Context, d *schema.ResourceData, m in
} }
func resourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
warnings := dc.Warnings{}
staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m) staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m)
if err != nil { if err != nil {
@ -101,7 +101,7 @@ func resourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m inte
flattenStaticRouteData(d, staticRouteData) flattenStaticRouteData(d, staticRouteData)
return warnings.Get() return nil
} }
func resourceStaticRouteUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceStaticRouteUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -112,58 +112,9 @@ func resourceStaticRouteUpdate(ctx context.Context, d *schema.ResourceData, m in
return diag.FromErr(err) return diag.FromErr(err)
} }
staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
if d.HasChange("compute_ids") { if d.HasChange("compute_ids") {
deletedIds := make([]uint64, 0) if err := utilityStaticRouteComputeIDsUpdate(ctx, d, m); err != nil {
addedIds := make([]uint64, 0) warnings.Add(err)
oldComputeIds, newComputeIds := d.GetChange("compute_ids")
oldComputeIdsSlice := oldComputeIds.([]interface{})
newComputeIdsSlice := newComputeIds.([]interface{})
for _, el := range oldComputeIdsSlice {
if !isContainsIds(newComputeIdsSlice, el) {
convertedEl := uint64(el.(int))
deletedIds = append(deletedIds, convertedEl)
}
}
for _, el := range newComputeIdsSlice {
if !isContainsIds(oldComputeIdsSlice, el) {
convertedEl := uint64(el.(int))
addedIds = append(addedIds, convertedEl)
}
}
if len(deletedIds) > 0 {
req := extnet.StaticRouteAccessRevokeRequest{
ExtNetID: uint64(d.Get("extnet_id").(int)),
RouteId: staticRouteData.ID,
ComputeIds: deletedIds,
}
_, err := c.CloudBroker().ExtNet().StaticRouteAccessRevoke(ctx, req)
if err != nil {
warnings.Add(err)
}
}
if len(addedIds) > 0 {
req := extnet.StaticRouteAccessGrantRequest{
ExtNetID: uint64(d.Get("extnet_id").(int)),
RouteId: staticRouteData.ID,
ComputeIds: addedIds,
}
_, err := c.CloudBroker().ExtNet().StaticRouteAccessGrant(ctx, req)
if err != nil {
warnings.Add(err)
}
} }
} }
@ -192,47 +143,6 @@ func resourceStaticRouteDelete(ctx context.Context, d *schema.ResourceData, m in
return nil return nil
} }
// resourceStaticRouteSchemaMake derives the static route resource schema from
// the data source schema, overriding the attributes that become user-settable
// when the route is managed by terraform.
func resourceStaticRouteSchemaMake() map[string]*schema.Schema {
	s := dataSourceStaticRouteSchemaMake()

	// destination, gateway and netmask define the route and must be supplied.
	for _, attr := range []string{"destination", "gateway", "netmask"} {
		s[attr] = &schema.Schema{
			Type:     schema.TypeString,
			Required: true,
		}
	}

	// route_id is both Optional and Computed on the resource.
	s["route_id"] = &schema.Schema{
		Type:     schema.TypeInt,
		Computed: true,
		Optional: true,
	}

	// compute_ids may be set by the user and is also refreshed from the platform.
	s["compute_ids"] = &schema.Schema{
		Type:     schema.TypeList,
		Optional: true,
		Computed: true,
		Elem: &schema.Schema{
			Type: schema.TypeInt,
		},
	}

	return s
}
// isContainsIds reports whether the int value wrapped in el occurs in els.
// Both arguments are expected to carry int values (as terraform list state
// elements do); any other dynamic type panics on the assertion, matching the
// original behavior.
func isContainsIds(els []interface{}, el interface{}) bool {
	want := el.(int)
	for i := 0; i < len(els); i++ {
		if els[i].(int) == want {
			return true
		}
	}
	return false
}
func ResourceStaticRoute() *schema.Resource { func ResourceStaticRoute() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -253,32 +253,14 @@ func handleExcludedIPsRangeUpdate(ctx context.Context, d *schema.ResourceData, c
} }
func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) error { func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) error {
deletedIds := make([]uint64, 0) oldSet, newSet := d.GetChange("shared_with")
addedIds := make([]uint64, 0)
oldAccountIds, newAccountIds := d.GetChange("shared_with") deletedAccountIds := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
oldAccountIdsSlice := oldAccountIds.([]interface{}) if len(deletedAccountIds) > 0 {
newAccountIdsSlice := newAccountIds.([]interface{}) for _, accountIdInterface := range deletedAccountIds {
for _, el := range oldAccountIdsSlice {
if !isContainsIds(newAccountIdsSlice, el) {
convertedEl := uint64(el.(int))
deletedIds = append(deletedIds, convertedEl)
}
}
for _, el := range newAccountIdsSlice {
if !isContainsIds(oldAccountIdsSlice, el) {
convertedEl := uint64(el.(int))
addedIds = append(addedIds, convertedEl)
}
}
if len(deletedIds) > 0 {
for _, accountId := range deletedIds {
req := extnet.AccessRemoveRequest{ req := extnet.AccessRemoveRequest{
NetID: uint64(d.Get("extnet_id").(int)), NetID: uint64(d.Get("extnet_id").(int)),
AccountID: accountId, AccountID: uint64(accountIdInterface.(int)),
} }
_, err := c.CloudBroker().ExtNet().AccessRemove(ctx, req) _, err := c.CloudBroker().ExtNet().AccessRemove(ctx, req)
@ -288,11 +270,12 @@ func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *cont
} }
} }
if len(addedIds) > 0 { addedAccountIds := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
for _, accountId := range addedIds { if len(addedAccountIds) > 0 {
for _, accountIdInterface := range addedAccountIds {
req := extnet.AccessAddRequest{ req := extnet.AccessAddRequest{
NetID: uint64(d.Get("extnet_id").(int)), NetID: uint64(d.Get("extnet_id").(int)),
AccountID: accountId, AccountID: uint64(accountIdInterface.(int)),
} }
_, err := c.CloudBroker().ExtNet().AccessAdd(ctx, req) _, err := c.CloudBroker().ExtNet().AccessAdd(ctx, req)
@ -300,7 +283,6 @@ func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *cont
return err return err
} }
} }
} }
return nil return nil
@ -309,14 +291,14 @@ func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *cont
func handleVirtualUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { func handleVirtualUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error {
virtualOld, virtualNew := d.GetChange("virtual") virtualOld, virtualNew := d.GetChange("virtual")
if virtualOld == false && virtualNew == true { if !virtualOld.(bool) && virtualNew.(bool) {
req := extnet.DeviceRemoveRequest{NetID: recNet.ID} req := extnet.DeviceRemoveRequest{NetID: recNet.ID}
_, err := c.CloudBroker().ExtNet().DeviceRemove(ctx, req) _, err := c.CloudBroker().ExtNet().DeviceRemove(ctx, req)
if err != nil { if err != nil {
return err return err
} }
} else if virtualOld == true && virtualNew == false { } else if virtualOld.(bool) && !virtualNew.(bool) {
req := extnet.DeviceDeployRequest{NetID: recNet.ID} req := extnet.DeviceDeployRequest{NetID: recNet.ID}
_, err := c.CloudBroker().ExtNet().DeviceDeploy(ctx, req) _, err := c.CloudBroker().ExtNet().DeviceDeploy(ctx, req)
if err != nil { if err != nil {
@ -329,7 +311,7 @@ func handleVirtualUpdate(ctx context.Context, d *schema.ResourceData, c *control
func handleRestartUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error { func handleRestartUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error {
restartOld, restartNew := d.GetChange("restart") restartOld, restartNew := d.GetChange("restart")
if restartOld == false && restartNew == true { if !restartOld.(bool) && restartNew.(bool) {
req := extnet.DeviceRestartRequest{NetID: recNet.ID} req := extnet.DeviceRestartRequest{NetID: recNet.ID}
_, err := c.CloudBroker().ExtNet().DeviceRestart(ctx, req) _, err := c.CloudBroker().ExtNet().DeviceRestart(ctx, req)
if err != nil { if err != nil {

@ -42,7 +42,6 @@ import (
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
) )
@ -106,3 +105,54 @@ func getStaticRouteData(ctx context.Context, d *schema.ResourceData, m interface
return nil, fmt.Errorf("static route not found") return nil, fmt.Errorf("static route not found")
} }
// utilityStaticRouteComputeIDsUpdate reconciles the compute_ids attribute of
// a static route: computes present only in the old state are revoked access
// to the route, computes present only in the new state are granted it.
// On a missing route the resource ID is cleared and the lookup error returned.
func utilityStaticRouteComputeIDsUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)

	staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m)
	if err != nil {
		d.SetId("")
		return err
	}

	// Convert terraform set elements into the []uint64 the SDK requests expect.
	toUint64 := func(vals []interface{}) []uint64 {
		converted := make([]uint64, 0, len(vals))
		for _, v := range vals {
			converted = append(converted, uint64(v.(int)))
		}
		return converted
	}

	oldSet, newSet := d.GetChange("compute_ids")
	extNetID := uint64(d.Get("extnet_id").(int))

	removed := oldSet.(*schema.Set).Difference(newSet.(*schema.Set)).List()
	if len(removed) > 0 {
		req := extnet.StaticRouteAccessRevokeRequest{
			ExtNetID:   extNetID,
			RouteId:    staticRouteData.ID,
			ComputeIds: toUint64(removed),
		}
		if _, err := c.CloudBroker().ExtNet().StaticRouteAccessRevoke(ctx, req); err != nil {
			return err
		}
	}

	added := newSet.(*schema.Set).Difference(oldSet.(*schema.Set)).List()
	if len(added) > 0 {
		req := extnet.StaticRouteAccessGrantRequest{
			ExtNetID:   extNetID,
			RouteId:    staticRouteData.ID,
			ComputeIds: toUint64(added),
		}
		if _, err := c.CloudBroker().ExtNet().StaticRouteAccessGrant(ctx, req); err != nil {
			return err
		}
	}

	return nil
}

@ -56,153 +56,6 @@ func dataSourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m inte
return nil return nil
} }
// dataSourceFlipgroupSchemaMake describes the schema of the flipgroup data
// source: the group is looked up by flipgroup_id, and every other attribute
// is read-only, mirroring the platform record. Each attribute's Description
// equals its key, as in the original schema.
func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema {
	computedString := func(desc string) *schema.Schema {
		return &schema.Schema{
			Type:        schema.TypeString,
			Computed:    true,
			Description: desc,
		}
	}
	computedInt := func(desc string) *schema.Schema {
		return &schema.Schema{
			Type:        schema.TypeInt,
			Computed:    true,
			Description: desc,
		}
	}

	rets := map[string]*schema.Schema{
		"flipgroup_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "flipgroup_id",
		},
		"client_ids": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeInt,
			},
			Description: "client_ids",
		},
		"client_names": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
			Description: "client_names",
		},
	}

	// Read-only scalar attributes; the description matches the attribute name.
	for _, attr := range []string{
		"account_name", "client_type", "conn_type", "created_by", "default_gw",
		"deleted_by", "description", "ip", "name", "net_type", "network",
		"rg_name", "status", "updated_by",
	} {
		rets[attr] = computedString(attr)
	}
	for _, attr := range []string{
		"account_id", "conn_id", "created_time", "deleted_time", "gid", "guid",
		"milestones", "net_id", "rg_id", "updated_time",
	} {
		rets[attr] = computedInt(attr)
	}

	return rets
}
func DataSourceFlipgroup() *schema.Resource { func DataSourceFlipgroup() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -58,175 +58,6 @@ func dataSourceFlipgroupListRead(ctx context.Context, d *schema.ResourceData, m
return nil return nil
} }
// dataSourceFlipgroupItemSchemaMake describes the schema of a single element
// of the flipgroup list data source. All attributes are read-only; each
// attribute's Description equals its key (meta carries no description), as in
// the original schema.
func dataSourceFlipgroupItemSchemaMake() map[string]*schema.Schema {
	computedString := func(desc string) *schema.Schema {
		return &schema.Schema{
			Type:        schema.TypeString,
			Computed:    true,
			Description: desc,
		}
	}
	computedInt := func(desc string) *schema.Schema {
		return &schema.Schema{
			Type:        schema.TypeInt,
			Computed:    true,
			Description: desc,
		}
	}

	rets := map[string]*schema.Schema{
		"meta": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
		},
		"client_ids": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeInt,
			},
			Description: "client_ids",
		},
	}

	for _, attr := range []string{
		"ckey", "client_type", "conn_type", "default_gw", "description",
		"ip", "name", "net_type", "status",
	} {
		rets[attr] = computedString(attr)
	}
	for _, attr := range []string{
		"account_id", "conn_id", "flipgroup_id", "gid", "guid",
		"milestones", "net_id", "net_mask",
	} {
		rets[attr] = computedInt(attr)
	}

	return rets
}
// dataSourceFlipgroupsListSchemaMake describes the schema of the flipgroup
// list data source: the Optional attributes are server-side filters, items
// holds one element per matched flipgroup and entry_count reports the total.
func dataSourceFlipgroupsListSchemaMake() map[string]*schema.Schema {
	optionalString := func(desc string) *schema.Schema {
		return &schema.Schema{
			Type:        schema.TypeString,
			Optional:    true,
			Description: desc,
		}
	}
	optionalInt := func(desc string) *schema.Schema {
		return &schema.Schema{
			Type:        schema.TypeInt,
			Optional:    true,
			Description: desc,
		}
	}

	return map[string]*schema.Schema{
		"name":      optionalString("name"),
		"vins_name": optionalString("vins_name"),
		"by_ip":     optionalString("by_ip"),
		"vins_id":   optionalInt("vins_id"),
		"extnet_id": optionalInt("extnet_id"),
		"rg_id":     optionalInt("rg_id"),
		"by_id":     optionalInt("by_id"),
		"page":      optionalInt("Page number"),
		"size":      optionalInt("Page size"),
		"items": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: dataSourceFlipgroupItemSchemaMake(),
			},
		},
		"entry_count": {
			Type:        schema.TypeInt,
			Computed:    true,
			Description: "entry_count",
		},
	}
}
func DataSourceFlipgroupList() *schema.Resource { func DataSourceFlipgroupList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -36,35 +36,35 @@ package flipgroup
import ( import (
"context" "context"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins"
) )
func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
c := m.(*controller.ControllerCfg) var errs []error
accountId := uint64(d.Get("account_id").(int)) accountId := uint64(d.Get("account_id").(int))
req := account.ListRequest{} netType := d.Get("net_type").(string)
netId := uint64(d.Get("net_id").(int))
accountList, err := c.CloudBroker().Account().List(ctx, req) if err := ic.ExistAccount(ctx, accountId, c); err != nil {
if err != nil { errs = append(errs, err)
return false, err
} }
return len(accountList.FilterByID(accountId).Data) != 0, nil switch netType {
} case "VINS":
if err := ic.ExistVins(ctx, netId, c); err != nil {
func existNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { errs = append(errs, err)
c := m.(*controller.ControllerCfg) }
netID := uint64(d.Get("net_id").(int)) case "EXTNET":
req := vins.ListRequest {} if err := ic.ExistExtNet(ctx, netId, c); err != nil {
errs = append(errs, err)
vinsList, err := c.CloudBroker().VINS().List(ctx, req) }
if err != nil {
return false, err
} }
return len(vinsList.FilterByID(netID).Data) != 0, nil return dc.ErrorsToDiagnostics(errs)
} }

@ -37,14 +37,13 @@ package flipgroup
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/flipgroup" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -53,29 +52,18 @@ func resourceFlipgroupCreate(ctx context.Context, d *schema.ResourceData, m inte
log.Debugf("resourceFlipgroupCreate called with name: %s, accountID %v", d.Get("name").(string), d.Get("account_id").(int)) log.Debugf("resourceFlipgroupCreate called with name: %s, accountID %v", d.Get("name").(string), d.Get("account_id").(int))
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := flipgroup.CreateRequest{
Name: d.Get("name").(string),
NetType: d.Get("net_type").(string),
ClientType: d.Get("client_type").(string),
}
haveAccount, err := existAccountID(ctx, d, m) if diags := checkParamsExistence(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
}
if !haveAccount {
return diag.Errorf("resourceFlipgroupCreate: can't create Flipgroup because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
} }
req.AccountID = uint64(d.Get("account_id").(int))
haveVINS, err := existNetID(ctx, d, m) req := flipgroup.CreateRequest{
if err != nil { Name: d.Get("name").(string),
return diag.FromErr(err) NetType: d.Get("net_type").(string),
} ClientType: d.Get("client_type").(string),
if !haveVINS { AccountID: uint64(d.Get("account_id").(int)),
return diag.Errorf("resourceFlipgroupCreate: can't create Flipgroup because VinsID %d is not allowed or does not exist", d.Get("net_id").(int)) NetID: uint64(d.Get("net_id").(int)),
} }
req.NetID = uint64(d.Get("net_id").(int))
if IP, ok := d.GetOk("ip"); ok { if IP, ok := d.GetOk("ip"); ok {
req.IP = IP.(string) req.IP = IP.(string)
@ -86,10 +74,12 @@ func resourceFlipgroupCreate(ctx context.Context, d *schema.ResourceData, m inte
resp, err := c.CloudBroker().FLIPGroup().Create(ctx, req) resp, err := c.CloudBroker().FLIPGroup().Create(ctx, req)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(fmt.Sprint(resp.ID)) d.SetId(fmt.Sprint(resp.ID))
d.Set("flipgroup_id", resp.ID)
var warnings dc.Warnings var warnings dc.Warnings
@ -102,14 +92,26 @@ func resourceFlipgroupCreate(ctx context.Context, d *schema.ResourceData, m inte
} }
func resourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceFlipgroupRead: called for flipgroup_id %s, name %s",
d.Id(), d.Get("name").(string))
fg, err := utilityFlipgroupCheckPresence(ctx, d, m) fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
switch fg.Status {
case status.Destroyed:
d.SetId("")
return diag.Errorf("The flipgroup status is destroyed and cannot be read.")
}
flattenFlipgroup(d, fg) flattenFlipgroup(d, fg)
log.Debugf("resourceFlipgroupRead: after flattenFlipgroup: flipgroup_id %s, name %s",
d.Id(), d.Get("name").(string))
return nil return nil
} }
@ -117,19 +119,8 @@ func resourceFlipgroupUpdate(ctx context.Context, d *schema.ResourceData, m inte
log.Debugf("resourceFlipgroupUpdate called with id: %v", d.Get("flipgroup_id").(int)) log.Debugf("resourceFlipgroupUpdate called with id: %v", d.Get("flipgroup_id").(int))
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
haveAccount, err := existAccountID(ctx, d, m) if diags := checkParamsExistence(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
}
if !haveAccount {
return diag.Errorf("resourceFlipgroupUpdate: can't update Flipgroup because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
}
haveVINS, err := existNetID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveVINS {
return diag.Errorf("resourceFlipgroupUpdate: can't update Flipgroup because VinsID %d is not allowed or does not exist", d.Get("net_id").(int))
} }
fg, err := utilityFlipgroupCheckPresence(ctx, d, m) fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
@ -138,6 +129,12 @@ func resourceFlipgroupUpdate(ctx context.Context, d *schema.ResourceData, m inte
return diag.FromErr(err) return diag.FromErr(err)
} }
switch fg.Status {
case status.Destroyed:
d.SetId("")
return diag.Errorf("The flipgroup status is destroyed and cannot be updated.")
}
var warnings dc.Warnings var warnings dc.Warnings
basicUpdate := false basicUpdate := false
req := flipgroup.EditRequest{FLIPGroupID: fg.ID} req := flipgroup.EditRequest{FLIPGroupID: fg.ID}
@ -172,6 +169,7 @@ func resourceFlipgroupDelete(ctx context.Context, d *schema.ResourceData, m inte
fg, err := utilityFlipgroupCheckPresence(ctx, d, m) fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -179,163 +177,16 @@ func resourceFlipgroupDelete(ctx context.Context, d *schema.ResourceData, m inte
FLIPGroupID: fg.ID, FLIPGroupID: fg.ID,
} }
// When FLIPGroup().Delete() is executed, flipgroup automatically is removed from the compute it has been attached to, if any.
// No need to specifically call for FlipGroup().ComputeRemove().
_, err = c.CloudBroker().FLIPGroup().Delete(ctx, req) _, err = c.CloudBroker().FLIPGroup().Delete(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
return nil d.SetId("")
}
func resourceFlipgroupSchemaMake() map[string]*schema.Schema { return nil
return map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "Account ID",
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Flipgroup name",
},
"net_id": {
Type: schema.TypeInt,
Required: true,
Description: "EXTNET or ViNS ID",
},
"net_type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS"}, true),
Description: "Network type, EXTNET or VINS",
},
"client_type": {
Type: schema.TypeString,
Required: true,
Description: "Type of client, 'compute' ('vins' will be later)",
ValidateFunc: validation.StringInSlice([]string{"compute"}, true),
},
"ip": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "IP address to associate with this group. If empty, the platform will autoselect IP address",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Text description of this Flipgroup instance",
},
"client_ids": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of clients attached to this Flipgroup instance",
},
"client_names": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "client_names",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "account_name",
},
"flipgroup_id": {
Type: schema.TypeInt,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
},
"conn_type": {
Type: schema.TypeString,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
Description: "created_by",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "created_time",
},
"default_gw": {
Type: schema.TypeString,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
Description: "deleted_by",
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
Description: "deleted_time",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"network": {
Type: schema.TypeString,
Computed: true,
Description: "network",
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
Description: "rg_id",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "rg_name",
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
Description: "updated_by",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "updated_time",
},
"net_mask": {
Type: schema.TypeInt,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
}
} }
func ResourceFlipgroup() *schema.Resource { func ResourceFlipgroup() *schema.Resource {

@ -46,6 +46,7 @@ import (
) )
func utilityFlipgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*flipgroup.RecordFLIPGroup, error) { func utilityFlipgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*flipgroup.RecordFLIPGroup, error) {
log.Debugf("utilityFlipgroupCheckPresence")
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := flipgroup.GetRequest{} req := flipgroup.GetRequest{}
@ -56,7 +57,6 @@ func utilityFlipgroupCheckPresence(ctx context.Context, d *schema.ResourceData,
req.FLIPGroupID = uint64(d.Get("flipgroup_id").(int)) req.FLIPGroupID = uint64(d.Get("flipgroup_id").(int))
} }
log.Debugf("utilityDiskCheckPresence: load disk")
flipgroup, err := c.CloudBroker().FLIPGroup().Get(ctx, req) flipgroup, err := c.CloudBroker().FLIPGroup().Get(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err

@ -43,6 +43,7 @@ import (
func dataSourceGridRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceGridRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
grid, err := utilityGridCheckPresence(ctx, d, m) grid, err := utilityGridCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.FormatUint(grid.ID, 10)) d.SetId(strconv.FormatUint(grid.ID, 10))
@ -51,39 +52,6 @@ func dataSourceGridRead(ctx context.Context, d *schema.ResourceData, m interface
return nil return nil
} }
func dataSourceGetGridSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"grid_id": {
Type: schema.TypeInt,
Required: true,
},
"flag": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"location_code": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
}
}
func DataSourceGrid() *schema.Resource { func DataSourceGrid() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -34,6 +34,7 @@ package grid
import ( import (
"context" "context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@ -43,26 +44,14 @@ import (
func dataSourceGridGetDiagnosisRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceGridGetDiagnosisRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diagnosis, err := utilityGridGetDiagnosisCheckPresence(ctx, d, m) diagnosis, err := utilityGridGetDiagnosisCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(d.Id()) d.SetId(strconv.Itoa(d.Get("gid").(int)))
d.Set("diagnosis", diagnosis) d.Set("diagnosis", diagnosis)
return nil return nil
} }
func dataSourceGridGetDiagnosisSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"gid": {
Type: schema.TypeInt,
Required: true,
},
"diagnosis": {
Type: schema.TypeString,
Computed: true,
},
}
}
func DataSourceGridGetDiagnosis() *schema.Resource { func DataSourceGridGetDiagnosis() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,
@ -81,26 +70,14 @@ func DataSourceGridGetDiagnosis() *schema.Resource {
func dataSourceGridPostDiagnosisRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceGridPostDiagnosisRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diagnosis, err := utilityGridPostDiagnosisCheckPresence(ctx, d, m) diagnosis, err := utilityGridPostDiagnosisCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(d.Id()) d.SetId(strconv.Itoa(d.Get("gid").(int)))
d.Set("diagnosis", diagnosis) d.Set("diagnosis", diagnosis)
return nil return nil
} }
func dataSourceGridPostDiagnosisSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"gid": {
Type: schema.TypeInt,
Required: true,
},
"diagnosis": {
Type: schema.TypeString,
Computed: true,
},
}
}
func DataSourceGridPostDiagnosis() *schema.Resource { func DataSourceGridPostDiagnosis() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -44,6 +44,7 @@ import (
func dataSourceGridGetStatusRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceGridGetStatusRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
grid, err := utilityGridGetStatusCheckPresence(ctx, d, m) grid, err := utilityGridGetStatusCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
@ -52,15 +53,6 @@ func dataSourceGridGetStatusRead(ctx context.Context, d *schema.ResourceData, m
return nil return nil
} }
func dataSourceGridGetStatusSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"status": {
Type: schema.TypeBool,
Computed: true,
},
}
}
func DataSourceGridGetStatus() *schema.Resource { func DataSourceGridGetStatus() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,
@ -79,6 +71,7 @@ func DataSourceGridGetStatus() *schema.Resource {
func dataSourceGridPostStatusRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceGridPostStatusRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
grid, err := utilityGridPostStatusCheckPresence(ctx, d, m) grid, err := utilityGridPostStatusCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
@ -87,15 +80,6 @@ func dataSourceGridPostStatusRead(ctx context.Context, d *schema.ResourceData, m
return nil return nil
} }
func dataSourceGridPostStatusSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"status": {
Type: schema.TypeBool,
Computed: true,
},
}
}
func DataSourceGridPostStatus() *schema.Resource { func DataSourceGridPostStatus() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -43,6 +43,7 @@ import (
func dataSourceGridListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceGridListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridList, err := utilityGridListCheckPresence(ctx, d, m) gridList, err := utilityGridListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
@ -52,203 +53,6 @@ func dataSourceGridListRead(ctx context.Context, d *schema.ResourceData, m inter
return nil return nil
} }
func dataSourceGridListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "by id",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "name",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "grid list",
Elem: &schema.Resource{
Schema: dataSourceGridSchemaMake(),
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry count",
},
}
return rets
}
func dataSourceGridSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"resources": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"current": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
},
},
},
"flag": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"location_code": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
}
}
func DataSourceGridList() *schema.Resource { func DataSourceGridList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -44,6 +44,7 @@ import (
func dataSourceGridListEmailsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceGridListEmailsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridListEmails, err := utilityGridListEmailsCheckPresence(ctx, d, m) gridListEmails, err := utilityGridListEmailsCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
@ -53,36 +54,6 @@ func dataSourceGridListEmailsRead(ctx context.Context, d *schema.ResourceData, m
return nil return nil
} }
func dataSourceGridListEmailsSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "grid list emails",
Elem: &schema.Schema {
Type: schema.TypeString,
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry count",
},
}
return rets
}
func DataSourceGridListEmails() *schema.Resource { func DataSourceGridListEmails() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -44,143 +44,15 @@ import (
func dataSourceGridGetConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceGridGetConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridGetConsumption, err := utilityGridGetConsumptionCheckPresence(ctx, d, m) gridGetConsumption, err := utilityGridGetConsumptionCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.FormatUint(gridGetConsumption.GID, 10)) d.SetId(strconv.Itoa(d.Get("grid_id").(int)))
d.Set("consumed", flattenGridRecordResource(gridGetConsumption.Consumed)) d.Set("consumed", flattenGridRecordResource(gridGetConsumption.Consumed))
d.Set("reserved", flattenGridRecordResource(gridGetConsumption.Reserved)) d.Set("reserved", flattenGridRecordResource(gridGetConsumption.Reserved))
return nil return nil
} }
func dataSourceGridGetConsumptionSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"grid_id": {
Type: schema.TypeInt,
Required: true,
},
"consumed": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
}
}
func DataSourceGridGetConsumption() *schema.Resource { func DataSourceGridGetConsumption() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -44,6 +44,7 @@ import (
func dataSourceGridListConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceGridListConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridListConsumption, err := utilityGridListConsumptionCheckPresence(ctx, d, m) gridListConsumption, err := utilityGridListConsumptionCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
@ -53,154 +54,6 @@ func dataSourceGridListConsumptionRead(ctx context.Context, d *schema.ResourceDa
return nil return nil
} }
func dataSourceGridListConsumptionSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Description: "grid list consumption",
Elem: &schema.Resource{
Schema: dataSourceGridConsumptionSchemaMake(),
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry count",
},
}
return rets
}
func dataSourceGridConsumptionSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"consumed": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
}
}
func DataSourceGridListConsumption() *schema.Resource { func DataSourceGridListConsumption() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -3,9 +3,11 @@ package grid
import ( import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
) )
func flattenGrid(d *schema.ResourceData, grid *grid.RecordGrid) { func flattenGrid(d *schema.ResourceData, grid *grid.RecordGrid) {
d.Set("auth_broker", flattens.FlattenMeta(grid.AuthBroker))
d.Set("name", grid.Name) d.Set("name", grid.Name)
d.Set("flag", grid.Flag) d.Set("flag", grid.Flag)
d.Set("gid", grid.GID) d.Set("gid", grid.GID)
@ -20,6 +22,7 @@ func flattenGridList(gl *grid.ListGrids) []map[string]interface{} {
temp := map[string]interface{}{ temp := map[string]interface{}{
"resources": flattenGridResources(item.Resources), "resources": flattenGridResources(item.Resources),
"name": item.Name, "name": item.Name,
"auth_broker": flattens.FlattenMeta(item.AuthBroker),
"flag": item.Flag, "flag": item.Flag,
"gid": item.GID, "gid": item.GID,
"guid": item.GUID, "guid": item.GUID,
@ -37,7 +40,7 @@ func flattenGridListConsumption(gl *grid.ListResourceConsumption) []map[string]i
temp := map[string]interface{}{ temp := map[string]interface{}{
"consumed": flattenGridRecordResource(item.Consumed), "consumed": flattenGridRecordResource(item.Consumed),
"reserved": flattenGridRecordResource(item.Reserved), "reserved": flattenGridRecordResource(item.Reserved),
"id": item.GID, "id": item.GID,
} }
res = append(res, temp) res = append(res, temp)
} }

@ -54,10 +54,10 @@ func utilityGridCheckPresence(ctx context.Context, d *schema.ResourceData, m int
} }
log.Debugf("utilityGridCheckPresence: load grid") log.Debugf("utilityGridCheckPresence: load grid")
grid, err := c.CloudBroker().Grid().Get(ctx, req) gridRec, err := c.CloudBroker().Grid().Get(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return grid, nil return gridRec, nil
} }

@ -54,7 +54,7 @@ func utilityGridGetDiagnosisCheckPresence(ctx context.Context, d *schema.Resourc
req.GID = uint64(d.Get("gid").(int)) req.GID = uint64(d.Get("gid").(int))
} }
log.Debugf("utilityGridListConsumptionCheckPresence: load grid list consumption") log.Debugf("utilityGridGetDiagnosisCheckPresence: load grid get diagnosis")
gridGetDiagnosis, err := c.CloudBroker().Grid().GetDiagnosisGET(ctx, req) gridGetDiagnosis, err := c.CloudBroker().Grid().GetDiagnosisGET(ctx, req)
if err != nil { if err != nil {
return "", err return "", err
@ -74,7 +74,7 @@ func utilityGridPostDiagnosisCheckPresence(ctx context.Context, d *schema.Resour
req.GID = uint64(d.Get("gid").(int)) req.GID = uint64(d.Get("gid").(int))
} }
log.Debugf("utilityGridListConsumptionCheckPresence: load grid list consumption") log.Debugf("utilityGridPostDiagnosisCheckPresence: load grid post diagnosis")
gridPostDiagnosis, err := c.CloudBroker().Grid().GetDiagnosis(ctx, req) gridPostDiagnosis, err := c.CloudBroker().Grid().GetDiagnosis(ctx, req)
if err != nil { if err != nil {
return "", err return "", err

@ -5,9 +5,14 @@ import (
"context" "context"
"fmt" "fmt"
cb_account "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
cb_compute "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
cb_extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" cb_extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
cb_gid "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" cb_gid "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
cb_image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" cb_image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
cb_k8ci "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8ci"
cb_k8s "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
cb_lb "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
cb_rg "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" cb_rg "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
cb_stack "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack" cb_stack "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack"
cb_vins "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" cb_vins "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins"
@ -49,6 +54,24 @@ func ExistImage(ctx context.Context, imageId uint64, c *controller.ControllerCfg
return nil return nil
} }
// ExistVins checks that a non-deleted ViNS with the given ID is visible to the
// caller. It returns nil when the ViNS is found and an error otherwise (either
// the transport/API error from the List call, or a "not found" error).
func ExistVins(ctx context.Context, vinsId uint64, c *controller.ControllerCfg) error {
	req := cb_vins.ListRequest{
		ByID: vinsId,
		// Deleted ViNS must not satisfy the existence check.
		IncludeDeleted: false,
	}

	vinsList, err := c.CloudBroker().VINS().List(ctx, req)
	if err != nil {
		return err
	}
	// Fixed for consistency with ExistVinsInLb/ExistVinsInK8s, which report
	// the entity as "VINS" (the original said lowercase "vins").
	if len(vinsList.Data) == 0 {
		return fmt.Errorf("VINS with ID %v not found", vinsId)
	}

	return nil
}
func ExistVinses(ctx context.Context, vinsIds []uint64, c *controller.ControllerCfg) []error { func ExistVinses(ctx context.Context, vinsIds []uint64, c *controller.ControllerCfg) []error {
var errs []error var errs []error
@ -109,10 +132,69 @@ func ExistExtNets(ctx context.Context, extNetIds []uint64, c *controller.Control
return errs return errs
} }
// ExistExtNetInLb verifies that the external network referenced by a load
// balancer exists. An ID of 0 is the "not set" sentinel and is accepted
// without querying the platform.
func ExistExtNetInLb(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error {
	// Nothing to validate when no extnet is attached.
	if extNetId == 0 {
		return nil
	}

	listReq := cb_extnet.ListRequest{ByID: extNetId}
	res, err := c.CloudBroker().ExtNet().List(ctx, listReq)
	if err != nil {
		return err
	}
	if len(res.Data) == 0 {
		return fmt.Errorf("EXTNET with ID %v not found", extNetId)
	}

	return nil
}
// ExistExtNetInRG verifies that the external network with the given ID is
// available to the given account. Returns nil on success, otherwise the API
// error or a "not found" error naming both IDs.
func ExistExtNetInRG(ctx context.Context, extNetId, accountId uint64, c *controller.ControllerCfg) error {
	listReq := cb_extnet.ListRequest{
		AccountID: accountId,
		ByID:      extNetId,
	}

	res, err := c.CloudBroker().ExtNet().List(ctx, listReq)
	if err != nil {
		return err
	}
	if len(res.Data) == 0 {
		return fmt.Errorf("EXTNET with ID %v not found for account with id %d", extNetId, accountId)
	}

	return nil
}
// ExistExtNetInVins verifies the external network a ViNS points at. The values
// 0 and -1 are treated as "no extnet" sentinels and pass without a lookup.
func ExistExtNetInVins(ctx context.Context, extNetId int, c *controller.ControllerCfg) error {
	// Both sentinels mean the ViNS has no external network attached.
	if extNetId == 0 || extNetId == -1 {
		return nil
	}

	listReq := cb_extnet.ListRequest{ByID: uint64(extNetId)}
	res, err := c.CloudBroker().ExtNet().List(ctx, listReq)
	if err != nil {
		return err
	}
	if len(res.Data) == 0 {
		return fmt.Errorf("EXTNET with ID %v not found", extNetId)
	}

	return nil
}
func ExistExtNet(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error { func ExistExtNet(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error {
req := cb_extnet.ListRequest{ req := cb_extnet.ListRequest{
ByID: extNetId, ByID: extNetId,
Status: "Enabled",
} }
extNetList, err := c.CloudBroker().ExtNet().List(ctx, req) extNetList, err := c.CloudBroker().ExtNet().List(ctx, req)
@ -127,6 +209,27 @@ func ExistExtNet(ctx context.Context, extNetId uint64, c *controller.ControllerC
return nil return nil
} }
// ExistVinsInLb verifies that the ViNS referenced by a load balancer exists.
// An ID of 0 is the "not set" sentinel and is accepted without a lookup.
func ExistVinsInLb(ctx context.Context, vinsId uint64, c *controller.ControllerCfg) error {
	// Nothing to validate when no ViNS is attached.
	if vinsId == 0 {
		return nil
	}

	listReq := cb_vins.ListRequest{ByID: vinsId}
	res, err := c.CloudBroker().VINS().List(ctx, listReq)
	if err != nil {
		return err
	}
	if len(res.Data) == 0 {
		return fmt.Errorf("VINS with ID %v not found", vinsId)
	}

	return nil
}
func ExistGID(ctx context.Context, gid uint64, c *controller.ControllerCfg) error { func ExistGID(ctx context.Context, gid uint64, c *controller.ControllerCfg) error {
req := cb_gid.ListRequest{} req := cb_gid.ListRequest{}
@ -160,3 +263,144 @@ func ExistStack(ctx context.Context, stackId uint64, c *controller.ControllerCfg
return nil return nil
} }
// ExistStackInPcidevice reports whether at least one compute placed on the
// stack with ID stackId belongs to the non-deleted resource group rgId.
// It returns nil as soon as such a compute is found, the first API error
// encountered, or a "not found" error when no compute matches.
func ExistStackInPcidevice(ctx context.Context, stackId, rgId uint64, c *controller.ControllerCfg) error {
	rgReq := cb_rg.ListRequest{
		ByID:           rgId,
		IncludeDeleted: false,
	}
	rgList, err := c.CloudBroker().RG().List(ctx, rgReq)
	if err != nil {
		return err
	}

	// Inspect every VM of every matched RG and compare its stack placement.
	// NOTE(review): this performs one Compute().Get call per VM (N+1 pattern);
	// fine for small RGs — confirm before relying on it for large ones.
	for _, rgItem := range rgList.Data {
		for _, vmID := range rgItem.VMs {
			computeReq := cb_compute.GetRequest{ComputeID: vmID}
			computeRec, err := c.CloudBroker().Compute().Get(ctx, computeReq)
			if err != nil {
				return err
			}
			if computeRec.StackID == stackId {
				// Found a compute on the requested stack: the pair is valid.
				return nil
			}
		}
	}

	return fmt.Errorf("no compute found with stack_id %v and rg_id %v", stackId, rgId)
}
// ExistLB verifies that a load balancer with the given ID exists; it returns
// nil when found, the API error from the List call, or a "not found" error.
func ExistLB(ctx context.Context, lbId uint64, c *controller.ControllerCfg) error {
	listReq := cb_lb.ListRequest{ByID: lbId}

	res, err := c.CloudBroker().LB().List(ctx, listReq)
	if err != nil {
		return err
	}
	if len(res.Data) == 0 {
		return fmt.Errorf("LB with ID %v not found", lbId)
	}

	return nil
}
// ExistAccount returns nil when an account with the given ID exists,
// or a descriptive error otherwise.
func ExistAccount(ctx context.Context, accountId uint64, c *controller.ControllerCfg) error {
	resp, err := c.CloudBroker().Account().List(ctx, cb_account.ListRequest{ByID: accountId})
	if err != nil {
		return err
	}

	if len(resp.Data) == 0 {
		return fmt.Errorf("account with id %d not found", accountId)
	}

	return nil
}
// ExistK8CI returns nil when a k8ci (Kubernetes catalog item) with the given
// ID exists, or a descriptive error otherwise.
func ExistK8CI(ctx context.Context, k8ciId uint64, c *controller.ControllerCfg) error {
	resp, err := c.CloudBroker().K8CI().List(ctx, cb_k8ci.ListRequest{ByID: k8ciId})
	if err != nil {
		return err
	}

	if len(resp.Data) == 0 {
		return fmt.Errorf("k8ci with id %d not found", k8ciId)
	}

	return nil
}
// ExistExtNetInK8s returns nil when the external network with the given ID is
// present on the platform. A zero ID means "not set" and is accepted as-is.
func ExistExtNetInK8s(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error {
	if extNetId == 0 {
		return nil
	}

	resp, err := c.CloudBroker().ExtNet().List(ctx, cb_extnet.ListRequest{ByID: extNetId})
	if err != nil {
		return err
	}

	if len(resp.Data) == 0 {
		return fmt.Errorf("EXTNET with ID %v not found", extNetId)
	}

	return nil
}
// ExistVinsInK8s returns nil when the ViNS with the given ID is present on the
// platform. A zero ID means "not set" and is accepted without a lookup.
func ExistVinsInK8s(ctx context.Context, vinsId uint64, c *controller.ControllerCfg) error {
	if vinsId == 0 {
		return nil
	}

	listReq := cb_vins.ListRequest{ByID: vinsId}

	resp, err := c.CloudBroker().VINS().List(ctx, listReq)
	if err != nil {
		return err
	}

	if len(resp.Data) == 0 {
		return fmt.Errorf("VINS with ID %v not found", vinsId)
	}

	return nil
}
// ExistK8s returns nil when a k8s cluster with the given ID exists,
// or a descriptive error otherwise.
func ExistK8s(ctx context.Context, k8sId uint64, c *controller.ControllerCfg) error {
	resp, err := c.CloudBroker().K8S().List(ctx, cb_k8s.ListRequest{ByID: k8sId})
	if err != nil {
		return err
	}

	if len(resp.Data) == 0 {
		return fmt.Errorf("k8s with id %d not found", k8sId)
	}

	return nil
}

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -43,227 +43,14 @@ import (
func dataSourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
image, err := utilityImageCheckPresence(ctx, d, m) image, err := utilityImageCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.FormatUint(image.ID, 10)) d.SetId(strconv.FormatUint(image.ID, 10))
flattenImage(d, image) flattenImage(d, image)
return nil return nil
} }
// dataSourceImageSchemaMake returns the schema of the cloudbroker image data
// source: "image_id" is the only required argument, everything else is
// computed from the platform record.
//
// Fixes in the user-facing descriptions: "tech atatus" -> "tech status",
// "X86_64 of PPC64_LE" -> "X86_64 or PPC64_LE", "create in" -> "created in",
// and the copy-pasted "rescue disk" text on the image name field.
func dataSourceImageSchemaMake() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"name": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Name of the image",
		},
		"url": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "URL where to download media from",
		},
		"gid": {
			Type:        schema.TypeInt,
			Computed:    true,
			Description: "grid (platform) ID where this template should be created in",
		},
		"boot_type": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Boot type of image bios or uefi",
		},
		"image_type": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Image type linux, windows or other",
		},
		"shared_with": {
			Type:     schema.TypeList,
			Optional: true,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeInt,
			},
		},
		"history": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"guid": {
						Type:     schema.TypeString,
						Computed: true,
					},
					"id": {
						Type:     schema.TypeInt,
						Computed: true,
					},
					"timestamp": {
						Type:     schema.TypeInt,
						Computed: true,
					},
				},
			},
		},
		"drivers": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
			Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
		},
		"meta": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
			Description: "meta",
		},
		"hot_resize": {
			Type:        schema.TypeBool,
			Computed:    true,
			Description: "Does this machine supports hot resize",
		},
		"username": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Optional username for the image",
		},
		"password": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "Optional password for the image",
		},
		"account_id": {
			Type:        schema.TypeInt,
			Computed:    true,
			Description: "AccountId to make the image exclusive",
		},
		"username_dl": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "username for upload binary media",
		},
		"password_dl": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "password for upload binary media",
		},
		"sep_id": {
			Type:        schema.TypeInt,
			Computed:    true,
			Description: "storage endpoint provider ID",
		},
		"pool_name": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "pool for image create",
		},
		"architecture": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "binary architecture of this image, one of X86_64 or PPC64_LE",
		},
		"image_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "image id",
		},
		"permanently": {
			Type:        schema.TypeBool,
			Computed:    true,
			Description: "Whether to completely delete the image",
		},
		"bootable": {
			Type:        schema.TypeBool,
			Computed:    true,
			Description: "Does this image boot OS",
		},
		"unc_path": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "unc path",
		},
		"link_to": {
			Type:        schema.TypeInt,
			Computed:    true,
			Description: "",
		},
		"status": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "status",
		},
		"tech_status": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "tech status",
		},
		"version": {
			Type:        schema.TypeString,
			Computed:    true,
			Description: "version",
		},
		"size": {
			Type:        schema.TypeInt,
			Computed:    true,
			Description: "image size",
		},
		"enabled": {
			Type:     schema.TypeBool,
			Computed: true,
		},
		"computeci_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"guid": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"milestones": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"provider_name": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"purge_attempts": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"reference_id": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"res_id": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"res_name": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"rescuecd": {
			Type:     schema.TypeBool,
			Computed: true,
		},
		"last_modified": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"desc": {
			Type:     schema.TypeString,
			Computed: true,
		},
	}
}
func DataSourceImage() *schema.Resource { func DataSourceImage() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -43,45 +43,16 @@ import (
func dataSourceImageListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceImageListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
imageList, err := utilityImageListCheckPresence(ctx, d, m) imageList, err := utilityImageListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
d.SetId(id.String()) d.SetId(id.String())
d.Set("items", flattenImageList(imageList)) d.Set("items", flattenImageList(imageList))
d.Set("entry_count", imageList.EntryCount)
return nil return nil
} }
// dataSourceImageListSchemaMake describes the filter arguments and the
// computed "items" attribute of the cloudbroker image list data source.
func dataSourceImageListSchemaMake() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"sep_id": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "filter images by storage endpoint provider ID",
		},
		"page": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "page number",
		},
		"size": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "page size",
		},
		"items": {
			Type:        schema.TypeList,
			Computed:    true,
			Description: "image list",
			Elem: &schema.Resource{
				Schema: dataSourceImageSchemaMake(),
			},
		},
	}
}
func DataSourceImageList() *schema.Resource { func DataSourceImageList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -43,114 +43,16 @@ import (
func dataSourceImageListStacksRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceImageListStacksRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
imageListStacks, err := utilityImageListStacksCheckPresence(ctx, d, m) imageListStacks, err := utilityImageListStacksCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
d.SetId(id.String()) d.SetId(id.String())
d.Set("items", flattenImageListStacks(d, imageListStacks)) d.Set("items", flattenImageListStacks(imageListStacks))
d.Set("entry_count", imageListStacks.EntryCount)
return nil return nil
} }
// dataSourceImageListStackSchemaMake describes a single stack entry as it
// appears inside the "items" attribute of the image list stacks data source.
// All attributes are computed (read-only) platform values.
func dataSourceImageListStackSchemaMake() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"api_url": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"api_key": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"app_id": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"desc": {
			Type:     schema.TypeString,
			Computed: true,
		},
		// List of compute driver types supported by the stack.
		"drivers": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
		},
		"error": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"guid": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		// IDs of the images present on the stack.
		"images": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeInt,
			},
		},
		"login": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"name": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"passwd": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"reference_id": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"status": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"type": {
			Type:     schema.TypeString,
			Computed: true,
		},
	}
}
// dataSourceImageListStacksSchemaMake describes the arguments and the
// computed "items" attribute of the image list stacks data source.
func dataSourceImageListStacksSchemaMake() map[string]*schema.Schema {
	s := map[string]*schema.Schema{
		"image_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "image id",
		},
		"page": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "page number",
		},
		"size": {
			Type:        schema.TypeInt,
			Optional:    true,
			Description: "page size",
		},
	}

	// Each item follows the per-stack schema.
	s["items"] = &schema.Schema{
		Type:     schema.TypeList,
		Computed: true,
		Elem: &schema.Resource{
			Schema: dataSourceImageListStackSchemaMake(),
		},
		Description: "items of stacks list",
	}

	return s
}
func DataSourceImageListStacks() *schema.Resource { func DataSourceImageListStacks() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -4,154 +4,269 @@ import (
"strconv" "strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
) )
func flattenImage(d *schema.ResourceData, img *image.RecordImage) { func flattenImage(d *schema.ResourceData, img *image.RecordImage) {
d.Set("name", img.Name) log.Debugf("flattenImageID %d", img.ID)
d.Set("drivers", img.Drivers)
d.Set("url", img.URL)
d.Set("gid", img.GID)
d.Set("image_id", img.ID) d.Set("image_id", img.ID)
d.Set("unc_path", img.UNCPath)
d.Set("ckey", img.CKey)
d.Set("meta", flattens.FlattenMeta(img.Meta))
d.Set("account_id", img.AccountID)
d.Set("acl", flattenAcl(img.ACL))
d.Set("architecture", img.Architecture)
d.Set("boot_type", img.BootType) d.Set("boot_type", img.BootType)
d.Set("image_type", img.Type)
d.Set("bootable", img.Bootable) d.Set("bootable", img.Bootable)
d.Set("sep_id", img.SEPID)
d.Set("unc_path", img.UNCPath)
d.Set("link_to", img.LinkTo)
d.Set("status", img.Status)
d.Set("tech_status", img.TechStatus)
d.Set("version", img.Version)
d.Set("size", img.Size)
d.Set("enabled", img.Enabled)
d.Set("computeci_id", img.ComputeCIID) d.Set("computeci_id", img.ComputeCIID)
d.Set("pool_name", img.Pool) d.Set("deleted_time", img.DeletedTime)
d.Set("username", img.Username) d.Set("desc", img.Description)
// d.Set("username_dl", img.UsernameDL) d.Set("drivers", img.Drivers)
d.Set("password", img.Password) d.Set("enabled", img.Enabled)
// d.Set("password_dl", img.PasswordDL) d.Set("gid", img.GID)
d.Set("account_id", img.AccountID)
d.Set("guid", img.GUID) d.Set("guid", img.GUID)
d.Set("history", flattenHistory(img.History))
d.Set("hot_resize", img.HotResize)
d.Set("last_modified", img.LastModified)
d.Set("link_to", img.LinkTo)
d.Set("milestones", img.Milestones) d.Set("milestones", img.Milestones)
d.Set("name", img.Name)
d.Set("password", img.Password)
d.Set("pool_name", img.Pool)
d.Set("present_to", img.PresentTo)
d.Set("provider_name", img.ProviderName) d.Set("provider_name", img.ProviderName)
d.Set("purge_attempts", img.PurgeAttempts) d.Set("purge_attempts", img.PurgeAttempts)
d.Set("reference_id", img.ReferenceID) d.Set("reference_id", img.ReferenceID)
d.Set("res_id", img.ResID) d.Set("res_id", img.ResID)
d.Set("res_name", img.ResName) d.Set("res_name", img.ResName)
d.Set("rescuecd", img.RescueCD) d.Set("rescuecd", img.RescueCD)
d.Set("architecture", img.Architecture) d.Set("sep_id", img.SEPID)
d.Set("hot_resize", img.HotResize)
d.Set("history", flattenHistory(img.History))
d.Set("last_modified", img.LastModified)
d.Set("meta", flattens.FlattenMeta(img.Meta))
d.Set("desc", img.Description)
d.Set("shared_with", img.SharedWith) d.Set("shared_with", img.SharedWith)
d.Set("size", img.Size)
d.Set("status", img.Status)
d.Set("tech_status", img.TechStatus)
d.Set("image_type", img.Type)
d.Set("url", img.URL)
d.Set("username", img.Username)
d.Set("version", img.Version)
} }
func flattenMeta(m []interface{}) []string { func flattenAcl(acl image.ListACL) []map[string]interface{} {
output := []string{} res := make([]map[string]interface{}, 0, len(acl))
for _, item := range m { for _, val := range acl {
switch d := item.(type) { temp := map[string]interface{}{
case string: "explicit": val.Explicit,
output = append(output, d) "guid": val.GUID,
case int: "right": val.Right,
output = append(output, strconv.Itoa(d)) "status": val.Status,
case int64: "type": val.Type,
output = append(output, strconv.FormatInt(d, 10)) "user_group_id": val.UserGroupID,
case float64:
output = append(output, strconv.FormatInt(int64(d), 10))
default:
output = append(output, "")
} }
res = append(res, temp)
} }
return output return res
} }
func flattenHistory(history []image.History) []map[string]interface{} { func flattenHistory(history image.ListHistory) []map[string]interface{} {
temp := make([]map[string]interface{}, 0) temp := make([]map[string]interface{}, 0, len(history))
for _, item := range history { for _, item := range history {
t := map[string]interface{}{ t := map[string]interface{}{
"id": item.ID, "id": item.ID,
"guid": item.GUID, "guid": item.GUID,
"timestamp": item.Timestamp, "timestamp": item.Timestamp,
} }
temp = append(temp, t) temp = append(temp, t)
} }
return temp return temp
} }
func flattenImageList(il *image.ListImages) []map[string]interface{} { func flattenImageList(il *image.ListImages) []map[string]interface{} {
res := make([]map[string]interface{}, 0) log.Debug("flattenImageList")
res := make([]map[string]interface{}, 0, len(il.Data))
for _, item := range il.Data { for _, item := range il.Data {
temp := map[string]interface{}{ temp := map[string]interface{}{
"name": item.Name, "image_id": item.ID,
"url": item.URL, "unc_path": item.UNCPath,
"gid": item.GID, "ckey": item.CKey,
"guid": item.GUID, "meta": flattens.FlattenMeta(item.Meta),
"drivers": item.Drivers,
"image_id": item.ID,
"boot_type": item.BootType,
"bootable": item.Bootable,
"image_type": item.Type,
"status": item.Status,
"tech_status": item.TechStatus,
"version": item.Version,
"username": item.Username,
// "username_dl": item.UsernameDL,
"password": item.Password,
// "password_dl": item.PasswordDL,
"purge_attempts": item.PurgeAttempts,
"architecture": item.Architecture,
"account_id": item.AccountID, "account_id": item.AccountID,
"acl": flattenAcl(item.ACL),
"architecture": item.Architecture,
"boot_type": item.BootType,
"bootable": item.Bootable,
"computeci_id": item.ComputeCIID, "computeci_id": item.ComputeCIID,
"deleted_time": item.DeletedTime,
"desc": item.Description,
"drivers": item.Drivers,
"enabled": item.Enabled, "enabled": item.Enabled,
"gid": item.GID,
"guid": item.GUID,
"history": flattenHistory(item.History),
"hot_resize": item.HotResize,
"last_modified": item.LastModified,
"link_to": item.LinkTo,
"milestones": item.Milestones,
"name": item.Name,
"password": item.Password,
"pool_name": item.Pool,
"present_to": item.PresentTo,
"provider_name": item.ProviderName,
"purge_attempts": item.PurgeAttempts,
"reference_id": item.ReferenceID, "reference_id": item.ReferenceID,
"res_id": item.ResID, "res_id": item.ResID,
"res_name": item.ResName, "res_name": item.ResName,
"rescuecd": item.RescueCD, "rescuecd": item.RescueCD,
"provider_name": item.ProviderName,
"milestones": item.Milestones,
"size": item.Size,
"sep_id": item.SEPID, "sep_id": item.SEPID,
"link_to": item.LinkTo,
"unc_path": item.UNCPath,
"pool_name": item.Pool,
"hot_resize": item.HotResize,
"history": flattenHistory(item.History),
"last_modified": item.LastModified,
"meta": flattenMeta(item.Meta),
"desc": item.Description,
"shared_with": item.SharedWith, "shared_with": item.SharedWith,
"size": item.Size,
"status": item.Status,
"tech_status": item.TechStatus,
"image_type": item.Type,
"url": item.URL,
"username": item.Username,
"version": item.Version,
"virtual": item.Virtual,
} }
res = append(res, temp) res = append(res, temp)
} }
return res return res
} }
func flattenImageListStacks(_ *schema.ResourceData, stack *image.ListStacks) []map[string]interface{} { func flattenEco(m interface{}) string {
temp := make([]map[string]interface{}, 0) log.Debug("flattenEco")
output := ""
switch d := m.(type) {
case string:
output = d
case int:
output = strconv.Itoa(d)
case int64:
output = strconv.FormatInt(d, 10)
case float64:
output = strconv.FormatInt(int64(d), 10)
default:
}
return output
}
func flattenImageListStacks(stack *image.ListStacks) []map[string]interface{} {
log.Debug("flattenImageListStacks")
temp := make([]map[string]interface{}, 0, len(stack.Data))
for _, item := range stack.Data { for _, item := range stack.Data {
t := map[string]interface{}{ t := map[string]interface{}{
"api_url": item.APIURL, "ckey": item.CKey,
"api_key": item.APIKey, "meta": flattens.FlattenMeta(item.Meta),
"app_id": item.AppID, "api_url": item.APIURL,
"desc": item.Description, "api_key": item.APIKey,
"drivers": item.Drivers, "app_id": item.AppID,
"error": item.Error, "cpu_allocation_ratio": item.CPUAllocationRatio,
"guid": item.GUID, "desc": item.Description,
"id": item.ID, "descr": item.Descr,
"images": item.Images, "drivers": item.Drivers,
"login": item.Login, "eco": flattenEco(item.Eco),
"name": item.Name, "error": item.Error,
"passwd": item.Password, "gid": item.GID,
"reference_id": item.ReferenceID, "guid": item.GUID,
"status": item.Status, "id": item.ID,
"type": item.Type, "images": item.Images,
"login": item.Login,
"mem_allocation_ratio": item.MemAllocationRatio,
"name": item.Name,
"packages": flattenPackages(item.Packages),
"passwd": item.Password,
"reference_id": item.ReferenceID,
"status": item.Status,
"type": item.Type,
} }
temp = append(temp, t) temp = append(temp, t)
} }
return temp return temp
} }
// flattenPackages converts the stack's hypervisor package records into the
// single-element list-of-maps form expected by the Terraform schema.
func flattenPackages(pg image.Packages) []map[string]interface{} {
	log.Debug("flattenPackages")
	return []map[string]interface{}{{
		"libvirt_bin":        flattenLibvirtBin(pg),
		"libvirt_daemon":     flattenLibvirtDaemon(pg),
		"lvm2_lockd":         flattenLvm2Lockd(pg),
		"openvswitch_common": flattenOpenvswitchCommon(pg),
		"openvswitch_switch": flattenOpenvswitchSwitch(pg),
		"qemu_system_x86":    flattenQemuSystemX86(pg),
		"sanlock":            flattenSanlock(pg),
	}}
}
// flattenLibvirtBin converts the libvirt-bin package record into the
// single-element list form used by the Terraform schema.
func flattenLibvirtBin(lb image.Packages) []map[string]interface{} {
	return []map[string]interface{}{{
		"installed_size": lb.LibvirtBin.InstalledSize,
		"ver":            lb.LibvirtBin.Ver,
	}}
}
// flattenLibvirtDaemon converts the libvirt-daemon package record into the
// single-element list form used by the Terraform schema.
func flattenLibvirtDaemon(ld image.Packages) []map[string]interface{} {
	return []map[string]interface{}{{
		"installed_size": ld.LibvirtDaemon.InstalledSize,
		"ver":            ld.LibvirtDaemon.Ver,
	}}
}
// flattenLvm2Lockd converts the lvm2-lockd package record into the
// single-element list form used by the Terraform schema.
func flattenLvm2Lockd(ll image.Packages) []map[string]interface{} {
	return []map[string]interface{}{{
		"installed_size": ll.Lvm2Lockd.InstalledSize,
		"ver":            ll.Lvm2Lockd.Ver,
	}}
}
// flattenOpenvswitchCommon converts the openvswitch-common package record
// into the single-element list form used by the Terraform schema.
func flattenOpenvswitchCommon(oc image.Packages) []map[string]interface{} {
	return []map[string]interface{}{{
		"installed_size": oc.OpenvswitchCommon.InstalledSize,
		"ver":            oc.OpenvswitchCommon.Ver,
	}}
}
// flattenOpenvswitchSwitch converts the openvswitch-switch package record
// into the single-element list form used by the Terraform schema.
func flattenOpenvswitchSwitch(os image.Packages) []map[string]interface{} {
	return []map[string]interface{}{{
		"installed_size": os.OpenvswitchSwitch.InstalledSize,
		"ver":            os.OpenvswitchSwitch.Ver,
	}}
}
// flattenQemuSystemX86 converts the qemu-system-x86 package record into the
// single-element list form used by the Terraform schema.
func flattenQemuSystemX86(qs image.Packages) []map[string]interface{} {
	return []map[string]interface{}{{
		"installed_size": qs.QemuSystemX86.InstalledSize,
		"ver":            qs.QemuSystemX86.Ver,
	}}
}
// flattenSanlock converts the sanlock package record into the single-element
// list form used by the Terraform schema.
func flattenSanlock(sl image.Packages) []map[string]interface{} {
	return []map[string]interface{}{{
		"installed_size": sl.Sanlock.InstalledSize,
		"ver":            sl.Sanlock.Ver,
	}}
}

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -41,6 +41,8 @@ import (
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
) )
func resourceCDROMImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceCDROMImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -49,14 +51,18 @@ func resourceCDROMImageCreate(ctx context.Context, d *schema.ResourceData, m int
req := image.CreateCDROMImageRequest{ req := image.CreateCDROMImageRequest{
Name: d.Get("name").(string), Name: d.Get("name").(string),
URL: d.Get("url").(string), URL: d.Get("url").(string),
GID: uint64(d.Get("gid").(int)),
} }
if err := ic.ExistGID(ctx, uint64(d.Get("gid").(int)), c); err != nil {
return diag.FromErr(err)
}
req.GID = uint64(d.Get("gid").(int))
drivers := []string{} drivers := []string{}
for _, driver := range d.Get("drivers").([]interface{}) { for _, driver := range d.Get("drivers").([]interface{}) {
drivers = append(drivers, driver.(string)) drivers = append(drivers, driver.(string))
} }
req.Drivers = drivers req.Drivers = drivers
if username, ok := d.GetOk("username_dl"); ok { if username, ok := d.GetOk("username_dl"); ok {
@ -84,20 +90,29 @@ func resourceCDROMImageCreate(ctx context.Context, d *schema.ResourceData, m int
} }
d.SetId(strconv.FormatUint(imageId, 10)) d.SetId(strconv.FormatUint(imageId, 10))
d.Set("image_id", imageId)
image, err := utilityImageCheckPresence(ctx, d, m) return resourceImageRead(ctx, d, m)
}
func resourceCDROMImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceCDROMImageRead: called for %s id: %s", d.Get("name").(string), d.Id())
img, err := utilityImageCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.Set("bootable", image.Bootable) switch img.Status {
case status.Modeled:
diagnostics := resourceImageRead(ctx, d, m) return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
if diagnostics != nil { case status.Destroyed, status.Purged:
return diagnostics d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
} }
flattenImage(d, img)
return nil return nil
} }
@ -106,6 +121,7 @@ func resourceCDROMImageDelete(ctx context.Context, d *schema.ResourceData, m int
imageData, err := utilityImageCheckPresence(ctx, d, m) imageData, err := utilityImageCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -128,232 +144,91 @@ func resourceCDROMImageDelete(ctx context.Context, d *schema.ResourceData, m int
return nil return nil
} }
func resourceCDROMImageSchemaMake() map[string]*schema.Schema { func resourceCDROMImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
return map[string]*schema.Schema{ log.Debugf("resourceCDROMImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
"name": {
Type: schema.TypeString, img, err := utilityImageCheckPresence(ctx, d, m)
Required: true, if err != nil {
Description: "Name of the rescue disk", d.SetId("")
}, return diag.FromErr(err)
"url": { }
Type: schema.TypeString,
Required: true, switch img.Status {
Description: "URL where to download ISO from", case status.Modeled:
}, return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
"gid": { case status.Destroyed, status.Purged:
Type: schema.TypeInt, d.SetId("")
Required: true, return diag.Errorf("The resource cannot be updated because it has been destroyed")
Description: "grid (platform) ID where this template should be create in", }
},
"boot_type": { if d.HasChange("enabled") {
Type: schema.TypeString, err := resourceImageChangeEnabled(ctx, d, m)
Computed: true, if err != nil {
Description: "Boot type of image bios or uefi", return diag.FromErr(err)
}, }
"image_type": {
Type: schema.TypeString,
Computed: true,
Description: "Image type linux, windows or other",
},
"drivers": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"hot_resize": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this machine supports hot resize",
},
// "username": {
// Type: schema.TypeString,
// Optional: true,
// Computed: true,
// Description: "Optional username for the image",
// },
// "password": {
// Type: schema.TypeString,
// Optional: true,
// Computed: true,
// Description: "Optional password for the image",
// },
"account_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "AccountId to make the image exclusive",
},
"username_dl": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "username for upload binary media",
},
"password_dl": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "password for upload binary media",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "storage endpoint provider ID",
},
"pool_name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "pool for image create",
},
"architecture": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "image id",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this image boot OS",
},
"unc_path": {
Type: schema.TypeString,
Computed: true,
Description: "unc path",
},
"link_to": {
Type: schema.TypeInt,
Computed: true,
Description: "",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "tech atatus",
},
"version": {
Type: schema.TypeString,
Computed: true,
Description: "version",
},
"size": {
Type: schema.TypeInt,
Computed: true,
Description: "image size",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"computeci_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"provider_name": {
Type: schema.TypeString,
Computed: true,
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"rescuecd": {
Type: schema.TypeBool,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"shared_with": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"enabled_stacks": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"history": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
} }
if d.HasChange("shared_with") {
err := resourceImageShare(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("computeci_id") {
err := resourceImageChangeComputeci(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("enabled_stacks") {
err := resourceImageUpdateNodes(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChanges("name", "password_dl", "username_dl", "account_id", "bootable", "hot_resize") {
err := resourceImageCDROMEdit(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
return nil
}
func resourceImageCDROMEdit(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.EditRequest{}
req.ImageID = uint64(d.Get("image_id").(int))
if d.HasChange("name") {
req.Name = d.Get("name").(string)
}
if d.HasChange("username_dl") {
req.Username = d.Get("username_dl").(string)
}
if d.HasChange("password_dl") {
req.Password = d.Get("password_dl").(string)
}
if d.HasChange("account_id") {
req.AccountID = uint64(d.Get("account_id").(int))
}
if d.HasChange("bootable") {
req.Bootable = d.Get("bootable").(bool)
}
if d.HasChange("hot_resize") {
req.HotResize = d.Get("hot_resize").(bool)
}
_, err := c.CloudBroker().Image().Edit(ctx, req)
if err != nil {
return err
}
return nil
} }
func ResourceCDROMImage() *schema.Resource { func ResourceCDROMImage() *schema.Resource {
@ -361,8 +236,8 @@ func ResourceCDROMImage() *schema.Resource {
SchemaVersion: 1, SchemaVersion: 1,
CreateContext: resourceCDROMImageCreate, CreateContext: resourceCDROMImageCreate,
ReadContext: resourceImageRead, ReadContext: resourceCDROMImageRead,
UpdateContext: resourceImageUpdate, UpdateContext: resourceCDROMImageUpdate,
DeleteContext: resourceCDROMImageDelete, DeleteContext: resourceCDROMImageDelete,
Importer: &schema.ResourceImporter{ Importer: &schema.ResourceImporter{

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -41,62 +41,38 @@ import (
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
) )
func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string)) log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := image.CreateRequest{
Name: d.Get("name").(string),
URL: d.Get("url").(string),
GID: uint64(d.Get("gid").(int)),
BootType: d.Get("boot_type").(string),
ImageType: d.Get("image_type").(string),
}
drivers := []string{}
for _, driver := range d.Get("drivers").([]interface{}) {
drivers = append(drivers, driver.(string))
}
req.Drivers = drivers syncMode := d.Get("sync_mode").(bool)
var imageId uint64
if hotresize, ok := d.GetOk("hot_resize"); ok {
req.HotResize = hotresize.(bool)
}
if username, ok := d.GetOk("username"); ok {
req.Username = username.(string)
}
if password, ok := d.GetOk("password"); ok {
req.Password = password.(string)
}
if accountId, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(accountId.(int))
}
if usernameDL, ok := d.GetOk("username_dl"); ok {
req.UsernameDL = usernameDL.(string)
}
if passwordDL, ok := d.GetOk("password_dl"); ok {
req.PasswordDL = passwordDL.(string)
}
if sepId, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepId.(int))
}
if poolName, ok := d.GetOk("pool_name"); ok {
req.PoolName = poolName.(string)
}
if architecture, ok := d.GetOk("architecture"); ok {
req.Architecture = architecture.(string)
}
imageId, err := c.CloudBroker().Image().CreateImage(ctx, req) if syncMode {
if err != nil { req, err := SyncCreateRequest(ctx, d, m)
return diag.FromErr(err) if err != nil {
return diag.FromErr(err)
}
imageId, err = c.CloudBroker().Image().SyncCreate(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
req, err := CreateRequest(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
imageId, err = c.CloudBroker().Image().CreateImage(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} }
d.SetId(strconv.FormatUint(imageId, 10)) d.SetId(strconv.FormatUint(imageId, 10))
d.Set("image_id", imageId)
return resourceImageRead(ctx, d, m) return resourceImageRead(ctx, d, m)
} }
@ -104,13 +80,21 @@ func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interfac
func resourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageRead: called for %s id: %s", d.Get("name").(string), d.Id()) log.Debugf("resourceImageRead: called for %s id: %s", d.Get("name").(string), d.Id())
image, err := utilityImageCheckPresence(ctx, d, m) img, err := utilityImageCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
flattenImage(d, image) switch img.Status {
case status.Modeled:
return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
case status.Destroyed, status.Purged:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
}
flattenImage(d, img)
return nil return nil
} }
@ -120,6 +104,7 @@ func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interfac
_, err := utilityImageCheckPresence(ctx, d, m) _, err := utilityImageCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -145,25 +130,22 @@ func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interfac
return nil return nil
} }
func resourceImageEditName(ctx context.Context, d *schema.ResourceData, m interface{}) error { func resourceImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageEditName: called for %s, id: %s", d.Get("name").(string), d.Id()) log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.RenameRequest{
ImageID: uint64(d.Get("image_id").(int)),
Name: d.Get("name").(string),
}
_, err := c.CloudBroker().Image().Rename(ctx, req) img, err := utilityImageCheckPresence(ctx, d, m)
if err != nil { if err != nil {
return err d.SetId("")
return diag.FromErr(err)
} }
return nil switch img.Status {
} case status.Modeled:
return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
func resourceImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { case status.Destroyed, status.Purged:
log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id()) d.SetId("")
c := m.(*controller.ControllerCfg) return diag.Errorf("The resource cannot be updated because it has been destroyed")
}
if d.HasChange("enabled") { if d.HasChange("enabled") {
err := resourceImageChangeEnabled(ctx, d, m) err := resourceImageChangeEnabled(ctx, d, m)
@ -172,13 +154,6 @@ func resourceImageUpdate(ctx context.Context, d *schema.ResourceData, m interfac
} }
} }
if d.HasChange("name") {
err := resourceImageEditName(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("shared_with") { if d.HasChange("shared_with") {
err := resourceImageShare(ctx, d, m) err := resourceImageShare(ctx, d, m)
if err != nil { if err != nil {
@ -200,26 +175,8 @@ func resourceImageUpdate(ctx context.Context, d *schema.ResourceData, m interfac
} }
} }
if d.HasChange("link_to") {
err := resourceImageLink(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChanges("name", "username", "password", "account_id", "bootable", "hot_resize") { if d.HasChanges("name", "username", "password", "account_id", "bootable", "hot_resize") {
req := image.EditRequest{} err := resourceImageEdit(ctx, d, m)
req.ImageID = uint64(d.Get("image_id").(int))
req.Name = d.Get("name").(string)
req.Username = d.Get("username").(string)
req.Password = d.Get("password").(string)
req.AccountID = uint64(d.Get("account_id").(int))
req.Bootable = d.Get("bootable").(bool)
req.HotResize = d.Get("hot_resize").(bool)
_, err := c.CloudBroker().Image().Edit(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -255,22 +212,6 @@ func resourceImageChangeEnabled(ctx context.Context, d *schema.ResourceData, m i
return nil return nil
} }
func resourceImageLink(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceVirtualImageLink: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.LinkRequest{
ImageID: uint64(d.Get("image_id").(int)),
TargetID: uint64(d.Get("link_to").(int)),
}
_, err := c.CloudBroker().Image().Link(ctx, req)
if err != nil {
return err
}
return nil
}
func resourceImageShare(ctx context.Context, d *schema.ResourceData, m interface{}) error { func resourceImageShare(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceImageShare: called for %s, id: %s", d.Get("name").(string), d.Id()) log.Debugf("resourceImageShare: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
@ -342,247 +283,36 @@ func resourceImageUpdateNodes(ctx context.Context, d *schema.ResourceData, m int
return nil return nil
} }
func resourceImageSchemaMake() map[string]*schema.Schema { func resourceImageEdit(ctx context.Context, d *schema.ResourceData, m interface{}) error {
return map[string]*schema.Schema{ log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
"name": { c := m.(*controller.ControllerCfg)
Type: schema.TypeString, req := image.EditRequest{}
Required: true,
Description: "Name of the rescue disk", req.ImageID = uint64(d.Get("image_id").(int))
}, if d.HasChange("name") {
"url": { req.Name = d.Get("name").(string)
Type: schema.TypeString,
Required: true,
Description: "URL where to download media from",
},
"gid": {
Type: schema.TypeInt,
Required: true,
Description: "grid (platform) ID where this template should be create in",
},
"boot_type": {
Type: schema.TypeString,
Required: true,
Description: "Boot type of image bios or uefi",
},
"image_type": {
Type: schema.TypeString,
Required: true,
Description: "Image type linux, windows or other",
},
"drivers": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"hot_resize": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this machine supports hot resize",
},
"username": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Optional username for the image",
},
"password": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Optional password for the image",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "AccountId to make the image exclusive",
},
"username_dl": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "username for upload binary media",
},
"password_dl": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "password for upload binary media",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "storage endpoint provider ID",
},
"pool_name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "pool for image create",
},
"architecture": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
},
"image_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "image id",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this image boot OS",
},
"unc_path": {
Type: schema.TypeString,
Computed: true,
Description: "unc path",
},
"link_to": {
Type: schema.TypeInt,
Computed: true,
Description: "",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "tech atatus",
},
"version": {
Type: schema.TypeString,
Computed: true,
Description: "version",
},
"size": {
Type: schema.TypeInt,
Computed: true,
Description: "image size",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"computeci_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"provider_name": {
Type: schema.TypeString,
Computed: true,
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"rescuecd": {
Type: schema.TypeBool,
Computed: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"last_modified": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"shared_with": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"sync": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Create image from a media identified by URL (in synchronous mode)",
},
"enabled_stacks": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"history": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
} }
if d.HasChange("username") {
req.Username = d.Get("username").(string)
}
if d.HasChange("password") {
req.Password = d.Get("password").(string)
}
if d.HasChange("account_id") {
req.AccountID = uint64(d.Get("account_id").(int))
}
if d.HasChange("bootable") {
req.Bootable = d.Get("bootable").(bool)
}
if d.HasChange("hot_resize") {
req.HotResize = d.Get("hot_resize").(bool)
}
_, err := c.CloudBroker().Image().Edit(ctx, req)
if err != nil {
return err
}
return nil
} }
func ResourceImage() *schema.Resource { func ResourceImage() *schema.Resource {

@ -41,6 +41,7 @@ import (
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
) )
func resourceVirtualImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceVirtualImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -49,7 +50,7 @@ func resourceVirtualImageCreate(ctx context.Context, d *schema.ResourceData, m i
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := image.CreateVirtualRequest{ req := image.CreateVirtualRequest{
Name: d.Get("name").(string), Name: d.Get("name").(string),
TargetID: uint64(d.Get("target_id").(int)), TargetID: uint64(d.Get("link_to").(int)),
} }
imageId, err := c.CloudBroker().Image().CreateVirtual(ctx, req) imageId, err := c.CloudBroker().Image().CreateVirtual(ctx, req)
@ -58,251 +59,139 @@ func resourceVirtualImageCreate(ctx context.Context, d *schema.ResourceData, m i
} }
d.SetId(strconv.FormatUint(imageId, 10)) d.SetId(strconv.FormatUint(imageId, 10))
d.Set("image_id", imageId)
return resourceImageRead(ctx, d, m) return resourceImageRead(ctx, d, m)
} }
func resourceVirtualImageSchemaMake() map[string]*schema.Schema { func resourceVirtualImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
return map[string]*schema.Schema{ log.Debugf("resourceVirtualImageRead: called for %s id: %s", d.Get("name").(string), d.Id())
"name": {
Type: schema.TypeString, img, err := utilityImageCheckPresence(ctx, d, m)
Required: true, if err != nil {
Description: "name of the virtual image to create", d.SetId("")
}, return diag.FromErr(err)
"target_id": { }
Type: schema.TypeInt,
Required: true, switch img.Status {
Description: "ID of real image to link this virtual image to upon creation", case status.Modeled:
}, return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
"history": { case status.Destroyed, status.Purged:
Type: schema.TypeList, d.SetId("")
Computed: true, return diag.Errorf("The resource cannot be updated because it has been destroyed")
Elem: &schema.Resource{ }
Schema: map[string]*schema.Schema{
"guid": { flattenImage(d, img)
Type: schema.TypeString,
Computed: true, return nil
}, }
"id": {
Type: schema.TypeInt, func resourceVirtualImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
Computed: true, log.Debugf("resourceVirtualImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id())
},
"timestamp": { _, err := utilityImageCheckPresence(ctx, d, m)
Type: schema.TypeInt, if err != nil {
Computed: true, d.SetId("")
}, return diag.FromErr(err)
}, }
},
}, c := m.(*controller.ControllerCfg)
"url": { req := image.DeleteRequest{
Type: schema.TypeString, ImageID: uint64(d.Get("image_id").(int)),
Computed: true, }
Description: "URL where to download media from",
}, if reason, ok := d.GetOk("reason"); ok {
"gid": { req.Reason = reason.(string)
Type: schema.TypeInt, }
Computed: true, if permanently, ok := d.GetOk("permanently"); ok {
Description: "grid (platform) ID where this template should be create in", req.Permanently = permanently.(bool)
}, }
"boot_type": {
Type: schema.TypeString, _, err = c.CloudBroker().Image().Delete(ctx, req)
Computed: true, if err != nil {
Description: "Boot type of image bios or uefi", return diag.FromErr(err)
}, }
"image_type": {
Type: schema.TypeString, d.SetId("")
Computed: true,
Description: "Image type linux, windows or other", return nil
}, }
"drivers": {
Type: schema.TypeList, func resourceVirtualImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
Computed: true, log.Debugf("resourceVirtualImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
Elem: &schema.Schema{
Type: schema.TypeString, img, err := utilityImageCheckPresence(ctx, d, m)
}, if err != nil {
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]", d.SetId("")
}, return diag.FromErr(err)
"meta": { }
Type: schema.TypeList,
Computed: true, switch img.Status {
Elem: &schema.Schema{ case status.Modeled:
Type: schema.TypeString, return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
}, case status.Destroyed, status.Purged:
Description: "meta", d.SetId("")
}, return diag.Errorf("The resource cannot be updated because it has been destroyed")
"hot_resize": { }
Type: schema.TypeBool,
Optional: true, if d.HasChange("enabled") {
Computed: true, err := resourceImageChangeEnabled(ctx, d, m)
Description: "Does this machine supports hot resize", if err != nil {
}, return diag.FromErr(err)
"username": { }
Type: schema.TypeString, }
Optional: true,
Computed: true, if d.HasChange("shared_with") {
Description: "Optional username for the image", err := resourceImageShare(ctx, d, m)
}, if err != nil {
"password": { return diag.FromErr(err)
Type: schema.TypeString, }
Optional: true, }
Computed: true,
Description: "Optional password for the image", if d.HasChange("computeci_id") {
}, err := resourceImageChangeComputeci(ctx, d, m)
"account_id": { if err != nil {
Type: schema.TypeInt, return diag.FromErr(err)
Optional: true, }
Computed: true, }
Description: "AccountId to make the image exclusive",
}, if d.HasChange("enabled_stacks") {
"username_dl": { err := resourceImageUpdateNodes(ctx, d, m)
Type: schema.TypeString, if err != nil {
Optional: true, return diag.FromErr(err)
Computed: true, }
Description: "username for upload binary media", }
},
"password_dl": { if d.HasChanges("name", "username", "password", "account_id", "bootable", "hot_resize") {
Type: schema.TypeString, err := resourceImageEdit(ctx, d, m)
Optional: true, if err != nil {
Computed: true, return diag.FromErr(err)
Description: "password for upload binary media", }
}, }
"sep_id": {
Type: schema.TypeInt, if d.HasChange("link_to") {
Optional: true, err := resourceImageLink(ctx, d, m)
Computed: true, if err != nil {
Description: "storage endpoint provider ID", return diag.FromErr(err)
}, }
"pool_name": { }
Type: schema.TypeString,
Optional: true, return nil
Computed: true, }
Description: "pool for image create",
}, func resourceImageLink(ctx context.Context, d *schema.ResourceData, m interface{}) error {
"architecture": { log.Debugf("resourceVirtualImageLink: called for %s, id: %s", d.Get("name").(string), d.Id())
Type: schema.TypeString, c := m.(*controller.ControllerCfg)
Optional: true, req := image.LinkRequest{
Computed: true, ImageID: uint64(d.Get("image_id").(int)),
Description: "binary architecture of this image, one of X86_64 of PPC64_LE", TargetID: uint64(d.Get("link_to").(int)),
}, }
"image_id": {
Type: schema.TypeInt, _, err := c.CloudBroker().Image().Link(ctx, req)
Computed: true, if err != nil {
Description: "image id", return err
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this image boot OS",
},
"unc_path": {
Type: schema.TypeString,
Computed: true,
Description: "unc path",
},
"link_to": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "tech atatus",
},
"version": {
Type: schema.TypeString,
Computed: true,
Description: "version",
},
"size": {
Type: schema.TypeInt,
Computed: true,
Description: "image size",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"computeci_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"provider_name": {
Type: schema.TypeString,
Computed: true,
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"rescuecd": {
Type: schema.TypeBool,
Computed: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"last_modified": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"enabled_stacks": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"shared_with": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
} }
return nil
} }
func ResourceVirtualImage() *schema.Resource { func ResourceVirtualImage() *schema.Resource {
@ -310,9 +199,9 @@ func ResourceVirtualImage() *schema.Resource {
SchemaVersion: 1, SchemaVersion: 1,
CreateContext: resourceVirtualImageCreate, CreateContext: resourceVirtualImageCreate,
ReadContext: resourceImageRead, ReadContext: resourceVirtualImageRead,
UpdateContext: resourceImageUpdate, UpdateContext: resourceVirtualImageUpdate,
DeleteContext: resourceImageDelete, DeleteContext: resourceVirtualImageDelete,
Importer: &schema.ResourceImporter{ Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext, StateContext: schema.ImportStatePassthroughContext,

@ -44,11 +44,11 @@ func utilityImageCheckPresence(ctx context.Context, d *schema.ResourceData, m in
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := image.GetRequest{} req := image.GetRequest{}
if (strconv.Itoa(d.Get("image_id").(int))) != "0" { if d.Id() != "" {
req.ImageID = uint64(d.Get("image_id").(int))
} else {
id, _ := strconv.ParseUint(d.Id(), 10, 64) id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.ImageID = id req.ImageID = id
} else {
req.ImageID = uint64(d.Get("image_id").(int))
} }
image, err := c.CloudBroker().Image().Get(ctx, req) image, err := c.CloudBroker().Image().Get(ctx, req)

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -48,7 +48,39 @@ func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData,
if sepId, ok := d.GetOk("sep_id"); ok { if sepId, ok := d.GetOk("sep_id"); ok {
req.SepID = uint64(sepId.(int)) req.SepID = uint64(sepId.(int))
} }
if byId, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byId.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if architecture, ok := d.GetOk("architecture"); ok {
req.Architecture = architecture.(string)
}
if typeImage, ok := d.GetOk("type_image"); ok {
req.TypeImage = typeImage.(string)
}
if imageSize, ok := d.GetOk("image_size"); ok {
req.ImageSize = uint64(imageSize.(int))
}
if sepName, ok := d.GetOk("sep_name"); ok {
req.SEPName = sepName.(string)
}
if pool, ok := d.GetOk("pool"); ok {
req.Pool = pool.(string)
}
if public, ok := d.GetOk("public"); ok {
req.Public = public.(bool)
}
if hotResize, ok := d.GetOk("hot_resize"); ok {
req.HotResize = hotResize.(bool)
}
if bootable, ok := d.GetOk("bootable"); ok {
req.Bootable = bootable.(bool)
}
if page, ok := d.GetOk("page"); ok { if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int)) req.Page = uint64(page.(int))
} }

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -47,6 +47,22 @@ func utilityImageListStacksCheckPresence(ctx context.Context, d *schema.Resource
ImageID: uint64(d.Get("image_id").(int)), ImageID: uint64(d.Get("image_id").(int)),
} }
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if typeImage, ok := d.GetOk("type_image"); ok {
req.Type = typeImage.(string)
}
log.Debugf("utilityImageListStacksCheckPresence: load image list") log.Debugf("utilityImageListStacksCheckPresence: load image list")
imageListStacks, err := c.CloudBroker().Image().ListStacks(ctx, req) imageListStacks, err := c.CloudBroker().Image().ListStacks(ctx, req)
if err != nil { if err != nil {

@ -33,26 +33,65 @@ package k8s
import ( import (
"context" "context"
"fmt"
"strconv"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/tasks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
) )
func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sWgCreate: called with k8s id %d", d.Get("k8s_id").(int)) log.Debugf("resourceK8sWgCreate: called with k8s id %d", d.Get("k8s_id").(int))
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
if err := ic.ExistK8s(ctx, uint64(d.Get("k8s_id").(int)), c); err != nil {
return diag.FromErr(err)
}
req := k8s.WorkersGroupAddRequest{ req := k8s.WorkersGroupAddRequest{
K8SID: uint64(d.Get("k8s_id").(int)), K8SID: uint64(d.Get("k8s_id").(int)),
Name: d.Get("name").(string), Name: d.Get("name").(string),
WorkerNum: uint64(d.Get("num").(int)), WorkerNum: uint64(d.Get("num").(int)),
WorkerCPU: uint64(d.Get("cpu").(int)), WorkerCPU: uint64(d.Get("cpu").(int)),
WorkerRAM: uint64(d.Get("ram").(int)), WorkerRAM: uint64(d.Get("ram").(int)),
WorkerDisk: uint64(d.Get("disk").(int)), WorkerSEPID: uint64(d.Get("worker_sep_id").(int)),
WorkerSEPPool: d.Get("worker_sep_pool").(string),
}
if d.Get("disk") == nil {
req.WorkerDisk = 0
} else {
req.WorkerDisk = uint64(d.Get("disk").(int))
}
labels, _ := d.Get("labels").([]interface{})
for _, label := range labels {
if !strings.HasPrefix(label.(string), "workersGroupName") {
req.Labels = append(req.Labels, label.(string))
}
}
annotations, _ := d.Get("annotations").([]interface{})
for _, annotation := range annotations {
req.Annotations = append(req.Annotations, annotation.(string))
}
taints, _ := d.Get("taints").([]interface{})
for _, taint := range taints {
req.Taints = append(req.Taints, taint.(string))
}
if cloudInit, ok := d.GetOk("cloud_init"); ok {
req.UserData = cloudInit.(string)
} }
resp, err := c.CloudBroker().K8S().WorkersGroupAdd(ctx, req) resp, err := c.CloudBroker().K8S().WorkersGroupAdd(ctx, req)
@ -60,9 +99,31 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(resp) taskReq := tasks.GetRequest{
AuditID: strings.Trim(resp, `"`),
}
return nil for {
task, err := c.CloudBroker().Tasks().Get(ctx, taskReq)
if err != nil {
return diag.FromErr(err)
}
log.Debugf("resourceK8sWgCreate: instance creating - %s", task.Stage)
if task.Completed {
if task.Error != "" {
return diag.FromErr(fmt.Errorf("cannot create k8sWg instance: %v", task.Error))
}
d.SetId(strconv.Itoa(int(task.Result)))
break
}
time.Sleep(time.Second * 20)
}
return resourceK8sWgRead(ctx, d, m)
} }
func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -74,11 +135,30 @@ func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{
return diag.FromErr(err) return diag.FromErr(err)
} }
d.Set("name", wg.Name) workersComputeList := make([]compute.RecordCompute, 0)
d.Set("num", wg.Num) for _, info := range wg.DetailedInfo {
d.Set("cpu", wg.CPU) compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID)
d.Set("ram", wg.RAM) if err != nil {
d.Set("disk", wg.Disk) return diag.FromErr(err)
}
workersComputeList = append(workersComputeList, *compute)
}
d.Set("wg_id", wg.ID)
if strings.Contains(d.Id(), "#") {
k8sId, err := strconv.Atoi(strings.Split(d.Id(), "#")[1])
if err != nil {
return diag.FromErr(err)
}
d.Set("k8s_id", k8sId)
} else {
d.Set("k8s_id", d.Get("k8s_id"))
}
d.SetId(strings.Split(d.Id(), "#")[0])
flattenWg(d, wg, workersComputeList)
return nil return nil
} }
@ -88,35 +168,56 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
if err := ic.ExistK8s(ctx, uint64(d.Get("k8s_id").(int)), c); err != nil {
return diag.FromErr(err)
}
wg, err := utilityK8sWgCheckPresence(ctx, d, m) wg, err := utilityK8sWgCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
wgId, _ := strconv.ParseUint(d.Id(), 10, 64)
if d.HasChange("num") {
if newNum := d.Get("num").(int); uint64(newNum) > wg.Num {
req := k8s.WorkerAddRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wgId,
Num: uint64(newNum) - wg.Num,
}
addReq := k8s.WorkerAddRequest{ _, err := c.CloudBroker().K8S().WorkerAdd(ctx, req)
K8SID: uint64(d.Get("k8s_id").(int)), if err != nil {
WorkersGroupID: wg.ID, return diag.FromErr(err)
}
} else {
for i := int(wg.Num) - 1; i >= newNum; i-- {
req := k8s.DeleteWorkerFromGroupRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wgId,
WorkerID: wg.DetailedInfo[i].ID,
}
_, err := c.CloudBroker().K8S().DeleteWorkerFromGroup(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
} }
delReq := k8s.DeleteWorkerFromGroupRequest{ if d.HasChange("cloud_init") {
K8SID: uint64(d.Get("k8s_id").(int)), req := k8s.UpdateWorkerNodesMetaDataRequest{
WorkersGroupID: wg.ID, K8SID: uint64(d.Get("k8s_id").(int)),
} WorkersGroupID: wgId,
UserData: d.Get("cloud_init").(string),
}
if newNum := uint64(d.Get("num").(int)); newNum > wg.Num { _, err := c.CloudBroker().K8S().UpdateWorkerNodesMetaData(ctx, req)
addReq.Num = newNum - wg.Num
_, err := c.CloudBroker().K8S().WorkerAdd(ctx, addReq)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
} else {
for i := wg.Num - 1; i >= newNum; i-- {
delReq.WorkerID = wg.DetailedInfo[i].ID
_, err := c.CloudBroker().K8S().DeleteWorkerFromGroup(ctx, delReq)
if err != nil {
return diag.FromErr(err)
}
}
} }
return nil return nil
@ -146,55 +247,6 @@ func resourceK8sWgDelete(ctx context.Context, d *schema.ResourceData, m interfac
return nil return nil
} }
func resourceK8sWgSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"k8s_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "ID of k8s instance.",
},
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Name of the worker group.",
},
"num": {
Type: schema.TypeInt,
Optional: true,
Default: 1,
Description: "Number of worker nodes to create.",
},
"cpu": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Default: 1,
Description: "Worker node CPU count.",
},
"ram": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Default: 1024,
Description: "Worker node RAM in MB.",
},
"disk": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Default: 0,
Description: "Worker node boot disk size. If unspecified or 0, size is defined by OS image size.",
},
}
}
func ResourceK8sWg() *schema.Resource { func ResourceK8sWg() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -36,6 +36,7 @@ import (
"strconv" "strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
) )
@ -43,9 +44,13 @@ import (
func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8S, error) { func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8S, error) {
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := k8s.GetRequest{} req := k8s.GetRequest{}
k8sId, _ := strconv.ParseUint(d.Id(), 10, 64)
req.K8SID = k8sId if d.Id() != "" {
k8sId, _ := strconv.ParseUint(d.Id(), 10, 64)
req.K8SID = k8sId
} else {
req.K8SID = uint64(d.Get("k8s_id").(int))
}
k8s, err := c.CloudBroker().K8S().Get(ctx, req) k8s, err := c.CloudBroker().K8S().Get(ctx, req)
if err != nil { if err != nil {
@ -54,3 +59,17 @@ func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m inte
return k8s, nil return k8s, nil
} }
func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}, computeID uint64) (*compute.RecordCompute, error) {
c := m.(*controller.ControllerCfg)
req := compute.GetRequest{
ComputeID: computeID,
}
compute, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil {
return nil, err
}
return compute, nil
}

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -33,31 +33,136 @@ package k8s
import ( import (
"context" "context"
"fmt"
"strconv" "strconv"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
) )
func utilityK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8SGroup, error) { func utilityK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8SGroup, error) {
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
var wgId int
var k8sId int
var err error
if strings.Contains(d.Id(), "#") {
wgId, err = strconv.Atoi(strings.Split(d.Id(), "#")[0])
if err != nil {
return nil, err
}
k8sId, err = strconv.Atoi(strings.Split(d.Id(), "#")[1])
if err != nil {
return nil, err
}
} else {
wgId, err = strconv.Atoi(d.Id())
if err != nil {
return nil, err
}
k8sId = d.Get("k8s_id").(int)
}
req := k8s.GetRequest{ req := k8s.GetRequest{
K8SID: uint64(d.Get("k8s_id").(int)), K8SID: uint64(k8sId),
} }
k8sData, err := c.CloudBroker().K8S().Get(ctx, req) cluster, err := c.CloudBroker().K8S().Get(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
k8sWgID, _ := strconv.ParseUint(d.Id(), 10, 64) for _, wg := range cluster.K8SGroups.Workers {
if wg.ID == uint64(wgId) {
for _, wg := range k8sData.K8SGroups.Workers {
if wg.ID == k8sWgID {
return &wg, nil return &wg, nil
} }
} }
return nil, nil return nil, fmt.Errorf("not found wg with id: %v in k8s cluster: %v", wgId, cluster.ID)
}
func utilityDataK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8SGroup, []compute.RecordCompute, error) {
c := m.(*controller.ControllerCfg)
k8sId := uint64(d.Get("k8s_id").(int))
wgId := uint64(d.Get("wg_id").(int))
k8sGetReq := k8s.GetRequest{
K8SID: k8sId,
}
cluster, err := c.CloudBroker().K8S().Get(ctx, k8sGetReq)
if err != nil {
return nil, nil, err
}
curWg := k8s.RecordK8SGroup{}
for _, wg := range cluster.K8SGroups.Workers {
if wg.ID == wgId {
curWg = wg
break
}
}
if curWg.ID == 0 {
return nil, nil, fmt.Errorf("WG with id %v in k8s cluster %v not found", wgId, k8sId)
}
workersComputeList := make([]compute.RecordCompute, 0)
for _, info := range curWg.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID)
if err != nil {
return nil, nil, err
}
workersComputeList = append(workersComputeList, *compute)
}
return &curWg, workersComputeList, nil
}
func utilityK8sWgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.ListK8SGroup, error) {
c := m.(*controller.ControllerCfg)
req := k8s.GetRequest{}
if d.Id() != "" {
k8sId, _ := strconv.ParseUint(d.Id(), 10, 64)
req.K8SID = k8sId
} else {
req.K8SID = uint64(d.Get("k8s_id").(int))
}
cluster, err := c.CloudBroker().K8S().Get(ctx, req)
if err != nil {
return nil, err
}
return &cluster.K8SGroups.Workers, nil
}
func utilityK8sWgCloudInitCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
c := m.(*controller.ControllerCfg)
req := k8s.GetWorkerNodesMetaDataRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: uint64(d.Get("wg_id").(int)),
}
cloudInit, err := c.CloudBroker().K8S().GetWorkerNodesMetaData(ctx, req)
if err != nil {
return "", err
}
return cloudInit, nil
}
func cloudInitDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool {
if newVal != "" && newVal != oldVal {
log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
return false
}
log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal)
return true // suppress difference
} }

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -33,12 +33,8 @@ package kvmvm
import ( import (
"context" "context"
"fmt"
// "net/url" // "net/url"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@ -46,121 +42,6 @@ import (
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation" // "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
) )
// Parse list of all disks from API compute/get into a list of "extra disks" attached to this compute
// Extra disks are all compute disks but a boot disk.
func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} {
// this return value will be used to d.Set("extra_disks",) item of dataSourceCompute schema,
// which is a simple list of integer disk IDs excluding boot disk ID
length := len(disks)
log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", length)
if length == 0 || (length == 1 && disks[0].Type == "B") {
// the disk list is empty (which is kind of strange - diskless compute?), or
// there is only one disk in the list and it is a boot disk;
// as we skip boot disks, the result will be of 0 length anyway
return make([]interface{}, 0)
}
result := make([]interface{}, length-1)
idx := 0
for _, value := range disks {
if value.Type == "B" {
// skip boot disk when iterating over the list of disks
continue
}
result[idx] = value.ID
idx++
}
return result
}
// Parse the list of interfaces from compute/get response into a list of networks
// attached to this compute
func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} {
// return value will be used to d.Set("network") item of dataSourceCompute schema
length := len(ifaces)
log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
result := []interface{}{}
for _, value := range ifaces {
elem := make(map[string]interface{})
// Keys in this map should correspond to the Schema definition
// as returned by networkSubresourceSchemaMake()
elem["net_id"] = value.NetID
elem["net_type"] = value.NetType
elem["ip_address"] = value.IPAddress
elem["mac"] = value.MAC
// log.Debugf(" element %d: net_id=%d, net_type=%s", i, value.NetID, value.NetType)
result = append(result, elem)
}
return result
}
func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error {
// This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance.
//
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceComputeExists(...) method
log.Debugf("flattenCompute: ID %d, RG ID %d", compFacts.ID, compFacts.RGID)
d.SetId(fmt.Sprintf("%d", compFacts.ID))
d.Set("name", compFacts.Name)
d.Set("rg_id", compFacts.RGID)
d.Set("rg_name", compFacts.RGName)
d.Set("account_id", compFacts.AccountID)
d.Set("account_name", compFacts.AccountName)
d.Set("driver", compFacts.Driver)
d.Set("cpu", compFacts.CPUs)
d.Set("ram", compFacts.RAM)
d.Set("image_id", compFacts.ImageID)
d.Set("description", compFacts.Description)
d.Set("cloud_init", "applied")
if compFacts.TechStatus == "STARTED" {
d.Set("started", true)
} else {
d.Set("started", false)
}
bootDisk := findBootDisk(compFacts.Disks)
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations
d.Set("sep_id", bootDisk.SEPID)
d.Set("pool", bootDisk.Pool)
if len(compFacts.Disks) > 0 {
log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(compFacts.Disks))
if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(compFacts.Disks)); err != nil {
return err
}
}
if len(compFacts.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(compFacts.Interfaces))
if err := d.Set("network", parseComputeInterfacesToNetworks(compFacts.Interfaces)); err != nil {
return err
}
}
if len(compFacts.OSUsers) > 0 {
log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(compFacts.OSUsers))
if err := d.Set("os_users", parseOsUsers(compFacts.OSUsers)); err != nil {
return err
}
}
return nil
}
func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
compFacts, err := utilityComputeCheckPresence(ctx, d, m) compFacts, err := utilityComputeCheckPresence(ctx, d, m)
if compFacts == nil { if compFacts == nil {
@ -186,143 +67,6 @@ func DataSourceCompute() *schema.Resource {
Default: &constants.Timeout60s, Default: &constants.Timeout60s,
}, },
Schema: map[string]*schema.Schema{ Schema: dataSourceComputeSchemaMake(),
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Name of this compute instance. NOTE: this parameter is case sensitive.",
},
// TODO: consider removing compute_id from the schema, as it not practical to call this data provider if
// corresponding compute ID is already known
"compute_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the compute instance. If ID is specified, name and resource group ID are ignored.",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the resource group where this compute instance is located.",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the resource group where this compute instance is located.",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the account this compute instance belongs to.",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the account this compute instance belongs to.",
},
"driver": {
Type: schema.TypeString,
Computed: true,
Description: "Hardware architecture of this compute instance.",
},
"cpu": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of CPUs allocated for this compute instance.",
},
"ram": {
Type: schema.TypeInt,
Computed: true,
Description: "Amount of RAM in MB allocated for this compute instance.",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the OS image this compute instance is based on.",
},
"image_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the OS image this compute instance is based on.",
},
"boot_disk_size": {
Type: schema.TypeInt,
Computed: true,
Description: "This compute instance boot disk size in GB.",
},
"boot_disk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "This compute instance boot disk ID.",
},
"extra_disks": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "IDs of the extra disk(s) attached to this compute.",
},
/*
"disks": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: dataSourceDiskSchemaMake(), // ID, type, name, size, account ID, SEP ID, SEP type, pool, status, tech status, compute ID, image ID
},
Description: "Detailed specification for all disks attached to this compute instance (including bood disk).",
},
*/
// "network": {
// Type: schema.TypeSet,
// Optional: true,
// MaxItems: constants.MaxNetworksPerCompute,
// Elem: &schema.Resource{
// Schema: networkSubresourceSchemaMake(),
// },
// Description: "Network connection(s) for this compute.",
// },
"os_users": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: osUsersSubresourceSchemaMake(),
},
Description: "Guest OS users provisioned on this compute instance.",
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: "User-defined text description of this compute instance.",
},
"cloud_init": {
Type: schema.TypeString,
Computed: true,
Description: "Placeholder for cloud_init parameters.",
},
"started": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "Is compute started.",
},
},
} }
} }

@ -2,11 +2,14 @@ package kvmvm
import ( import (
"encoding/json" "encoding/json"
"fmt"
"sort" "sort"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
) )
func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) error { func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) error {
@ -14,18 +17,35 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
customFields, _ := json.Marshal(computeRec.CustomFields) customFields, _ := json.Marshal(computeRec.CustomFields)
devices, _ := json.Marshal(computeRec.Devices) devices, _ := json.Marshal(computeRec.Devices)
userData, _ := json.Marshal(computeRec.Userdata)
bootDisk := findBootDisk(computeRec.Disks) bootDisk := findBootDisk(computeRec.Disks)
//extra fields setting
if len(computeRec.Disks) > 0 {
log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(computeRec.Disks))
if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(computeRec.Disks)); err != nil {
return err
}
}
if len(computeRec.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(computeRec.Interfaces))
if err := d.Set("network", parseComputeInterfacesToNetworks(computeRec.Interfaces)); err != nil {
return err
}
}
d.Set("account_id", computeRec.AccountID) d.Set("account_id", computeRec.AccountID)
d.Set("account_name", computeRec.AccountName) d.Set("account_name", computeRec.AccountName)
d.Set("acl", flattenListACLInterface(computeRec.ACL))
d.Set("affinity_label", computeRec.AffinityLabel) d.Set("affinity_label", computeRec.AffinityLabel)
d.Set("affinity_weight", computeRec.AffinityWeight) d.Set("affinity_weight", computeRec.AffinityWeight)
d.Set("affinity_rules", flattenAffinityRules(computeRec.AffinityRules)) d.Set("affinity_rules", flattenAffinityRules(computeRec.AffinityRules))
d.Set("anti_affinity_rules", flattenAffinityRules(computeRec.AntiAffinityRules)) d.Set("anti_affinity_rules", flattenAffinityRules(computeRec.AntiAffinityRules))
d.Set("arch", computeRec.Arch) d.Set("arch", computeRec.Arch)
d.Set("boot_order", computeRec.BootOrder) d.Set("boot_order", computeRec.BootOrder)
d.Set("boot_disk_id", bootDisk.ID)
d.Set("boot_disk_size", computeRec.BootDiskSize) d.Set("boot_disk_size", computeRec.BootDiskSize)
d.Set("cd_image_id", computeRec.CdImageId)
d.Set("clone_reference", computeRec.CloneReference) d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones) d.Set("clones", computeRec.Clones)
d.Set("computeci_id", computeRec.ComputeCIID) d.Set("computeci_id", computeRec.ComputeCIID)
@ -36,13 +56,11 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
d.Set("deleted_time", computeRec.DeletedTime) d.Set("deleted_time", computeRec.DeletedTime)
d.Set("description", computeRec.Description) d.Set("description", computeRec.Description)
d.Set("devices", string(devices)) d.Set("devices", string(devices))
d.Set("disks", err := d.Set("disks", flattenComputeDisks(computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
flattenComputeDisks( if err != nil {
computeRec.Disks, return err
d.Get("extra_disks").(*schema.Set).List(), }
bootDisk.ID, d.Set("driver", computeRec.Driver)
),
)
d.Set("gid", computeRec.GID) d.Set("gid", computeRec.GID)
d.Set("guid", computeRec.GUID) d.Set("guid", computeRec.GUID)
d.Set("compute_id", computeRec.ID) d.Set("compute_id", computeRec.ID)
@ -53,6 +71,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
d.Set("manager_type", computeRec.ManagerType) d.Set("manager_type", computeRec.ManagerType)
d.Set("migrationjob", computeRec.MigrationJob) d.Set("migrationjob", computeRec.MigrationJob)
d.Set("milestones", computeRec.Milestones) d.Set("milestones", computeRec.Milestones)
d.Set("need_reboot", computeRec.NeedReboot)
d.Set("os_users", flattenOSUsers(computeRec.OSUsers)) d.Set("os_users", flattenOSUsers(computeRec.OSUsers))
d.Set("pinned", computeRec.Pinned) d.Set("pinned", computeRec.Pinned)
d.Set("reference_id", computeRec.ReferenceID) d.Set("reference_id", computeRec.ReferenceID)
@ -69,6 +88,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
d.Set("tech_status", computeRec.TechStatus) d.Set("tech_status", computeRec.TechStatus)
d.Set("updated_by", computeRec.UpdatedBy) d.Set("updated_by", computeRec.UpdatedBy)
d.Set("updated_time", computeRec.UpdatedTime) d.Set("updated_time", computeRec.UpdatedTime)
d.Set("user_data", string(userData))
d.Set("user_managed", computeRec.UserManaged) d.Set("user_managed", computeRec.UserManaged)
d.Set("vgpus", computeRec.VGPUs) d.Set("vgpus", computeRec.VGPUs)
d.Set("virtual_image_id", computeRec.VirtualImageID) d.Set("virtual_image_id", computeRec.VirtualImageID)
@ -175,6 +195,7 @@ func flattenComputeDisks(disksList compute.ListDisks, extraDisks []interface{},
"disk_id": disk.ID, "disk_id": disk.ID,
"shareable": disk.Shareable, "shareable": disk.Shareable,
"size_used": disk.SizeUsed, "size_used": disk.SizeUsed,
"size_max": disk.SizeMax,
} }
res = append(res, temp) res = append(res, temp)
} }
@ -193,15 +214,6 @@ func findInExtraDisks(diskId uint, extraDisks []interface{}) bool {
return false return false
} }
func findBootDisk(disks compute.ListDisks) *compute.ItemDisk {
for _, disk := range disks {
if disk.Type == "B" {
return &disk
}
}
return nil
}
func flattenAffinityRules(rules compute.ListRules) []map[string]interface{} { func flattenAffinityRules(rules compute.ListRules) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(rules)) res := make([]map[string]interface{}, 0, len(rules))
@ -217,3 +229,541 @@ func flattenAffinityRules(rules compute.ListRules) []map[string]interface{} {
return res return res
} }
func flattenComputeList(computes *compute.ListComputes) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computes.Data))
for _, computeItem := range computes.Data {
customFields, _ := json.Marshal(computeItem.CustomFields)
devices, _ := json.Marshal(computeItem.Devices)
userData, _ := json.Marshal(computeItem.Userdata)
temp := map[string]interface{}{
"acl": flattenListACLInterface(computeItem.ACL),
"account_id": computeItem.AccountID,
"account_name": computeItem.AccountName,
"affinity_label": computeItem.AffinityLabel,
"affinity_rules": flattenListRules(computeItem.AffinityRules),
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"bootdisk_size": computeItem.BootDiskSize,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
"cpus": computeItem.CPUs,
"created_by": computeItem.CreatedBy,
"created_time": computeItem.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": computeItem.DeletedBy,
"deleted_time": computeItem.DeletedTime,
"desc": computeItem.Description,
"devices": string(devices),
"disks": flattenDisks(computeItem.Disks),
"driver": computeItem.Driver,
"gid": computeItem.GID,
"guid": computeItem.GUID,
"compute_id": computeItem.ID,
"image_id": computeItem.ImageID,
"interfaces": flattenInterfaces(computeItem.Interfaces),
"lock_status": computeItem.LockStatus,
"manager_id": computeItem.ManagerID,
"manager_type": computeItem.ManagerType,
"migrationjob": computeItem.MigrationJob,
"milestones": computeItem.Milestones,
"name": computeItem.Name,
"need_reboot": computeItem.NeedReboot,
"os_users": flattenOSUsers(computeItem.OSUsers),
"pinned": computeItem.Pinned,
"ram": computeItem.RAM,
"reference_id": computeItem.ReferenceID,
"registered": computeItem.Registered,
"res_name": computeItem.ResName,
"rg_id": computeItem.RGID,
"rg_name": computeItem.RGName,
"snap_sets": flattenSnapSets(computeItem.SnapSets),
"stack_id": computeItem.StackID,
"stateless_sep_id": computeItem.StatelessSEPID,
"stateless_sep_type": computeItem.StatelessSEPType,
"status": computeItem.Status,
"tags": flattenTags(computeItem.Tags),
"tech_status": computeItem.TechStatus,
"total_disk_size": computeItem.TotalDiskSize,
"updated_by": computeItem.UpdatedBy,
"updated_time": computeItem.UpdatedTime,
"user_data": string(userData),
"user_managed": computeItem.UserManaged,
"vgpus": computeItem.VGPUs,
"vins_connected": computeItem.VINSConnected,
"virtual_image_id": computeItem.VirtualImageID,
}
res = append(res, temp)
}
return res
}
func flattenListACLInterface(listAcl []interface{}) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(listAcl))
for _, aclInterface := range listAcl {
acl := aclInterface.(map[string]interface{})
temp := map[string]interface{}{
"explicit": acl["explicit"],
"guid": acl["guid"],
"right": acl["right"],
"status": acl["status"],
"type": acl["type"],
"user_group_id": acl["user_group_id"],
}
res = append(res, temp)
}
return res
}
func flattenListACL(listAcl compute.ListACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(listAcl))
for _, acl := range listAcl {
temp := map[string]interface{}{
"explicit": acl.Explicit,
"guid": acl.GUID,
"right": acl.Right,
"status": acl.Status,
"type": acl.Type,
"user_group_id": acl.UserGroupID,
}
res = append(res, temp)
}
return res
}
func flattenListComputeACL(listAcl []compute.ItemComputeACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(listAcl))
for _, acl := range listAcl {
temp := map[string]interface{}{
"explicit": acl.Explicit,
"guid": acl.GUID,
"right": acl.Right,
"status": acl.Status,
"type": acl.Type,
"user_group_id": acl.UserGroupID,
}
res = append(res, temp)
}
return res
}
func flattenListRules(listRules compute.ListRules) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(listRules))
for _, rule := range listRules {
temp := map[string]interface{}{
"guid": rule.GUID,
"key": rule.Key,
"mode": rule.Mode,
"policy": rule.Policy,
"topology": rule.Topology,
"value": rule.Value,
}
res = append(res, temp)
}
return res
}
func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, disk := range disks {
temp := map[string]interface{}{
"disk_id": disk.ID,
"pci_slot": disk.PCISlot,
}
res = append(res, temp)
}
return res
}
func flattenComputeAudits(computeAudits compute.ListDetailedAudits) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computeAudits))
for _, computeAudit := range computeAudits {
temp := map[string]interface{}{
"call": computeAudit.Call,
"responsetime": computeAudit.ResponseTime,
"statuscode": computeAudit.StatusCode,
"timestamp": computeAudit.Timestamp,
"user": computeAudit.User,
}
res = append(res, temp)
}
return res
}
func flattenComputeGetAudits(computeAudits compute.ListAudits) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computeAudits))
for _, computeAudit := range computeAudits {
temp := map[string]interface{}{
"epoch": computeAudit.Epoch,
"message": computeAudit.Message,
}
res = append(res, temp)
}
return res
}
func flattenPfwList(computePfws *compute.ListPFW) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computePfws.Data))
for _, computePfw := range computePfws.Data {
temp := map[string]interface{}{
"pfw_id": computePfw.ID,
"local_ip": computePfw.LocalIP,
"local_port": computePfw.LocalPort,
"protocol": computePfw.Protocol,
"public_port_end": computePfw.PublicPortEnd,
"public_port_start": computePfw.PublicPortStart,
"vm_id": computePfw.VMID,
}
res = append(res, temp)
}
return res
}
func flattenUserList(d *schema.ResourceData, userList *compute.ListUsers) {
d.Set("account_acl", flattenListACL(userList.Data.AccountACL))
d.Set("compute_acl", flattenListComputeACL(userList.Data.ComputeACL))
d.Set("rg_acl", flattenListACL(userList.Data.RGACL))
}
func flattenSnapshotList(computeSnapshots *compute.ListSnapShot) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computeSnapshots.Data))
for _, snp := range computeSnapshots.Data {
temp := map[string]interface{}{
"disks": snp.Disks,
"guid": snp.GUID,
"label": snp.Label,
"timestamp": snp.Timestamp,
}
res = append(res, temp)
}
return res
}
// flattenAffinityRelations flattens a compute affinity-relations record into
// the per-category node-list attributes of d.
func flattenAffinityRelations(d *schema.ResourceData, ar *compute.RecordAffinityRelations) {
	d.Set("other_node", flattenNodes(ar.OtherNode))
	d.Set("other_node_indirect", flattenNodes(ar.OtherNodeIndirect))
	d.Set("other_node_indirect_soft", flattenNodes(ar.OtherNodeIndirectSoft))
	d.Set("other_node_soft", flattenNodes(ar.OtherNodeSoft))
	d.Set("same_node", flattenNodes(ar.SameNode))
	d.Set("same_node_soft", flattenNodes(ar.SameNodeSoft))
}
// flattenSnapshotUsage converts the per-snapshot storage usage list into the
// schema-compatible list of maps.
func flattenSnapshotUsage(computeSnapshotUsages compute.ListSnapshotUsage) []map[string]interface{} {
	out := make([]map[string]interface{}, 0, len(computeSnapshotUsages))
	for _, usage := range computeSnapshotUsages {
		out = append(out, map[string]interface{}{
			"count":     usage.Count,
			"stored":    usage.Stored,
			"label":     usage.Label,
			"timestamp": usage.Timestamp,
		})
	}
	return out
}
// flattenPCIDevice converts the list of PCI devices attached to the compute
// into the schema-compatible list of maps. Map keys must match the
// corresponding schema definition exactly.
func flattenPCIDevice(deviceList []compute.ItemPCIDevice) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(deviceList))
	for _, dev := range deviceList {
		temp := map[string]interface{}{
			"ckey":        dev.CKey,
			"meta":        flattens.FlattenMeta(dev.Meta),
			"compute_id":  dev.ComputeID,
			"description": dev.Description,
			"guid":        dev.GUID,
			"hwpath":      dev.HwPath,
			"device_id":   dev.ID,
			"name":        dev.Name,
			"rg_id":       dev.RGID,
			"stack_id":    dev.StackID,
			"status":      dev.Status,
			"system_name": dev.SystemName,
		}
		res = append(res, temp)
	}
	return res
}
// flattenVGPU renders a heterogeneous list of vGPU identifiers as strings.
// Integers are formatted in base 10, floats are truncated toward zero, and
// values of any other type become an empty string.
func flattenVGPU(m []interface{}) []string {
	var out []string
	for _, raw := range m {
		var s string
		switch v := raw.(type) {
		case string:
			s = v
		case int:
			s = strconv.Itoa(v)
		case int64:
			s = strconv.FormatInt(v, 10)
		case float64:
			s = strconv.FormatInt(int64(v), 10)
		}
		out = append(out, s)
	}
	return out
}
// flattenNodes renders a heterogeneous list of node identifiers as strings.
// Integers are formatted in base 10, floats are truncated toward zero, and
// values of any other type become an empty string.
func flattenNodes(m []interface{}) []string {
	var nodes []string
	for _, raw := range m {
		var s string
		switch v := raw.(type) {
		case string:
			s = v
		case int:
			s = strconv.Itoa(v)
		case int64:
			s = strconv.FormatInt(v, 10)
		case float64:
			s = strconv.FormatInt(int64(v), 10)
		}
		nodes = append(nodes, s)
	}
	return nodes
}
// flattenDataCompute populates the ResourceData of the compute data source /
// resource from a compute/get API response. Returns an error only if setting
// the computed "extra_disks" or "network" attributes fails.
func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error {
	// This function expects that compFacts string contains response from API compute/get,
	// i.e. detailed information about compute instance.
	//
	// NOTE: this function modifies ResourceData argument - as such it should never be called
	// from resourceComputeExists(...) method
	log.Debugf("flattenCompute: ID %d, RG ID %d", compFacts.ID, compFacts.RGID)
	// These free-form sub-documents are stored in state as JSON strings;
	// marshal errors are ignored (the values were just decoded from an API
	// response, so re-encoding is not expected to fail).
	customFields, _ := json.Marshal(compFacts.CustomFields)
	devices, _ := json.Marshal(compFacts.Devices)
	userData, _ := json.Marshal(compFacts.Userdata)
	// general fields setting
	d.SetId(fmt.Sprintf("%d", compFacts.ID))
	d.Set("account_id", compFacts.AccountID)
	d.Set("account_name", compFacts.AccountName)
	d.Set("acl", flattenListACLInterface(compFacts.ACL))
	d.Set("affinity_label", compFacts.AffinityLabel)
	d.Set("affinity_rules", flattenAffinityRules(compFacts.AffinityRules))
	d.Set("affinity_weight", compFacts.AffinityWeight)
	d.Set("anti_affinity_rules", flattenAffinityRules(compFacts.AntiAffinityRules))
	d.Set("arch", compFacts.Arch)
	d.Set("boot_order", compFacts.BootOrder)
	d.Set("cd_image_id", compFacts.CdImageId)
	d.Set("clone_reference", compFacts.CloneReference)
	d.Set("clones", compFacts.Clones)
	d.Set("computeci_id", compFacts.ComputeCIID)
	d.Set("cpus", compFacts.CPUs)
	d.Set("created_by", compFacts.CreatedBy)
	d.Set("created_time", compFacts.CreatedTime)
	d.Set("custom_fields", string(customFields))
	d.Set("deleted_by", compFacts.DeletedBy)
	d.Set("deleted_time", compFacts.DeletedTime)
	d.Set("desc", compFacts.Description)
	d.Set("devices", string(devices))
	d.Set("disks", flattenDisk(compFacts.Disks))
	d.Set("driver", compFacts.Driver)
	d.Set("gid", compFacts.GID)
	d.Set("guid", compFacts.GUID)
	d.Set("image_id", compFacts.ImageID)
	d.Set("interfaces", flattenInterfaces(compFacts.Interfaces))
	d.Set("lock_status", compFacts.LockStatus)
	d.Set("manager_id", compFacts.ManagerID)
	d.Set("manager_type", compFacts.ManagerType)
	d.Set("migrationjob", compFacts.MigrationJob)
	d.Set("milestones", compFacts.Milestones)
	d.Set("name", compFacts.Name)
	d.Set("need_reboot", compFacts.NeedReboot)
	d.Set("os_users", flattenOSUsers(compFacts.OSUsers))
	d.Set("pinned", compFacts.Pinned)
	d.Set("ram", compFacts.RAM)
	d.Set("reference_id", compFacts.ReferenceID)
	d.Set("registered", compFacts.Registered)
	d.Set("res_name", compFacts.ResName)
	d.Set("rg_id", compFacts.RGID)
	d.Set("rg_name", compFacts.RGName)
	d.Set("snap_sets", flattenSnapSets(compFacts.SnapSets))
	d.Set("stack_id", compFacts.StackID)
	d.Set("stack_name", compFacts.StackName)
	d.Set("stateless_sep_id", compFacts.StatelessSEPID)
	d.Set("stateless_sep_type", compFacts.StatelessSEPType)
	d.Set("status", compFacts.Status)
	d.Set("tags", flattenTags(compFacts.Tags))
	d.Set("tech_status", compFacts.TechStatus)
	d.Set("updated_by", compFacts.UpdatedBy)
	d.Set("updated_time", compFacts.UpdatedTime)
	d.Set("user_data", string(userData))
	d.Set("user_managed", compFacts.UserManaged)
	d.Set("vgpus", compFacts.VGPUs)
	d.Set("virtual_image_id", compFacts.VirtualImageID)
	//extra fields setting
	// boot disk attributes are derived from the disk list, as they are not
	// top-level fields of the compute/get response
	bootDisk := findBootDisk(compFacts.Disks)
	if bootDisk != nil {
		d.Set("boot_disk_size", bootDisk.SizeMax)
		d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations
		d.Set("sep_id", bootDisk.SEPID)
		d.Set("pool", bootDisk.Pool)
	}
	if len(compFacts.Disks) > 0 {
		log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(compFacts.Disks))
		if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(compFacts.Disks)); err != nil {
			return err
		}
	}
	if len(compFacts.Interfaces) > 0 {
		log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(compFacts.Interfaces))
		if err := d.Set("network", parseComputeInterfacesToNetworks(compFacts.Interfaces)); err != nil {
			return err
		}
	}
	return nil
}
// parseComputeDisksToExtraDisks extracts the "extra disks" attached to this
// compute from the full disk list returned by API compute/get. Extra disks
// are all compute disks except boot ("B" type) disks.
//
// The return value feeds d.Set("extra_disks", ...) of the compute schema,
// which is a plain list of integer disk IDs excluding boot disk IDs.
func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} {
	log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", len(disks))
	// Build the result with append rather than pre-sized indexing. The
	// previous implementation allocated make([]interface{}, len-1), which
	// assumed exactly one boot disk: it panicked with an out-of-range index
	// when no boot disk was present and left trailing nil entries when there
	// was more than one.
	result := make([]interface{}, 0, len(disks))
	for _, disk := range disks {
		if disk.Type == "B" {
			// skip boot disks when iterating over the list of disks
			continue
		}
		result = append(result, disk.ID)
	}
	return result
}
// parseComputeInterfacesToNetworks converts the interfaces list from a
// compute/get response into the list of networks attached to this compute.
// The return value feeds d.Set("network", ...) of the compute schema.
func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} {
	log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", len(ifaces))
	result := make([]interface{}, 0, len(ifaces))
	for _, iface := range ifaces {
		// Keys in this map must correspond to the Schema definition for "network"
		result = append(result, map[string]interface{}{
			"net_id":     iface.NetID,
			"net_type":   iface.NetType,
			"ip_address": iface.IPAddress,
			"mac":        iface.MAC,
		})
	}
	return result
}
// flattenDisk converts the compute disk list into the schema-compatible list
// of maps. Map keys must match the nested "disks" schema definition exactly.
func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
	res := make([]map[string]interface{}, 0, len(diskList))
	for _, disk := range diskList {
		temp := map[string]interface{}{
			"ckey":                  disk.CKey,
			"meta":                  flattens.FlattenMeta(disk.Meta),
			"account_id":            disk.AccountID,
			"boot_partition":        disk.BootPartition,
			"created_time":          disk.CreatedTime,
			"deleted_time":          disk.DeletedTime,
			"desc":                  disk.Description,
			"destruction_time":      disk.DestructionTime,
			"disk_path":             disk.DiskPath,
			"gid":                   disk.GID,
			"guid":                  disk.GUID,
			"disk_id":               disk.ID,
			"image_id":              disk.ImageID,
			"images":                disk.Images,
			"iotune":                flattenIOTune(disk.IOTune),
			"iqn":                   disk.IQN,
			"login":                 disk.Login,
			"milestones":            disk.Milestones,
			"name":                  disk.Name,
			"order":                 disk.Order,
			"params":                disk.Params,
			"parent_id":             disk.ParentID,
			"passwd":                disk.Password,
			"pci_slot":              disk.PCISlot,
			"pool":                  disk.Pool,
			"purge_attempts":        disk.PurgeAttempts,
			"present_to":            disk.PresentTo,
			"purge_time":            disk.PurgeTime,
			"reality_device_number": disk.RealityDeviceNumber,
			"reference_id":          disk.ReferenceID,
			"res_id":                disk.ResID,
			"res_name":              disk.ResName,
			"role":                  disk.Role,
			"sep_id":                disk.SEPID,
			"shareable":             disk.Shareable,
			"size_max":              disk.SizeMax,
			"size_used":             disk.SizeUsed,
			"snapshots":             flattendDiskSnapshotList(disk.Snapshots),
			"status":                disk.Status,
			"tech_status":           disk.TechStatus,
			"type":                  disk.Type,
			"vmid":                  disk.VMID,
		}
		res = append(res, temp)
	}
	return res
}
// flattenIOTune wraps the disk IO tuning limits into a single-element list of
// maps, as required by the nested "iotune" schema block.
func flattenIOTune(iot compute.IOTune) []map[string]interface{} {
	entry := map[string]interface{}{
		"read_bytes_sec":      iot.ReadBytesSec,
		"read_bytes_sec_max":  iot.ReadBytesSecMax,
		"read_iops_sec":       iot.ReadIOPSSec,
		"read_iops_sec_max":   iot.ReadIOPSSecMax,
		"size_iops_sec":       iot.SizeIOPSSec,
		"total_bytes_sec":     iot.TotalBytesSec,
		"total_bytes_sec_max": iot.TotalBytesSecMax,
		"total_iops_sec":      iot.TotalIOPSSec,
		"total_iops_sec_max":  iot.TotalIOPSSecMax,
		"write_bytes_sec":     iot.WriteBytesSec,
		"write_bytes_sec_max": iot.WriteBytesSecMax,
		"write_iops_sec":      iot.WriteIOPSSec,
		"write_iops_sec_max":  iot.WriteIOPSSecMax,
	}
	return []map[string]interface{}{entry}
}
// flattendDiskSnapshotList converts a disk's detailed snapshot list into the
// schema-compatible list of maps.
func flattendDiskSnapshotList(sl compute.ListDetailedSnapshots) []interface{} {
	out := make([]interface{}, 0, len(sl))
	for _, snap := range sl {
		out = append(out, map[string]interface{}{
			"guid":          snap.GUID,
			"label":         snap.Label,
			"res_id":        snap.ResID,
			"snap_set_guid": snap.SnapSetGUID,
			"snap_set_time": snap.SnapSetTime,
			"timestamp":     snap.TimeStamp,
		})
	}
	return out
}

File diff suppressed because it is too large Load Diff

@ -34,6 +34,7 @@ package kvmvm
import ( import (
"context" "context"
"strconv" "strconv"
"strings"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
@ -42,6 +43,260 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
) )
// utilityComputeEnabled enables or disables the compute according to the
// "enabled" schema flag.
func utilityComputeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	enabled := d.Get("enabled").(bool)
	var err error
	if enabled {
		_, err = c.CloudBroker().Compute().Enable(ctx, compute.EnableRequest{ComputeID: computeId})
	} else {
		_, err = c.CloudBroker().Compute().Disable(ctx, compute.DisableRequest{ComputeID: computeId})
	}
	if err != nil {
		return err
	}
	log.Debugf("resourceComputeUpdate: enable=%v Compute ID %s after completing its resource configuration", enabled, d.Id())
	return nil
}
// utilityComputeStarted starts or stops the compute according to the
// "started" schema flag. On start, "alt_boot_id" and "stack_id" are passed
// through; on stop, "force_stop" selects a forced shutdown.
func utilityComputeStarted(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	// d.Id() holds the numeric compute ID assigned at create time; a parse
	// error would only occur for a malformed state ID and is ignored here.
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	if d.Get("started").(bool) {
		req := compute.StartRequest{
			ComputeID: computeId,
		}
		// NOTE(review): d.Get returns the attribute's zero value when unset,
		// so these type assertions are expected to always succeed; the ok
		// checks only guard against a schema type mismatch.
		if altBootId, ok := d.Get("alt_boot_id").(int); ok {
			req.AltBootID = uint64(altBootId)
		}
		if stackId, ok := d.Get("stack_id").(int); ok {
			req.StackID = uint64(stackId)
		}
		if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil {
			return err
		}
	} else {
		req := compute.StopRequest{
			ComputeID: computeId,
		}
		if force, ok := d.Get("force_stop").(bool); ok {
			req.Force = force
		}
		if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil {
			return err
		}
	}
	return nil
}
// utilityComputeResize applies pending CPU and/or RAM changes to the compute
// via a single forced resize call. Unchanged dimensions are sent as zero,
// which the API treats as "leave as is".
func utilityComputeResize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	req := compute.ResizeRequest{
		ComputeID: computeId,
		Force:     true,
	}
	oldCpu, newCpu := d.GetChange("cpu")
	oldRam, newRam := d.GetChange("ram")
	changed := false
	if newCpu.(int) != oldCpu.(int) {
		req.CPU = uint64(newCpu.(int))
		changed = true
	}
	if newRam.(int) != oldRam.(int) {
		req.RAM = uint64(newRam.(int))
		changed = true
	}
	if !changed {
		return nil
	}
	log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d",
		oldCpu.(int), newCpu.(int),
		oldRam.(int), newRam.(int))
	_, err := c.CloudBroker().Compute().Resize(ctx, req)
	return err
}
// utilityComputeBootDiskResize grows the compute boot disk when
// "boot_disk_size" was increased. Shrinking is not supported by the platform,
// so a size decrease is only logged as a warning and otherwise ignored.
func utilityComputeBootDiskResize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	oldSize, newSize := d.GetChange("boot_disk_size")
	if oldSize.(int) < newSize.(int) {
		req := compute.DiskResizeRequest{ComputeID: computeId, Size: uint64(newSize.(int))}
		// prefer the boot disk ID already stored in state; fall back to
		// querying the compute when it is not set
		if diskId, ok := d.GetOk("boot_disk_id"); ok {
			req.DiskID = uint64(diskId.(int))
		} else {
			bootDisk, err := utilityComputeBootDiskCheckPresence(ctx, d, m)
			if err != nil {
				return err
			}
			req.DiskID = bootDisk.ID
		}
		log.Debugf("resourceComputeUpdate: compute ID %s, boot disk ID %d resize %d -> %d",
			d.Id(), d.Get("boot_disk_id").(int), oldSize.(int), newSize.(int))
		_, err := c.CloudBroker().Compute().DiskResize(ctx, req)
		if err != nil {
			return err
		}
	} else if oldSize.(int) > newSize.(int) {
		log.Warnf("resourceComputeUpdate: compute ID %s - shrinking boot disk is not allowed", d.Id())
	}
	return nil
}
// utilityComputeUpdateDisks reconciles the "disks" list of the compute:
// disks removed from the plan are detached/deleted (with the compute stopped
// for the duration and restarted afterwards), newly added disks are attached,
// and disks whose size changed are resized. Boot ("B" type) disks are never
// touched by this function.
func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	deletedDisks := make([]interface{}, 0)
	addedDisks := make([]interface{}, 0)
	updatedDisks := make([]interface{}, 0)
	// diff the old and new disk lists into deleted / added / resized buckets
	oldDisks, newDisks := d.GetChange("disks")
	oldConv := oldDisks.([]interface{})
	newConv := newDisks.([]interface{})
	for _, el := range oldConv {
		if !isContainsDisk(newConv, el) {
			deletedDisks = append(deletedDisks, el)
		}
	}
	for _, el := range newConv {
		if !isContainsDisk(oldConv, el) {
			addedDisks = append(addedDisks, el)
		} else {
			if isChangeDisk(oldConv, el) {
				updatedDisks = append(updatedDisks, el)
			}
		}
	}
	if len(deletedDisks) > 0 {
		// disk deletion requires the compute to be stopped; it is restarted
		// unconditionally after the deletions complete
		stopReq := compute.StopRequest{
			ComputeID: computeId,
			Force:     false,
		}
		_, err := c.CloudBroker().Compute().Stop(ctx, stopReq)
		if err != nil {
			return err
		}
		for _, disk := range deletedDisks {
			diskConv := disk.(map[string]interface{})
			if diskConv["disk_type"].(string) == "B" {
				continue
			}
			req := compute.DiskDelRequest{
				ComputeID:   computeId,
				DiskID:      uint64(diskConv["disk_id"].(int)),
				Permanently: diskConv["permanently"].(bool),
			}
			_, err := c.CloudBroker().Compute().DiskDel(ctx, req)
			if err != nil {
				return err
			}
		}
		req := compute.StartRequest{
			ComputeID: computeId,
			AltBootID: 0,
		}
		_, err = c.CloudBroker().Compute().Start(ctx, req)
		if err != nil {
			return err
		}
	}
	if len(addedDisks) > 0 {
		for _, disk := range addedDisks {
			diskConv := disk.(map[string]interface{})
			if diskConv["disk_type"].(string) == "B" {
				continue
			}
			req := compute.DiskAddRequest{
				ComputeID: computeId,
				DiskName:  diskConv["disk_name"].(string),
				Size:      uint64(diskConv["size"].(int)),
			}
			// optional attributes are passed only when set to a non-zero value
			if diskConv["sep_id"].(int) != 0 {
				req.SepID = uint64(diskConv["sep_id"].(int))
			}
			if diskConv["disk_type"].(string) != "" {
				req.DiskType = diskConv["disk_type"].(string)
			}
			if diskConv["pool"].(string) != "" {
				req.Pool = diskConv["pool"].(string)
			}
			if diskConv["desc"].(string) != "" {
				req.Description = diskConv["desc"].(string)
			}
			if diskConv["image_id"].(int) != 0 {
				req.ImageID = uint64(diskConv["image_id"].(int))
			}
			_, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
			if err != nil {
				return err
			}
		}
	}
	if len(updatedDisks) > 0 {
		for _, disk := range updatedDisks {
			diskConv := disk.(map[string]interface{})
			if diskConv["disk_type"].(string) == "B" {
				continue
			}
			req := compute.DiskResizeRequest{
				ComputeID: computeId,
				DiskID:    uint64(diskConv["disk_id"].(int)),
				Size:      uint64(diskConv["size"].(int)),
			}
			_, err := c.CloudBroker().Compute().DiskResize(ctx, req)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error { func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
@ -154,6 +409,10 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
req.ComputeID = uint64(d.Get("compute_id").(int)) req.ComputeID = uint64(d.Get("compute_id").(int))
} }
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
res, err := c.CloudBroker().Compute().Get(ctx, req) res, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
@ -162,6 +421,15 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
return res, nil return res, nil
} }
// findBootDisk returns a pointer to a copy of the first boot ("B" type) disk
// in the list, or nil when the compute has no boot disk.
func findBootDisk(disks compute.ListDisks) *compute.ItemDisk {
	for i := range disks {
		if disks[i].Type == "B" {
			bootDisk := disks[i]
			return &bootDisk
		}
	}
	return nil
}
func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool { func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool {
if newVal != "" && newVal != oldVal { if newVal != "" && newVal != oldVal {
log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal) log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
@ -288,6 +556,659 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
return nil return nil
} }
// utilityComputeUpdate pushes pending "name" and/or "desc" changes to the
// platform via compute/update.
func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	req := compute.UpdateRequest{ComputeID: computeId}
	if d.HasChange("name") {
		req.Name = d.Get("name").(string)
	}
	if d.HasChange("desc") {
		req.Description = d.Get("desc").(string)
	}
	_, err := c.CloudBroker().Compute().Update(ctx, req)
	return err
}
// utilityComputeUpdateAffinityLabel synchronizes the compute's affinity label
// with the "affinity_label" schema attribute: an empty value removes the
// label, a non-empty value (re)sets it.
func utilityComputeUpdateAffinityLabel(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	affinityLabel := d.Get("affinity_label").(string)
	if affinityLabel == "" {
		req := compute.AffinityLabelRemoveRequest{
			ComputeIDs: []uint64{computeId},
		}
		_, err := c.CloudBroker().Compute().AffinityLabelRemove(ctx, req)
		if err != nil {
			return err
		}
		// Fix: previously execution fell through and called AffinityLabelSet
		// with the empty label right after removing it; removal is the whole
		// job for an empty label, so stop here.
		return nil
	}
	req := compute.AffinityLabelSetRequest{
		ComputeIDs:    []uint64{computeId},
		AffinityLabel: affinityLabel,
	}
	_, err := c.CloudBroker().Compute().AffinityLabelSet(ctx, req)
	if err != nil {
		return err
	}
	return nil
}
// utilityComputeUpdateAffinityRules reconciles the "affinity_rules" list:
// an empty plan clears all rules in one call; otherwise rules missing from
// the plan are removed and newly added ones are created individually.
func utilityComputeUpdateAffinityRules(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	deletedAR := make([]interface{}, 0)
	addedAR := make([]interface{}, 0)
	oldAR, newAR := d.GetChange("affinity_rules")
	oldConv := oldAR.([]interface{})
	newConv := newAR.([]interface{})
	if len(newConv) == 0 {
		// no rules in the plan at all: clear everything with a single call
		req := compute.AffinityRulesClearRequest{
			ComputeIDs: []uint64{computeId},
		}
		_, err := c.CloudBroker().Compute().AffinityRulesClear(ctx, req)
		if err != nil {
			return err
		}
	} else {
		// diff the rule lists into removed / added buckets
		for _, el := range oldConv {
			if !isContainsAR(newConv, el) {
				deletedAR = append(deletedAR, el)
			}
		}
		for _, el := range newConv {
			if !isContainsAR(oldConv, el) {
				addedAR = append(addedAR, el)
			}
		}
		if len(deletedAR) > 0 {
			for _, ar := range deletedAR {
				arConv := ar.(map[string]interface{})
				req := compute.AffinityRuleRemoveRequest{
					ComputeIDs: []uint64{computeId},
					Topology:   arConv["topology"].(string),
					Policy:     arConv["policy"].(string),
					Mode:       arConv["mode"].(string),
					Key:        arConv["key"].(string),
					Value:      arConv["value"].(string),
				}
				_, err := c.CloudBroker().Compute().AffinityRuleRemove(ctx, req)
				if err != nil {
					return err
				}
			}
		}
		if len(addedAR) > 0 {
			for _, ar := range addedAR {
				arConv := ar.(map[string]interface{})
				req := compute.AffinityRuleAddRequest{
					ComputeIDs: []uint64{computeId},
					Topology:   arConv["topology"].(string),
					Policy:     arConv["policy"].(string),
					Mode:       arConv["mode"].(string),
					Key:        arConv["key"].(string),
					Value:      arConv["value"].(string),
				}
				_, err := c.CloudBroker().Compute().AffinityRuleAdd(ctx, req)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// utilityComputeUpdateAntiAffinityRules reconciles the "anti_affinity_rules"
// list; mirrors utilityComputeUpdateAffinityRules but drives the
// anti-affinity API endpoints.
func utilityComputeUpdateAntiAffinityRules(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	deletedAR := make([]interface{}, 0)
	addedAR := make([]interface{}, 0)
	oldAR, newAR := d.GetChange("anti_affinity_rules")
	oldConv := oldAR.([]interface{})
	newConv := newAR.([]interface{})
	if len(newConv) == 0 {
		// no rules in the plan at all: clear everything with a single call
		req := compute.AntiAffinityRulesClearRequest{
			ComputeIDs: []uint64{computeId},
		}
		_, err := c.CloudBroker().Compute().AntiAffinityRulesClear(ctx, req)
		if err != nil {
			return err
		}
	} else {
		// diff the rule lists into removed / added buckets
		for _, el := range oldConv {
			if !isContainsAR(newConv, el) {
				deletedAR = append(deletedAR, el)
			}
		}
		for _, el := range newConv {
			if !isContainsAR(oldConv, el) {
				addedAR = append(addedAR, el)
			}
		}
		if len(deletedAR) > 0 {
			for _, ar := range deletedAR {
				arConv := ar.(map[string]interface{})
				req := compute.AntiAffinityRuleRemoveRequest{
					ComputeIDs: []uint64{computeId},
					Topology:   arConv["topology"].(string),
					Policy:     arConv["policy"].(string),
					Mode:       arConv["mode"].(string),
					Key:        arConv["key"].(string),
					Value:      arConv["value"].(string),
				}
				_, err := c.CloudBroker().Compute().AntiAffinityRuleRemove(ctx, req)
				if err != nil {
					return err
				}
			}
		}
		if len(addedAR) > 0 {
			for _, ar := range addedAR {
				arConv := ar.(map[string]interface{})
				req := compute.AntiAffinityRuleAddRequest{
					ComputeIDs: []uint64{computeId},
					Topology:   arConv["topology"].(string),
					Policy:     arConv["policy"].(string),
					Mode:       arConv["mode"].(string),
					Key:        arConv["key"].(string),
					Value:      arConv["value"].(string),
				}
				_, err := c.CloudBroker().Compute().AntiAffinityRuleAdd(ctx, req)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// utilityComputeUpdateTags reconciles the "tags" set: tags removed from the
// plan are deleted on the platform, newly added tags are created.
func utilityComputeUpdateTags(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	oldSet, newSet := d.GetChange("tags")
	for _, itf := range oldSet.(*schema.Set).Difference(newSet.(*schema.Set)).List() {
		tag := itf.(map[string]interface{})
		req := compute.TagRemoveRequest{
			ComputeIDs: []uint64{computeId},
			Key:        tag["key"].(string),
		}
		if _, err := c.CloudBroker().Compute().TagRemove(ctx, req); err != nil {
			return err
		}
	}
	for _, itf := range newSet.(*schema.Set).Difference(oldSet.(*schema.Set)).List() {
		tag := itf.(map[string]interface{})
		req := compute.TagAddRequest{
			ComputeIDs: []uint64{computeId},
			Key:        tag["key"].(string),
			Value:      tag["value"].(string),
		}
		if _, err := c.CloudBroker().Compute().TagAdd(ctx, req); err != nil {
			return err
		}
	}
	return nil
}
// utilityComputeUpdatePFW reconciles the "port_forwarding" set: rules removed
// from the plan are deleted, newly added rules are created. A
// public_port_end of -1 is a sentinel for "single port" (end == start).
func utilityComputeUpdatePFW(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	oldSet, newSet := d.GetChange("port_forwarding")
	deletedPfws := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
	if len(deletedPfws) > 0 {
		for _, pfwInterface := range deletedPfws {
			pfwItem := pfwInterface.(map[string]interface{})
			req := compute.PFWDelRequest{
				ComputeID:       computeId,
				PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
				LocalBasePort:   uint64(pfwItem["local_port"].(int)),
				Proto:           pfwItem["proto"].(string),
				RuleID:          uint64(pfwItem["rule_id"].(int)),
			}
			// -1 means the rule covers a single public port
			if pfwItem["public_port_end"].(int) == -1 {
				req.PublicPortEnd = req.PublicPortStart
			} else {
				req.PublicPortEnd = uint64(pfwItem["public_port_end"].(int))
			}
			if pfwItem["reason"].(string) != "" {
				req.Reason = pfwItem["reason"].(string)
			}
			_, err := c.CloudBroker().Compute().PFWDel(ctx, req)
			if err != nil {
				return err
			}
		}
	}
	addedPfws := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
	if len(addedPfws) > 0 {
		for _, pfwInterface := range addedPfws {
			pfwItem := pfwInterface.(map[string]interface{})
			req := compute.PFWAddRequest{
				ComputeID:       computeId,
				PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
				PublicPortEnd:   int64(pfwItem["public_port_end"].(int)),
				LocalBasePort:   uint64(pfwItem["local_port"].(int)),
				Proto:           pfwItem["proto"].(string),
			}
			if pfwItem["reason"].(string) != "" {
				req.Reason = pfwItem["reason"].(string)
			}
			pwfId, err := c.CloudBroker().Compute().PFWAdd(ctx, req)
			if err != nil {
				return err
			}
			// NOTE(review): "rule_id" is overwritten on every iteration, so
			// after the loop it holds only the last added rule's ID - confirm
			// this matches the schema's intent.
			d.Set("rule_id", pwfId)
		}
	}
	return nil
}
// utilityComputeRestore restores a deleted compute from the recycle bin and
// then re-applies the "enabled" and "started" states when those attributes
// are set in the plan.
func utilityComputeRestore(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	if _, err := c.CloudBroker().Compute().Restore(ctx, compute.RestoreRequest{ComputeID: computeId}); err != nil {
		return err
	}
	if _, ok := d.GetOk("enabled"); ok {
		if err := utilityComputeEnabled(ctx, d, m); err != nil {
			return err
		}
	}
	if _, ok := d.GetOk("started"); ok {
		if err := utilityComputeStarted(ctx, d, m); err != nil {
			return err
		}
	}
	return nil
}
// utilityComputeUpdateUserAccess reconciles the "user_access" set: entries
// removed from the plan are revoked on the platform, newly added entries are
// granted.
func utilityComputeUpdateUserAccess(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	oldSet, newSet := d.GetChange("user_access")
	for _, itf := range oldSet.(*schema.Set).Difference(newSet.(*schema.Set)).List() {
		access := itf.(map[string]interface{})
		req := compute.UserRevokeRequest{
			ComputeID: computeId,
			Username:  access["username"].(string),
		}
		if _, err := c.CloudBroker().Compute().UserRevoke(ctx, req); err != nil {
			return err
		}
	}
	for _, itf := range newSet.(*schema.Set).Difference(oldSet.(*schema.Set)).List() {
		access := itf.(map[string]interface{})
		req := compute.UserGrantRequest{
			ComputeID:  computeId,
			Username:   access["username"].(string),
			AccessType: access["access_type"].(string),
		}
		if _, err := c.CloudBroker().Compute().UserGrant(ctx, req); err != nil {
			return err
		}
	}
	return nil
}
// utilityComputeUpdateSnapshot reconciles the "snapshot" set: snapshots
// removed from the plan are deleted on the platform, newly added ones are
// created.
func utilityComputeUpdateSnapshot(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	oldSet, newSet := d.GetChange("snapshot")
	for _, itf := range oldSet.(*schema.Set).Difference(newSet.(*schema.Set)).List() {
		snapshot := itf.(map[string]interface{})
		req := compute.SnapshotDeleteRequest{
			ComputeID: computeId,
			Label:     snapshot["label"].(string),
		}
		if _, err := c.CloudBroker().Compute().SnapshotDelete(ctx, req); err != nil {
			return err
		}
	}
	for _, itf := range newSet.(*schema.Set).Difference(oldSet.(*schema.Set)).List() {
		snapshot := itf.(map[string]interface{})
		req := compute.SnapshotCreateRequest{
			ComputeID: computeId,
			Label:     snapshot["label"].(string),
		}
		if _, err := c.CloudBroker().Compute().SnapshotCreate(ctx, req); err != nil {
			return err
		}
	}
	return nil
}
// utilityComputeRollback rolls the compute back to the snapshot named in the
// "rollback" attribute. The compute is stopped before the rollback and
// started again afterwards, as required by the platform.
func utilityComputeRollback(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	if rollback, ok := d.GetOk("rollback"); ok {
		req := compute.StopRequest{
			ComputeID: computeId,
			Force:     false,
		}
		_, err := c.CloudBroker().Compute().Stop(ctx, req)
		if err != nil {
			return err
		}
		// "rollback" is a single-element set; take its only entry.
		// NOTE(review): indexing List()[0] assumes GetOk never returns an
		// empty set here - confirm the schema enforces MaxItems: 1.
		rollbackInterface := rollback.(*schema.Set).List()[0]
		rollbackItem := rollbackInterface.(map[string]interface{})
		rollbackReq := compute.SnapshotRollbackRequest{
			ComputeID: computeId,
			Label:     rollbackItem["label"].(string),
		}
		_, err = c.CloudBroker().Compute().SnapshotRollback(ctx, rollbackReq)
		if err != nil {
			return err
		}
		startReq := compute.StartRequest{ComputeID: computeId}
		log.Debugf("utilityComputeRollback: starting compute %d", computeId)
		_, err = c.CloudBroker().Compute().Start(ctx, startReq)
		if err != nil {
			return err
		}
	}
	return nil
}
// utilityComputeUpdateCD reconciles the "cd" set (inserted CD-ROM image):
// a removed entry ejects the current image, an added entry inserts the new
// one. The optional "reason" attribute is forwarded to both calls.
func utilityComputeUpdateCD(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	oldSet, newSet := d.GetChange("cd")
	deletedCd := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
	if len(deletedCd) > 0 {
		req := compute.CDEjectRequest{
			ComputeID: computeId,
		}
		if reason, ok := d.GetOk("reason"); ok {
			req.Reason = reason.(string)
		}
		_, err := c.CloudBroker().Compute().CDEject(ctx, req)
		if err != nil {
			return err
		}
	}
	addedCd := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
	if len(addedCd) > 0 {
		// only one CD image can be inserted at a time; use the first entry
		cdItem := addedCd[0].(map[string]interface{})
		req := compute.CDInsertRequest{
			ComputeID: computeId,
			CDROMID:   uint64(cdItem["cdrom_id"].(int)),
		}
		if reason, ok := d.GetOk("reason"); ok {
			req.Reason = reason.(string)
		}
		_, err := c.CloudBroker().Compute().CDInsert(ctx, req)
		if err != nil {
			return err
		}
	}
	return nil
}
// utilityComputePinToStack pins the compute to the stack given by
// "target_stack_id" or unpins it, according to the transition of the
// "pin_to_stack" flag.
func utilityComputePinToStack(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	wasPinned, nowPinned := d.GetChange("pin_to_stack")
	switch {
	case wasPinned.(bool) && !nowPinned.(bool):
		// true -> false: unpin
		req := compute.UnpinFromStackRequest{
			ComputeID: computeId,
		}
		if _, err := c.CloudBroker().Compute().UnpinFromStack(ctx, req); err != nil {
			return err
		}
	case !wasPinned.(bool) && nowPinned.(bool):
		// false -> true: pin to the target stack
		req := compute.PinToStackRequest{
			ComputeID:     computeId,
			TargetStackID: uint64(d.Get("target_stack_id").(int)),
		}
		if force, ok := d.Get("force_pin").(bool); ok {
			req.Force = force
		}
		if _, err := c.CloudBroker().Compute().PinToStack(ctx, req); err != nil {
			return err
		}
	}
	return nil
}
// utilityComputePause pauses or resumes the compute according to the
// transition of the "pause" flag.
func utilityComputePause(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	wasPaused, nowPaused := d.GetChange("pause")
	switch {
	case wasPaused.(bool) && !nowPaused.(bool):
		// true -> false: resume
		req := compute.ResumeRequest{
			ComputeID: computeId,
		}
		if _, err := c.CloudBroker().Compute().Resume(ctx, req); err != nil {
			return err
		}
	case !wasPaused.(bool) && nowPaused.(bool):
		// false -> true: pause
		req := compute.PauseRequest{
			ComputeID: computeId,
		}
		if _, err := c.CloudBroker().Compute().Pause(ctx, req); err != nil {
			return err
		}
	}
	return nil
}
// utilityComputeReset performs a hard reset of the compute on a
// false -> true transition of the "reset" flag; any other transition is a
// no-op.
func utilityComputeReset(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
	wasReset, doReset := d.GetChange("reset")
	if wasReset.(bool) || !doReset.(bool) {
		return nil
	}
	_, err := c.CloudBroker().Compute().Reset(ctx, compute.ResetRequest{ComputeID: computeId})
	return err
}
// utilityComputeUpdateImage redeploys a compute from a new boot image when the
// "image_id" attribute has changed. The compute is gracefully stopped first
// (the platform requires a stopped compute for redeploy), then redeployed with
// the optional "boot_disk_size", "data_disks", "auto_start" and "force_stop"
// settings applied. When "image_id" is unchanged the function returns
// immediately without touching the compute.
func utilityComputeUpdateImage(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

	oldImage, newImage := d.GetChange("image_id")
	if oldImage.(int) == newImage.(int) {
		// Nothing to redeploy. Previously the compute was stopped
		// unconditionally even in this case, leaving it stopped for no reason.
		return nil
	}

	stopReq := compute.StopRequest{
		ComputeID: computeId,
		Force:     false, // graceful stop before redeploy
	}
	if _, err := c.CloudBroker().Compute().Stop(ctx, stopReq); err != nil {
		return err
	}

	req := compute.RedeployRequest{
		ComputeID: computeId,
		ImageID:   uint64(newImage.(int)),
	}
	if diskSize, ok := d.GetOk("boot_disk_size"); ok {
		req.DiskSize = uint64(diskSize.(int))
	}
	if dataDisks, ok := d.GetOk("data_disks"); ok {
		req.DataDisks = dataDisks.(string)
	}
	if autoStart, ok := d.GetOk("auto_start"); ok {
		req.AutoStart = autoStart.(bool)
	}
	if forceStop, ok := d.GetOk("force_stop"); ok {
		req.ForceStop = forceStop.(bool)
	}
	if _, err := c.CloudBroker().Compute().Redeploy(ctx, req); err != nil {
		return err
	}
	return nil
}
// utilityComputeUpdateCustomFields pushes the "custom_fields" JSON string to
// the platform after stripping backslashes, newlines, tabs and surrounding
// whitespace. An empty result after cleaning is a no-op (previously set
// custom fields are left untouched).
func utilityComputeUpdateCustomFields(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	c := m.(*controller.ControllerCfg)
	computeId, _ := strconv.ParseUint(d.Id(), 10, 64)

	// Single-pass removal of escape characters, then trim.
	cleaner := strings.NewReplacer("\\", "", "\n", "", "\t", "")
	fields := strings.TrimSpace(cleaner.Replace(d.Get("custom_fields").(string)))
	if len(fields) == 0 {
		return nil
	}

	req := compute.SetCustomFieldsRequest{
		ComputeID:    computeId,
		CustomFields: fields,
	}
	_, err := c.CloudBroker().Compute().SetCustomFields(ctx, req)
	return err
}
func isChangeDisk(els []interface{}, el interface{}) bool { func isChangeDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els { for _, elOld := range els {
elOldConv := elOld.(map[string]interface{}) elOldConv := elOld.(map[string]interface{})

@ -1,5 +1,5 @@
/* /*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors: Authors:
Petr Krutov, <petr.krutov@digitalenergy.online> Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online> Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -10,7 +10,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,

@ -9,7 +9,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
@ -44,11 +44,11 @@ import (
func dataSourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
lb, err := utilityLBCheckPresence(ctx, d, m) lb, err := utilityLBCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.FormatUint(lb.ID, 10)) d.SetId(strconv.FormatUint(lb.ID, 10))
flattenLB(d, lb) flattenLB(d, lb)
return nil return nil

@ -44,8 +44,10 @@ import (
func dataSourceLBListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceLBListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
lbList, err := utilityLBListCheckPresence(ctx, d, m) lbList, err := utilityLBListCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
d.SetId(id.String()) d.SetId(id.String())
d.Set("items", flattenLBList(lbList)) d.Set("items", flattenLBList(lbList))

@ -44,8 +44,10 @@ import (
func dataSourceLBListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourceLBListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
lbList, err := utilityLBListDeletedCheckPresence(ctx, d, m) lbList, err := utilityLBListDeletedCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
id := uuid.New() id := uuid.New()
d.SetId(id.String()) d.SetId(id.String())
d.Set("items", flattenLBList(lbList)) d.Set("items", flattenLBList(lbList))

@ -96,11 +96,14 @@ func flattenLB(d *schema.ResourceData, lb *lb.RecordLB) {
d.Set("ha_mode", lb.HAMode) d.Set("ha_mode", lb.HAMode)
d.Set("ckey", lb.CKey) d.Set("ckey", lb.CKey)
d.Set("meta", flattens.FlattenMeta(lb.Meta)) d.Set("meta", flattens.FlattenMeta(lb.Meta))
d.Set("acl", flattenACl(lb.ACL))
d.Set("backend_haip", lb.BackendHAIP)
d.Set("backends", flattenLBBackends(lb.Backends)) d.Set("backends", flattenLBBackends(lb.Backends))
d.Set("desc", lb.Description) d.Set("desc", lb.Description)
d.Set("dp_api_user", lb.DPAPIUser) d.Set("dp_api_user", lb.DPAPIUser)
d.Set("dp_api_password", lb.DPAPIPassword) d.Set("dp_api_password", lb.DPAPIPassword)
d.Set("extnet_id", lb.ExtNetID) d.Set("extnet_id", lb.ExtNetID)
d.Set("frontend_haip", lb.FrontendHAIP)
d.Set("frontends", flattenFrontends(lb.Frontends)) d.Set("frontends", flattenFrontends(lb.Frontends))
d.Set("gid", lb.GID) d.Set("gid", lb.GID)
d.Set("guid", lb.GUID) d.Set("guid", lb.GUID)
@ -108,6 +111,7 @@ func flattenLB(d *schema.ResourceData, lb *lb.RecordLB) {
d.Set("image_id", lb.ImageID) d.Set("image_id", lb.ImageID)
d.Set("milestones", lb.Milestones) d.Set("milestones", lb.Milestones)
d.Set("name", lb.Name) d.Set("name", lb.Name)
d.Set("part_k8s", lb.PartK8s)
d.Set("primary_node", flattenNode(lb.PrimaryNode)) d.Set("primary_node", flattenNode(lb.PrimaryNode))
d.Set("rg_id", lb.RGID) d.Set("rg_id", lb.RGID)
d.Set("secondary_node", flattenNode(lb.SecondaryNode)) d.Set("secondary_node", flattenNode(lb.SecondaryNode))
@ -216,6 +220,7 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} {
temp := map[string]interface{}{ temp := map[string]interface{}{
"ha_mode": lb.HAMode, "ha_mode": lb.HAMode,
"acl": flattenACl(lb.ACL), "acl": flattenACl(lb.ACL),
"backend_haip": lb.BackendHAIP,
"backends": flattenLBBackends(lb.Backends), "backends": flattenLBBackends(lb.Backends),
"created_by": lb.CreatedBy, "created_by": lb.CreatedBy,
"created_time": lb.CreatedTime, "created_time": lb.CreatedTime,
@ -225,6 +230,7 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} {
"dp_api_user": lb.DPAPIUser, "dp_api_user": lb.DPAPIUser,
"dp_api_password": lb.DPAPIPassword, "dp_api_password": lb.DPAPIPassword,
"extnet_id": lb.ExtNetID, "extnet_id": lb.ExtNetID,
"frontend_haip": lb.FrontendHAIP,
"frontends": flattenFrontends(lb.Frontends), "frontends": flattenFrontends(lb.Frontends),
"gid": lb.GID, "gid": lb.GID,
"guid": lb.GUID, "guid": lb.GUID,
@ -247,6 +253,7 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} {
} }
func flattenACl(m interface{}) string { func flattenACl(m interface{}) string {
switch d := m.(type) { switch d := m.(type) {
case string: case string:
return d return d
@ -259,4 +266,5 @@ func flattenACl(m interface{}) string {
default: default:
return "" return ""
} }
} }

@ -3,66 +3,32 @@ package lb
import ( import (
"context" "context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
) )
func existLBID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
c := m.(*controller.ControllerCfg) errs := []error{}
lbId := uint64(d.Get("lb_id").(int))
req := lb.ListRequest{} if err := ic.ExistRG(ctx, uint64(d.Get("rg_id").(int)), c); err != nil {
errs = append(errs, err)
lbList, err := c.CloudBroker().LB().List(ctx, req)
if err != nil {
return false, err
} }
return len(lbList.FilterByID(lbId).Data) != 0, nil if err := ic.ExistExtNetInLb(ctx, uint64(d.Get("extnet_id").(int)), c); err != nil {
} errs = append(errs, err)
func existRGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
rgId := uint64(d.Get("rg_id").(int))
req := rg.ListRequest{}
rgList, err := c.CloudBroker().RG().List(ctx, req)
if err != nil {
return false, err
} }
return len(rgList.FilterByID(rgId).Data) != 0, nil if err := ic.ExistVinsInLb(ctx, uint64(d.Get("vins_id").(int)), c); err != nil {
} errs = append(errs, err)
func existExtNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
extNetID := uint64(d.Get("extnet_id").(int))
req := extnet.ListRequest{}
extNetList, err := c.CloudBroker().ExtNet().List(ctx, req)
if err != nil {
return false, err
} }
return len(extNetList.FilterByID(extNetID).Data) != 0, nil return dc.ErrorsToDiagnostics(errs)
} }
func existViNSID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { func checkParamsExistenceLb(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
c := m.(*controller.ControllerCfg) err := ic.ExistLB(ctx, uint64(d.Get("lb_id").(int)), c)
vinsID := uint64(d.Get("vins_id").(int)) return diag.FromErr(err)
req := vins.ListRequest{}
vinsList, err := c.CloudBroker().VINS().List(ctx, req)
if err != nil {
return false, err
}
return len(vinsList.FilterByID(vinsID).Data) != 0, nil
} }

@ -34,6 +34,7 @@ package lb
import ( import (
"context" "context"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"strconv" "strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@ -46,36 +47,13 @@ import (
) )
func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBCreate") log.Debugf("resourceLBCreate called with name: %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
haveRGID, err := existRGID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourceLBCreate: can't create LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveExtNetID, err := existExtNetID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveExtNetID {
return diag.Errorf("resourceLBCreate: can't create LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int))
}
haveVins, err := existViNSID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveVins { if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diag.Errorf("resourceLBCreate: can't create LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int)) return diags
} }
c := m.(*controller.ControllerCfg)
req := lb.CreateRequest{ req := lb.CreateRequest{
Name: d.Get("name").(string), Name: d.Get("name").(string),
RGID: uint64(d.Get("rg_id").(int)), RGID: uint64(d.Get("rg_id").(int)),
@ -88,49 +66,40 @@ func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}
if desc, ok := d.GetOk("desc"); ok { if desc, ok := d.GetOk("desc"); ok {
req.Description = desc.(string) req.Description = desc.(string)
} }
if haMode, ok := d.GetOk("ha_mode"); ok {
req.HighlyAvailable = haMode.(bool)
}
lbId, err := c.CloudBroker().LB().Create(ctx, req) lbId, err := c.CloudBroker().LB().Create(ctx, req)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.FormatUint(lbId, 10)) d.SetId(strconv.FormatUint(lbId, 10))
d.Set("lb_id", lbId) d.Set("lb_id", lbId)
_, err = utilityLBCheckPresence(ctx, d, m) var warnings dc.Warnings
if err != nil {
return diag.FromErr(err)
}
if enable, ok := d.GetOk("enable"); ok { if enable, ok := d.GetOk("enable"); ok {
lbId := uint64(d.Get("lb_id").(int))
if enable.(bool) { if enable.(bool) {
req := lb.EnableRequest{ if err := resourceLbEnable(ctx, lbId, m); err != nil {
LBID: lbId, warnings.Add(err)
}
_, err := c.CloudBroker().LB().Enable(ctx, req)
if err != nil {
return diag.FromErr(err)
} }
} else { } else {
req := lb.DisableRequest{ if err := resourceLbDisable(ctx, lbId, m); err != nil {
LBID: lbId, warnings.Add(err)
}
_, err := c.CloudBroker().LB().Disable(ctx, req)
if err != nil {
return diag.FromErr(err)
} }
} }
} }
return resourceLBRead(ctx, d, m) return append(warnings.Get(), resourceLBRead(ctx, d, m)...)
} }
func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBRead") log.Debugf("resourceLBRead called for lb_id %s", d.Id())
c := m.(*controller.ControllerCfg) // c := m.(*controller.ControllerCfg)
lbRec, err := utilityLBCheckPresence(ctx, d, m) lbRec, err := utilityLBCheckPresence(ctx, d, m)
if lbRec == nil { if lbRec == nil {
@ -147,43 +116,44 @@ func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{})
case status.Created: case status.Created:
case status.Deleting: case status.Deleting:
case status.Deleted: case status.Deleted:
lbId, _ := strconv.ParseUint(d.Id(), 10, 64) // lbId, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := lb.RestoreRequest{LBID: lbId} // restoreReq := lb.RestoreRequest{LBID: lbId}
_, err := c.CloudBroker().LB().Restore(ctx, restoreReq) // _, err := c.CloudBroker().LB().Restore(ctx, restoreReq)
if err != nil { // if err != nil {
return diag.FromErr(err) // return diag.FromErr(err)
} // }
if enable := d.Get("enable"); enable.(bool) { // if enable := d.Get("enable"); enable.(bool) {
req := lb.EnableRequest{ // req := lb.EnableRequest{
LBID: lbId, // LBID: lbId,
} // }
_, err := c.CloudBroker().LB().Enable(ctx, req) // _, err := c.CloudBroker().LB().Enable(ctx, req)
if err != nil { // if err != nil {
return diag.FromErr(err) // return diag.FromErr(err)
} // }
} // }
if start := d.Get("start"); start.(bool) { // if start := d.Get("start"); start.(bool) {
if enable := d.Get("enable"); enable.(bool) { // if enable := d.Get("enable"); enable.(bool) {
req := lb.StartRequest{ // req := lb.StartRequest{
LBID: lbId, // LBID: lbId,
} // }
_, err := c.CloudBroker().LB().Start(ctx, req) // _, err := c.CloudBroker().LB().Start(ctx, req)
if err != nil { // if err != nil {
return diag.FromErr(err) // return diag.FromErr(err)
} // }
} else { // } else {
return diag.Errorf("To start the LB, please, enable LB first.") // return diag.Errorf("To start the LB, please, enable LB first.")
} // }
} // }
hasChanged = true // hasChanged = true
case status.Destroying: case status.Destroying:
return diag.Errorf("The LB is in progress with status: %s", lbRec.Status) return diag.Errorf("The LB is in progress with status: %s", lbRec.Status)
case status.Destroyed: case status.Destroyed:
d.SetId("") d.SetId("")
return resourceLBCreate(ctx, d, m) return diag.Errorf("The resource cannot be read because it has been destroyed")
// return resourceLBCreate(ctx, d, m)
case status.Enabled: case status.Enabled:
case status.Enabling: case status.Enabling:
case status.Disabling: case status.Disabling:
@ -206,10 +176,11 @@ func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{})
} }
func resourceLBDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBDelete") log.Debugf("resourceLBDelete called with lb id: %v", d.Get("lb_id").(int))
_, err := utilityLBCheckPresence(ctx, d, m) _, err := utilityLBCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -233,34 +204,11 @@ func resourceLBDelete(ctx context.Context, d *schema.ResourceData, m interface{}
} }
func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBUpdate") log.Debugf("resourceLBUpdate called for lb_id %s", d.Id())
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
haveRGID, err := existRGID(ctx, d, m) if diags := checkParamsExistence(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourceLBUpdate: can't update LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveExtNetID, err := existExtNetID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveExtNetID {
return diag.Errorf("resourceLBUpdate: can't update LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int))
}
haveVins, err := existViNSID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveVins {
return diag.Errorf("resourceLBUpdate: can't update LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int))
} }
lbRec, err := utilityLBCheckPresence(ctx, d, m) lbRec, err := utilityLBCheckPresence(ctx, d, m)
@ -278,34 +226,28 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}
case status.Created: case status.Created:
case status.Deleting: case status.Deleting:
case status.Deleted: case status.Deleted:
lbId, _ := strconv.ParseUint(d.Id(), 10, 64) restore, ok := d.GetOk("restore")
restoreReq := lb.RestoreRequest{LBID: lbId} if ok && restore.(bool) {
if err := resourceLbRestore(ctx, lbRec.ID, m); err != nil {
_, err := c.CloudBroker().LB().Restore(ctx, restoreReq) return diag.FromErr(err)
if err != nil { }
return diag.FromErr(err)
} }
if enable := d.Get("enable"); enable.(bool) { enable, ok := d.GetOk("enable")
req := lb.EnableRequest{ if ok && enable.(bool) {
LBID: lbId, if err := resourceLbEnable(ctx, lbRec.ID, m); err != nil {
}
_, err := c.CloudBroker().LB().Enable(ctx, req)
if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
} }
if start := d.Get("start"); start.(bool) {
if enable := d.Get("enable"); enable.(bool) { start, ok := d.GetOk("start")
req := lb.StartRequest{ if ok && start.(bool) {
LBID: lbId, if enable.(bool) {
} if err := resourceLbStart(ctx, lbRec.ID, m); err != nil {
_, err := c.CloudBroker().LB().Start(ctx, req)
if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
} else { } else {
return diag.Errorf("To start the LB, please, enable LB first.") return diag.Errorf("to start the LB, please, enable LB first.")
} }
} }
@ -314,7 +256,8 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}
return diag.Errorf("The LB is in progress with status: %s", lbRec.Status) return diag.Errorf("The LB is in progress with status: %s", lbRec.Status)
case status.Destroyed: case status.Destroyed:
d.SetId("") d.SetId("")
return resourceLBCreate(ctx, d, m) return diag.Errorf("The resource cannot be updated because it has been destroyed")
// return resourceLBCreate(ctx, d, m)
case status.Enabled: case status.Enabled:
case status.Enabling: case status.Enabling:
case status.Disabling: case status.Disabling:
@ -332,100 +275,201 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}
} }
if d.HasChange("enable") { if d.HasChange("enable") {
enable := d.Get("enable").(bool) if err := resourceLbChangeEnable(ctx, d, lbRec.ID, m); err != nil {
if enable { return diag.FromErr(err)
req := lb.EnableRequest{ }
LBID: uint64(d.Get("lb_id").(int)), }
}
_, err := c.CloudBroker().LB().Enable(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
req := lb.DisableRequest{
LBID: uint64(d.Get("lb_id").(int)),
}
_, err := c.CloudBroker().LB().Disable(ctx, req)
if err != nil {
return diag.FromErr(err)
}
if d.HasChange("ha_mode") {
if err := resourceLbChangeHaMode(ctx, d, lbRec.ID, m); err != nil {
return diag.FromErr(err)
} }
} }
if d.HasChange("start") { if d.HasChange("start") {
start := d.Get("start").(bool) if err := resourceLbChangeStart(ctx, d, lbRec.ID, m); err != nil {
lbId := uint64(d.Get("lb_id").(int)) return diag.FromErr(err)
if start {
req := lb.StartRequest{LBID: lbId}
_, err := c.CloudBroker().LB().Start(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
req := lb.StopRequest{LBID: lbId}
_, err := c.CloudBroker().LB().Stop(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} }
} }
if d.HasChange("desc") { if d.HasChange("desc") {
req := lb.UpdateRequest{ if err := resourceLbChangeDesc(ctx, d, lbRec.ID, m); err != nil {
LBID: uint64(d.Get("lb_id").(int)), return diag.FromErr(err)
Description: d.Get("desc").(string),
} }
}
_, err := c.CloudBroker().LB().Update(ctx, req) if d.HasChange("restart") {
if err != nil { if err := resourceLbChangeRestart(ctx, d, lbRec.ID, m); err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
} }
if d.HasChange("restart") { if d.HasChange("config_reset") {
restart := d.Get("restart").(bool) if err := resourceLbChangeConfigReset(ctx, d, lbRec.ID, m); err != nil {
if restart { return diag.FromErr(err)
req := lb.RestartRequest{ }
LBID: uint64(d.Get("lb_id").(int)), }
}
_, err := c.CloudBroker().LB().Restart(ctx, req) return resourceLBRead(ctx, d, m)
if err != nil { }
return diag.FromErr(err)
} func resourceLbEnable(ctx context.Context, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := lb.EnableRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Enable(ctx, req)
return err
}
func resourceLbDisable(ctx context.Context, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := lb.DisableRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Disable(ctx, req)
return err
}
func resourceLbRestore(ctx context.Context, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
restoreReq := lb.RestoreRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Restore(ctx, restoreReq)
return err
}
func resourceLbStart(ctx context.Context, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := lb.StartRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Start(ctx, req)
return err
}
func resourceLbChangeEnable(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
enable := d.Get("enable").(bool)
if enable {
if err := resourceLbEnable(ctx, lbId, m); err != nil {
return err
}
} else {
if err := resourceLbDisable(ctx, lbId, m); err != nil {
return err
} }
} }
if d.HasChange("restore") { return nil
restore := d.Get("restore").(bool) }
if restore {
req := lb.RestoreRequest{
LBID: uint64(d.Get("lb_id").(int)),
}
_, err := c.CloudBroker().LB().Restore(ctx, req) func resourceLbChangeHaMode(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
if err != nil { c := m.(*controller.ControllerCfg)
return diag.FromErr(err) haModeOn := d.Get("ha_mode").(bool)
}
if haModeOn {
req := lb.HighlyAvailableRequest{
LBID: lbId,
}
if _, err := c.CloudBroker().LB().HighlyAvailable(ctx, req); err != nil {
return err
} }
} }
if d.HasChange("config_reset") { return nil
cfgReset := d.Get("config_reset").(bool) }
if cfgReset {
req := lb.ConfigResetRequest{
LBID: uint64(d.Get("lb_id").(int)),
}
_, err := c.CloudBroker().LB().ConfigReset(ctx, req) func resourceLbChangeStart(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
if err != nil { c := m.(*controller.ControllerCfg)
return diag.FromErr(err) start := d.Get("start").(bool)
}
if start {
req := lb.StartRequest{LBID: lbId}
if _, err := c.CloudBroker().LB().Start(ctx, req); err != nil {
return err
}
} else {
req := lb.StopRequest{LBID: lbId}
if _, err := c.CloudBroker().LB().Stop(ctx, req); err != nil {
return err
} }
} }
return resourceLBRead(ctx, d, m) return nil
}
func resourceLbChangeDesc(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
desc := d.Get("desc").(string)
req := lb.UpdateRequest{
LBID: lbId,
Description: desc,
}
if _, err := c.CloudBroker().LB().Update(ctx, req); err != nil {
return err
}
return nil
}
func resourceLbChangeRestart(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
restart := d.Get("restart").(bool)
if restart {
req := lb.RestartRequest{
LBID: lbId,
}
if _, err := c.CloudBroker().LB().Restart(ctx, req); err != nil {
return err
}
}
return nil
}
func resourceLbChangeRestore(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
restore := d.Get("restore").(bool)
if restore {
req := lb.RestoreRequest{
LBID: lbId,
}
if _, err := c.CloudBroker().LB().Restore(ctx, req); err != nil {
return err
}
}
return nil
}
func resourceLbChangeConfigReset(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
cfgReset := d.Get("config_reset").(bool)
if cfgReset {
req := lb.ConfigResetRequest{
LBID: lbId,
}
if _, err := c.CloudBroker().LB().ConfigReset(ctx, req); err != nil {
return err
}
}
return nil
} }
func ResourceLB() *schema.Resource { func ResourceLB() *schema.Resource {

@ -39,7 +39,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
@ -47,23 +46,18 @@ import (
) )
func resourceLBBackendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBBackendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendCreate") log.Debugf("resourceLBBackendCreate: call for lb_backend id %s", d.Id())
c := m.(*controller.ControllerCfg)
haveLBID, err := existLBID(ctx, d, m) if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
} }
if !haveLBID { req := lb.BackendCreateRequest{
return diag.Errorf("resourceLBBackendCreate: can't create LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) LBID: uint64(d.Get("lb_id").(int)),
BackendName: d.Get("name").(string),
} }
c := m.(*controller.ControllerCfg)
req := lb.BackendCreateRequest{}
req.BackendName = d.Get("name").(string)
req.LBID = uint64(d.Get("lb_id").(int))
if algorithm, ok := d.GetOk("algorithm"); ok { if algorithm, ok := d.GetOk("algorithm"); ok {
req.Algorithm = algorithm.(string) req.Algorithm = algorithm.(string)
} }
@ -92,23 +86,18 @@ func resourceLBBackendCreate(ctx context.Context, d *schema.ResourceData, m inte
req.Weight = uint64(weight.(int)) req.Weight = uint64(weight.(int))
} }
_, err = c.CloudBroker().LB().BackendCreate(ctx, req) _, err := c.CloudBroker().LB().BackendCreate(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string)) d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string))
_, err = utilityLBBackendCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
return resourceLBBackendRead(ctx, d, m) return resourceLBBackendRead(ctx, d, m)
} }
func resourceLBBackendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBBackendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendRead") log.Debugf("resourceLBBackendRead: call for lb_backend id %s", d.Id())
b, err := utilityLBBackendCheckPresence(ctx, d, m) b, err := utilityLBBackendCheckPresence(ctx, d, m)
if b == nil { if b == nil {
@ -124,10 +113,11 @@ func resourceLBBackendRead(ctx context.Context, d *schema.ResourceData, m interf
} }
func resourceLBBackendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBBackendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendDelete") log.Debugf("resourceLBBackendDelete: call for lb_backend id %s", d.Id())
_, err := utilityLBBackendCheckPresence(ctx, d, m) _, err := utilityLBBackendCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -148,16 +138,11 @@ func resourceLBBackendDelete(ctx context.Context, d *schema.ResourceData, m inte
} }
func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendEdit") log.Debugf("resourceLBBackendUpdate: call for lb_backend id %s", d.Id())
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
haveLBID, err := existLBID(ctx, d, m) if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
}
if !haveLBID {
return diag.Errorf("resourceLBBackendUpdate: can't update LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
} }
req := lb.BackendUpdateRequest{ req := lb.BackendUpdateRequest{
@ -193,7 +178,7 @@ func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m inte
req.Weight = uint64(d.Get("weight").(int)) req.Weight = uint64(d.Get("weight").(int))
} }
_, err = c.CloudBroker().LB().BackendUpdate(ctx, req) _, err := c.CloudBroker().LB().BackendUpdate(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -222,153 +207,6 @@ func ResourceLBBackend() *schema.Resource {
Default: &constants.Timeout300s, Default: &constants.Timeout300s,
}, },
Schema: map[string]*schema.Schema{ Schema: resourceLbBackendSchemaMake(),
"lb_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the LB instance to backendCreate",
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Must be unique among all backends of this LB - name of the new backend to create",
},
"algorithm": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{"roundrobin", "static-rr", "leastconn"}, false),
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"downinter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"fall": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"inter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxconn": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxqueue": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"rise": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"slowstart": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"weight": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"servers": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"address": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"check": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"port": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"server_settings": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"downinter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"fall": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"inter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxconn": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxqueue": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"rise": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"slowstart": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"weight": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
},
},
},
},
},
},
} }
} }

@ -39,7 +39,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
@ -47,18 +46,16 @@ import (
) )
func resourceLBBackendServerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBBackendServerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendServerCreate") log.Debugf("resourceLBBackendServerCreate: call for lb_id %d, backend_name %s, server_name %s",
d.Get("lb_id").(int),
haveLBID, err := existLBID(ctx, d, m) d.Get("backend_name").(string),
if err != nil { d.Get("name").(string))
return diag.FromErr(err) c := m.(*controller.ControllerCfg)
}
if !haveLBID { if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
return diag.Errorf("resourceLBBackendServerCreate: can't create LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) return diags
} }
c := m.(*controller.ControllerCfg)
req := lb.BackendServerAddRequest{ req := lb.BackendServerAddRequest{
BackendName: d.Get("backend_name").(string), BackendName: d.Get("backend_name").(string),
ServerName: d.Get("name").(string), ServerName: d.Get("name").(string),
@ -95,23 +92,19 @@ func resourceLBBackendServerCreate(ctx context.Context, d *schema.ResourceData,
req.Weight = uint64(weight.(int)) req.Weight = uint64(weight.(int))
} }
_, err = c.CloudBroker().LB().BackendServerAdd(ctx, req) _, err := c.CloudBroker().LB().BackendServerAdd(ctx, req)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("backend_name").(string) + "#" + d.Get("name").(string)) d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("backend_name").(string) + "#" + d.Get("name").(string))
_, err = utilityLBBackendServerCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
return resourceLBBackendServerRead(ctx, d, m) return resourceLBBackendServerRead(ctx, d, m)
} }
func resourceLBBackendServerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBBackendServerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendServerRead") log.Debugf("resourceLBBackendServerRead: call for id %s", d.Id())
s, err := utilityLBBackendServerCheckPresence(ctx, d, m) s, err := utilityLBBackendServerCheckPresence(ctx, d, m)
if err != nil { if err != nil {
@ -128,10 +121,11 @@ func resourceLBBackendServerRead(ctx context.Context, d *schema.ResourceData, m
} }
func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendServerDelete") log.Debugf("resourceLBBackendServerDelete: call for id %s", d.Id())
_, err := utilityLBBackendServerCheckPresence(ctx, d, m) _, err := utilityLBBackendServerCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -152,16 +146,11 @@ func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData,
} }
func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendServerEdit") log.Debugf("resourceLBBackendServerEdit: call for id %s", d.Id())
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
haveLBID, err := existLBID(ctx, d, m) if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
}
if !haveLBID {
return diag.Errorf("resourceLBBackendServerUpdate: can't update LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
} }
req := lb.BackendServerUpdateRequest{ req := lb.BackendServerUpdateRequest{
@ -200,7 +189,7 @@ func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData,
req.Weight = uint64(d.Get("weight").(int)) req.Weight = uint64(d.Get("weight").(int))
} }
_, err = c.CloudBroker().LB().BackendServerUpdate(ctx, req) _, err := c.CloudBroker().LB().BackendServerUpdate(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -229,83 +218,6 @@ func ResourceLBBackendServer() *schema.Resource {
Default: &constants.Timeout300s, Default: &constants.Timeout300s,
}, },
Schema: map[string]*schema.Schema{ Schema: resourceLbBackendServerSchemaMake(),
"lb_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the LB instance to backendCreate",
},
"backend_name": {
Type: schema.TypeString,
Required: true,
Description: "Must be unique among all backends of this LB - name of the new backend to create",
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Must be unique among all servers defined for this backend - name of the server definition to add.",
},
"address": {
Type: schema.TypeString,
Required: true,
Description: "IP address of the server.",
},
"port": {
Type: schema.TypeInt,
Required: true,
Description: "Port number on the server",
},
"check": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{"disabled", "enabled"}, false),
Description: "set to disabled if this server should be used regardless of its state.",
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"downinter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"fall": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"inter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxconn": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxqueue": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"rise": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"slowstart": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"weight": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
} }
} }

@ -46,41 +46,36 @@ import (
) )
func resourceLBFrontendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBFrontendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendCreate") log.Debugf("resourceLBFrontendCreate: call for lb_id %d, backend %s to create frontend %s",
d.Get("lb_id").(int),
d.Get("backend_name").(string),
d.Get("name").(string))
haveLBID, err := existLBID(ctx, d, m) c := m.(*controller.ControllerCfg)
if err != nil {
return diag.FromErr(err)
}
if !haveLBID { if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
return diag.Errorf("resourceLBFrontendCreate: can't create LB frontend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) return diags
} }
c := m.(*controller.ControllerCfg)
req := lb.FrontendCreateRequest{ req := lb.FrontendCreateRequest{
BackendName: d.Get("backend_name").(string), BackendName: d.Get("backend_name").(string),
LBID: uint64(d.Get("lb_id").(int)), LBID: uint64(d.Get("lb_id").(int)),
FrontendName: d.Get("name").(string), FrontendName: d.Get("name").(string),
} }
_, err = c.CloudBroker().LB().FrontendCreate(ctx, req) _, err := c.CloudBroker().LB().FrontendCreate(ctx, req)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string)) d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string))
_, err = utilityLBFrontendCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
return resourceLBFrontendRead(ctx, d, m) return resourceLBFrontendRead(ctx, d, m)
} }
func resourceLBFrontendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBFrontendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendRead") log.Debugf("resourceLBFrontendRead: call for id %s", d.Id())
f, err := utilityLBFrontendCheckPresence(ctx, d, m) f, err := utilityLBFrontendCheckPresence(ctx, d, m)
if err != nil { if err != nil {
@ -96,10 +91,11 @@ func resourceLBFrontendRead(ctx context.Context, d *schema.ResourceData, m inter
} }
func resourceLBFrontendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBFrontendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendDelete") log.Debugf("resourceLBFrontendDelete: call for id %s", d.Id())
_, err := utilityLBFrontendCheckPresence(ctx, d, m) _, err := utilityLBFrontendCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -144,48 +140,6 @@ func ResourceLBFrontend() *schema.Resource {
Default: &constants.Timeout300s, Default: &constants.Timeout300s,
}, },
Schema: map[string]*schema.Schema{ Schema: resourceLbFrontendSchemaMake(),
"lb_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the LB instance to backendCreate",
},
"backend_name": {
Type: schema.TypeString,
Required: true,
},
"name": {
Type: schema.TypeString,
Required: true,
},
"bindings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"address": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"port": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
},
} }
} }

@ -46,18 +46,17 @@ import (
) )
func resourceLBFrontendBindCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBFrontendBindCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendBindCreate") log.Debugf("resourceLBFrontendBindCreate: call for lb_id %d, frontend %s to create bind %s",
d.Get("lb_id").(int),
d.Get("frontend_name").(string),
d.Get("name").(string))
haveLBID, err := existLBID(ctx, d, m) c := m.(*controller.ControllerCfg)
if err != nil {
return diag.FromErr(err)
}
if !haveLBID { if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
return diag.Errorf("resourceLBFrontendBindCreate: can't create LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int)) return diags
} }
c := m.(*controller.ControllerCfg)
req := lb.FrontendBindRequest{ req := lb.FrontendBindRequest{
LBID: uint64(d.Get("lb_id").(int)), LBID: uint64(d.Get("lb_id").(int)),
FrontendName: d.Get("frontend_name").(string), FrontendName: d.Get("frontend_name").(string),
@ -66,23 +65,18 @@ func resourceLBFrontendBindCreate(ctx context.Context, d *schema.ResourceData, m
BindingPort: uint64(d.Get("port").(int)), BindingPort: uint64(d.Get("port").(int)),
} }
_, err = c.CloudBroker().LB().FrontendBind(ctx, req) _, err := c.CloudBroker().LB().FrontendBind(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("frontend_name").(string) + "#" + d.Get("name").(string)) d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("frontend_name").(string) + "#" + d.Get("name").(string))
_, err = utilityLBFrontendBindCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
return resourceLBFrontendBindRead(ctx, d, m) return resourceLBFrontendBindRead(ctx, d, m)
} }
func resourceLBFrontendBindRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBFrontendBindRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendBindRead") log.Debugf("resourceLBFrontendBindRead: call for %s", d.Id())
b, err := utilityLBFrontendBindCheckPresence(ctx, d, m) b, err := utilityLBFrontendBindCheckPresence(ctx, d, m)
if err != nil { if err != nil {
@ -99,10 +93,11 @@ func resourceLBFrontendBindRead(ctx context.Context, d *schema.ResourceData, m i
} }
func resourceLBFrontendBindDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBFrontendBindDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendBindDelete") log.Debugf("resourceLBFrontendBindDelete: call for %s", d.Id())
_, err := utilityLBFrontendBindCheckPresence(ctx, d, m) _, err := utilityLBFrontendBindCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -124,16 +119,11 @@ func resourceLBFrontendBindDelete(ctx context.Context, d *schema.ResourceData, m
} }
func resourceLBFrontendBindUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourceLBFrontendBindUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendBindEdit") log.Debugf("resourceLBFrontendBindEdit: call for %s", d.Id())
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
haveLBID, err := existLBID(ctx, d, m) if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
}
if !haveLBID {
return diag.Errorf("resourceLBFrontendBindUpdate: can't update LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
} }
req := lb.FrontendBindUpdateRequest{ req := lb.FrontendBindUpdateRequest{
@ -142,12 +132,12 @@ func resourceLBFrontendBindUpdate(ctx context.Context, d *schema.ResourceData, m
LBID: uint64(d.Get("lb_id").(int)), LBID: uint64(d.Get("lb_id").(int)),
} }
if d.HasChange("address") || d.HasChange("port") { if d.HasChange("address") || d.HasChange("port") {
req.BindingAddress = d.Get("address").(string) req.BindingAddress = d.Get("address").(string)
req.BindingPort = uint64(d.Get("port").(int)) req.BindingPort = uint64(d.Get("port").(int))
} }
_, err = c.CloudBroker().LB().FrontendBindUpdate(ctx, req) _, err := c.CloudBroker().LB().FrontendBindUpdate(ctx, req)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -176,33 +166,6 @@ func ResourceLBFrontendBind() *schema.Resource {
Default: &constants.Timeout300s, Default: &constants.Timeout300s,
}, },
Schema: map[string]*schema.Schema{ Schema: resourceLbFrontendBindSchemaMake(),
"lb_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the LB instance to backendCreate",
},
"frontend_name": {
Type: schema.TypeString,
Required: true,
Description: "Must be unique among all backends of this LB - name of the new backend to create",
},
"address": {
Type: schema.TypeString,
Required: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Required: true,
},
"port": {
Type: schema.TypeInt,
Required: true,
},
},
} }
} }

@ -45,7 +45,6 @@ func utilityLBCheckPresence(ctx context.Context, d *schema.ResourceData, m inter
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := lb.GetRequest{} req := lb.GetRequest{}
if d.Id() != "" { if d.Id() != "" {
rgId, _ := strconv.ParseUint(d.Id(), 10, 64) rgId, _ := strconv.ParseUint(d.Id(), 10, 64)
req.LBID = rgId req.LBID = rgId

@ -57,17 +57,17 @@ func utilityLBBackendCheckPresence(ctx context.Context, d *schema.ResourceData,
req.LBID = uint64(d.Get("lb_id").(int)) req.LBID = uint64(d.Get("lb_id").(int))
} }
lb, err := c.CloudBroker().LB().Get(ctx, req) lbRec, err := c.CloudBroker().LB().Get(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
backends := lb.Backends backends := lbRec.Backends
for _, b := range backends { for _, b := range backends {
if b.Name == bName { if b.Name == bName {
return &b, nil return &b, nil
} }
} }
return nil, fmt.Errorf("can not find backend with name: %s for lb: %d", bName, lb.ID) return nil, fmt.Errorf("can not find backend with name: %s for lb: %d", bName, lbRec.ID)
} }

@ -38,92 +38,21 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
) )
func dataSourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
pcidevice, err := utilityPcideviceCheckPresence(ctx, d, m) pcidevice, err := utilityPcideviceCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.Set("ckey", pcidevice.CKey) flattenPcidevice(d, pcidevice)
d.Set("meta", flattens.FlattenMeta(pcidevice.Meta))
d.Set("compute_id", pcidevice.ComputeID)
d.Set("description", pcidevice.Description)
d.Set("guid", pcidevice.GUID)
d.Set("hw_path", pcidevice.HwPath)
d.Set("device_id",pcidevice.ID)
d.Set("name", pcidevice.Name)
d.Set("rg_id", pcidevice.RGID)
d.Set("stack_id", pcidevice.StackID)
d.Set("status", pcidevice.Status)
d.Set("system_name", pcidevice.SystemName)
d.SetId(strconv.Itoa(d.Get("device_id").(int))) d.SetId(strconv.Itoa(d.Get("device_id").(int)))
return nil return nil
} }
func dataSourcePcideviceSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"device_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
},
"description": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"hw_path": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
Type: schema.TypeInt,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"system_name": {
Type: schema.TypeString,
Computed: true,
},
}
return rets
}
func DataSourcePcidevice() *schema.Resource { func DataSourcePcidevice() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -37,36 +37,13 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
) )
func flattenPcideviceList(pl *pcidevice.ListPCIDevices) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(pl.Data))
for _, item := range pl.Data {
temp := map[string]interface{}{
"ckey": item.CKey,
"meta": flattens.FlattenMeta(item.Meta),
"compute_id": item.ComputeID,
"description": item.Description,
"guid": item.GUID,
"hw_path": item.HwPath,
"device_id": item.ID,
"rg_id": item.RGID,
"name": item.Name,
"stack_id": item.StackID,
"status": item.Status,
"system_name": item.SystemName,
}
res = append(res, temp)
}
return res
}
func dataSourcePcideviceListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func dataSourcePcideviceListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
pcideviceList, err := utilityPcideviceListCheckPresence(ctx, m) pcideviceList, err := utilityPcideviceListCheckPresence(ctx, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -79,60 +56,6 @@ func dataSourcePcideviceListRead(ctx context.Context, d *schema.ResourceData, m
return nil return nil
} }
func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "by_id",
},
"compute_id": {
Type: schema.TypeInt,
Optional: true,
Description: "compute_id",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "name",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "rg_id",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "status",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "pcidevice list",
Elem: &schema.Resource{
Schema: dataSourcePcideviceSchemaMake(),
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry count",
},
}
return rets
}
func DataSourcePcideviceList() *schema.Resource { func DataSourcePcideviceList() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,

@ -34,55 +34,26 @@ package pcidevice
import ( import (
"context" "context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
) )
func existStackID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
c := m.(*controller.ControllerCfg) var errs []error
StackID := uint64(d.Get("stack_id").(int))
RGID := uint64(d.Get("rg_id").(int))
req := rg.ListRequest{ stackId := uint64(d.Get("stack_id").(int))
IncludeDeleted: false, rgId := uint64(d.Get("rd_id").(int))
}
rgList, err := c.CloudBroker().RG().List(ctx, req)
if err != nil {
return false, err
}
for _, v := range rgList.FilterByID(RGID).Data {
for _, idVM := range v.VMs {
req := compute.GetRequest{
ComputeID: idVM,
}
checkStackID, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil {
return false, err
}
if checkStackID.StackID == StackID {
return true, nil
}
}
}
return false, err
}
func existRGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { if err := ic.ExistRG(ctx, rgId, c); err != nil {
c := m.(*controller.ControllerCfg) errs = append(errs, err)
RGID := uint64(d.Get("rg_id").(int))
req := rg.ListRequest{
IncludeDeleted: false,
} }
rgList, err := c.CloudBroker().RG().List(ctx, req) if err := ic.ExistStackInPcidevice(ctx, stackId, rgId, c); err != nil {
if err != nil { errs = append(errs, err)
return false, err
} }
return len(rgList.FilterByID(RGID).Data) != 0, nil return dc.ErrorsToDiagnostics(errs)
} }

@ -33,126 +33,105 @@ package pcidevice
import ( import (
"context" "context"
log "github.com/sirupsen/logrus"
"strconv" "strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice" "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status" "repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
) )
func resourcePcideviceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourcePcideviceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourcePcideviceCreate: called for pcidevice %s", d.Get("name").(string)) log.Debugf("resourcePcideviceCreate: called for pcidevice %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg) c := m.(*controller.ControllerCfg)
req := pcidevice.CreateRequest{} createReq := pcidevice.CreateRequest{}
haveRGID, err := existRGID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourcePcideviceCreate: can't create Pcidevice because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveStackID, err := existStackID(ctx, d, m) if diags := checkParamsExistence(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
}
if !haveStackID {
return diag.Errorf("resourcePcideviceCreate: can't create Pcidevice because StackID %d is not allowed or does not exist", d.Get("stack_id").(int))
} }
req.StackID = uint64(d.Get("stack_id").(int)) createReq.StackID = uint64(d.Get("stack_id").(int))
req.RGID = uint64(d.Get("rg_id").(int)) createReq.RGID = uint64(d.Get("rg_id").(int))
req.Name = d.Get("name").(string) createReq.Name = d.Get("name").(string)
req.HWPath = d.Get("hw_path").(string) createReq.HWPath = d.Get("hw_path").(string)
if description, ok := d.GetOk("description"); ok { if description, ok := d.GetOk("description"); ok {
req.Description = description.(string) createReq.Description = description.(string)
} }
pcideviceId, err := c.CloudBroker().PCIDevice().Create(ctx, req) pcideviceId, err := c.CloudBroker().PCIDevice().Create(ctx, createReq)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.SetId(strconv.FormatUint(pcideviceId, 10)) d.SetId(strconv.FormatUint(pcideviceId, 10))
d.Set("device_id", pcideviceId) d.Set("device_id", pcideviceId)
return resourcePcideviceRead(ctx, d, m) warnings := dc.Warnings{}
if enable, ok := d.GetOk("enable"); ok {
log.Debugf("resourcePcideviceCreate: enable=%t device_id %d after completing its resource configuration", enable, pcideviceId)
if enable.(bool) {
req := pcidevice.EnableRequest{DeviceID: pcideviceId}
if _, err := c.CloudBroker().PCIDevice().Enable(ctx, req); err != nil {
warnings.Add(err)
}
} else {
req := pcidevice.DisableRequest{
DeviceID: pcideviceId,
}
if force, ok := d.GetOk("force_disable"); ok {
req.Force = force.(bool)
log.Debugf("force_disable=%v", force)
}
if _, err := c.CloudBroker().PCIDevice().Disable(ctx, req); err != nil {
warnings.Add(err)
}
}
}
return append(resourcePcideviceRead(ctx, d, m), warnings.Get()...)
} }
func resourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
pcidevice, err := utilityPcideviceCheckPresence(ctx, d, m) log.Debugf("resourcePcideviceRead: called for pci_device id %d, name %s",
d.Id(), d.Get("name").(string))
pcideviceRec, err := utilityPcideviceCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
d.Set("ckey", pcidevice.CKey) flattenPcidevice(d, pcideviceRec)
d.Set("meta", flattens.FlattenMeta(pcidevice.Meta))
d.Set("compute_id", pcidevice.ComputeID) log.Debugf("resourcePcideviceRead: after flattenPcidevice: device_id %s, name %s",
d.Set("description", pcidevice.Description) d.Id(), d.Get("name").(string))
d.Set("guid", pcidevice.GUID)
d.Set("hw_path", pcidevice.HwPath)
d.Set("device_id", pcidevice.ID)
d.Set("rg_id", pcidevice.RGID)
d.Set("name", pcidevice.Name)
d.Set("stack_id", pcidevice.StackID)
d.Set("status", pcidevice.Status)
d.Set("system_name", pcidevice.SystemName)
return nil return nil
} }
func resourcePcideviceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourcePcideviceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourcePcideviceUpdate: called for pcidevice id %s, name %s", d.Id(), d.Get("name").(string))
haveRGID, err := existRGID(ctx, d, m) c := m.(*controller.ControllerCfg)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourcePcideviceCreate: can't update Pcidevice because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveStackID, err := existStackID(ctx, d, m) if diags := checkParamsExistence(ctx, d, c); diags != nil {
if err != nil { return diags
return diag.FromErr(err)
}
if !haveStackID {
return diag.Errorf("resourcePcideviceCreate: can't update Pcidevice because StackID %d is not allowed or does not exist", d.Get("stack_id").(int))
} }
if d.HasChange("enable") { if d.HasChange("enable") {
state := d.Get("enable").(bool) err := resourcePcideviceChangeEnable(ctx, d, m)
c := m.(*controller.ControllerCfg) if err != nil {
return diag.FromErr(err)
if state {
req := pcidevice.EnableRequest{
DeviceID: uint64(d.Get("device_id").(int)),
}
_, err := c.CloudBroker().PCIDevice().Enable(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
req := pcidevice.DisableRequest{
DeviceID: uint64(d.Get("device_id").(int)),
}
if force, ok := d.GetOk("force"); ok {
req.Force = force.(bool)
}
_, err := c.CloudBroker().PCIDevice().Disable(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} }
} }
@ -162,27 +141,26 @@ func resourcePcideviceUpdate(ctx context.Context, d *schema.ResourceData, m inte
func resourcePcideviceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { func resourcePcideviceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourcePcideviceDelete: called for %s, id: %s", d.Get("name").(string), d.Id()) log.Debugf("resourcePcideviceDelete: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
pciDevice, err := utilityPcideviceCheckPresence(ctx, d, m) pciDevice, err := utilityPcideviceCheckPresence(ctx, d, m)
if err != nil { if err != nil {
d.SetId("") d.SetId("")
return diag.FromErr(err) return diag.FromErr(err)
} }
if pciDevice.Status == status.Destroyed || pciDevice.Status == status.Purged { if pciDevice.Status == status.Destroyed || pciDevice.Status == status.Purged {
return nil return nil
} }
c := m.(*controller.ControllerCfg)
req := pcidevice.DeleteRequest{ req := pcidevice.DeleteRequest{
DeviceID: pciDevice.ID, DeviceID: pciDevice.ID,
} }
if force, ok := d.GetOk("force_delete"); ok {
if force, ok := d.GetOk("force"); ok {
req.Force = force.(bool) req.Force = force.(bool)
} }
_, err = c.CloudBroker().PCIDevice().Delete(ctx, req) if _, err = c.CloudBroker().PCIDevice().Delete(ctx, req); err != nil {
if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@ -191,81 +169,6 @@ func resourcePcideviceDelete(ctx context.Context, d *schema.ResourceData, m inte
return nil return nil
} }
// resourcePcideviceSchemaMake describes the Terraform schema for a PCI
// device resource: fields supplied by the user configuration (hw_path, name,
// rg_id, stack_id plus the optional description, device_id, force and enable
// flags) and read-only attributes mirrored back from the platform.
func resourcePcideviceSchemaMake() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		// Computed-only bookkeeping fields returned by the platform.
		"ckey": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"meta": {
			Type:     schema.TypeList,
			Computed: true,
			Elem: &schema.Schema{
				Type: schema.TypeString,
			},
		},
		"compute_id": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"description": {
			Type:        schema.TypeString,
			Optional:    true,
			Computed:    true,
			Description: "description, just for information",
		},
		"guid": {
			Type:     schema.TypeInt,
			Computed: true,
		},
		"hw_path": {
			Type:        schema.TypeString,
			Required:    true,
			Description: "PCI address of the device",
		},
		// Optional on create; otherwise kept in sync with the platform value.
		"device_id": {
			Type:     schema.TypeInt,
			Optional: true,
			Computed: true,
		},
		"name": {
			Type:        schema.TypeString,
			Required:    true,
			Description: "Name of Device",
		},
		"rg_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "Resource GROUP",
		},
		"stack_id": {
			Type:        schema.TypeInt,
			Required:    true,
			Description: "stackId",
		},
		"status": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"system_name": {
			Type:     schema.TypeString,
			Computed: true,
		},
		// When true, the delete handler passes Force to the platform API.
		"force": {
			Type:        schema.TypeBool,
			Optional:    true,
			Default:     false,
			Description: "Force delete",
		},
		// Desired enabled state; reconciled by the create/update handlers.
		"enable": {
			Type:        schema.TypeBool,
			Optional:    true,
			Default:     false,
			Description: "Enable pci device",
		},
	}
}
func ResourcePcidevice() *schema.Resource { func ResourcePcidevice() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
SchemaVersion: 1, SchemaVersion: 1,
@ -290,3 +193,37 @@ func ResourcePcidevice() *schema.Resource {
Schema: resourcePcideviceSchemaMake(), Schema: resourcePcideviceSchemaMake(),
} }
} }
// resourcePcideviceChangeEnable reconciles the platform-side enabled state of
// the PCI device with the "enable" attribute in the resource configuration,
// issuing either an Enable or a Disable request. A Disable may additionally
// carry the Force flag when "force_disable" is set.
func resourcePcideviceChangeEnable(ctx context.Context, d *schema.ResourceData, m interface{}) error {
	wantEnabled := d.Get("enable").(bool)
	log.Debugf("resourcePcideviceChangeEnable: enable=%t device_id %s after completing its resource configuration", wantEnabled, d.Id())

	device, err := utilityPcideviceCheckPresence(ctx, d, m)
	if err != nil {
		return err
	}

	api := m.(*controller.ControllerCfg).CloudBroker().PCIDevice()

	if wantEnabled {
		_, err = api.Enable(ctx, pcidevice.EnableRequest{DeviceID: device.ID})
		return err
	}

	disableReq := pcidevice.DisableRequest{DeviceID: device.ID}
	if force, ok := d.GetOk("force_disable"); ok {
		disableReq.Force = force.(bool)
	}
	_, err = api.Disable(ctx, disableReq)
	return err
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save