loskutovanl 2 years ago
parent 294680282e
commit e2ee45ee14

@ -1,8 +1,24 @@
## Version 4.5.0
## Version 4.5.1
### Feature
- Added support for authorization via the Basis.Virtual Security (BVS) system: added a BVS client and configuration (a sample provider block is sketched after this changelog)
- Refactored the BVS configuration
- Added and updated data sources and resources for the following cloudbroker groups:
* account
* audit
* disks
* extnet
* flipgroup
* grid
* image
* k8ci
* k8s
* kvmvm (compute)
* lb (load balancer)
* pcidevice
* rg (resource group)
* sep
* stack
* vins
### Bugfix
- Fixed the bservice and rg schemas and flatten functions in cloudapi
- Added a stateUpgrader for k8s_cp in cloudapi/k8s
- Fixed the description update for compute in cloudapi/kvmvm
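
For illustration, a minimal sketch of a provider block that uses the BVS authorization mentioned above. The argument names are taken from the provider schema changed in this commit; the endpoint URLs and credentials are placeholders, not real values.

```terraform
provider "decort" {
  authenticator  = "bvs"
  # Placeholder endpoints and credentials: substitute your own.
  controller_url = "https://cloud.example.com"
  oauth2_url     = "https://sso.example.com"
  app_id         = "<APP_ID>"
  app_secret     = "<APP_SECRET>"
  bvs_user       = "<BVS_USER>"
  bvs_password   = "<BVS_PASSWORD>"
  domain         = "<DOMAIN>"
}
```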

@ -7,7 +7,7 @@ ZIPDIR = ./zip
BINARY=${NAME}
WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAMESPACE}/${VERSION}/${OS_ARCH}
MAINPATH = ./cmd/decort/
VERSION=4.5.0
VERSION=4.5.1
OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
FILES = ${BINARY}_${VERSION}_darwin_amd64\

@ -27,23 +27,42 @@ Terraform provider for the Digital Energy Cloud Orchestration platform
## Provider features
- Working with Compute instances,
- Working with disks,
- Working with k8s,
- Working with images,
- Working with flipgroups,
- Working with stacks,
- Working with resource groups,
- Working with VINS,
- Working with pfw,
- Working with accounts,
- Working with snapshots,
- Working with bservice,
- Working with extnets,
- Working with locations,
- Working with load balancers.
Project wiki: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
- User mode:
  - Working with accounts,
  - Working with bservice,
  - Working with disks,
  - Working with extnets,
  - Working with flipgroups,
  - Working with images,
  - Working with k8s,
  - Working with Compute instances,
  - Working with load balancers,
  - Working with locations,
  - Working with pfw,
  - Working with resource groups,
  - Working with snapshots,
  - Working with stacks,
  - Working with VINS.
- Administrator mode:
  - Working with accounts,
  - Working with audits,
  - Working with disks,
  - Working with extnets,
  - Working with flipgroups,
  - Working with grids,
  - Working with images,
  - Working with k8ci,
  - Working with k8s,
  - Working with Compute instances,
  - Working with load balancers,
  - Working with PCI devices,
  - Working with resource groups,
  - Working with SEPs,
  - Working with stacks,
  - Working with VINS.
The full list and description of all groups' functionality is available on the project wiki: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
## Installation
Starting with provider version `4.3.0`, the release archive contains installer scripts.

@ -58,7 +58,7 @@ Two ways for starting:
```terraform
provider "decort" {
authenticator = "oauth2"
authenticator = "decs3o"
#controller_url = <DECORT_CONTROLLER_URL>
controller_url = "https://ds1.digitalenergy.online"
#oauth2_url = <DECORT_SSO_URL>

@ -9,8 +9,7 @@ require (
github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1
github.com/sirupsen/logrus v1.9.0
golang.org/x/net v0.16.0
golang.org/x/oauth2 v0.13.0
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.0
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.2
)
require (
@ -68,9 +67,9 @@ require (
github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect
github.com/vmihailenco/tagparser v0.1.2 // indirect
github.com/zclconf/go-cty v1.12.1 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/crypto v0.15.0 // indirect
golang.org/x/sys v0.14.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect
google.golang.org/grpc v1.51.0 // indirect

@ -249,8 +249,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -268,8 +268,6 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos=
golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -295,8 +293,8 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
@ -306,8 +304,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
@ -341,5 +339,5 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.0 h1:aGDg9hQXs70m4Llx8hw9Y50M1C2apDqSsNMsE8isyIM=
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.0/go.mod h1:mwcpnw0dT/PQf6AOJShjlbDNDfNitr0WM77LNKL1qjo=
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.2 h1:Ll8MBcmDcElxxgxOUUaYbbafTSbIm4dcPEDLl4fdF8Q=
repository.basistech.ru/BASIS/decort-golang-sdk v1.7.2/go.mod h1:7fj8sgGZFiiExewQeqckCS4WxwOmU0oP6BO6mi1Lpkw=

@ -32,7 +32,6 @@ import (
"strings"
log "github.com/sirupsen/logrus"
"golang.org/x/oauth2"
decort "repository.basistech.ru/BASIS/decort-golang-sdk"
"repository.basistech.ru/BASIS/decort-golang-sdk/config"
"repository.basistech.ru/BASIS/decort-golang-sdk/interfaces"
@ -60,7 +59,10 @@ type ControllerCfg struct {
bvs_user string // required for bvs mode
bvs_password string // required for bvs mode
domain string // required for bvs mode
token oauth2.Token // obtained from BVS provider on successful login in bvs mode
token config.Token // obtained from BVS provider on successful login in bvs mode
path_cfg string // path to the configuration file
path_token string // path to the token file
time_to_refresh int64 // number of minutes before token expiry at which the token is refreshed
legacy_user string // required for legacy mode
legacy_password string // required for legacy mode
legacy_sid string // obtained from DECORT controller on successful login in legacy mode
@ -99,7 +101,10 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
app_secret: d.Get("app_secret").(string),
oauth2_url: d.Get("oauth2_url").(string),
decort_username: "",
token: oauth2.Token{},
token: config.Token{},
path_cfg: d.Get("path_cfg").(string),
path_token: d.Get("path_token").(string),
time_to_refresh: int64(d.Get("time_to_refresh").(int)),
}
allow_unverified_ssl := d.Get("allow_unverified_ssl").(bool)
@ -231,7 +236,7 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
ret_config.caller = decort.New(sdkConf)
case MODE_BVS:
sdkConf := config.BVSConfig{
AppID: ret_config.app_id,
AppSecret: ret_config.app_secret,
@ -242,6 +247,9 @@ func ControllerConfigure(d *schema.ResourceData) (*ControllerCfg, error) {
Password: ret_config.bvs_password,
Domain: ret_config.domain,
Token: ret_config.token,
PathCfg: ret_config.path_cfg,
PathToken: ret_config.path_token,
TimeToRefresh: ret_config.time_to_refresh,
}
ret_config.caller = decort.NewBVS(sdkConf)

@ -1,161 +1,254 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/bservice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/extnet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/locations"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/rg"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/snapshot"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/stack"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins"
cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account"
// cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks"
cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet"
// cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup"
// cb_grid "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/grid"
// cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image"
// cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb"
// cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice"
// cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg"
// cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep"
// cb_stack "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/stack"
// cb_vgpu "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vgpu"
)
func newDataSourcesMap() map[string]*schema.Resource {
return map[string]*schema.Resource{
"decort_account": account.DataSourceAccount(),
"decort_resgroup": rg.DataSourceResgroup(),
"decort_kvmvm": kvmvm.DataSourceCompute(),
"decort_kvmvm_list": kvmvm.DataSourceComputeList(),
"decort_kvmvm_audits": kvmvm.DataSourceComputeAudits(),
"decort_kvmvm_get_audits": kvmvm.DataSourceComputeGetAudits(),
"decort_kvmvm_get_console_url": kvmvm.DataSourceComputeGetConsoleUrl(),
"decort_kvmvm_get_log": kvmvm.DataSourceComputeGetLog(),
"decort_kvmvm_pfw_list": kvmvm.DataSourceComputePfwList(),
"decort_kvmvm_user_list": kvmvm.DataSourceComputeUserList(),
"decort_kvmvm_snapshot_usage": kvmvm.DataSourceComputeSnapshotUsage(),
"decort_k8s": k8s.DataSourceK8s(),
"decort_k8s_list": k8s.DataSourceK8sList(),
"decort_k8s_list_deleted": k8s.DataSourceK8sListDeleted(),
"decort_k8s_wg": k8s.DataSourceK8sWg(),
"decort_k8s_wg_list": k8s.DataSourceK8sWgList(),
"decort_k8s_computes": k8s.DataSourceK8sComputes(),
"decort_k8ci_list": k8s.DataSourceK8CIList(),
"decort_vins": vins.DataSourceVins(),
"decort_vins_list": vins.DataSourceVinsList(),
"decort_vins_audits": vins.DataSourceVinsAudits(),
"decort_vins_ip_list": vins.DataSourceVinsIpList(),
"decort_vins_list_deleted": vins.DataSourceVinsListDeleted(),
"decort_vins_ext_net_list": vins.DataSourceVinsExtNetList(),
"decort_vins_nat_rule_list": vins.DataSourceVinsNatRuleList(),
"decort_vins_static_route_list": vins.DataSourceStaticRouteList(),
"decort_vins_static_route": vins.DataSourceStaticRoute(),
"decort_snapshot_list": snapshot.DataSourceSnapshotList(),
"decort_disk": disks.DataSourceDisk(),
"decort_disk_list": disks.DataSourceDiskList(),
"decort_rg_list": rg.DataSourceRgList(),
"decort_rg_affinity_group_computes": rg.DataSourceRgAffinityGroupComputes(),
"decort_rg_affinity_groups_list": rg.DataSourceRgAffinityGroupsList(),
"decort_rg_affinity_groups_get": rg.DataSourceRgAffinityGroupsGet(),
"decort_rg_audits": rg.DataSourceRgAudits(),
"decort_rg_list_computes": rg.DataSourceRgListComputes(),
"decort_rg_list_deleted": rg.DataSourceRgListDeleted(),
"decort_rg_list_lb": rg.DataSourceRgListLb(),
"decort_rg_list_pfw": rg.DataSourceRgListPfw(),
"decort_rg_list_vins": rg.DataSourceRgListVins(),
"decort_rg_usage": rg.DataSourceRgUsage(),
"decort_disk_list_types_detailed": disks.DataSourceDiskListTypesDetailed(),
"decort_disk_list_types": disks.DataSourceDiskListTypes(),
"decort_disk_list_deleted": disks.DataSourceDiskListDeleted(),
"decort_disk_list_unattached": disks.DataSourceDiskListUnattached(),
"decort_disk_snapshot": disks.DataSourceDiskSnapshot(),
"decort_disk_snapshot_list": disks.DataSourceDiskSnapshotList(),
"decort_account_list": account.DataSourceAccountList(),
"decort_account_computes_list": account.DataSourceAccountComputesList(),
"decort_account_disks_list": account.DataSourceAccountDisksList(),
"decort_account_vins_list": account.DataSourceAccountVinsList(),
"decort_account_audits_list": account.DataSourceAccountAuditsList(),
"decort_account_rg_list": account.DataSourceAccountRGList(),
"decort_account_consumed_units": account.DataSourceAccountConsumedUnits(),
"decort_account_consumed_units_by_type": account.DataSourceAccountConsumedUnitsByType(),
"decort_account_reserved_units": account.DataSourceAccountReservedUnits(),
"decort_account_templates_list": account.DataSourceAccountTemplatessList(),
"decort_account_deleted_list": account.DataSourceAccountDeletedList(),
"decort_account_flipgroups_list": account.DataSourceAccountFlipGroupsList(),
"decort_bservice_list": bservice.DataSourceBasicServiceList(),
"decort_bservice": bservice.DataSourceBasicService(),
"decort_bservice_snapshot_list": bservice.DataSourceBasicServiceSnapshotList(),
"decort_bservice_group": bservice.DataSourceBasicServiceGroup(),
"decort_bservice_deleted_list": bservice.DataSourceBasicServiceDeletedList(),
"decort_extnet_list": extnet.DataSourceExtnetList(),
"decort_extnet_computes_list": extnet.DataSourceExtnetComputesList(),
"decort_extnet": extnet.DataSourceExtnet(),
"decort_extnet_default": extnet.DataSourceExtnetDefault(),
"decort_locations_list": locations.DataSourceLocationsList(),
"decort_location_url": locations.DataSourceLocationUrl(),
"decort_image_list": image.DataSourceImageList(),
"decort_image": image.DataSourceImage(),
"decort_lb": lb.DataSourceLB(),
"decort_lb_list": lb.DataSourceLBList(),
"decort_lb_list_deleted": lb.DataSourceLBListDeleted(),
"decort_flipgroup": flipgroup.DataSourceFlipgroup(),
"decort_flipgroup_list": flipgroup.DataSourceFlipGroupList(),
"decort_stack": stack.DataSourceStack(),
"decort_stack_list": stack.DataSourceStackList(),
"decort_account_resource_consumption_list": account.DataSourceAccountResourceConsumptionList(),
"decort_account_resource_consumption_get": account.DataSourceAccountResourceConsumptionGet(),
"decort_kvmvm_list_deleted": kvmvm.DataSourceComputeListDeleted(),
"decort_kvmvm_vgpu_list": kvmvm.DataSourceComputeVGPUList(),
"decort_kvmvm_pci_device_list": kvmvm.DataSourceComputePCIDeviceList(),
"decort_k8s_wg_cloud_init": k8s.DataSourceK8sWgCloudInit(),
"decort_rg_resource_consumption_list": rg.DataSourceRGResourceConsumptionList(),
"decort_rg_resource_consumption_get": rg.DataSourceRGResourceConsumptionGet(),
"decort_cb_account": cb_account.DataSourceAccount(),
"decort_cb_account_list": cb_account.DataSourceAccountList(),
"decort_cb_account_computes_list": cb_account.DataSourceAccountComputesList(),
"decort_cb_account_deleted_list": cb_account.DataSourceAccountDeletedList(),
"decort_cb_account_disks_list": cb_account.DataSourceAccountDisksList(),
"decort_cb_account_flipgroups_list": cb_account.DataSourceAccountFlipGroupsList(),
"decort_cb_account_rg_list": cb_account.DataSourceAccountRGList(),
"decort_cb_account_vins_list": cb_account.DataSourceAccountVinsList(),
"decort_cb_account_resource_consumption_get": cb_account.DataSourceAccountResourceConsumptionGet(),
"decort_cb_account_resource_consumption_list": cb_account.DataSourceAccountResourceConsumptionList(),
"decort_cb_account_audits_list": cb_account.DataSourceAccountAuditsList(),
"decort_cb_extnet": cb_extnet.DataSourceExtnetCB(),
"decort_cb_extnet_list": cb_extnet.DataSourceExtnetListCB(),
"decort_cb_extnet_default": cb_extnet.DataSourceExtnetDefaultCB(),
"decort_cb_extnet_static_route_list": cb_extnet.DataSourceStaticRouteList(),
"decort_cb_extnet_static_route": cb_extnet.DataSourceStaticRoute(),
}
}
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/bservice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/extnet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/locations"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/rg"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/snapshot"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/stack"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins"
cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account"
cb_audit "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/audit"
cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks"
cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet"
cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup"
cb_grid "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/grid"
cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image"
cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm"
cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb"
cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice"
cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg"
cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep"
cb_stack "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/stack"
cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins"
// cb_vgpu "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vgpu"
cb_k8ci "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8ci"
cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s"
)
func newDataSourcesMap() map[string]*schema.Resource {
return map[string]*schema.Resource{
"decort_account": account.DataSourceAccount(),
"decort_resgroup": rg.DataSourceResgroup(),
"decort_kvmvm": kvmvm.DataSourceCompute(),
"decort_kvmvm_list": kvmvm.DataSourceComputeList(),
"decort_kvmvm_audits": kvmvm.DataSourceComputeAudits(),
"decort_kvmvm_get_audits": kvmvm.DataSourceComputeGetAudits(),
"decort_kvmvm_get_console_url": kvmvm.DataSourceComputeGetConsoleUrl(),
"decort_kvmvm_get_log": kvmvm.DataSourceComputeGetLog(),
"decort_kvmvm_pfw_list": kvmvm.DataSourceComputePfwList(),
"decort_kvmvm_user_list": kvmvm.DataSourceComputeUserList(),
"decort_kvmvm_snapshot_usage": kvmvm.DataSourceComputeSnapshotUsage(),
"decort_k8s": k8s.DataSourceK8s(),
"decort_k8s_list": k8s.DataSourceK8sList(),
"decort_k8s_list_deleted": k8s.DataSourceK8sListDeleted(),
"decort_k8s_wg": k8s.DataSourceK8sWg(),
"decort_k8s_wg_list": k8s.DataSourceK8sWgList(),
"decort_k8s_computes": k8s.DataSourceK8sComputes(),
"decort_k8ci_list": k8s.DataSourceK8CIList(),
"decort_vins": vins.DataSourceVins(),
"decort_vins_list": vins.DataSourceVinsList(),
"decort_vins_audits": vins.DataSourceVinsAudits(),
"decort_vins_ip_list": vins.DataSourceVinsIpList(),
"decort_vins_list_deleted": vins.DataSourceVinsListDeleted(),
"decort_vins_ext_net_list": vins.DataSourceVinsExtNetList(),
"decort_vins_nat_rule_list": vins.DataSourceVinsNatRuleList(),
"decort_vins_static_route_list": vins.DataSourceStaticRouteList(),
"decort_vins_static_route": vins.DataSourceStaticRoute(),
"decort_snapshot_list": snapshot.DataSourceSnapshotList(),
"decort_disk": disks.DataSourceDisk(),
"decort_disk_list": disks.DataSourceDiskList(),
"decort_rg_list": rg.DataSourceRgList(),
"decort_rg_affinity_group_computes": rg.DataSourceRgAffinityGroupComputes(),
"decort_rg_affinity_groups_list": rg.DataSourceRgAffinityGroupsList(),
"decort_rg_affinity_groups_get": rg.DataSourceRgAffinityGroupsGet(),
"decort_rg_audits": rg.DataSourceRgAudits(),
"decort_rg_list_computes": rg.DataSourceRgListComputes(),
"decort_rg_list_deleted": rg.DataSourceRgListDeleted(),
"decort_rg_list_lb": rg.DataSourceRgListLb(),
"decort_rg_list_pfw": rg.DataSourceRgListPfw(),
"decort_rg_list_vins": rg.DataSourceRgListVins(),
"decort_rg_usage": rg.DataSourceRgUsage(),
"decort_disk_list_types_detailed": disks.DataSourceDiskListTypesDetailed(),
"decort_disk_list_types": disks.DataSourceDiskListTypes(),
"decort_disk_list_deleted": disks.DataSourceDiskListDeleted(),
"decort_disk_list_unattached": disks.DataSourceDiskListUnattached(),
"decort_disk_snapshot": disks.DataSourceDiskSnapshot(),
"decort_disk_snapshot_list": disks.DataSourceDiskSnapshotList(),
"decort_account_list": account.DataSourceAccountList(),
"decort_account_computes_list": account.DataSourceAccountComputesList(),
"decort_account_disks_list": account.DataSourceAccountDisksList(),
"decort_account_vins_list": account.DataSourceAccountVinsList(),
"decort_account_audits_list": account.DataSourceAccountAuditsList(),
"decort_account_rg_list": account.DataSourceAccountRGList(),
"decort_account_consumed_units": account.DataSourceAccountConsumedUnits(),
"decort_account_consumed_units_by_type": account.DataSourceAccountConsumedUnitsByType(),
"decort_account_reserved_units": account.DataSourceAccountReservedUnits(),
"decort_account_templates_list": account.DataSourceAccountTemplatessList(),
"decort_account_deleted_list": account.DataSourceAccountDeletedList(),
"decort_account_flipgroups_list": account.DataSourceAccountFlipGroupsList(),
"decort_bservice_list": bservice.DataSourceBasicServiceList(),
"decort_bservice": bservice.DataSourceBasicService(),
"decort_bservice_snapshot_list": bservice.DataSourceBasicServiceSnapshotList(),
"decort_bservice_group": bservice.DataSourceBasicServiceGroup(),
"decort_bservice_deleted_list": bservice.DataSourceBasicServiceDeletedList(),
"decort_extnet_list": extnet.DataSourceExtnetList(),
"decort_extnet_computes_list": extnet.DataSourceExtnetComputesList(),
"decort_extnet": extnet.DataSourceExtnet(),
"decort_extnet_default": extnet.DataSourceExtnetDefault(),
"decort_locations_list": locations.DataSourceLocationsList(),
"decort_location_url": locations.DataSourceLocationUrl(),
"decort_image_list": image.DataSourceImageList(),
"decort_image": image.DataSourceImage(),
"decort_lb": lb.DataSourceLB(),
"decort_lb_list": lb.DataSourceLBList(),
"decort_lb_list_deleted": lb.DataSourceLBListDeleted(),
"decort_flipgroup": flipgroup.DataSourceFlipgroup(),
"decort_flipgroup_list": flipgroup.DataSourceFlipGroupList(),
"decort_stack": stack.DataSourceStack(),
"decort_stack_list": stack.DataSourceStackList(),
"decort_account_resource_consumption_list": account.DataSourceAccountResourceConsumptionList(),
"decort_account_resource_consumption_get": account.DataSourceAccountResourceConsumptionGet(),
"decort_kvmvm_list_deleted": kvmvm.DataSourceComputeListDeleted(),
"decort_kvmvm_vgpu_list": kvmvm.DataSourceComputeVGPUList(),
"decort_kvmvm_pci_device_list": kvmvm.DataSourceComputePCIDeviceList(),
"decort_k8s_wg_cloud_init": k8s.DataSourceK8sWgCloudInit(),
"decort_rg_resource_consumption_list": rg.DataSourceRGResourceConsumptionList(),
"decort_rg_resource_consumption_get": rg.DataSourceRGResourceConsumptionGet(),
"decort_cb_account": cb_account.DataSourceAccount(),
"decort_cb_account_list": cb_account.DataSourceAccountList(),
"decort_cb_account_computes_list": cb_account.DataSourceAccountComputesList(),
"decort_cb_account_list_deleted": cb_account.DataSourceAccountDeletedList(),
"decort_cb_account_disks_list": cb_account.DataSourceAccountDisksList(),
"decort_cb_account_flipgroups_list": cb_account.DataSourceAccountFlipGroupsList(),
"decort_cb_account_rg_list": cb_account.DataSourceAccountRGList(),
"decort_cb_account_vins_list": cb_account.DataSourceAccountVinsList(),
"decort_cb_account_resource_consumption_get": cb_account.DataSourceAccountResourceConsumptionGet(),
"decort_cb_account_resource_consumption_list": cb_account.DataSourceAccountResourceConsumptionList(),
"decort_cb_account_audits_list": cb_account.DataSourceAccountAuditsList(),
"decort_cb_audit": cb_audit.DataSourceAudit(),
"decort_cb_audit_list": cb_audit.DataSourceAuditList(),
"decort_cb_audit_linked_jobs": cb_audit.DataSourceAuditLinkedJobs(),
"decort_cb_extnet": cb_extnet.DataSourceExtnetCB(),
"decort_cb_extnet_list": cb_extnet.DataSourceExtnetListCB(),
"decort_cb_extnet_default": cb_extnet.DataSourceExtnetDefaultCB(),
"decort_cb_extnet_static_route_list": cb_extnet.DataSourceStaticRouteList(),
"decort_cb_extnet_static_route": cb_extnet.DataSourceStaticRoute(),
"decort_cb_image": cb_image.DataSourceImage(),
"decort_cb_grid": cb_grid.DataSourceGrid(),
"decort_cb_grid_get_status": cb_grid.DataSourceGridGetStatus(),
"decort_cb_grid_post_status": cb_grid.DataSourceGridPostStatus(),
"decort_cb_grid_get_diagnosis": cb_grid.DataSourceGridGetDiagnosis(),
"decort_cb_grid_post_diagnosis": cb_grid.DataSourceGridPostDiagnosis(),
"decort_cb_grid_list": cb_grid.DataSourceGridList(),
"decort_cb_grid_list_emails": cb_grid.DataSourceGridListEmails(),
"decort_cb_grid_list_consumption": cb_grid.DataSourceGridListConsumption(),
"decort_cb_grid_get_consumption": cb_grid.DataSourceGridGetConsumption(),
"decort_cb_image_list": cb_image.DataSourceImageList(),
"decort_cb_image_list_stacks": cb_image.DataSourceImageListStacks(),
"decort_cb_kvmvm": cb_kvmvm.DataSourceCompute(),
"decort_cb_kvmvm_affinity_relations": cb_kvmvm.DataSourceComputeAffinityRelations(),
"decort_cb_kvmvm_audits": cb_kvmvm.DataSourceComputeAudits(),
"decort_cb_kvmvm_boot_order_get": cb_kvmvm.DataSourceComputeBootOrderGet(),
"decort_cb_kvmvm_get_audits": cb_kvmvm.DataSourceComputeGetAudits(),
"decort_cb_kvmvm_get_console_url": cb_kvmvm.DataSourceComputeGetConsoleUrl(),
"decort_cb_kvmvm_get_log": cb_kvmvm.DataSourceComputeGetLog(),
"decort_cb_kvmvm_list": cb_kvmvm.DataSourceComputeList(),
"decort_cb_kvmvm_list_deleted": cb_kvmvm.DataSourceComputeListDeleted(),
"decort_cb_kvmvm_migrate_storage_info": cb_kvmvm.DataSourceComputeMigrateStorageInfo(),
"decort_cb_kvmvm_pci_device_list": cb_kvmvm.DataSourceComputePCIDeviceList(),
"decort_cb_kvmvm_pfw_list": cb_kvmvm.DataSourceComputePfwList(),
"decort_cb_kvmvm_snapshot_list": cb_kvmvm.DataSourceComputeSnapshotList(),
"decort_cb_kvmvm_snapshot_usage": cb_kvmvm.DataSourceComputeSnapshotUsage(),
"decort_cb_kvmvm_user_list": cb_kvmvm.DataSourceComputeUserList(),
"decort_cb_kvmvm_vgpu_list": cb_kvmvm.DataSourceComputeVGPUList(),
"decort_cb_disk": cb_disks.DataSourceDisk(),
"decort_cb_disk_list": cb_disks.DataSourceDiskList(),
"decort_cb_disk_list_deleted": cb_disks.DataSourceDiskListDeleted(),
"decort_cb_disk_list_types": cb_disks.DataSourceDiskListTypes(),
"decort_cb_disk_list_types_detailed": cb_disks.DataSourceDiskListTypesDetailed(),
"decort_cb_disk_list_unattached": cb_disks.DataSourceDiskListUnattached(),
"decort_cb_disk_snapshot": cb_disks.DataSourceDiskSnapshot(),
"decort_cb_disk_snapshot_list": cb_disks.DataSourceDiskSnapshotList(),
"decort_cb_pcidevice": cb_pcidevice.DataSourcePcidevice(),
"decort_cb_pcidevice_list": cb_pcidevice.DataSourcePcideviceList(),
"decort_cb_rg": cb_rg.DataSourceResgroup(),
"decort_cb_rg_affinity_group_computes": cb_rg.DataSourceRgAffinityGroupComputes(),
"decort_cb_rg_affinity_groups_get": cb_rg.DataSourceRgAffinityGroupsGet(),
"decort_cb_rg_affinity_groups_list": cb_rg.DataSourceRgAffinityGroupsList(),
"decort_cb_rg_resource_consumption_get": cb_rg.DataSourceRGResourceConsumptionGet(),
"decort_cb_rg_resource_consumption_list": cb_rg.DataSourceRGResourceConsumptionList(),
"decort_cb_rg_audits": cb_rg.DataSourceRgAudits(),
"decort_cb_rg_list": cb_rg.DataSourceRgList(),
"decort_cb_rg_list_deleted": cb_rg.DataSourceRgListDeleted(),
"decort_cb_rg_list_computes": cb_rg.DataSourceRgListComputes(),
"decort_cb_rg_list_lb": cb_rg.DataSourceRgListLb(),
"decort_cb_rg_list_pfw": cb_rg.DataSourceRgListPfw(),
"decort_cb_rg_list_vins": cb_rg.DataSourceRgListVins(),
"decort_cb_rg_usage": cb_rg.DataSourceRgUsage(),
"decort_cb_sep_list": cb_sep.DataSourceSepList(),
"decort_cb_sep": cb_sep.DataSourceSep(),
"decort_cb_sep_consumption": cb_sep.DataSourceSepConsumption(),
"decort_cb_sep_disk_list": cb_sep.DataSourceSepDiskList(),
"decort_cb_sep_config": cb_sep.DataSourceSepConfig(),
"decort_cb_sep_pool": cb_sep.DataSourceSepPool(),
"decort_cb_lb": cb_lb.DataSourceLB(),
"decort_cb_lb_list": cb_lb.DataSourceLBList(),
"decort_cb_lb_list_deleted": cb_lb.DataSourceLBListDeleted(),
"decort_cb_flipgroup_list": cb_flipgroup.DataSourceFlipgroupList(),
"decort_cb_flipgroup": cb_flipgroup.DataSourceFlipgroup(),
"decort_cb_stack_list": cb_stack.DataSourceStacksList(),
"decort_cb_stack": cb_stack.DataSourceStack(),
"decort_cb_vins": cb_vins.DataSourceVins(),
"decort_cb_vins_list": cb_vins.DataSourceVinsList(),
"decort_cb_vins_audits": cb_vins.DataSourceVinsAudits(),
"decort_cb_vins_ip_list": cb_vins.DataSourceVinsIpList(),
"decort_cb_vins_list_deleted": cb_vins.DataSourceVinsListDeleted(),
"decort_cb_vins_ext_net_list": cb_vins.DataSourceVinsExtNetList(),
"decort_cb_vins_nat_rule_list": cb_vins.DataSourceVinsNatRuleList(),
"decort_cb_vins_static_route": cb_vins.DataSourceStaticRoute(),
"decort_cb_vins_static_route_list": cb_vins.DataSourceStaticRouteList(),
"decort_cb_k8ci": cb_k8ci.DataSourceK8CI(),
"decort_cb_k8ci_list": cb_k8ci.DataSourceK8CIList(),
"decort_cb_k8ci_list_deleted": cb_k8ci.DataSourceK8CIListDeleted(),
"decort_cb_k8s": cb_k8s.DataSourceK8s(),
"decort_cb_k8s_list": cb_k8s.DataSourceK8sList(),
"decort_cb_k8s_list_deleted": cb_k8s.DataSourceK8sListDeleted(),
"decort_cb_k8s_wg": cb_k8s.DataSourceK8sWg(),
"decort_cb_k8s_wg_cloud_init": cb_k8s.DataSourceK8sWgCloudInit(),
"decort_cb_k8s_wg_list": cb_k8s.DataSourceK8sWgList(),
"decort_cb_k8s_computes": cb_k8s.DataSourceK8sComputes(),
}
}
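
Each key in this map is a data source type that can be referenced from Terraform configuration. As a minimal usage sketch, the cloudapi account data source registered above takes account_id as its only required argument (per the account data source schema shown near the end of this diff); the ID below is a placeholder.

```terraform
data "decort_account" "example" {
  account_id = 12345 # placeholder ID of an existing account
}
```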

@ -1,150 +1,168 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"golang.org/x/net/context"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/location"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
)
func Provider() *schema.Provider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
"authenticator": {
Type: schema.TypeString,
Required: true,
StateFunc: statefuncs.StateFuncToLower,
ValidateFunc: validation.StringInSlice([]string{"decs3o", "legacy", "jwt", "bvs"}, true), // ignore case while validating
Description: "Authentication mode to use when connecting to DECORT cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.",
},
"oauth2_url": {
Type: schema.TypeString,
Optional: true,
StateFunc: statefuncs.StateFuncToLower,
DefaultFunc: schema.EnvDefaultFunc("DECORT_OAUTH2_URL", nil),
Description: "OAuth2 application URL in 'decs3o' and 'bvs' authentication mode.",
},
"controller_url": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: statefuncs.StateFuncToLower,
Description: "URL of DECORT Cloud controller to use. API calls will be directed to this URL.",
},
"user": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_USER", nil),
Description: "User name for DECORT cloud API operations in 'legacy' authentication mode.",
},
"password": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_PASSWORD", nil),
Description: "User password for DECORT cloud API operations in 'legacy' authentication mode.",
},
"bvs_user": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_BVS_USER", nil),
Description: "User name for DECORT cloud API operations in 'bvs' authentication mode.",
},
"bvs_password": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_BVS_PASSWORD", nil),
Description: "User password for DECORT cloud API operations in 'bvs' authentication mode.",
},
"domain": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_DOMAIN", nil),
Description: "User password for DECORT cloud API operations in 'bvs' authentication mode.",
},
"app_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_APP_ID", nil),
Description: "Application ID to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.",
},
"app_secret": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_APP_SECRET", nil),
Description: "Application secret to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.",
},
"jwt": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_JWT", nil),
Description: "JWT to access DECORT cloud API in 'jwt' authentication mode.",
},
"allow_unverified_ssl": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only!",
},
},
ResourcesMap: newResourcesMap(),
DataSourcesMap: newDataSourcesMap(),
ConfigureContextFunc: providerConfigure,
}
}
func providerConfigure(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) {
decsController, err := controller.ControllerConfigure(d)
if err != nil {
return nil, diag.FromErr(err)
}
gridId, err := location.UtilityLocationGetDefaultGridID(ctx, decsController)
if err != nil {
return nil, diag.FromErr(err)
}
if gridId == 0 {
return nil, diag.FromErr(fmt.Errorf("providerConfigure: invalid default Grid ID = 0"))
}
return decsController, nil
}
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"golang.org/x/net/context"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/location"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/statefuncs"
)
func Provider() *schema.Provider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
"authenticator": {
Type: schema.TypeString,
Required: true,
StateFunc: statefuncs.StateFuncToLower,
ValidateFunc: validation.StringInSlice([]string{"decs3o", "legacy", "jwt", "bvs"}, true), // ignore case while validating
Description: "Authentication mode to use when connecting to DECORT cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.",
},
"oauth2_url": {
Type: schema.TypeString,
Optional: true,
StateFunc: statefuncs.StateFuncToLower,
DefaultFunc: schema.EnvDefaultFunc("DECORT_OAUTH2_URL", nil),
Description: "OAuth2 application URL in 'decs3o' and 'bvs' authentication mode.",
},
"controller_url": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: statefuncs.StateFuncToLower,
Description: "URL of DECORT Cloud controller to use. API calls will be directed to this URL.",
},
"user": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_USER", nil),
Description: "User name for DECORT cloud API operations in 'legacy' authentication mode.",
},
"password": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_PASSWORD", nil),
Description: "User password for DECORT cloud API operations in 'legacy' authentication mode.",
},
"bvs_user": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_BVS_USER", nil),
Description: "User name for DECORT cloud API operations in 'bvs' authentication mode.",
},
"bvs_password": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_BVS_PASSWORD", nil),
Description: "User password for DECORT cloud API operations in 'bvs' authentication mode.",
},
"domain": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_DOMAIN", nil),
Description: "User password for DECORT cloud API operations in 'bvs' authentication mode.",
},
"app_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_APP_ID", nil),
Description: "Application ID to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.",
},
"app_secret": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_APP_SECRET", nil),
Description: "Application secret to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.",
},
"jwt": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("DECORT_JWT", nil),
Description: "JWT to access DECORT cloud API in 'jwt' authentication mode.",
},
"allow_unverified_ssl": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only!",
},
"path_cfg": {
Type: schema.TypeString,
Optional: true,
Description: "The path of the configuration file entry",
},
"path_token": {
Type: schema.TypeString,
Optional: true,
Description: "The path of the token file entry",
},
"time_to_refresh": {
Type: schema.TypeInt,
Optional: true,
Description: "The number of minutes before the expiration of the token, a refresh will be made",
},
},
ResourcesMap: newResourcesMap(),
DataSourcesMap: newDataSourcesMap(),
ConfigureContextFunc: providerConfigure,
}
}
func providerConfigure(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) {
decsController, err := controller.ControllerConfigure(d)
if err != nil {
return nil, diag.FromErr(err)
}
gridId, err := location.UtilityLocationGetDefaultGridID(ctx, decsController)
if err != nil {
return nil, diag.FromErr(err)
}
if gridId == 0 {
return nil, diag.FromErr(fmt.Errorf("providerConfigure: invalid default Grid ID = 0"))
}
return decsController, nil
}
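
The path_cfg, path_token and time_to_refresh arguments added to the schema above control BVS token caching and refresh; in the controller they are passed to the SDK's BVSConfig as PathCfg, PathToken and TimeToRefresh. A hedged sketch of setting them in a 'bvs'-mode provider block (the file names and refresh window are placeholders):

```terraform
provider "decort" {
  authenticator = "bvs"
  # ... remaining bvs-mode arguments as in the earlier example ...

  path_cfg        = "decort_config.json" # placeholder path for the cached configuration
  path_token      = "decort_token.json"  # placeholder path for the cached token
  time_to_refresh = 5                    # refresh the token 5 minutes before it expires
}
```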

@ -1,104 +1,103 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/bservice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/pfw"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/rg"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/snapshot"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins"
cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account"
// cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks"
cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet"
// cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup"
// cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image"
// cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s"
// cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm"
// cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb"
// cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice"
// cb_pfw "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pfw"
// cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg"
// cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep"
// cb_snapshot "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/snapshot"
// cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins"
)
func newResourcesMap() map[string]*schema.Resource {
return map[string]*schema.Resource{
"decort_resgroup": rg.ResourceResgroup(),
"decort_kvmvm": kvmvm.ResourceCompute(),
"decort_disk": disks.ResourceDisk(),
"decort_disk_snapshot": disks.ResourceDiskSnapshot(),
"decort_vins": vins.ResourceVins(),
"decort_pfw": pfw.ResourcePfw(),
"decort_k8s": k8s.ResourceK8s(),
"decort_k8s_wg": k8s.ResourceK8sWg(),
"decort_k8s_cp": k8s.ResourceK8sCP(),
"decort_snapshot": snapshot.ResourceSnapshot(),
"decort_account": account.ResourceAccount(),
"decort_bservice": bservice.ResourceBasicService(),
"decort_bservice_group": bservice.ResourceBasicServiceGroup(),
"decort_image": image.ResourceImage(),
"decort_image_virtual": image.ResourceImageVirtual(),
"decort_lb": lb.ResourceLB(),
"decort_lb_backend": lb.ResourceLBBackend(),
"decort_lb_backend_server": lb.ResourceLBBackendServer(),
"decort_lb_frontend": lb.ResourceLBFrontend(),
"decort_lb_frontend_bind": lb.ResourceLBFrontendBind(),
"decort_flipgroup": flipgroup.ResourceFlipgroup(),
"decort_vins_static_route": vins.ResourceStaticRoute(),
"decort_cb_account": cb_account.ResourceAccount(),
"decort_cb_extnet": cb_extnet.ResourceExtnetCB(),
// "decort_cb_disk": cb_disks.ResourceDisk(),
// "decort_cb_image": cb_image.ResourceImage(),
// "decort_cb_virtual_image": cb_image.ResourceVirtualImage(),
// "decort_cb_cdrom_image": cb_image.ResourceCDROMImage(),
// "decort_cb_delete_images": cb_image.ResourceDeleteImages(),
// "decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(),
// "decort_cb_sep": cb_sep.ResourceSep(),
// "decort_cb_sep_config": cb_sep.ResourceSepConfig(),
// "decort_cb_resgroup": cb_rg.ResourceResgroup(),
// "decort_cb_kvmvm": cb_kvmvm.ResourceCompute(),
// "decort_cb_vins": cb_vins.ResourceVins(),
// "decort_cb_pfw": cb_pfw.ResourcePfw(),
// "decort_cb_k8s": cb_k8s.ResourceK8s(),
// "decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(),
// "decort_cb_snapshot": cb_snapshot.ResourceSnapshot(),
// "decort_cb_flipgroup": cb_flipgroup.ResourceFlipgroup(),
// "decort_cb_lb": cb_lb.ResourceLB(),
// "decort_cb_lb_backend": cb_lb.ResourceLBBackend(),
// "decort_cb_lb_backend_server": cb_lb.ResourceLBBackendServer(),
// "decort_cb_lb_frontend": cb_lb.ResourceLBFrontend(),
// "decort_cb_lb_frontend_bind": cb_lb.ResourceLBFrontendBind(),
"decort_cb_extnet_static_route": cb_extnet.ResourceStaticRoute(),
}
}
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Kasim Baybikov, <kmbaybikov@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/bservice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/disks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/kvmvm"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/pfw"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/rg"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/snapshot"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudapi/vins"
cb_account "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/account"
cb_disks "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/disks"
cb_extnet "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/extnet"
cb_flipgroup "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/flipgroup"
cb_image "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/image"
cb_k8ci "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8ci"
cb_k8s "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/k8s"
cb_kvmvm "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/kvmvm"
cb_lb "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/lb"
cb_pcidevice "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/pcidevice"
cb_rg "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/rg"
cb_sep "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/sep"
cb_vins "repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/vins"
)
func newResourcesMap() map[string]*schema.Resource {
return map[string]*schema.Resource{
"decort_resgroup": rg.ResourceResgroup(),
"decort_kvmvm": kvmvm.ResourceCompute(),
"decort_disk": disks.ResourceDisk(),
"decort_disk_snapshot": disks.ResourceDiskSnapshot(),
"decort_vins": vins.ResourceVins(),
"decort_pfw": pfw.ResourcePfw(),
"decort_k8s": k8s.ResourceK8s(),
"decort_k8s_wg": k8s.ResourceK8sWg(),
"decort_k8s_cp": k8s.ResourceK8sCP(),
"decort_snapshot": snapshot.ResourceSnapshot(),
"decort_account": account.ResourceAccount(),
"decort_bservice": bservice.ResourceBasicService(),
"decort_bservice_group": bservice.ResourceBasicServiceGroup(),
"decort_image": image.ResourceImage(),
"decort_image_virtual": image.ResourceImageVirtual(),
"decort_lb": lb.ResourceLB(),
"decort_lb_backend": lb.ResourceLBBackend(),
"decort_lb_backend_server": lb.ResourceLBBackendServer(),
"decort_lb_frontend": lb.ResourceLBFrontend(),
"decort_lb_frontend_bind": lb.ResourceLBFrontendBind(),
"decort_flipgroup": flipgroup.ResourceFlipgroup(),
"decort_vins_static_route": vins.ResourceStaticRoute(),
"decort_cb_account": cb_account.ResourceAccount(),
"decort_cb_extnet": cb_extnet.ResourceExtnetCB(),
"decort_cb_extnet_static_route": cb_extnet.ResourceStaticRoute(),
"decort_cb_disk": cb_disks.ResourceDisk(),
"decort_cb_disk_snapshot": cb_disks.ResourceDiskSnapshot(),
"decort_cb_image": cb_image.ResourceImage(),
"decort_cb_virtual_image": cb_image.ResourceVirtualImage(),
"decort_cb_cdrom_image": cb_image.ResourceCDROMImage(),
"decort_cb_pcidevice": cb_pcidevice.ResourcePcidevice(),
"decort_cb_sep": cb_sep.ResourceSep(),
"decort_cb_sep_config": cb_sep.ResourceSepConfig(),
"decort_cb_kvmvm": cb_kvmvm.ResourceCompute(),
"decort_cb_vins": cb_vins.ResourceVins(),
"decort_cb_k8ci": cb_k8ci.ResourceK8CI(),
"decort_cb_k8s_cp": cb_k8s.ResourceK8sCP(),
"decort_cb_k8s_wg": cb_k8s.ResourceK8sWg(),
"decort_cb_vins_static_route": cb_vins.ResourceStaticRoute(),
"decort_cb_flipgroup": cb_flipgroup.ResourceFlipgroup(),
"decort_cb_lb": cb_lb.ResourceLB(),
"decort_cb_lb_backend": cb_lb.ResourceLBBackend(),
"decort_cb_lb_backend_server": cb_lb.ResourceLBBackendServer(),
"decort_cb_lb_frontend": cb_lb.ResourceLBFrontend(),
"decort_cb_lb_frontend_bind": cb_lb.ResourceLBFrontendBind(),
"decort_cb_rg": cb_rg.ResourceResgroup(),
}
}

@ -142,7 +142,7 @@ func utilityK8sWgListCheckPresence(ctx context.Context, d *schema.ResourceData,
func utilityK8sWgCloudInitCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
c := m.(*controller.ControllerCfg)
req := k8s.GetWorkerNodesMetaDataRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: uint64(d.Get("wg_id").(int)),
}
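The two identical K8SID lines are the old and new sides of this one-line hunk; the visible difference appears to be alignment only. The conversion they show is the interesting part: schema.TypeInt attributes come back from d.Get as Go int, while the decort SDK request fields are uint64, hence the assert-and-widen step. A hypothetical helper naming that pattern:

```go
package k8s

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// uintOf is a hypothetical helper: schema.TypeInt values are stored as int,
// so they are type-asserted and widened to the uint64 that the SDK request
// fields (K8SID, WorkersGroupID) expect.
func uintOf(d *schema.ResourceData, key string) uint64 {
	return uint64(d.Get(key).(int))
}
```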

@ -735,7 +735,7 @@ func resourceComputeUpdate(ctx context.Context, d *schema.ResourceData, m interf
Name: d.Get("name").(string),
}
if desc, ok := d.GetOk("desc"); ok {
if desc, ok := d.GetOk("description"); ok {
req.Description = desc.(string)
}
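This one-line change is the compute description fix: the update path now reads the "description" key, which presumably matches the schema attribute, whereas GetOk("desc") would never report ok for a key that is not in the schema, so the new description was silently dropped from the update request. A minimal standalone sketch of the corrected lookup (the helper name is hypothetical):

```go
package kvmvm

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// descriptionFromSchema shows the corrected lookup: GetOk("description")
// matches the schema attribute, while the old GetOk("desc") never returned
// ok, so req.Description stayed empty.
func descriptionFromSchema(d *schema.ResourceData) (string, bool) {
	if desc, ok := d.GetOk("description"); ok {
		return desc.(string), true
	}
	return "", false
}
```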
@ -1613,15 +1613,15 @@ func ResourceComputeSchemaMake() map[string]*schema.Schema {
Description: "Name of this compute. Compute names are case sensitive and must be unique in the resource group.",
},
"rg_id": {
Type: schema.TypeInt,
Required: true,
Type: schema.TypeInt,
Required: true,
// ForceNew: true,
ValidateFunc: validation.IntAtLeast(1),
Description: "ID of the resource group where this compute should be deployed.",
},
"driver": {
Type: schema.TypeString,
Required: true,
Type: schema.TypeString,
Required: true,
// ForceNew: true,
StateFunc: statefuncs.StateFuncToUpper,
ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false), // observe case while validating
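The rg_id and driver attributes appear to change only formatting in this hunk, but the combination the driver attribute uses deserves a note: ValidateFunc checks the configured value case-sensitively against the canonical upper-case driver names, and StateFunc additionally upper-cases what is written to state. A generic, standalone sketch of the same attribute, with an inline StateFunc standing in for the provider's statefuncs.StateFuncToUpper:

```go
package kvmvm

import (
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// driverSchema sketches the pattern: ValidateFunc rejects anything outside
// the canonical names (case-sensitive), and StateFunc upper-cases the value
// before it is stored in state.
func driverSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Required: true,
		StateFunc: func(v interface{}) string {
			return strings.ToUpper(v.(string))
		},
		ValidateFunc: validation.StringInSlice([]string{"SVA_KVM_X86", "KVM_X86", "KVM_PPC"}, false),
	}
}
```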

@ -44,6 +44,7 @@ import (
func dataSourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
acc, err := utilityAccountCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
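The added d.SetId("") here, and the matching one-liners in the account data sources that follow, clear the data source ID when the lookup fails, so a failed refresh does not leave a half-populated data source in state. A condensed, hypothetical wrapper for the same error path:

```go
package account

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// readOrClear wraps the error path used in these reads: on failure the ID is
// cleared and the error is returned as diagnostics.
func readOrClear(d *schema.ResourceData, lookup func() (interface{}, error)) (interface{}, diag.Diagnostics) {
	obj, err := lookup()
	if err != nil {
		d.SetId("") // drop the stale ID so nothing half-read survives in state
		return nil, diag.FromErr(err)
	}
	return obj, nil
}
```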
@ -67,174 +68,3 @@ func DataSourceAccount() *schema.Resource {
Schema: dataSourceAccountSchemaMake(),
}
}
func dataSourceAccountSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
},
"dc_location": {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"explicit": {
Type: schema.TypeBool,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"right": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"user_group_id": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"company": {
Type: schema.TypeString,
Computed: true,
},
"companyurl": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_parameter": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deactivation_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"displayname": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"resource_limits": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cu_c": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_d": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_dm": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_i": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_m": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
"resource_types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"send_access_emails": {
Type: schema.TypeBool,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"uniq_pools": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"version": {
Type: schema.TypeInt,
Computed: true,
},
"vins": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
}
}

@ -1,109 +1,70 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountAuditsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountAuditsList, err := utilityAccountAuditsListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountAuditsList(accountAuditsList))
return nil
}
func dataSourceAccountAuditsListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"call": {
Type: schema.TypeString,
Computed: true,
},
"responsetime": {
Type: schema.TypeFloat,
Computed: true,
},
"statuscode": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeFloat,
Computed: true,
},
"user": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
}
return res
}
func DataSourceAccountAuditsList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountAuditsListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountAuditsListSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountAuditsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountAuditsList, err := utilityAccountAuditsListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountAuditsList(accountAuditsList))
return nil
}
func DataSourceAccountAuditsList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountAuditsListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountAuditsListSchemaMake(),
}
}
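The account list data sources in this commit all share one read shape: look the list up, stamp the data source with a fresh UUID (a list result has no natural ID of its own), then flatten the result into items, plus entry_count for the paged lists. A condensed sketch of that shape, with lookup and flatten standing in for the per-resource utility*CheckPresence and flatten* helpers:

```go
package account

import (
	"context"

	"github.com/google/uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// listRead sketches the shared shape of these list reads; lookup and flatten
// are placeholders for the per-resource helpers.
func listRead(
	lookup func(context.Context, *schema.ResourceData, interface{}) ([]interface{}, error),
	flatten func([]interface{}) []map[string]interface{},
) schema.ReadContextFunc {
	return func(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
		list, err := lookup(ctx, d, m)
		if err != nil {
			d.SetId("") // same error path as in the hunks above
			return diag.FromErr(err)
		}
		d.SetId(uuid.New().String()) // synthetic ID for a list result
		d.Set("items", flatten(list))
		return nil
	}
}
```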

@ -1,225 +1,71 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountComputesListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountComputesList, err := utilityAccountComputesListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountComputesList(accountComputesList))
d.Set("entry_count", accountComputesList.EntryCount)
return nil
}
func dataSourceAccountComputesListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"compute_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by compute ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by compute name",
},
"rg_name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by RG name",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by RG ID",
},
"tech_status": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by tech. status",
},
"ip_address": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by IP address",
},
"extnet_name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by extnet name",
},
"extnet_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by extnet ID",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"cpus": {
Type: schema.TypeInt,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"registered": {
Type: schema.TypeBool,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"total_disks_size": {
Type: schema.TypeInt,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"user_managed": {
Type: schema.TypeBool,
Computed: true,
},
"vins_connected": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceAccountComputesList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountComputesListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountComputesListSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountComputesListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountComputesList, err := utilityAccountComputesListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountComputesList(accountComputesList))
d.Set("entry_count", accountComputesList.EntryCount)
return nil
}
func DataSourceAccountComputesList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountComputesListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountComputesListSchemaMake(),
}
}

@ -1,284 +1,71 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func DataSourceAccountDeletedList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountDeletedListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountListDeletedSchemaMake(),
}
}
func dataSourceAccountDeletedListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountDeletedList, err := utilityAccountDeletedListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenListDeleted(accountDeletedList))
d.Set("entry_count", accountDeletedList.EntryCount)
return nil
}
func dataSourceAccountListDeletedSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by name",
},
"acl": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by ACL",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dc_location": {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"acl": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"explicit": {
Type: schema.TypeBool,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"right": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"user_group_id": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"company": {
Type: schema.TypeString,
Computed: true,
},
"companyurl": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_parameter": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deactivation_time": {
Type: schema.TypeFloat,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"displayname": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"resource_limits": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cu_c": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_d": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_dm": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_i": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_m": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
"resource_types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"send_access_emails": {
Type: schema.TypeBool,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"uniq_pools": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"version": {
Type: schema.TypeInt,
Computed: true,
},
"vins": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountDeletedListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountDeletedList, err := utilityAccountDeletedListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenListDeleted(accountDeletedList))
d.Set("entry_count", accountDeletedList.EntryCount)
return nil
}
func DataSourceAccountDeletedList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountDeletedListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountListDeletedSchemaMake(),
}
}

@ -1,151 +1,70 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountDisksListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountDisksList, err := utilityAccountDisksListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountDisksList(accountDisksList))
d.Set("entry_count", accountDisksList.EntryCount)
return nil
}
func dataSourceAccountDisksListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"disk_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by disk ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by disk name",
},
"disk_max_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by disk max size",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by disk type",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Computed: true,
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
},
"pool_name": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceAccountDisksList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountDisksListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountDisksListSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountDisksListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountDisksList, err := utilityAccountDisksListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountDisksList(accountDisksList))
d.Set("entry_count", accountDisksList.EntryCount)
return nil
}
func DataSourceAccountDisksList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountDisksListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountDisksListSchemaMake(),
}
}

@ -1,218 +1,71 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountFlipGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountFlipGroupsList, err := utilityAccountFlipGroupsListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountFlipGroupsList(accountFlipGroupsList))
d.Set("entry_count", accountFlipGroupsList.EntryCount)
return nil
}
func dataSourceAccountFlipGroupsListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by name",
},
"vins_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by ViNS ID",
},
"vins_name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by ViNS name",
},
"extnet_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by extnet ID",
},
"by_ip": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by IP",
},
"flipgroup_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by flipgroup ID",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"client_type": {
Type: schema.TypeString,
Computed: true,
},
"conn_type": {
Type: schema.TypeString,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"default_gw": {
Type: schema.TypeString,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"fg_id": {
Type: schema.TypeInt,
Computed: true,
},
"ip": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"fg_name": {
Type: schema.TypeString,
Computed: true,
},
"net_id": {
Type: schema.TypeInt,
Computed: true,
},
"net_type": {
Type: schema.TypeString,
Computed: true,
},
"netmask": {
Type: schema.TypeInt,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceAccountFlipGroupsList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountFlipGroupsListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountFlipGroupsListSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountFlipGroupsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountFlipGroupsList, err := utilityAccountFlipGroupsListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountFlipGroupsList(accountFlipGroupsList))
d.Set("entry_count", accountFlipGroupsList.EntryCount)
return nil
}
func DataSourceAccountFlipGroupsList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountFlipGroupsListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountFlipGroupsListSchemaMake(),
}
}

@ -44,6 +44,7 @@ import (
func dataSourceAccountResourceConsumptionGetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountResourceConsumptionRec, err := utilityAccountResourceConsumptionGetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -53,173 +54,6 @@ func dataSourceAccountResourceConsumptionGetRead(ctx context.Context, d *schema.
return nil
}
func dataSourceAccountResourceConsumptionGetSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
},
"consumed": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"disksizemax": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"disksizemax": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"resource_limits": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cu_c": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_d": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_dm": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_i": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_m": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
}
return res
}
func DataSourceAccountResourceConsumptionGet() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -1,292 +1,72 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Nikita Sorokin, <nesorokin@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func DataSourceAccountList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountListSchemaMake(),
}
}
func dataSourceAccountListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountList, err := utilityAccountListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountList(accountList))
d.Set("entry_count", accountList.EntryCount)
return nil
}
func dataSourceAccountListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by name",
},
"acl": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by ACL",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by status",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dc_location": {
Type: schema.TypeString,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"acl": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"explicit": {
Type: schema.TypeBool,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"right": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"user_group_id": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"company": {
Type: schema.TypeString,
Computed: true,
},
"companyurl": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_parameter": {
Type: schema.TypeString,
Computed: true,
},
"cpu_allocation_ratio": {
Type: schema.TypeFloat,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deactivation_time": {
Type: schema.TypeFloat,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"displayname": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"resource_limits": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cu_c": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_d": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_dm": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_i": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_m": {
Type: schema.TypeFloat,
Computed: true,
},
"cu_np": {
Type: schema.TypeFloat,
Computed: true,
},
"gpu_units": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
"resource_types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"send_access_emails": {
Type: schema.TypeBool,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"uniq_pools": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"version": {
Type: schema.TypeInt,
Computed: true,
},
"vins": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Nikita Sorokin, <nesorokin@basistech.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountList, err := utilityAccountListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountList(accountList))
d.Set("entry_count", accountList.EntryCount)
return nil
}
func DataSourceAccountList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountListSchemaMake(),
}
}

@ -44,6 +44,7 @@ import (
func dataSourceAccountResourceConsumptionListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountResourceConsumptionList, err := utilityAccountResourceConsumptionListCheckPresence(ctx, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -54,148 +55,6 @@ func dataSourceAccountResourceConsumptionListRead(ctx context.Context, d *schema
return nil
}
func dataSourceAccountResourceConsumptionListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"consumed": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"disksizemax": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"disksizemax": {
Type: schema.TypeFloat,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceAccountResourceConsumptionList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -1,362 +1,71 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func DataSourceAccountRGList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountRGListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountRGListSchemaMake(),
}
}
func dataSourceAccountRGListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountRGList, err := utilityAccountRGListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountRGList(accountRGList))
d.Set("entry_count", accountRGList.EntryCount)
return nil
}
func dataSourceAccountRGListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by RG ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by name",
},
"vins_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by ViNS ID",
},
"vm_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by VM ID",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by status",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: dataSourceAccountRGSchemaMake(),
},
},
"entry_count": {
Type: schema.TypeInt,
Optional: true,
},
}
return res
}
func dataSourceAccountRGSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"started": {
Type: schema.TypeInt,
Computed: true,
},
"stopped": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"resources": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"consumed": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeFloat,
Computed: true,
},
"disksizemax": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"limits": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"disksizemax": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disksize": {
Type: schema.TypeInt,
Computed: true,
},
"disksizemax": {
Type: schema.TypeInt,
Computed: true,
},
"extips": {
Type: schema.TypeInt,
Computed: true,
},
"exttraffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
},
},
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
"vinses": {
Type: schema.TypeInt,
Computed: true,
},
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountRGListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountRGList, err := utilityAccountRGListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountRGList(accountRGList))
d.Set("entry_count", accountRGList.EntryCount)
return nil
}
func DataSourceAccountRGList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountRGListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountRGListSchemaMake(),
}
}

@ -1,192 +1,71 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountVinsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountVinsList, err := utilityAccountVinsListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountVinsList(accountVinsList))
d.Set("entry_count", accountVinsList.EntryCount)
return nil
}
func dataSourceAccountVinsListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the account",
},
"vins_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by ViNS ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by name",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Filter by RG ID",
},
"ext_ip": {
Type: schema.TypeString,
Optional: true,
Description: "Filter by external IP",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "Search Result",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"computes": {
Type: schema.TypeInt,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"external_ip": {
Type: schema.TypeString,
Computed: true,
},
"vin_id": {
Type: schema.TypeInt,
Computed: true,
},
"vin_name": {
Type: schema.TypeString,
Computed: true,
},
"network": {
Type: schema.TypeString,
Computed: true,
},
"pri_vnf_dev_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceAccountVinsList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountVinsListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountVinsListSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceAccountVinsListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
accountVinsList, err := utilityAccountVinsListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenAccountVinsList(accountVinsList))
d.Set("entry_count", accountVinsList.EntryCount)
return nil
}
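// DataSourceAccountVinsList describes the data source that lists ViNS belonging to an
// account, reusing the same timeout constants as the other list data sources.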
func DataSourceAccountVinsList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceAccountVinsListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceAccountVinsListSchemaMake(),
}
}

@ -62,7 +62,7 @@ func flattenDataAccount(d *schema.ResourceData, acc *account.RecordAccount) {
}
func flattenAccountRGList(argl *account.ListRG) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
res := make([]map[string]interface{}, 0, len(argl.Data))
for _, arg := range argl.Data {
temp := map[string]interface{}{
"computes": flattenAccRGComputes(arg.Computes),
@ -139,7 +139,7 @@ func flattenAccResource(r account.Resource) []map[string]interface{} {
}
func flattenAccAcl(acls []account.ACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
res := make([]map[string]interface{}, 0, len(acls))
for _, acls := range acls {
temp := map[string]interface{}{
"explicit": acls.Explicit,
@ -187,7 +187,7 @@ func flattenRgAcl(rgAcls []account.ACL) []map[string]interface{} {
}
func flattenListDeleted(al *account.ListAccounts) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
res := make([]map[string]interface{}, 0, len(al.Data))
for _, acc := range al.Data {
temp := map[string]interface{}{
"dc_location": acc.DCLocation,
@ -222,7 +222,7 @@ func flattenListDeleted(al *account.ListAccounts) []map[string]interface{} {
}
func flattenAccountList(al *account.ListAccounts) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
res := make([]map[string]interface{}, 0, len(al.Data))
for _, acc := range al.Data {
temp := map[string]interface{}{
"dc_location": acc.DCLocation,
@ -257,7 +257,7 @@ func flattenAccountList(al *account.ListAccounts) []map[string]interface{} {
}
func flattenAccountAuditsList(aal account.ListAudits) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
res := make([]map[string]interface{}, 0, len(aal))
for _, aa := range aal {
temp := map[string]interface{}{
"call": aa.Call,
@ -272,7 +272,7 @@ func flattenAccountAuditsList(aal account.ListAudits) []map[string]interface{} {
}
func flattenAccountComputesList(acl *account.ListComputes) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
res := make([]map[string]interface{}, 0, len(acl.Data))
for _, acc := range acl.Data {
temp := map[string]interface{}{
"account_id": acc.AccountID,
@ -302,7 +302,7 @@ func flattenAccountComputesList(acl *account.ListComputes) []map[string]interfac
}
func flattenAccountDisksList(adl *account.ListDisks) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
res := make([]map[string]interface{}, 0, len(adl.Data))
for _, ad := range adl.Data {
temp := map[string]interface{}{
"disk_id": ad.ID,
@ -319,7 +319,7 @@ func flattenAccountDisksList(adl *account.ListDisks) []map[string]interface{} {
}
func flattenAccountFlipGroupsList(afgl *account.ListFLIPGroups) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
res := make([]map[string]interface{}, 0, len(afgl.Data))
for _, afg := range afgl.Data {
temp := map[string]interface{}{
"account_id": afg.AccountID,
@ -350,7 +350,7 @@ func flattenAccountFlipGroupsList(afgl *account.ListFLIPGroups) []map[string]int
}
func flattenAccountVinsList(avl *account.ListVINS) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
res := make([]map[string]interface{}, 0, len(avl.Data))
for _, av := range avl.Data {
temp := map[string]interface{}{
"account_id": av.AccountID,
@ -384,7 +384,7 @@ func flattenResourceConsumption(d *schema.ResourceData, acc *account.RecordResou
}
func flattenAccountSeps(seps map[string]map[string]account.DiskUsage) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
res := make([]map[string]interface{}, 0, len(seps))
for sepKey, sepVal := range seps {
for dataKey, dataVal := range sepVal {
temp := map[string]interface{}{

@ -1,541 +1,342 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourseAccountCreate")
c := m.(*controller.ControllerCfg)
req := account.CreateRequest{}
req.Name = d.Get("account_name").(string)
req.Username = d.Get("username").(string)
if emailaddress, ok := d.GetOk("emailaddress"); ok {
req.EmailAddress = emailaddress.(string)
}
if sendAccessEmails, ok := d.GetOk("send_access_emails"); ok {
req.SendAccessEmails = sendAccessEmails.(bool)
}
if uniqPools, ok := d.GetOk("uniq_pools"); ok {
uniqPools := uniqPools.([]interface{})
for _, pool := range uniqPools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
}
if resLimits, ok := d.GetOk("resource_limits"); ok {
resLimits := resLimits.([]interface{})[0]
resLimitsConv := resLimits.(map[string]interface{})
if resLimitsConv["cu_m"] != nil {
maxMemCap := int64(resLimitsConv["cu_m"].(float64))
if maxMemCap == 0 {
req.MaxMemoryCapacity = -1
} else {
req.MaxMemoryCapacity = maxMemCap
}
}
if resLimitsConv["cu_dm"] != nil {
maxDiskCap := int64(resLimitsConv["cu_dm"].(float64))
if maxDiskCap == 0 {
req.MaxVDiskCapacity = -1
} else {
req.MaxVDiskCapacity = maxDiskCap
}
}
if resLimitsConv["cu_c"] != nil {
maxCPUCap := int64(resLimitsConv["cu_c"].(float64))
if maxCPUCap == 0 {
req.MaxCPUCapacity = -1
} else {
req.MaxCPUCapacity = maxCPUCap
}
}
if resLimitsConv["cu_i"] != nil {
maxNumPublicIP := int64(resLimitsConv["cu_i"].(float64))
if maxNumPublicIP == 0 {
req.MaxNumPublicIP = -1
} else {
req.MaxNumPublicIP = maxNumPublicIP
}
}
if resLimitsConv["cu_np"] != nil {
maxNP := int64(resLimitsConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = maxNP
}
}
if resLimitsConv["gpu_units"] != nil {
gpuUnits := int64(resLimitsConv["gpu_units"].(float64))
if gpuUnits == 0 {
req.GPUUnits = -1
} else {
req.GPUUnits = gpuUnits
}
}
}
accountId, err := c.CloudBroker().Account().Create(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(accountId, 10))
var w dc.Warnings
if users, ok := d.GetOk("users"); ok {
addedUsers := users.([]interface{})
for _, user := range addedUsers {
userConv := user.(map[string]interface{})
req := account.AddUserRequest{
AccountID: accountId,
Username: userConv["user_id"].(string),
AccessType: userConv["access_type"].(string),
}
_, err := c.CloudBroker().Account().AddUser(ctx, req)
if err != nil {
w.Add(err)
}
}
}
if cpuAllocationParameter, ok := d.GetOk("cpu_allocation_parameter"); ok {
cpuAllocationParameter := cpuAllocationParameter.(string)
req := account.SetCPUAllocationParameterRequest{
AccountID: accountId,
StrictLoose: cpuAllocationParameter,
}
log.Debugf("setting account cpu allocation parameter")
_, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, req)
if err != nil {
w.Add(err)
}
}
if cpuAllocationRatio, ok := d.GetOk("cpu_allocation_ratio"); ok {
cpuAllocationRatio := cpuAllocationRatio.(float64)
req := account.SetCPUAllocationRatioRequest{
AccountID: accountId,
Ratio: cpuAllocationRatio,
}
log.Debugf("setting account cpu allocation ratio")
_, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, req)
if err != nil {
w.Add(err)
}
}
if !d.Get("enable").(bool) {
_, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{
AccountID: accountId,
})
if err != nil {
w.Add(err)
}
}
diags := resourceAccountRead(ctx, d, m)
diags = append(diags, w.Get()...)
return diags
}
func resourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceAccountRead")
acc, err := utilityAccountCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
flattenResourceAccount(d, acc)
return nil
}
func resourceAccountDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceAccountDelete")
accountData, err := utilityAccountCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := account.DeleteRequest{
AccountID: accountData.ID,
Permanently: d.Get("permanently").(bool),
}
_, err = c.CloudBroker().Account().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceAccountUpdate")
c := m.(*controller.ControllerCfg)
acc, err := utilityAccountCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
accountId, _ := strconv.ParseUint(d.Id(), 10, 64)
hasChanged := false
switch acc.Status {
case status.Destroyed:
d.SetId("")
// return resourceAccountCreate(ctx, d, m)
return diag.Errorf("The resource cannot be updated because it has been destroyed")
case status.Destroying:
return diag.Errorf("The account is in progress with status: %s", acc.Status)
case status.Deleted:
_, err := c.CloudBroker().Account().Restore(ctx, account.RestoreRequest{
AccountID: accountId,
})
if err != nil {
return diag.FromErr(err)
}
hasChanged = true
case status.Disabled:
log.Debugf("The account is in status: %s, troubles may occur with update. Please, enable account first.", acc.Status)
case status.Confirmed:
}
if hasChanged {
acc, err = utilityAccountCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
}
if d.HasChange("enable") {
enable := d.Get("enable").(bool)
if enable && acc.Status == status.Disabled {
_, err := c.CloudBroker().Account().Enable(ctx, account.EnableRequest{
AccountID: accountId,
})
if err != nil {
return diag.FromErr(err)
}
} else if !enable && acc.Status == status.Enabled {
_, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{
AccountID: accountId,
})
if err != nil {
return diag.FromErr(err)
}
}
}
req := account.UpdateRequest{
AccountID: accountId,
}
updated := false
if d.HasChange("account_name") {
req.Name = d.Get("account_name").(string)
updated = true
}
if d.HasChange("send_access_emails") {
req.SendAccessEmails = d.Get("send_access_emails").(bool)
updated = true
}
if d.HasChange("uniq_pools") {
uniq_pools := d.Get("uniq_pools").([]interface{})
for _, pool := range uniq_pools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
updated = true
}
if d.HasChange("resource_limits") {
resLimit := d.Get("resource_limits").([]interface{})[0]
resLimitConv := resLimit.(map[string]interface{})
if resLimitConv["cu_m"] != nil {
maxMemCap := int(resLimitConv["cu_m"].(float64))
if maxMemCap == 0 {
req.MaxMemoryCapacity = -1
} else {
req.MaxMemoryCapacity = int64(maxMemCap)
}
}
if resLimitConv["cu_dm"] != nil {
maxDiskCap := int(resLimitConv["cu_dm"].(float64))
if maxDiskCap == 0 {
req.MaxVDiskCapacity = -1
} else {
req.MaxVDiskCapacity = int64(maxDiskCap)
}
}
if resLimitConv["cu_c"] != nil {
maxCPUCap := int(resLimitConv["cu_c"].(float64))
if maxCPUCap == 0 {
req.MaxCPUCapacity = -1
} else {
req.MaxCPUCapacity = int64(maxCPUCap)
}
}
if resLimitConv["cu_i"] != nil {
maxNumPublicIP := int(resLimitConv["cu_i"].(float64))
if maxNumPublicIP == 0 {
req.MaxNumPublicIP = -1
} else {
req.MaxNumPublicIP = int64(maxNumPublicIP)
}
}
if resLimitConv["cu_np"] != nil {
maxNP := int(resLimitConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = int64(maxNP)
}
}
if resLimitConv["gpu_units"] != nil {
gpuUnits := int(resLimitConv["gpu_units"].(float64))
if gpuUnits == 0 {
req.GPUUnits = -1
} else {
req.GPUUnits = int64(gpuUnits)
}
}
updated = true
}
if updated {
_, err := c.CloudBroker().Account().Update(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("cpu_allocation_parameter") {
cpuAllocationParameter := d.Get("cpu_allocation_parameter").(string)
_, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, account.SetCPUAllocationParameterRequest{
AccountID: acc.ID,
StrictLoose: cpuAllocationParameter,
})
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("cpu_allocation_ratio") {
cpuAllocationRatio := d.Get("cpu_allocation_ratio").(float64)
_, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, account.SetCPUAllocationRatioRequest{
AccountID: acc.ID,
Ratio: cpuAllocationRatio,
})
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("users") {
deletedUsers := make([]interface{}, 0)
addedUsers := make([]interface{}, 0)
updatedUsers := make([]interface{}, 0)
old, new := d.GetChange("users")
oldConv := old.([]interface{})
newConv := new.([]interface{})
for _, el := range oldConv {
if !isContainsUser(newConv, el) {
deletedUsers = append(deletedUsers, el)
}
}
for _, el := range newConv {
if !isContainsUser(oldConv, el) {
duplicate := false
for _, user := range acc.ACL {
if user.UserGroupID == el.(map[string]interface{})["user_id"].(string) {
duplicate = true
}
}
if !duplicate {
addedUsers = append(addedUsers, el)
} else if isChangedUser(oldConv, el) {
updatedUsers = append(updatedUsers, el)
}
}
}
for _, user := range deletedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().DeleteUser(ctx, account.DeleteUserRequest{
AccountID: accountId,
UserName: userConv["user_id"].(string),
RecursiveDelete: userConv["recursive_delete"].(bool),
})
if err != nil {
return diag.FromErr(err)
}
}
for _, user := range addedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().AddUser(ctx, account.AddUserRequest{
AccountID: accountId,
Username: userConv["user_id"].(string),
AccessType: strings.ToUpper(userConv["access_type"].(string)),
})
if err != nil {
return diag.FromErr(err)
}
}
for _, user := range updatedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().UpdateUser(ctx, account.UpdateUserRequest{
AccountID: accountId,
UserID: userConv["user_id"].(string),
AccessType: strings.ToUpper(userConv["access_type"].(string)),
})
if err != nil {
return diag.FromErr(err)
}
}
}
return resourceAccountRead(ctx, d, m)
}
func isContainsUser(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) {
return true
}
}
return false
}
func isChangedUser(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) &&
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string)) ||
elOldConv["recursive_delete"].(bool) != elConv["recursive_delete"].(bool)) {
return true
}
}
return false
}
func ResourceAccount() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceAccountCreate,
ReadContext: resourceAccountRead,
UpdateContext: resourceAccountUpdate,
DeleteContext: resourceAccountDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s,
Read: &constants.Timeout30s,
Update: &constants.Timeout60s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: resourceAccountSchemaMake(),
}
}
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package account
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
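// resourceAccountCreate creates a cloudbroker account from the configured name and
// username, applies optional resource limits (a value of 0 is sent as -1, i.e. unlimited),
// then adds users, sets the CPU allocation parameter/ratio and disables the account when
// "enable" is false. Post-create failures are collected as warnings rather than errors.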
func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourseAccountCreate")
c := m.(*controller.ControllerCfg)
req := account.CreateRequest{
Name: d.Get("account_name").(string),
Username: d.Get("username").(string),
}
if emailaddress, ok := d.GetOk("emailaddress"); ok {
req.EmailAddress = emailaddress.(string)
}
if sendAccessEmails, ok := d.GetOk("send_access_emails"); ok {
req.SendAccessEmails = sendAccessEmails.(bool)
}
if uniqPools, ok := d.GetOk("uniq_pools"); ok {
uniqPools := uniqPools.([]interface{})
for _, pool := range uniqPools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
}
if resLimits, ok := d.GetOk("resource_limits"); ok {
resLimits := resLimits.([]interface{})[0]
resLimitsConv := resLimits.(map[string]interface{})
if resLimitsConv["cu_m"] != nil {
maxMemCap := int64(resLimitsConv["cu_m"].(float64))
if maxMemCap == 0 {
req.MaxMemoryCapacity = -1
} else {
req.MaxMemoryCapacity = maxMemCap
}
}
if resLimitsConv["cu_dm"] != nil {
maxDiskCap := int64(resLimitsConv["cu_dm"].(float64))
if maxDiskCap == 0 {
req.MaxVDiskCapacity = -1
} else {
req.MaxVDiskCapacity = maxDiskCap
}
}
if resLimitsConv["cu_c"] != nil {
maxCPUCap := int64(resLimitsConv["cu_c"].(float64))
if maxCPUCap == 0 {
req.MaxCPUCapacity = -1
} else {
req.MaxCPUCapacity = maxCPUCap
}
}
if resLimitsConv["cu_i"] != nil {
maxNumPublicIP := int64(resLimitsConv["cu_i"].(float64))
if maxNumPublicIP == 0 {
req.MaxNumPublicIP = -1
} else {
req.MaxNumPublicIP = maxNumPublicIP
}
}
if resLimitsConv["cu_np"] != nil {
maxNP := int64(resLimitsConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = maxNP
}
}
if resLimitsConv["gpu_units"] != nil {
gpuUnits := int64(resLimitsConv["gpu_units"].(float64))
if gpuUnits == 0 {
req.GPUUnits = -1
} else {
req.GPUUnits = gpuUnits
}
}
}
accountId, err := c.CloudBroker().Account().Create(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(accountId, 10))
var w dc.Warnings
if users, ok := d.GetOk("users"); ok {
addedUsers := users.([]interface{})
for _, user := range addedUsers {
userConv := user.(map[string]interface{})
req := account.AddUserRequest{
AccountID: accountId,
Username: userConv["user_id"].(string),
AccessType: userConv["access_type"].(string),
}
_, err := c.CloudBroker().Account().AddUser(ctx, req)
if err != nil {
w.Add(err)
}
}
}
if cpuAllocationParameter, ok := d.GetOk("cpu_allocation_parameter"); ok {
cpuAllocationParameter := cpuAllocationParameter.(string)
req := account.SetCPUAllocationParameterRequest{
AccountID: accountId,
StrictLoose: cpuAllocationParameter,
}
log.Debugf("setting account cpu allocation parameter")
_, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, req)
if err != nil {
w.Add(err)
}
}
if cpuAllocationRatio, ok := d.GetOk("cpu_allocation_ratio"); ok {
cpuAllocationRatio := cpuAllocationRatio.(float64)
req := account.SetCPUAllocationRatioRequest{
AccountID: accountId,
Ratio: cpuAllocationRatio,
}
log.Debugf("setting account cpu allocation ratio")
_, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, req)
if err != nil {
w.Add(err)
}
}
if !d.Get("enable").(bool) {
_, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{
AccountID: accountId,
})
if err != nil {
w.Add(err)
}
}
return append(resourceAccountRead(ctx, d, m), w.Get()...)
}
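// resourceAccountRead refreshes the account state; if the account can no longer be
// found, the ID is cleared and the error is returned as a diagnostic.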
func resourceAccountRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceAccountRead")
acc, err := utilityAccountCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
flattenResourceAccount(d, acc)
return nil
}
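// resourceAccountDelete removes the account, honouring the "permanently" flag.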
func resourceAccountDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceAccountDelete")
accountData, err := utilityAccountCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := account.DeleteRequest{
AccountID: accountData.ID,
Permanently: d.Get("permanently").(bool),
}
_, err = c.CloudBroker().Account().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
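// resourceAccountUpdate restores a deleted account when "restore" is set, then applies
// pending changes (enable/disable, base fields, CPU allocation settings and user access)
// through the utilityAccount* helpers and re-reads the resulting state.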
func resourceAccountUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceAccountUpdate")
c := m.(*controller.ControllerCfg)
acc, err := utilityAccountCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
accountId, _ := strconv.ParseUint(d.Id(), 10, 64)
hasChanged := false
switch acc.Status {
case status.Destroyed:
d.SetId("")
// return resourceAccountCreate(ctx, d, m)
return diag.Errorf("The resource cannot be updated because it has been destroyed")
case status.Destroying:
return diag.Errorf("The account is in progress with status: %s", acc.Status)
case status.Deleted:
if d.Get("restore").(bool) {
_, err := c.CloudBroker().Account().Restore(ctx, account.RestoreRequest{
AccountID: accountId,
})
if err != nil {
return diag.FromErr(err)
}
if _, ok := d.GetOk("enable"); ok {
if err := utilityAccountEnableUpdate(ctx, d, m, acc); err != nil {
return diag.FromErr(err)
}
}
hasChanged = true
}
case status.Disabled:
log.Debugf("The account is in status: %s, troubles may occur with update. Please, enable account first.", acc.Status)
case status.Confirmed:
}
if hasChanged {
acc, err = utilityAccountCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
}
if d.HasChange("enable") {
if err := utilityAccountEnableUpdate(ctx, d, m, acc); err != nil {
return diag.FromErr(err)
}
}
if d.HasChanges("account_name", "send_access_emails", "uniq_pools", "resource_limits") {
if err := utilityAccountUpdate(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("cpu_allocation_parameter") {
if err := utilityAccountCPUParameterUpdate(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("cpu_allocation_ratio") {
if err := utilityAccountCPURatioUpdate(ctx, d, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("users") {
if err := utilityAccountUsersUpdate(ctx, d, m, acc); err != nil {
return diag.FromErr(err)
}
}
return resourceAccountRead(ctx, d, m)
}
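// ResourceAccount defines the schema, CRUD handlers and timeouts for the cloudbroker
// account resource and supports import via the standard passthrough importer.
//
// A minimal configuration sketch (the resource type name is an assumption, check the
// provider registration for the exact name):
//
//	resource "decort_cb_account" "example" {
//	  account_name = "my_account"
//	  username     = "some_user"
//	}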
func ResourceAccount() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceAccountCreate,
ReadContext: resourceAccountRead,
UpdateContext: resourceAccountUpdate,
DeleteContext: resourceAccountDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s,
Read: &constants.Timeout30s,
Update: &constants.Timeout60s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: resourceAccountSchemaMake(),
}
}

@ -34,10 +34,12 @@ package account
import (
"context"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
@ -63,3 +65,252 @@ func utilityAccountCheckPresence(ctx context.Context, d *schema.ResourceData, m
return account, nil
}
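// utilityAccountEnableUpdate reconciles the "enable" flag with the current account
// status: it enables a disabled account or disables an enabled one, and is a no-op otherwise.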
func utilityAccountEnableUpdate(ctx context.Context, d *schema.ResourceData, m interface{}, acc *account.RecordAccount) error {
c := m.(*controller.ControllerCfg)
enable := d.Get("enable").(bool)
if enable && acc.Status == status.Disabled {
_, err := c.CloudBroker().Account().Enable(ctx, account.EnableRequest{
AccountID: acc.ID,
})
if err != nil {
return err
}
} else if !enable && acc.Status == status.Enabled {
_, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{
AccountID: acc.ID,
})
if err != nil {
return err
}
}
return nil
}
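// utilityAccountCPUParameterUpdate pushes the configured "cpu_allocation_parameter"
// (strict/loose) to the platform for the account taken from the resource ID.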
func utilityAccountCPUParameterUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
accountId, _ := strconv.ParseUint(d.Id(), 10, 64)
cpuAllocationParameter := d.Get("cpu_allocation_parameter").(string)
_, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, account.SetCPUAllocationParameterRequest{
AccountID: accountId,
StrictLoose: cpuAllocationParameter,
})
if err != nil {
return err
}
return nil
}
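// utilityAccountCPURatioUpdate pushes the configured "cpu_allocation_ratio" to the
// platform for the account taken from the resource ID.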
func utilityAccountCPURatioUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
accountId, _ := strconv.ParseUint(d.Id(), 10, 64)
cpuAllocationRatio := d.Get("cpu_allocation_ratio").(float64)
_, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, account.SetCPUAllocationRatioRequest{
AccountID: accountId,
Ratio: cpuAllocationRatio,
})
if err != nil {
return err
}
return nil
}
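// utilityAccountUsersUpdate diffs the old and new "users" lists: removed entries are
// deleted, new entries not yet present in the account ACL are added, and entries detected
// as changed are sent via UpdateUser.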
func utilityAccountUsersUpdate(ctx context.Context, d *schema.ResourceData, m interface{}, acc *account.RecordAccount) error {
c := m.(*controller.ControllerCfg)
deletedUsers := make([]interface{}, 0)
addedUsers := make([]interface{}, 0)
updatedUsers := make([]interface{}, 0)
old, new := d.GetChange("users")
oldConv := old.([]interface{})
newConv := new.([]interface{})
for _, el := range oldConv {
if !isContainsUser(newConv, el) {
deletedUsers = append(deletedUsers, el)
}
}
for _, el := range newConv {
if !isContainsUser(oldConv, el) {
duplicate := false
for _, user := range acc.ACL {
if user.UserGroupID == el.(map[string]interface{})["user_id"].(string) {
duplicate = true
}
}
if !duplicate {
addedUsers = append(addedUsers, el)
} else if isChangedUser(oldConv, el) {
updatedUsers = append(updatedUsers, el)
}
}
}
for _, user := range deletedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().DeleteUser(ctx, account.DeleteUserRequest{
AccountID: acc.ID,
UserName: userConv["user_id"].(string),
RecursiveDelete: userConv["recursive_delete"].(bool),
})
if err != nil {
return err
}
}
for _, user := range addedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().AddUser(ctx, account.AddUserRequest{
AccountID: acc.ID,
Username: userConv["user_id"].(string),
AccessType: strings.ToUpper(userConv["access_type"].(string)),
})
if err != nil {
return err
}
}
for _, user := range updatedUsers {
userConv := user.(map[string]interface{})
_, err := c.CloudBroker().Account().UpdateUser(ctx, account.UpdateUserRequest{
AccountID: acc.ID,
UserID: userConv["user_id"].(string),
AccessType: strings.ToUpper(userConv["access_type"].(string)),
})
if err != nil {
return err
}
}
return nil
}
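// utilityAccountUpdate builds a single UpdateRequest from the changed base attributes
// (name, send_access_emails, uniq_pools, resource_limits) and sends it; as on create,
// a limit of 0 is translated to -1 (unlimited).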
func utilityAccountUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
accountId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := account.UpdateRequest{
AccountID: accountId,
}
if d.HasChange("account_name") {
req.Name = d.Get("account_name").(string)
}
if d.HasChange("send_access_emails") {
req.SendAccessEmails = d.Get("send_access_emails").(bool)
}
if d.HasChange("uniq_pools") {
uniq_pools := d.Get("uniq_pools").([]interface{})
for _, pool := range uniq_pools {
req.UniqPools = append(req.UniqPools, pool.(string))
}
}
if d.HasChange("resource_limits") {
resLimit := d.Get("resource_limits").([]interface{})[0]
resLimitConv := resLimit.(map[string]interface{})
if resLimitConv["cu_m"] != nil {
maxMemCap := int(resLimitConv["cu_m"].(float64))
if maxMemCap == 0 {
req.MaxMemoryCapacity = -1
} else {
req.MaxMemoryCapacity = int64(maxMemCap)
}
}
if resLimitConv["cu_dm"] != nil {
maxDiskCap := int(resLimitConv["cu_dm"].(float64))
if maxDiskCap == 0 {
req.MaxVDiskCapacity = -1
} else {
req.MaxVDiskCapacity = int64(maxDiskCap)
}
}
if resLimitConv["cu_c"] != nil {
maxCPUCap := int(resLimitConv["cu_c"].(float64))
if maxCPUCap == 0 {
req.MaxCPUCapacity = -1
} else {
req.MaxCPUCapacity = int64(maxCPUCap)
}
}
if resLimitConv["cu_i"] != nil {
maxNumPublicIP := int(resLimitConv["cu_i"].(float64))
if maxNumPublicIP == 0 {
req.MaxNumPublicIP = -1
} else {
req.MaxNumPublicIP = int64(maxNumPublicIP)
}
}
if resLimitConv["cu_np"] != nil {
maxNP := int(resLimitConv["cu_np"].(float64))
if maxNP == 0 {
req.MaxNetworkPeerTransfer = -1
} else {
req.MaxNetworkPeerTransfer = int64(maxNP)
}
}
if resLimitConv["gpu_units"] != nil {
gpuUnits := int(resLimitConv["gpu_units"].(float64))
if gpuUnits == 0 {
req.GPUUnits = -1
} else {
req.GPUUnits = int64(gpuUnits)
}
}
}
_, err := c.CloudBroker().Account().Update(ctx, req)
if err != nil {
return err
}
return nil
}
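// isContainsUser reports whether a user with the same user_id is present in the given list.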
func isContainsUser(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) {
return true
}
}
return false
}
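// isChangedUser reports whether the user exists in the list but with a different
// access_type or recursive_delete value.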
func isChangedUser(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})
elConv := el.(map[string]interface{})
if elOldConv["user_id"].(string) == elConv["user_id"].(string) &&
(!strings.EqualFold(elOldConv["access_type"].(string), elConv["access_type"].(string)) ||
elOldConv["recursive_delete"].(bool) != elConv["recursive_delete"].(bool)) {
return true
}
}
return false
}

@ -46,6 +46,7 @@ import (
func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -57,299 +58,6 @@ func dataSourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface
return nil
}
func dataSourceDiskSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"iotune": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
},
"order": {
Type: schema.TypeInt,
Computed: true,
},
"params": {
Type: schema.TypeString,
Computed: true,
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
},
"shareable": {
Type: schema.TypeString,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"label": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
},
}
return rets
}
func DataSourceDisk() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -1,442 +1,71 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskList, err := utilityDiskListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDiskList(diskList))
d.Set("entry_count", diskList.EntryCount)
return nil
}
func dataSourceDiskListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by name",
},
"account_name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by account name",
},
"disk_max_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by max disk size",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"shared": {
Type: schema.TypeBool,
Optional: true,
Description: "Find by shared field",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the account the disks belong to",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "type of the disks",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by sep id",
},
"pool": {
Type: schema.TypeString,
Optional: true,
Description: "Find by pool name",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Computed: true,
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"iotune": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"machine_id": {
Type: schema.TypeInt,
Computed: true,
},
"machine_name": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
},
"order": {
Type: schema.TypeInt,
Computed: true,
},
"params": {
Type: schema.TypeString,
Computed: true,
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Computed: true,
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"label": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceDiskList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDiskListSchemaMake(),
}
}
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package disks
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
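// dataSourceDiskListRead reads the filtered disk list and stores the flattened items
// and entry_count in state, clearing the ID on lookup errors.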
func dataSourceDiskListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskList, err := utilityDiskListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenDiskList(diskList))
d.Set("entry_count", diskList.EntryCount)
return nil
}
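// DataSourceDiskList describes the data source that lists disks; the schema is built by
// dataSourceDiskListSchemaMake and reads use the shared 30s/60s timeouts.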
func DataSourceDiskList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceDiskListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceDiskListSchemaMake(),
}
}

@ -12,6 +12,7 @@ import (
func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -23,24 +24,6 @@ func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m
return nil
}
func dataSourceDiskListTypesSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceDiskListTypes() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -12,6 +12,7 @@ import (
func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -32,53 +33,6 @@ func DataSourceDiskListTypesDetailed() *schema.Resource {
Default: &constants.Timeout60s,
},
Schema: map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"pools": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Pool name",
},
"system": {
Type: schema.TypeString,
Computed: true,
},
"types": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "The types of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
},
},
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Storage endpoint provider ID to create disk",
},
"sep_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
},
Schema: dataSourceDiskListTypesDetailedSchemaMake(),
}
}

@ -45,6 +45,7 @@ import (
func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -70,401 +71,3 @@ func DataSourceDiskListUnattached() *schema.Resource {
Schema: dataSourceDiskListUnattachedSchemaMake(),
}
}
func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"account_name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by account name",
},
"disk_max_size": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by max disk size",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"type": {
Type: schema.TypeString,
Optional: true,
Description: "type of the disks",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the account the disks belong to",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of SEP",
},
"pool": {
Type: schema.TypeString,
Optional: true,
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"_ckey": {
Type: schema.TypeString,
Computed: true,
Description: "CKey",
},
"_meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "Meta parameters",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the account the disks belong to",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "The name of the subscriber '(account') to whom this disk belongs",
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of disk partitions",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Created time",
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Deleted time",
},
"desc": {
Type: schema.TypeString,
Computed: true,
Description: "Description of disk",
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of final deletion",
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
Description: "Disk path",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the grid (platform)",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk ID on the storage side",
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Image ID",
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "IDs of images using the disk",
},
"iotune": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of bytes to read per second",
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of bytes to read",
},
"read_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of io read operations per second",
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of io read operations",
},
"size_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Size of io operations",
},
"total_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Total size bytes per second",
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum total size of bytes per second",
},
"total_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Total number of io operations per second",
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum total number of io operations per second",
},
"write_bytes_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of bytes to write per second",
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of bytes to write per second",
},
"write_iops_sec": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of write operations per second",
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Maximum number of write operations per second",
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
Description: "Disk IQN",
},
"login": {
Type: schema.TypeString,
Computed: true,
Description: "Login to access the disk",
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
Description: "Milestones",
},
"disk_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of disk",
},
"order": {
Type: schema.TypeInt,
Computed: true,
Description: "Disk order",
},
"params": {
Type: schema.TypeString,
Computed: true,
Description: "Disk params",
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the parent disk",
},
"passwd": {
Type: schema.TypeString,
Computed: true,
Description: "Password to access the disk",
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the pci slot to which the disk is connected",
},
"pool": {
Type: schema.TypeString,
Computed: true,
Description: "Pool for disk location",
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of deletion attempts",
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
Description: "Time of the last deletion attempt",
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
Description: "Reality device number",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the reference to the disk",
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Resource ID",
},
"res_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the resource",
},
"role": {
Type: schema.TypeString,
Computed: true,
Description: "Disk role",
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
Description: "Storage endpoint provider ID to create disk",
},
"shareable": {
Type: schema.TypeBool,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Computed: true,
Description: "Size in GB",
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
Description: "Number of used space, in GB",
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"label": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "Disk status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "Technical status of the disk",
},
"type": {
Type: schema.TypeString,
Computed: true,
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
Description: "Virtual Machine ID (Deprecated)",
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}

@ -45,6 +45,7 @@ import (
func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -82,48 +83,3 @@ func DataSourceDiskSnapshot() *schema.Resource {
Schema: dataSourceDiskSnapshotSchemaMake(),
}
}
func dataSourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": {
Type: schema.TypeString,
Required: true,
Description: "Name of the snapshot",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
}
return rets
}

@ -44,6 +44,7 @@ import (
func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -67,56 +68,3 @@ func DataSourceDiskSnapshotList() *schema.Resource {
Schema: dataSourceDiskSnapshotListSchemaMake(),
}
}
func dataSourceDiskSnapshotListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"label": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the snapshot",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
Description: "Snapshot time",
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
},
},
},
}
return rets
}

@ -106,12 +106,11 @@ func flattenDiskList(dl *disks.ListDisks) []map[string]interface{} {
for _, disk := range dl.Data {
diskAcl, _ := json.Marshal(disk.ACL)
temp := map[string]interface{}{
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
"boot_partition": disk.BootPartition,
// "compute_id": disk.MachineID,
// "compute_name": disk.MachineName,
"account_id": disk.AccountID,
"account_name": disk.AccountName,
"acl": string(diskAcl),
"boot_partition": disk.BootPartition,
"computes": flattenDiskComputes(disk.Computes),
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"desc": disk.Description,

@ -2,54 +2,27 @@ package disks
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
accountID := uint64(d.Get("account_id").(int))
accountList, err := c.CloudBroker().Account().List(ctx, account.ListRequest{})
if err != nil {
return err
}
if len(accountList.FilterByID(accountID).Data) == 0 {
return fmt.Errorf("resourceDiskCreate: can't create/update Disk because AccountID %d is not allowed or does not exist", accountID)
}
return nil
}
func existGID(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
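// checkParamsExistence verifies that the account_id and gid referenced by the
// resource exist on the platform and converts any collected errors into diagnostics.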
func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
var errs []error
accountID := uint64(d.Get("account_id").(int))
gid := uint64(d.Get("gid").(int))
gidList, err := c.CloudBroker().Grid().List(ctx, grid.ListRequest{})
if err != nil {
return err
}
for _, elem := range gidList.Data {
if elem.GID == gid {
return nil
}
if err := ic.ExistAccount(ctx, accountID, c); err != nil {
errs = append(errs, err)
}
return fmt.Errorf("resourceDiskCreate: can't create/update Disk because GID %d is not allowed or does not exist", gid)
}
func checkParamsExists(ctx context.Context, d *schema.ResourceData, m interface{}) error {
err := existAccountID(ctx, d, m)
if err != nil {
return err
if err := ic.ExistGID(ctx, gid, c); err != nil {
errs = append(errs, err)
}
return existGID(ctx, d, m)
return dc.ErrorsToDiagnostics(errs)
}

@ -44,16 +44,15 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus"
)
func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDiskCreate: called for disk %s", d.Get("disk_name").(string))
c := m.(*controller.ControllerCfg)
err := checkParamsExists(ctx, d, m)
if err != nil {
return diag.FromErr(err)
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
req := disks.CreateRequest{
@ -72,6 +71,10 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
req.SSDSize = uint64(ssdSize.(int))
}
if iops, ok := d.GetOk("iops"); ok {
req.IOPS = uint64(iops.(int))
}
if sepID, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepID.(int))
}
@ -82,45 +85,23 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
diskID, err := c.CloudBroker().Disks().Create(ctx, req)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(diskID, 10))
d.Set("disk_id", diskID)
w := dc.Warnings{}
if iotuneRaw, ok := d.GetOk("iotune"); ok {
iot := iotuneRaw.([]interface{})[0]
iotune := iot.(map[string]interface{})
req := disks.LimitIORequest{
DiskID: diskID,
IOPS: uint64(iotune["total_iops_sec"].(int)),
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
if err != nil {
if _, ok := d.GetOk("iotune"); ok {
if err := resourceDiskChangeIotune(ctx, d, m); err != nil {
w.Add(err)
}
}
if shareable := d.Get("shareable"); shareable.(bool) {
_, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{
DiskID: diskID,
})
if err != nil {
if err := resourceDiskChangeShareable(ctx, d, m); err != nil {
w.Add(err)
}
}
@ -129,7 +110,7 @@ func resourceDiskCreate(ctx context.Context, d *schema.ResourceData, m interface
}
func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
log.Debugf("resourceDiskRead: called for disk_id %d", d.Get("disk_id").(int))
w := dc.Warnings{}
disk, err := utilityDiskCheckPresence(ctx, d, m)
@ -144,23 +125,13 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
case status.Destroyed, status.Purged:
d.Set("disk_id", 0)
d.SetId("")
return resourceDiskCreate(ctx, d, m)
return diag.Errorf("The resource cannot be read because it has been destroyed")
//return resourceDiskCreate(ctx, d, m)
case status.Deleted:
hasChangeState = true
req := disks.RestoreRequest{
DiskID: disk.ID,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
if err != nil {
w.Add(err)
}
//hasChangeState = true
//if err := resourceDiskRestore(ctx, d, m); err != nil {
// w.Add(err)
//}
case status.Assigned:
case status.Modeled:
return diag.Errorf("The disk is in status: %s, please, contact support for more information", disk.Status)
@ -173,6 +144,7 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
if hasChangeState {
disk, err = utilityDiskCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
}
@ -183,12 +155,12 @@ func resourceDiskRead(ctx context.Context, d *schema.ResourceData, m interface{}
}
func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDiskUpdate: called for disk_id %d", d.Get("disk_id").(int))
c := m.(*controller.ControllerCfg)
w := dc.Warnings{}
err := checkParamsExists(ctx, d, m)
if err != nil {
return diag.FromErr(err)
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
disk, err := utilityDiskCheckPresence(ctx, d, m)
@ -203,21 +175,11 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
case status.Destroyed, status.Purged:
d.Set("disk_id", 0)
d.SetId("")
return resourceDiskCreate(ctx, d, m)
return diag.Errorf("The resource cannot be updated because it has been destroyed")
//return resourceDiskCreate(ctx, d, m)
case status.Deleted:
hasChangeState = true
req := disks.RestoreRequest{
DiskID: disk.ID,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
if err != nil {
if err := resourceDiskRestore(ctx, d, m); err != nil {
return diag.FromErr(err)
}
case status.Assigned:
@ -239,75 +201,32 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
if d.HasChange("size_max") {
oldSize, newSize := d.GetChange("size_max")
if oldSize.(int) < newSize.(int) {
log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB",
d.Id(), oldSize.(int), newSize.(int))
_, err := c.CloudBroker().Disks().Resize(ctx, disks.ResizeRequest{
DiskID: disk.ID,
Size: uint64(newSize.(int)),
})
if err != nil {
w.Add(err)
}
} else if oldSize.(int) > newSize.(int) {
if oldSize.(int) > newSize.(int) {
return diag.FromErr(fmt.Errorf("resourceDiskUpdate: Disk ID %s - reducing disk size is not allowed", d.Id()))
}
log.Debugf("resourceDiskUpdate: resizing disk ID %s - %d GB -> %d GB",
d.Id(), oldSize.(int), newSize.(int))
if err := resourceDiskChangeSize(ctx, d, m); err != nil {
w.Add(err)
}
}
if d.HasChange("disk_name") {
_, err := c.CloudBroker().Disks().Rename(ctx, disks.RenameRequest{
DiskID: disk.ID,
Name: d.Get("disk_name").(string),
})
if err != nil {
if err := resourceDiskChangeDiskName(ctx, d, m); err != nil {
w.Add(err)
}
}
if d.HasChange("iotune") {
iot := d.Get("iotune").([]interface{})[0]
iotune := iot.(map[string]interface{})
req := disks.LimitIORequest{
IOPS: uint64(iotune["total_iops_sec"].(int)),
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
if err != nil {
if err := resourceDiskChangeIotune(ctx, d, m); err != nil {
w.Add(err)
}
}
if d.HasChange("shareable") {
old, new := d.GetChange("shareable")
if !old.(bool) && new.(bool) && !disk.Shareable {
_, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{
DiskID: disk.ID,
})
if err != nil {
w.Add(err)
}
}
if old.(bool) && !new.(bool) && disk.Shareable {
_, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{
DiskID: disk.ID,
})
if err != nil {
w.Add(err)
}
if err := resourceDiskChangeShareable(ctx, d, m); err != nil {
w.Add(err)
}
}
@ -315,6 +234,7 @@ func resourceDiskUpdate(ctx context.Context, d *schema.ResourceData, m interface
}
func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceDiskDelete: called for disk_id %d", d.Get("disk_id").(int))
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
@ -340,349 +260,91 @@ func resourceDiskDelete(ctx context.Context, d *schema.ResourceData, m interface
return nil
}
func resourceDiskSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"gid": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"disk_name": {
Type: schema.TypeString,
Required: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"D", "B", "T"}, false),
Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"size_max": {
Type: schema.TypeInt,
Required: true,
},
"ssd_size": {
Type: schema.TypeInt,
Optional: true,
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"pool": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"detach": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "detach disk from machine first",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "whether to completely delete the disk, works only with non attached disks",
},
"reason": {
Type: schema.TypeString,
Optional: true,
Default: "",
Description: "reason for an action",
},
"shareable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"restore": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "restore deleting disk",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
},
"acl": {
Type: schema.TypeString,
Computed: true,
},
"boot_partition": {
Type: schema.TypeInt,
Computed: true,
},
"computes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_id": {
Type: schema.TypeString,
Computed: true,
},
"compute_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
},
"destruction_time": {
Type: schema.TypeInt,
Computed: true,
},
"devicename": {
Type: schema.TypeString,
Computed: true,
},
"disk_path": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"disk_id": {
Type: schema.TypeInt,
Computed: true,
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"iotune": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"read_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"read_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"size_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"total_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_bytes_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"write_iops_sec_max": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
},
},
"iqn": {
Type: schema.TypeString,
Computed: true,
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
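// resourceDiskChangeIotune applies the limits from the first "iotune" block to the
// disk via a LimitIO request; IOPS is taken from "total_iops_sec" when present,
// otherwise from the top-level "iops" field.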
func resourceDiskChangeIotune(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
"order": {
Type: schema.TypeInt,
Computed: true,
},
"params": {
Type: schema.TypeString,
Computed: true,
},
"parent_id": {
Type: schema.TypeInt,
Computed: true,
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"pci_slot": {
Type: schema.TypeInt,
Computed: true,
},
"present_to": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"purge_time": {
Type: schema.TypeInt,
Computed: true,
},
"reality_device_number": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"role": {
Type: schema.TypeString,
Computed: true,
},
"sep_type": {
Type: schema.TypeString,
Computed: true,
},
"size_used": {
Type: schema.TypeFloat,
Computed: true,
},
"snapshots": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"label": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
},
"vmid": {
Type: schema.TypeInt,
Computed: true,
},
iotuneRaw := d.Get("iotune")
diskId := uint64(d.Get("disk_id").(int))
iot := iotuneRaw.([]interface{})[0]
iotune := iot.(map[string]interface{})
req := disks.LimitIORequest{
DiskID: diskId,
ReadBytesSec: uint64(iotune["read_bytes_sec"].(int)),
ReadBytesSecMax: uint64(iotune["read_bytes_sec_max"].(int)),
ReadIOPSSec: uint64(iotune["read_iops_sec"].(int)),
ReadIOPSSecMax: uint64(iotune["read_iops_sec_max"].(int)),
SizeIOPSSec: uint64(iotune["size_iops_sec"].(int)),
TotalBytesSec: uint64(iotune["total_bytes_sec"].(int)),
TotalBytesSecMax: uint64(iotune["total_bytes_sec_max"].(int)),
TotalIOPSSecMax: uint64(iotune["total_iops_sec_max"].(int)),
TotalIOPSSec: uint64(iotune["total_iops_sec"].(int)),
WriteBytesSec: uint64(iotune["write_bytes_sec"].(int)),
WriteBytesSecMax: uint64(iotune["write_bytes_sec_max"].(int)),
WriteIOPSSec: uint64(iotune["write_iops_sec"].(int)),
WriteIOPSSecMax: uint64(iotune["write_iops_sec_max"].(int)),
}
if _, ok := iotune["total_iops_sec"]; ok {
req.IOPS = uint64(iotune["total_iops_sec"].(int))
} else if _, ok := d.GetOk("iops"); ok {
req.IOPS = uint64(d.Get("iops").(int))
}
_, err := c.CloudBroker().Disks().LimitIO(ctx, req)
return err
}
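// resourceDiskChangeShareable shares or unshares the disk according to the current
// value of the "shareable" field.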
func resourceDiskChangeShareable(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
diskId := uint64(d.Get("disk_id").(int))
shareable := d.Get("shareable").(bool)
if shareable {
_, err := c.CloudBroker().Disks().Share(ctx, disks.ShareRequest{DiskID: diskId})
return err
}
return rets
_, err := c.CloudBroker().Disks().Unshare(ctx, disks.UnshareRequest{DiskID: diskId})
return err
}
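// resourceDiskRestore restores a deleted disk, using the "reason" field if set and
// the default "Terraform automatic restore" reason otherwise.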
func resourceDiskRestore(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := disks.RestoreRequest{
DiskID: uint64(d.Get("disk_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
} else {
req.Reason = "Terraform automatic restore"
}
_, err := c.CloudBroker().Disks().Restore(ctx, req)
return err
}
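// resourceDiskChangeDiskName renames the disk to the current value of "disk_name".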
func resourceDiskChangeDiskName(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
_, err := c.CloudBroker().Disks().Rename(ctx, disks.RenameRequest{
DiskID: uint64(d.Get("disk_id").(int)),
Name: d.Get("disk_name").(string),
})
return err
}
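// resourceDiskChangeSize resizes the disk to the current "size_max" value using the Resize2 call.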
func resourceDiskChangeSize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
_, err := c.CloudBroker().Disks().Resize2(ctx, disks.ResizeRequest{
DiskID: uint64(d.Get("disk_id").(int)),
Size: uint64(d.Get("size_max").(int)),
})
return err
}
func ResourceDisk() *schema.Resource {

@ -2,6 +2,7 @@ package disks
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@ -12,35 +13,21 @@ import (
)
func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
log.Debugf("resourceDiskSnapshotCreate: call for disk_id %d, label %s",
d.Get("disk_id").(int),
d.Get("label").(string))
disk, err := utilityDiskCheckPresence(ctx, d, m)
_, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := disks.ItemSnapshot{}
diskId := uint64(d.Get("disk_id").(int))
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
d.SetId(fmt.Sprintf("%d#%s", diskId, label))
if rollback := d.Get("rollback").(bool); rollback {
req := disks.SnapshotRollbackRequest{
DiskID: disk.ID,
Label: label,
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskCreate: Snapshot rollback with label", label)
_, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req)
err := resourceDiskSnapshotChangeRollback(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
@ -49,61 +36,30 @@ func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m i
}
func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
disk, err := utilityDiskCheckPresence(ctx, d, m)
log.Debugf("resourceDiskSnapshotRead: snapshot id %s", d.Id())
snapshot, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
flattenDiskSnapshot(d, snapshot)
return nil
}
func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
disk, err := utilityDiskCheckPresence(ctx, d, m)
log.Debugf("resourceDiskSnapshotUpdate: snapshot id %s", d.Id())
_, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
snapshots := disk.Snapshots
snapshot := disks.ItemSnapshot{}
label := d.Get("label").(string)
for _, sn := range snapshots {
if label == sn.Label {
snapshot = sn
break
}
}
if label != snapshot.Label {
return diag.Errorf("Snapshot with label \"%v\" not found", label)
}
if d.HasChange("rollback") && d.Get("rollback").(bool) == true {
req := disks.SnapshotRollbackRequest{
DiskID: disk.ID,
Label: label,
TimeStamp: uint64(d.Get("timestamp").(int)),
}
log.Debugf("resourceDiskUpdtae: Snapshot rollback with label", label)
_, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req)
if d.HasChange("rollback") {
err := resourceDiskSnapshotChangeRollback(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
@ -113,16 +69,18 @@ func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m i
}
func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
c := m.(*controller.ControllerCfg)
log.Debugf("resourceDiskSnapshotDelete: snapshot id %s", d.Id())
disk, err := utilityDiskCheckPresence(ctx, d, m)
_, err := utilityDiskSnapshotCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := disks.SnapshotDeleteRequest{
DiskID: disk.ID,
DiskID: uint64(d.Get("disk_id").(int)),
Label: d.Get("label").(string),
}
@ -136,6 +94,31 @@ func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m i
return nil
}
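// resourceDiskSnapshotChangeRollback rolls the disk back to the snapshot identified
// by "label" and "timestamp" when the "rollback" flag is set; otherwise it does nothing.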
func resourceDiskSnapshotChangeRollback(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
rollback := d.Get("rollback").(bool)
if rollback {
label := d.Get("label").(string)
timestamp := uint64(d.Get("timestamp").(int))
diskId := uint64(d.Get("disk_id").(int))
req := disks.SnapshotRollbackRequest{
DiskID: diskId,
Label: label,
TimeStamp: timestamp,
}
log.Debugf("resourceDiskUpdate: Snapshot rollback with label %s", label)
if _, err := c.CloudBroker().Disks().SnapshotRollback(ctx, req); err != nil {
return err
}
}
return nil
}
func ResourceDiskSnapshot() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
@ -160,56 +143,3 @@ func ResourceDiskSnapshot() *schema.Resource {
Schema: resourceDiskSnapshotSchemaMake(),
}
}
func resourceDiskSnapshotSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"disk_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "The unique ID of the subscriber-owner of the disk",
},
"label": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Name of the snapshot",
},
"rollback": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Needed in order to make a snapshot rollback",
},
"timestamp": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Snapshot time",
},
"guid": {
Type: schema.TypeString,
Computed: true,
Description: "ID of the snapshot",
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
Description: "Reference to the snapshot",
},
"snap_set_guid": {
Type: schema.TypeString,
Computed: true,
Description: "The set snapshot ID",
},
"snap_set_time": {
Type: schema.TypeInt,
Computed: true,
Description: "The set time of the snapshot",
},
}
}

@ -75,7 +75,7 @@ func utilityDiskListDeletedCheckPresence(ctx context.Context, d *schema.Resource
req.Size = uint64(size.(int))
}
log.Debugf("utilityDiskListDeletedCheckPresence: load disk list")
log.Debugf("utilityDiskListDeletedCheckPresence: load disk list deleted")
diskList, err := c.CloudBroker().Disks().ListDeleted(ctx, req)
if err != nil {
return nil, err

@ -15,6 +15,13 @@ func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceDa
Detailed: false,
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDiskListTypesCheckPresence: load disk list Types Detailed")
typesList, err := c.CloudBroker().Disks().ListTypes(ctx, req)
if err != nil {

@ -12,10 +12,18 @@ import (
func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*disks.ListTypes, error) {
c := m.(*controller.ControllerCfg)
log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed")
listTypesDetailed, err := c.CloudBroker().Disks().ListTypes(ctx, disks.ListTypesRequest{
req := disks.ListTypesRequest{
Detailed: true,
})
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed")
listTypesDetailed, err := c.CloudBroker().Disks().ListTypes(ctx, req)
log.Debugf("%+v", listTypesDetailed.Data[0].(map[string]interface{}))

@ -28,7 +28,7 @@ func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.Resou
if diskType, ok := d.GetOk("type"); ok {
req.Type = diskType.(string)
}
if accountId, ok := d.GetOk("accountId"); ok {
if accountId, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(accountId.(int))
}
if sepId, ok := d.GetOk("sep_id"); ok {

@ -45,6 +45,7 @@ import (
func dataSourceExtnetRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
net, err := utilityExtnetCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -54,232 +55,6 @@ func dataSourceExtnetRead(ctx context.Context, d *schema.ResourceData, m interfa
return nil
}
func dataSourceExtnetSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"extnet_id": {
Type: schema.TypeInt,
Required: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"default": {
Type: schema.TypeBool,
Computed: true,
},
"default_qos": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"e_rate": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"in_burst": {
Type: schema.TypeInt,
Computed: true,
},
"in_rate": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"free_ips": {
Type: schema.TypeInt,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"ipcidr": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"network_id": {
Type: schema.TypeInt,
Computed: true,
},
"ovs_bridge": {
Type: schema.TypeString,
Computed: true,
},
"pre_reservations_num": {
Type: schema.TypeInt,
Computed: true,
},
"pri_vnfdev_id": {
Type: schema.TypeInt,
Computed: true,
},
"shared_with": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"vlan_id": {
Type: schema.TypeInt,
Computed: true,
},
"vnfs": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dhcp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"check_ips": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"dns": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"excluded": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"client_type": {
Type: schema.TypeString,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"domain_name": {
Type: schema.TypeString,
Computed: true,
},
"hostname": {
Type: schema.TypeString,
Computed: true,
},
"ip": {
Type: schema.TypeString,
Computed: true,
},
"mac": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vm_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"gateway": {
Type: schema.TypeString,
Computed: true,
},
"network": {
Type: schema.TypeString,
Computed: true,
},
"prefix": {
Type: schema.TypeInt,
Computed: true,
},
"reservations": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"client_type": {
Type: schema.TypeString,
Computed: true,
},
"domain_name": {
Type: schema.TypeString,
Computed: true,
},
"hostname": {
Type: schema.TypeString,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"ip": {
Type: schema.TypeString,
Computed: true,
},
"mac": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vm_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
}
}
func DataSourceExtnetCB() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -44,26 +44,17 @@ import (
func dataSourceExtnetDefaultRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
extnetId, err := utilityExtnetDefaultCheckPresence(ctx, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("extnet_id", extnetId)
return nil
}
func dataSourceExtnetDefaultSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"extnet_id": {
Type: schema.TypeInt,
Computed: true,
},
}
}
func DataSourceExtnetDefaultCB() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -45,6 +45,7 @@ import (
func dataSourceExtnetListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
netList, err := utilityExtnetListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -56,189 +57,6 @@ func dataSourceExtnetListRead(ctx context.Context, d *schema.ResourceData, m int
return nil
}
func dataSourceExtnetListSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by account ID",
},
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by ID",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Find by name",
},
"network": {
Type: schema.TypeString,
Optional: true,
},
"vlan_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by VLAN ID",
},
"vnfdev_id": {
Type: schema.TypeInt,
Optional: true,
Description: "Find by VnfDEV ID",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "Find by status",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"default": {
Type: schema.TypeBool,
Computed: true,
},
"default_qos": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"e_rate": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"in_burst": {
Type: schema.TypeInt,
Computed: true,
},
"in_rate": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"free_ips": {
Type: schema.TypeInt,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"extnet_id": {
Type: schema.TypeInt,
Computed: true,
},
"ipcidr": {
Type: schema.TypeString,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"network_id": {
Type: schema.TypeInt,
Computed: true,
},
"ovs_bridge": {
Type: schema.TypeString,
Computed: true,
},
"pre_reservations_num": {
Type: schema.TypeInt,
Computed: true,
},
"pri_vnfdev_id": {
Type: schema.TypeInt,
Computed: true,
},
"shared_with": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"vlan_id": {
Type: schema.TypeInt,
Computed: true,
},
"vnfs": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dhcp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"check_ips": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
}
func DataSourceExtnetListCB() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -44,6 +44,7 @@ import (
func dataSourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
staticRoute, err := utilityDataStaticRouteCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -52,45 +53,6 @@ func dataSourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m in
return nil
}
func dataSourceStaticRouteSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"extnet_id": {
Type: schema.TypeInt,
Required: true,
Description: "Unique ID of the ExtNet",
},
"route_id": {
Type: schema.TypeInt,
Required: true,
Description: "Unique ID of the static route",
},
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"destination": {
Type: schema.TypeString,
Computed: true,
},
"gateway": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"netmask": {
Type: schema.TypeString,
Computed: true,
},
}
return rets
}
func DataSourceStaticRoute() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -44,6 +44,7 @@ import (
func dataSourceStaticRouteListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
staticRouteList, err := utilityStaticRouteListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -55,56 +56,6 @@ func dataSourceStaticRouteListRead(ctx context.Context, d *schema.ResourceData,
return nil
}
func dataSourceStaticRouteListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"extnet_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of ExtNet",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"destination": {
Type: schema.TypeString,
Computed: true,
},
"gateway": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"netmask": {
Type: schema.TypeString,
Computed: true,
},
"route_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
},
}
return res
}
func DataSourceStaticRouteList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -43,6 +43,7 @@ import (
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
)
@ -130,6 +131,8 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa
d.Set("extnet_id", netID)
log.Debugf("cloudbroker: Extnet with id %d successfully created on platform", netID)
var w dc.Warnings
if d.Get("excluded_ips").(*schema.Set).Len() > 0 {
ips := make([]string, 0)
@ -144,7 +147,36 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa
_, err := c.CloudBroker().ExtNet().IPsExclude(ctx, req)
if err != nil {
return diag.FromErr(err)
w.Add(err)
}
}
if d.Get("shared_with").(*schema.Set).Len() > 0 {
for _, id := range d.Get("shared_with").(*schema.Set).List() {
req := extnet.AccessRemoveRequest{
NetID: uint64(d.Get("extnet_id").(int)),
AccountID: uint64(id.(int)),
}
_, err := c.CloudBroker().ExtNet().AccessRemove(ctx, req)
if err != nil {
w.Add(err)
}
}
}
if d.Get("excluded_ips_range").(*schema.Set).Len() > 0 {
for _, ip := range d.Get("excluded_ips_range").(*schema.Set).List() {
req := extnet.IPsExcludeRangeRequest{
NetID: uint64(d.Get("extnet_id").(int)),
IPStart: ip.(map[string]interface{})["ip_start"].(string),
IPEnd: ip.(map[string]interface{})["ip_end"].(string),
}
_, err := c.CloudBroker().ExtNet().IPsExcludeRange(ctx, req)
if err != nil {
w.Add(err)
}
}
}
@ -155,7 +187,7 @@ func resourceExtnetCreate(ctx context.Context, d *schema.ResourceData, m interfa
})
if err != nil {
return diag.FromErr(err)
w.Add(err)
}
}
@ -257,7 +289,7 @@ func resourceExtnetUpdate(ctx context.Context, d *schema.ResourceData, m interfa
return diag.FromErr(err)
}
}
return resourceExtnetRead(ctx, d, m)
}
@ -277,360 +309,6 @@ func resourceExtnetDelete(ctx context.Context, d *schema.ResourceData, m interfa
return nil
}
func resourceExtnetSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
Description: "External network name",
},
"gid": {
Type: schema.TypeInt,
Required: true,
Description: "Grid (platform) ID",
},
"ipcidr": {
Type: schema.TypeString,
Required: true,
// ForceNew: true,
Description: "IP network CIDR",
},
"vlan_id": {
Type: schema.TypeInt,
Required: true,
// ForceNew: true,
Description: "VLAN ID",
},
"gateway": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "External network gateway IP address",
},
"dns": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "List of DNS addresses",
},
"ntp": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "List of NTP addresses",
},
"check_ips": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "IPs to check network availability",
},
"virtual": {
Type: schema.TypeBool,
Optional: true,
Description: "If true - platform DHCP server will not be created",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Optional description",
},
"start_ip": {
Type: schema.TypeString,
Optional: true,
Description: "Start of IP range to be explicitly included",
},
"end_ip": {
Type: schema.TypeString,
Optional: true,
Description: "End of IP range to be explicitly included",
},
"vnfdev_ip": {
Type: schema.TypeString,
Optional: true,
Description: "IP to create VNFDev with",
},
"pre_reservations_num": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Number of pre created reservations",
},
"ovs_bridge": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "OpenvSwith bridge name for ExtNet connection",
},
"enable": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "Disable/Enable extnet",
},
"set_default": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Set current extnet as default (can not be undone)",
},
"excluded_ips": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "IPs to exclude in current extnet pool",
},
"excluded_ips_range": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ip_start": {
Type: schema.TypeString,
Required: true,
},
"ip_end": {
Type: schema.TypeString,
Required: true,
},
},
},
Description: "Range of IPs to exclude in current extnet pool",
},
"default_qos": {
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"e_rate": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"in_burst": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"in_rate": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
},
},
"restart":{
Type: schema.TypeBool,
Optional: true,
Description: "restart extnet vnf device",
},
"migrate":{
Type: schema.TypeInt,
Optional: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"default": {
Type: schema.TypeBool,
Computed: true,
},
"free_ips": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"extnet_id": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"network_id": {
Type: schema.TypeInt,
Computed: true,
},
"pri_vnfdev_id": {
Type: schema.TypeInt,
Computed: true,
},
"shared_with": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"vnfs": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dhcp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"excluded": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"client_type": {
Type: schema.TypeString,
Computed: true,
},
"domain_name": {
Type: schema.TypeString,
Computed: true,
},
"hostname": {
Type: schema.TypeString,
Computed: true,
},
"ip": {
Type: schema.TypeString,
Computed: true,
},
"mac": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vm_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"network": {
Type: schema.TypeString,
Computed: true,
},
"prefix": {
Type: schema.TypeInt,
Computed: true,
},
"routes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"compute_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"destination": {
Type: schema.TypeString,
Computed: true,
},
"gateway": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"netmask": {
Type: schema.TypeString,
Computed: true,
},
"route_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"reservations": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"client_type": {
Type: schema.TypeString,
Computed: true,
},
"domain_name": {
Type: schema.TypeString,
Computed: true,
},
"hostname": {
Type: schema.TypeString,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"ip": {
Type: schema.TypeString,
Computed: true,
},
"mac": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
"vm_id": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
}
}
func ResourceExtnetCB() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -65,7 +65,7 @@ func resourceStaticRouteCreate(ctx context.Context, d *schema.ResourceData, m in
if computesIDS, ok := d.GetOk("compute_ids"); ok {
ids := computesIDS.([]interface{})
res := make([]uint64, 0, len (ids))
res := make([]uint64, 0, len(ids))
for _, id := range ids {
computeId := uint64(id.(int))
@ -82,6 +82,7 @@ func resourceStaticRouteCreate(ctx context.Context, d *schema.ResourceData, m in
staticRouteData, err := getStaticRouteData(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -91,7 +92,6 @@ func resourceStaticRouteCreate(ctx context.Context, d *schema.ResourceData, m in
}
func resourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
warnings := dc.Warnings{}
staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m)
if err != nil {
@ -101,7 +101,7 @@ func resourceStaticRouteRead(ctx context.Context, d *schema.ResourceData, m inte
flattenStaticRouteData(d, staticRouteData)
return warnings.Get()
return nil
}
func resourceStaticRouteUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -112,58 +112,9 @@ func resourceStaticRouteUpdate(ctx context.Context, d *schema.ResourceData, m in
return diag.FromErr(err)
}
staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
if d.HasChange("compute_ids") {
deletedIds := make([]uint64, 0)
addedIds := make([]uint64, 0)
oldComputeIds, newComputeIds := d.GetChange("compute_ids")
oldComputeIdsSlice := oldComputeIds.([]interface{})
newComputeIdsSlice := newComputeIds.([]interface{})
for _, el := range oldComputeIdsSlice {
if !isContainsIds(newComputeIdsSlice, el) {
convertedEl := uint64(el.(int))
deletedIds = append(deletedIds, convertedEl)
}
}
for _, el := range newComputeIdsSlice {
if !isContainsIds(oldComputeIdsSlice, el) {
convertedEl := uint64(el.(int))
addedIds = append(addedIds, convertedEl)
}
}
if len(deletedIds) > 0 {
req := extnet.StaticRouteAccessRevokeRequest{
ExtNetID: uint64(d.Get("extnet_id").(int)),
RouteId: staticRouteData.ID,
ComputeIds: deletedIds,
}
_, err := c.CloudBroker().ExtNet().StaticRouteAccessRevoke(ctx, req)
if err != nil {
warnings.Add(err)
}
}
if len(addedIds) > 0 {
req := extnet.StaticRouteAccessGrantRequest{
ExtNetID: uint64(d.Get("extnet_id").(int)),
RouteId: staticRouteData.ID,
ComputeIds: addedIds,
}
_, err := c.CloudBroker().ExtNet().StaticRouteAccessGrant(ctx, req)
if err != nil {
warnings.Add(err)
}
if err := utilityStaticRouteComputeIDsUpdate(ctx, d, m); err != nil {
warnings.Add(err)
}
}
@ -192,47 +143,6 @@ func resourceStaticRouteDelete(ctx context.Context, d *schema.ResourceData, m in
return nil
}
func resourceStaticRouteSchemaMake() map[string]*schema.Schema {
rets := dataSourceStaticRouteSchemaMake()
rets["route_id"] = &schema.Schema{
Type: schema.TypeInt,
Computed: true,
Optional: true,
}
rets["compute_ids"] = &schema.Schema{
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
}
rets["destination"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
}
rets["gateway"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
}
rets["netmask"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
}
return rets
}
func isContainsIds(els []interface{}, el interface{}) bool {
convEl := el.(int)
for _, elOld := range els {
if convEl == elOld.(int) {
return true
}
}
return false
}
func ResourceStaticRoute() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -253,32 +253,14 @@ func handleExcludedIPsRangeUpdate(ctx context.Context, d *schema.ResourceData, c
}
func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) error {
deletedIds := make([]uint64, 0)
addedIds := make([]uint64, 0)
oldSet, newSet := d.GetChange("shared_with")
oldAccountIds, newAccountIds := d.GetChange("shared_with")
oldAccountIdsSlice := oldAccountIds.([]interface{})
newAccountIdsSlice := newAccountIds.([]interface{})
for _, el := range oldAccountIdsSlice {
if !isContainsIds(newAccountIdsSlice, el) {
convertedEl := uint64(el.(int))
deletedIds = append(deletedIds, convertedEl)
}
}
for _, el := range newAccountIdsSlice {
if !isContainsIds(oldAccountIdsSlice, el) {
convertedEl := uint64(el.(int))
addedIds = append(addedIds, convertedEl)
}
}
if len(deletedIds) > 0 {
for _, accountId := range deletedIds {
deletedAccountIds := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
if len(deletedAccountIds) > 0 {
for _, accountIdInterface := range deletedAccountIds {
req := extnet.AccessRemoveRequest{
NetID: uint64(d.Get("extnet_id").(int)),
AccountID: accountId,
AccountID: uint64(accountIdInterface.(int)),
}
_, err := c.CloudBroker().ExtNet().AccessRemove(ctx, req)
@ -288,11 +270,12 @@ func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *cont
}
}
if len(addedIds) > 0 {
for _, accountId := range addedIds {
addedAccountIds := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
if len(addedAccountIds) > 0 {
for _, accountIdInterface := range addedAccountIds {
req := extnet.AccessAddRequest{
NetID: uint64(d.Get("extnet_id").(int)),
AccountID: accountId,
AccountID: uint64(accountIdInterface.(int)),
}
_, err := c.CloudBroker().ExtNet().AccessAdd(ctx, req)
@ -300,7 +283,6 @@ func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *cont
return err
}
}
}
return nil
@ -309,14 +291,14 @@ func handleSharedWithUpdate(ctx context.Context, d *schema.ResourceData, c *cont
func handleVirtualUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error {
virtualOld, virtualNew := d.GetChange("virtual")
if virtualOld == false && virtualNew == true {
if !virtualOld.(bool) && virtualNew.(bool) {
req := extnet.DeviceRemoveRequest{NetID: recNet.ID}
_, err := c.CloudBroker().ExtNet().DeviceRemove(ctx, req)
if err != nil {
return err
}
} else if virtualOld == true && virtualNew == false {
} else if virtualOld.(bool) && !virtualNew.(bool) {
req := extnet.DeviceDeployRequest{NetID: recNet.ID}
_, err := c.CloudBroker().ExtNet().DeviceDeploy(ctx, req)
if err != nil {
@ -329,7 +311,7 @@ func handleVirtualUpdate(ctx context.Context, d *schema.ResourceData, c *control
func handleRestartUpdate(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg, recNet *extnet.RecordExtNet) error {
restartOld, restartNew := d.GetChange("restart")
if restartOld == false && restartNew == true {
if !restartOld.(bool) && restartNew.(bool) {
req := extnet.DeviceRestartRequest{NetID: recNet.ID}
_, err := c.CloudBroker().ExtNet().DeviceRestart(ctx, req)
if err != nil {

@ -41,7 +41,6 @@ import (
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
@ -106,3 +105,54 @@ func getStaticRouteData(ctx context.Context, d *schema.ResourceData, m interface
return nil, fmt.Errorf("static route not found")
}
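// utilityStaticRouteComputeIDsUpdate synchronizes compute access to the static route:
// computes removed from compute_ids get their access revoked, computes added to it are granted access.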
func utilityStaticRouteComputeIDsUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
staticRouteData, err := utilityDataStaticRouteCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return err
}
oldSet, newSet := d.GetChange("compute_ids")
deletedComputeIDs := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
deletedIds := make([]uint64, 0, len(deletedComputeIDs))
if len(deletedComputeIDs) > 0 {
for _, computeIdInterface := range deletedComputeIDs {
deletedIds = append(deletedIds, uint64(computeIdInterface.(int)))
}
req := extnet.StaticRouteAccessRevokeRequest{
ExtNetID: uint64(d.Get("extnet_id").(int)),
RouteId: staticRouteData.ID,
ComputeIds: deletedIds,
}
_, err := c.CloudBroker().ExtNet().StaticRouteAccessRevoke(ctx, req)
if err != nil {
return err
}
}
addedComputeIDs := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
addedIds := make([]uint64, 0, len(addedComputeIDs))
if len(addedComputeIDs) > 0 {
for _, computeIdInterface := range addedComputeIDs {
addedIds = append(addedIds, uint64(computeIdInterface.(int)))
}
req := extnet.StaticRouteAccessGrantRequest{
ExtNetID: uint64(d.Get("extnet_id").(int)),
RouteId: staticRouteData.ID,
ComputeIds: addedIds,
}
_, err := c.CloudBroker().ExtNet().StaticRouteAccessGrant(ctx, req)
if err != nil {
return err
}
}
return nil
}
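// A minimal sketch of the set-difference pattern used by utilityStaticRouteComputeIDsUpdate above
// and by handleSharedWithUpdate: given the old and new values of a set-typed attribute of ints
// (as returned by d.GetChange), produce the IDs that were removed and the IDs that were added.
// The helper name diffSetIDs is hypothetical and only illustrates the pattern; it assumes the
// terraform-plugin-sdk helper/schema package is imported, as it is in this file.
func diffSetIDs(oldSet, newSet *schema.Set) (removed, added []uint64) {
	// elements present only in the old set were removed from the attribute
	for _, v := range oldSet.Difference(newSet).List() {
		removed = append(removed, uint64(v.(int)))
	}
	// elements present only in the new set were added to the attribute
	for _, v := range newSet.Difference(oldSet).List() {
		added = append(added, uint64(v.(int)))
	}
	return removed, added
}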

@ -56,153 +56,6 @@ func dataSourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m inte
return nil
}
func dataSourceFlipgroupSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"flipgroup_id": {
Type: schema.TypeInt,
Required: true,
Description: "flipgroup_id",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "account_id",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "account_name",
},
"client_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "client_ids",
},
"client_names": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "client_names",
},
"client_type": {
Type: schema.TypeString,
Computed: true,
Description: "client_type",
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
Description: "conn_id",
},
"conn_type": {
Type: schema.TypeString,
Computed: true,
Description: "conn_type",
},
"created_by": {
Type: schema.TypeString,
Computed: true,
Description: "created_by",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "created_time",
},
"default_gw": {
Type: schema.TypeString,
Computed: true,
Description: "default_gw",
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
Description: "deleted_by",
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
Description: "deleted_time",
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: "description",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "gid",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "guid",
},
"ip": {
Type: schema.TypeString,
Computed: true,
Description: "ip",
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
Description: "milestones",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: "name",
},
"net_id": {
Type: schema.TypeInt,
Computed: true,
Description: "net_id",
},
"net_type": {
Type: schema.TypeString,
Computed: true,
Description: "net_type",
},
"network": {
Type: schema.TypeString,
Computed: true,
Description: "network",
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
Description: "rg_id",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "rg_name",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
Description: "updated_by",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "updated_time",
},
}
return rets
}
func DataSourceFlipgroup() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -58,175 +58,6 @@ func dataSourceFlipgroupListRead(ctx context.Context, d *schema.ResourceData, m
return nil
}
func dataSourceFlipgroupItemSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"ckey": {
Type: schema.TypeString,
Computed: true,
Description: "ckey",
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "account_id",
},
"client_ids": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "client_ids",
},
"client_type": {
Type: schema.TypeString,
Computed: true,
Description: "client_type",
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
Description: "conn_id",
},
"conn_type": {
Type: schema.TypeString,
Computed: true,
Description: "conn_type",
},
"default_gw": {
Type: schema.TypeString,
Computed: true,
Description: "default_gw",
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: "description",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "gid",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
Description: "guid",
},
"flipgroup_id": {
Type: schema.TypeInt,
Computed: true,
Description: "flipgroup_id",
},
"ip": {
Type: schema.TypeString,
Computed: true,
Description: "ip",
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
Description: "milestones",
},
"name": {
Type: schema.TypeString,
Computed: true,
Description: "name",
},
"net_id": {
Type: schema.TypeInt,
Computed: true,
Description: "net_id",
},
"net_type": {
Type: schema.TypeString,
Computed: true,
Description: "net_type",
},
"net_mask": {
Type: schema.TypeInt,
Computed: true,
Description: "net_mask",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
}
return rets
}
func dataSourceFlipgroupsListSchemaMake() map[string]*schema.Schema {
res := map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Optional: true,
Description: "name",
},
"vins_id": {
Type: schema.TypeInt,
Optional: true,
Description: "vins_id",
},
"vins_name": {
Type: schema.TypeString,
Optional: true,
Description: "vins_name",
},
"extnet_id": {
Type: schema.TypeInt,
Optional: true,
Description: "extnet_id",
},
"by_ip": {
Type: schema.TypeString,
Optional: true,
Description: "by_ip",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "rg_id",
},
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "by_id",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "Page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "Page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: dataSourceFlipgroupItemSchemaMake(),
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry_count",
},
}
return res
}
func DataSourceFlipgroupList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -36,35 +36,35 @@ package flipgroup
import (
"context"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins"
)
func existAccountID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
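// checkParamsExistence verifies, before create or update, that the referenced account exists and
// that the network referenced by net_id exists for the given net_type (a ViNS for "VINS", an
// external network for "EXTNET"); individual failures are collected and returned together as diagnostics.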
func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
var errs []error
accountId := uint64(d.Get("account_id").(int))
req := account.ListRequest{}
netType := d.Get("net_type").(string)
netId := uint64(d.Get("net_id").(int))
accountList, err := c.CloudBroker().Account().List(ctx, req)
if err != nil {
return false, err
if err := ic.ExistAccount(ctx, accountId, c); err != nil {
errs = append(errs, err)
}
return len(accountList.FilterByID(accountId).Data) != 0, nil
}
func existNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
netID := uint64(d.Get("net_id").(int))
req := vins.ListRequest {}
vinsList, err := c.CloudBroker().VINS().List(ctx, req)
if err != nil {
return false, err
switch netType {
case "VINS":
if err := ic.ExistVins(ctx, netId, c); err != nil {
errs = append(errs, err)
}
case "EXTNET":
if err := ic.ExistExtNet(ctx, netId, c); err != nil {
errs = append(errs, err)
}
}
return len(vinsList.FilterByID(netID).Data) != 0, nil
return dc.ErrorsToDiagnostics(errs)
}
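// A rough sketch of the aggregation step assumed above: dc.ErrorsToDiagnostics is expected to fold a
// slice of errors into diag.Diagnostics, returning nil when the slice is empty. An equivalent shape,
// shown for illustration only — the real helper lives in internal/dc and may differ in details:
func errorsToDiagnostics(errs []error) diag.Diagnostics {
	var diags diag.Diagnostics
	for _, err := range errs {
		diags = append(diags, diag.Diagnostic{
			Severity: diag.Error,
			Summary:  err.Error(),
		})
	}
	return diags
}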

@ -37,14 +37,13 @@ package flipgroup
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/flipgroup"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
log "github.com/sirupsen/logrus"
)
@ -53,43 +52,34 @@ func resourceFlipgroupCreate(ctx context.Context, d *schema.ResourceData, m inte
log.Debugf("resourceFlipgroupCreate called with name: %s, accountID %v", d.Get("name").(string), d.Get("account_id").(int))
c := m.(*controller.ControllerCfg)
req := flipgroup.CreateRequest{
Name: d.Get("name").(string),
NetType: d.Get("net_type").(string),
ClientType: d.Get("client_type").(string),
}
haveAccount, err := existAccountID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveAccount {
return diag.Errorf("resourceFlipgroupCreate: can't create Flipgroup because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
}
req.AccountID = uint64(d.Get("account_id").(int))
haveVINS, err := existNetID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
if !haveVINS {
return diag.Errorf("resourceFlipgroupCreate: can't create Flipgroup because VinsID %d is not allowed or does not exist", d.Get("net_id").(int))
req := flipgroup.CreateRequest{
Name: d.Get("name").(string),
NetType: d.Get("net_type").(string),
ClientType: d.Get("client_type").(string),
AccountID: uint64(d.Get("account_id").(int)),
NetID: uint64(d.Get("net_id").(int)),
}
req.NetID = uint64(d.Get("net_id").(int))
if IP, ok := d.GetOk("ip"); ok {
req.IP = IP.(string)
}
if description, ok := d.GetOk("desc"); ok {
req.Description = description.(string)
}
resp, err := c.CloudBroker().FLIPGroup().Create(ctx, req)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(fmt.Sprint(resp.ID))
d.Set("flipgroup_id", resp.ID)
var warnings dc.Warnings
@ -102,14 +92,26 @@ func resourceFlipgroupCreate(ctx context.Context, d *schema.ResourceData, m inte
}
func resourceFlipgroupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceFlipgroupRead: called for flipgroup_id %s, name %s",
d.Id(), d.Get("name").(string))
fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
switch fg.Status {
case status.Destroyed:
d.SetId("")
return diag.Errorf("The flipgroup status is destroyed and cannot be read.")
}
flattenFlipgroup(d, fg)
log.Debugf("resourceFlipgroupRead: after flattenFlipgroup: flipgroup_id %s, name %s",
d.Id(), d.Get("name").(string))
return nil
}
@ -117,19 +119,8 @@ func resourceFlipgroupUpdate(ctx context.Context, d *schema.ResourceData, m inte
log.Debugf("resourceFlipgroupUpdate called with id: %v", d.Get("flipgroup_id").(int))
c := m.(*controller.ControllerCfg)
haveAccount, err := existAccountID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveAccount {
return diag.Errorf("resourceFlipgroupUpdate: can't update Flipgroup because AccountID %d is not allowed or does not exist", d.Get("account_id").(int))
}
haveVINS, err := existNetID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveVINS {
return diag.Errorf("resourceFlipgroupUpdate: can't update Flipgroup because VinsID %d is not allowed or does not exist", d.Get("net_id").(int))
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
@ -138,6 +129,12 @@ func resourceFlipgroupUpdate(ctx context.Context, d *schema.ResourceData, m inte
return diag.FromErr(err)
}
switch fg.Status {
case status.Destroyed:
d.SetId("")
return diag.Errorf("The flipgroup status is destroyed and cannot be updated.")
}
var warnings dc.Warnings
basicUpdate := false
req := flipgroup.EditRequest{FLIPGroupID: fg.ID}
@ -172,6 +169,7 @@ func resourceFlipgroupDelete(ctx context.Context, d *schema.ResourceData, m inte
fg, err := utilityFlipgroupCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -179,163 +177,16 @@ func resourceFlipgroupDelete(ctx context.Context, d *schema.ResourceData, m inte
FLIPGroupID: fg.ID,
}
// When FLIPGroup().Delete() is executed, the flipgroup is automatically removed from the compute it was attached to, if any,
// so there is no need to call FLIPGroup().ComputeRemove() separately.
_, err = c.CloudBroker().FLIPGroup().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
return nil
}
d.SetId("")
func resourceFlipgroupSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"account_id": {
Type: schema.TypeInt,
Required: true,
Description: "Account ID",
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Flipgroup name",
},
"net_id": {
Type: schema.TypeInt,
Required: true,
Description: "EXTNET or ViNS ID",
},
"net_type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"EXTNET", "VINS"}, true),
Description: "Network type, EXTNET or VINS",
},
"client_type": {
Type: schema.TypeString,
Required: true,
Description: "Type of client, 'compute' ('vins' will be later)",
ValidateFunc: validation.StringInSlice([]string{"compute"}, true),
},
"ip": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "IP address to associate with this group. If empty, the platform will autoselect IP address",
},
"desc": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Text description of this Flipgroup instance",
},
"client_ids": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "List of clients attached to this Flipgroup instance",
},
"client_names": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "client_names",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "account_name",
},
"flipgroup_id": {
Type: schema.TypeInt,
Computed: true,
},
"conn_id": {
Type: schema.TypeInt,
Computed: true,
},
"conn_type": {
Type: schema.TypeString,
Computed: true,
},
"created_by": {
Type: schema.TypeString,
Computed: true,
Description: "created_by",
},
"created_time": {
Type: schema.TypeInt,
Computed: true,
Description: "created_time",
},
"default_gw": {
Type: schema.TypeString,
Computed: true,
},
"deleted_by": {
Type: schema.TypeString,
Computed: true,
Description: "deleted_by",
},
"deleted_time": {
Type: schema.TypeInt,
Computed: true,
Description: "deleted_time",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"network": {
Type: schema.TypeString,
Computed: true,
Description: "network",
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
Description: "rg_id",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "rg_name",
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"updated_by": {
Type: schema.TypeString,
Computed: true,
Description: "updated_by",
},
"updated_time": {
Type: schema.TypeInt,
Computed: true,
Description: "updated_time",
},
"net_mask": {
Type: schema.TypeInt,
Computed: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
}
return nil
}
func ResourceFlipgroup() *schema.Resource {

@ -46,6 +46,7 @@ import (
)
func utilityFlipgroupCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*flipgroup.RecordFLIPGroup, error) {
log.Debugf("utilityFlipgroupCheckPresence")
c := m.(*controller.ControllerCfg)
req := flipgroup.GetRequest{}
@ -56,7 +57,6 @@ func utilityFlipgroupCheckPresence(ctx context.Context, d *schema.ResourceData,
req.FLIPGroupID = uint64(d.Get("flipgroup_id").(int))
}
log.Debugf("utilityDiskCheckPresence: load disk")
flipgroup, err := c.CloudBroker().FLIPGroup().Get(ctx, req)
if err != nil {
return nil, err

@ -1,100 +1,68 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package grid
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceGridRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
grid, err := utilityGridCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(grid.ID, 10))
flattenGrid(d, grid)
return nil
}
func dataSourceGetGridSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"grid_id": {
Type: schema.TypeInt,
Required: true,
},
"flag": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"location_code": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
}
}
func DataSourceGrid() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceGridRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceGetGridSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package grid
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceGridRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
grid, err := utilityGridCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(grid.ID, 10))
flattenGrid(d, grid)
return nil
}
func DataSourceGrid() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceGridRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceGetGridSchemaMake(),
}
}

@ -34,6 +34,7 @@ package grid
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@ -43,26 +44,14 @@ import (
func dataSourceGridGetDiagnosisRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diagnosis, err := utilityGridGetDiagnosisCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(d.Id())
d.SetId(strconv.Itoa(d.Get("gid").(int)))
d.Set("diagnosis", diagnosis)
return nil
}
func dataSourceGridGetDiagnosisSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"gid": {
Type: schema.TypeInt,
Required: true,
},
"diagnosis": {
Type: schema.TypeString,
Computed: true,
},
}
}
func DataSourceGridGetDiagnosis() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
@ -81,26 +70,14 @@ func DataSourceGridGetDiagnosis() *schema.Resource {
func dataSourceGridPostDiagnosisRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diagnosis, err := utilityGridPostDiagnosisCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(d.Id())
d.SetId(strconv.Itoa(d.Get("gid").(int)))
d.Set("diagnosis", diagnosis)
return nil
}
func dataSourceGridPostDiagnosisSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"gid": {
Type: schema.TypeInt,
Required: true,
},
"diagnosis": {
Type: schema.TypeString,
Computed: true,
},
}
}
func DataSourceGridPostDiagnosis() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -44,6 +44,7 @@ import (
func dataSourceGridGetStatusRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
grid, err := utilityGridGetStatusCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
@ -52,15 +53,6 @@ func dataSourceGridGetStatusRead(ctx context.Context, d *schema.ResourceData, m
return nil
}
func dataSourceGridGetStatusSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"status": {
Type: schema.TypeBool,
Computed: true,
},
}
}
func DataSourceGridGetStatus() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
@ -79,6 +71,7 @@ func DataSourceGridGetStatus() *schema.Resource {
func dataSourceGridPostStatusRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
grid, err := utilityGridPostStatusCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
@ -87,15 +80,6 @@ func dataSourceGridPostStatusRead(ctx context.Context, d *schema.ResourceData, m
return nil
}
func dataSourceGridPostStatusSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"status": {
Type: schema.TypeBool,
Computed: true,
},
}
}
func DataSourceGridPostStatus() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -1,265 +1,69 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package grid
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceGridListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridList, err := utilityGridListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenGridList(gridList))
d.Set("entry_count", gridList.EntryCount)
return nil
}
func dataSourceGridListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "by id",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "name",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "grid list",
Elem: &schema.Resource{
Schema: dataSourceGridSchemaMake(),
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry count",
},
}
return rets
}
func dataSourceGridSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"resources": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"current": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
},
},
},
"flag": {
Type: schema.TypeString,
Computed: true,
},
"gid": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"location_code": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
}
}
func DataSourceGridList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceGridListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceGridListSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package grid
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceGridListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridList, err := utilityGridListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenGridList(gridList))
d.Set("entry_count", gridList.EntryCount)
return nil
}
func DataSourceGridList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceGridListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceGridListSchemaMake(),
}
}

@ -44,6 +44,7 @@ import (
func dataSourceGridListEmailsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridListEmails, err := utilityGridListEmailsCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
@ -53,36 +54,6 @@ func dataSourceGridListEmailsRead(ctx context.Context, d *schema.ResourceData, m
return nil
}
func dataSourceGridListEmailsSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "grid list emails",
Elem: &schema.Schema {
Type: schema.TypeString,
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry count",
},
}
return rets
}
func DataSourceGridListEmails() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -44,143 +44,15 @@ import (
func dataSourceGridGetConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridGetConsumption, err := utilityGridGetConsumptionCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(gridGetConsumption.GID, 10))
d.SetId(strconv.Itoa(d.Get("grid_id").(int)))
d.Set("consumed", flattenGridRecordResource(gridGetConsumption.Consumed))
d.Set("reserved", flattenGridRecordResource(gridGetConsumption.Reserved))
return nil
}
func dataSourceGridGetConsumptionSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"grid_id": {
Type: schema.TypeInt,
Required: true,
},
"consumed": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
}
}
func DataSourceGridGetConsumption() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -44,6 +44,7 @@ import (
func dataSourceGridListConsumptionRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
gridListConsumption, err := utilityGridListConsumptionCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
@ -53,154 +54,6 @@ func dataSourceGridListConsumptionRead(ctx context.Context, d *schema.ResourceDa
return nil
}
func dataSourceGridListConsumptionSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"items": {
Type: schema.TypeList,
Computed: true,
Description: "grid list consumption",
Elem: &schema.Resource{
Schema: dataSourceGridConsumptionSchemaMake(),
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry count",
},
}
return rets
}
func dataSourceGridConsumptionSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"consumed": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"reserved": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu": {
Type: schema.TypeInt,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeInt,
Computed: true,
},
"ext_ips": {
Type: schema.TypeInt,
Computed: true,
},
"ext_traffic": {
Type: schema.TypeInt,
Computed: true,
},
"gpu": {
Type: schema.TypeInt,
Computed: true,
},
"ram": {
Type: schema.TypeInt,
Computed: true,
},
"seps": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeString,
Computed: true,
},
"data_name": {
Type: schema.TypeString,
Computed: true,
},
"disk_size": {
Type: schema.TypeFloat,
Computed: true,
},
"disk_size_max": {
Type: schema.TypeFloat,
Computed: true,
},
},
},
},
},
},
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
}
}
func DataSourceGridListConsumption() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -3,9 +3,11 @@ package grid
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenGrid(d *schema.ResourceData, grid *grid.RecordGrid) {
d.Set("auth_broker", flattens.FlattenMeta(grid.AuthBroker))
d.Set("name", grid.Name)
d.Set("flag", grid.Flag)
d.Set("gid", grid.GID)
@ -20,6 +22,7 @@ func flattenGridList(gl *grid.ListGrids) []map[string]interface{} {
temp := map[string]interface{}{
"resources": flattenGridResources(item.Resources),
"name": item.Name,
"auth_broker": flattens.FlattenMeta(item.AuthBroker),
"flag": item.Flag,
"gid": item.GID,
"guid": item.GUID,
@ -37,7 +40,7 @@ func flattenGridListConsumption(gl *grid.ListResourceConsumption) []map[string]i
temp := map[string]interface{}{
"consumed": flattenGridRecordResource(item.Consumed),
"reserved": flattenGridRecordResource(item.Reserved),
"id": item.GID,
"id": item.GID,
}
res = append(res, temp)
}

@ -1,63 +1,63 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package grid
import (
"context"
"strconv"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityGridCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*grid.RecordGrid, error) {
c := m.(*controller.ControllerCfg)
req := grid.GetRequest{}
if d.Id() != "" {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.GID = id
} else {
req.GID = uint64(d.Get("grid_id").(int))
}
log.Debugf("utilityGridCheckPresence: load grid")
grid, err := c.CloudBroker().Grid().Get(ctx, req)
if err != nil {
return nil, err
}
return grid, nil
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package grid
import (
"context"
"strconv"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
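// utilityGridCheckPresence resolves the grid either by the ID already stored in state (when present)
// or by the grid_id attribute, and loads the record via the CloudBroker Grid API.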
func utilityGridCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*grid.RecordGrid, error) {
c := m.(*controller.ControllerCfg)
req := grid.GetRequest{}
if d.Id() != "" {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.GID = id
} else {
req.GID = uint64(d.Get("grid_id").(int))
}
log.Debugf("utilityGridCheckPresence: load grid")
gridRec, err := c.CloudBroker().Grid().Get(ctx, req)
if err != nil {
return nil, err
}
return gridRec, nil
}

@ -22,7 +22,7 @@ limitations under the License.
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
@ -54,7 +54,7 @@ func utilityGridGetDiagnosisCheckPresence(ctx context.Context, d *schema.Resourc
req.GID = uint64(d.Get("gid").(int))
}
log.Debugf("utilityGridListConsumptionCheckPresence: load grid list consumption")
log.Debugf("utilityGridGetDiagnosisCheckPresence: load grid get diagnosis")
gridGetDiagnosis, err := c.CloudBroker().Grid().GetDiagnosisGET(ctx, req)
if err != nil {
return "", err
@ -73,8 +73,8 @@ func utilityGridPostDiagnosisCheckPresence(ctx context.Context, d *schema.Resour
} else {
req.GID = uint64(d.Get("gid").(int))
}
log.Debugf("utilityGridListConsumptionCheckPresence: load grid list consumption")
log.Debugf("utilityGridPostDiagnosisCheckPresence: load grid post diagnosis")
gridPostDiagnosis, err := c.CloudBroker().Grid().GetDiagnosis(ctx, req)
if err != nil {
return "", err

@ -5,9 +5,14 @@ import (
"context"
"fmt"
cb_account "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account"
cb_compute "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
cb_extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
cb_gid "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid"
cb_image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
cb_k8ci "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8ci"
cb_k8s "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
cb_lb "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
cb_rg "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
cb_stack "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/stack"
cb_vins "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins"
@ -49,6 +54,24 @@ func ExistImage(ctx context.Context, imageId uint64, c *controller.ControllerCfg
return nil
}
func ExistVins(ctx context.Context, vinsId uint64, c *controller.ControllerCfg) error {
req := cb_vins.ListRequest{
ByID: vinsId,
IncludeDeleted: false,
}
vinsList, err := c.CloudBroker().VINS().List(ctx, req)
if err != nil {
return err
}
if len(vinsList.Data) == 0 {
return fmt.Errorf("vins with ID %v not found", vinsId)
}
return nil
}
func ExistVinses(ctx context.Context, vinsIds []uint64, c *controller.ControllerCfg) []error {
var errs []error
@ -109,10 +132,69 @@ func ExistExtNets(ctx context.Context, extNetIds []uint64, c *controller.Control
return errs
}
func ExistExtNetInLb(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error {
if extNetId == 0 {
return nil
}
req := cb_extnet.ListRequest{
ByID: extNetId,
}
extNetList, err := c.CloudBroker().ExtNet().List(ctx, req)
if err != nil {
return err
}
if len(extNetList.Data) == 0 {
return fmt.Errorf("EXTNET with ID %v not found", extNetId)
}
return nil
}
func ExistExtNetInRG(ctx context.Context, extNetId, accountId uint64, c *controller.ControllerCfg) error {
req := cb_extnet.ListRequest{
AccountID: accountId,
ByID: extNetId,
}
listExtNet, err := c.CloudBroker().ExtNet().List(ctx, req)
if err != nil {
return err
}
if len(listExtNet.Data) == 0 {
return fmt.Errorf("EXTNET with ID %v not found for account with id %d", extNetId, accountId)
}
return nil
}
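// ExistExtNetInVins treats extNetId values 0 and -1 as "no external network requested" and skips
// the check; any other value must match an existing external network.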
func ExistExtNetInVins(ctx context.Context, extNetId int, c *controller.ControllerCfg) error {
if extNetId == 0 || extNetId == -1 {
return nil
}
req := cb_extnet.ListRequest{
ByID: uint64(extNetId),
}
extNetList, err := c.CloudBroker().ExtNet().List(ctx, req)
if err != nil {
return err
}
if len(extNetList.Data) == 0 {
return fmt.Errorf("EXTNET with ID %v not found", extNetId)
}
return nil
}
func ExistExtNet(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error {
req := cb_extnet.ListRequest{
ByID: extNetId,
ByID: extNetId,
Status: "Enabled",
}
extNetList, err := c.CloudBroker().ExtNet().List(ctx, req)
@ -127,6 +209,27 @@ func ExistExtNet(ctx context.Context, extNetId uint64, c *controller.ControllerC
return nil
}
func ExistVinsInLb(ctx context.Context, vinsId uint64, c *controller.ControllerCfg) error {
if vinsId == 0 {
return nil
}
req := cb_vins.ListRequest{
ByID: vinsId,
}
vinsList, err := c.CloudBroker().VINS().List(ctx, req)
if err != nil {
return err
}
if len(vinsList.Data) == 0 {
return fmt.Errorf("VINS with ID %v not found", vinsId)
}
return nil
}
func ExistGID(ctx context.Context, gid uint64, c *controller.ControllerCfg) error {
req := cb_gid.ListRequest{}
@ -160,3 +263,144 @@ func ExistStack(ctx context.Context, stackId uint64, c *controller.ControllerCfg
return nil
}
// ExistStackInPcidevice checks whether a compute with the specified stackId exists in the specified non-deleted resource group (rgId).
func ExistStackInPcidevice(ctx context.Context, stackId, rgId uint64, c *controller.ControllerCfg) error {
req := cb_rg.ListRequest{
ByID: rgId,
IncludeDeleted: false,
}
rgList, err := c.CloudBroker().RG().List(ctx, req)
if err != nil {
return err
}
for _, v := range rgList.Data {
for _, idVM := range v.VMs {
req := cb_compute.GetRequest{
ComputeID: idVM,
}
computeRec, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil {
return err
}
if computeRec.StackID == stackId {
return nil
}
}
}
return fmt.Errorf("no compute found with stack_id %v and rg_id %v", stackId, rgId)
}
func ExistLB(ctx context.Context, lbId uint64, c *controller.ControllerCfg) error {
req := cb_lb.ListRequest{
ByID: lbId,
}
lbList, err := c.CloudBroker().LB().List(ctx, req)
if err != nil {
return err
}
if len(lbList.Data) == 0 {
return fmt.Errorf("LB with ID %v not found", lbId)
}
return nil
}
func ExistAccount(ctx context.Context, accountId uint64, c *controller.ControllerCfg) error {
req := cb_account.ListRequest{
ByID: accountId,
}
accountList, err := c.CloudBroker().Account().List(ctx, req)
if err != nil {
return err
}
if len(accountList.Data) == 0 {
return fmt.Errorf("account with id %d not found", accountId)
}
return nil
}
func ExistK8CI(ctx context.Context, k8ciId uint64, c *controller.ControllerCfg) error {
req := cb_k8ci.ListRequest{
ByID: k8ciId,
}
k8ciList, err := c.CloudBroker().K8CI().List(ctx, req)
if err != nil {
return err
}
if len(k8ciList.Data) == 0 {
return fmt.Errorf("k8ci with id %d not found", k8ciId)
}
return nil
}
func ExistExtNetInK8s(ctx context.Context, extNetId uint64, c *controller.ControllerCfg) error {
if extNetId == 0 {
return nil
}
req := cb_extnet.ListRequest{
ByID: extNetId,
}
extNetList, err := c.CloudBroker().ExtNet().List(ctx, req)
if err != nil {
return err
}
if len(extNetList.Data) == 0 {
return fmt.Errorf("EXTNET with ID %v not found", extNetId)
}
return nil
}
func ExistVinsInK8s(ctx context.Context, vinsId uint64, c *controller.ControllerCfg) error {
if vinsId == 0 {
return nil
}
req := cb_vins.ListRequest{
ByID: vinsId,
}
vinsList, err := c.CloudBroker().VINS().List(ctx, req)
if err != nil {
return err
}
if len(vinsList.Data) == 0 {
return fmt.Errorf("VINS with ID %v not found", vinsId)
}
return nil
}
func ExistK8s(ctx context.Context, k8sId uint64, c *controller.ControllerCfg) error {
req := cb_k8s.ListRequest{
ByID: k8sId,
}
k8sList, err := c.CloudBroker().K8S().List(ctx, req)
if err != nil {
return err
}
if len(k8sList.Data) == 0 {
return fmt.Errorf("k8s with id %d not found", k8sId)
}
return nil
}
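// A usage sketch for the existence helpers above: validate the account and external network referenced
// by a resource before issuing a create request. The wrapper name checkCreatePrereqs is hypothetical
// and only illustrates how the helpers are meant to be combined.
func checkCreatePrereqs(ctx context.Context, accountID, extNetID uint64, c *controller.ControllerCfg) error {
	if err := ExistAccount(ctx, accountID, c); err != nil {
		return err
	}
	return ExistExtNet(ctx, extNetID, c)
}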

@ -1,5 +1,5 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -43,227 +43,14 @@ import (
func dataSourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
image, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(image.ID, 10))
flattenImage(d, image)
return nil
}
func dataSourceImageSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the rescue disk",
},
"url": {
Type: schema.TypeString,
Computed: true,
Description: "URL where to download media from",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "grid (platform) ID where this template should be create in",
},
"boot_type": {
Type: schema.TypeString,
Computed: true,
Description: "Boot type of image bios or uefi",
},
"image_type": {
Type: schema.TypeString,
Computed: true,
Description: "Image type linux, windows or other",
},
"shared_with": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"history": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"drivers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"hot_resize": {
Type: schema.TypeBool,
Computed: true,
Description: "Does this machine supports hot resize",
},
"username": {
Type: schema.TypeString,
Computed: true,
Description: "Optional username for the image",
},
"password": {
Type: schema.TypeString,
Computed: true,
Description: "Optional password for the image",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "AccountId to make the image exclusive",
},
"username_dl": {
Type: schema.TypeString,
Computed: true,
Description: "username for upload binary media",
},
"password_dl": {
Type: schema.TypeString,
Computed: true,
Description: "password for upload binary media",
},
"sep_id": {
Type: schema.TypeInt,
Computed: true,
Description: "storage endpoint provider ID",
},
"pool_name": {
Type: schema.TypeString,
Computed: true,
Description: "pool for image create",
},
"architecture": {
Type: schema.TypeString,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
},
"image_id": {
Type: schema.TypeInt,
Required: true,
Description: "image id",
},
"permanently": {
Type: schema.TypeBool,
Computed: true,
Description: "Whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Computed: true,
Description: "Does this image boot OS",
},
"unc_path": {
Type: schema.TypeString,
Computed: true,
Description: "unc path",
},
"link_to": {
Type: schema.TypeInt,
Computed: true,
Description: "",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "tech atatus",
},
"version": {
Type: schema.TypeString,
Computed: true,
Description: "version",
},
"size": {
Type: schema.TypeInt,
Computed: true,
Description: "image size",
},
"enabled": {
Type: schema.TypeBool,
Computed: true,
},
"computeci_id": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"provider_name": {
Type: schema.TypeString,
Computed: true,
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"rescuecd": {
Type: schema.TypeBool,
Computed: true,
},
"last_modified": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
}
}
func DataSourceImage() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -1,98 +1,69 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceImageListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
imageList, err := utilityImageListCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenImageList(imageList))
return nil
}
func dataSourceImageListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Description: "filter images by storage endpoint provider ID",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "image list",
Elem: &schema.Resource{
Schema: dataSourceImageSchemaMake(),
},
},
}
return rets
}
func DataSourceImageList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceImageListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceImageListSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceImageListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
imageList, err := utilityImageListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenImageList(imageList))
d.Set("entry_count", imageList.EntryCount)
return nil
}
func DataSourceImageList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceImageListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceImageListSchemaMake(),
}
}
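
The updated read handler above now exports `entry_count` next to `items`. For `d.Set("entry_count", imageList.EntryCount)` to take effect, the list schema (its builder is no longer shown in this hunk) has to expose a matching computed attribute. A minimal sketch of what that entry presumably looks like; the helper name and the description wording are assumptions:

```go
package image

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// entryCountSchema is a hypothetical illustration of the computed attribute that
// d.Set("entry_count", ...) relies on; the real dataSourceImageListSchemaMake is
// assumed to contain an equivalent entry.
func entryCountSchema() *schema.Schema {
	return &schema.Schema{
		Type:        schema.TypeInt,
		Computed:    true,
		Description: "number of items returned by the images list call", // assumed wording
	}
}
```

The same applies to the stacks list data source further down, which sets `entry_count` as well.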

@@ -1,167 +1,69 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceImageListStacksRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
imageListStacks, err := utilityImageListStacksCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenImageListStacks(d, imageListStacks))
return nil
}
func dataSourceImageListStackSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"api_url": {
Type: schema.TypeString,
Computed: true,
},
"api_key": {
Type: schema.TypeString,
Computed: true,
},
"app_id": {
Type: schema.TypeString,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"drivers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"error": {
Type: schema.TypeInt,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"images": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"login": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"passwd": {
Type: schema.TypeString,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"type": {
Type: schema.TypeString,
Computed: true,
},
}
}
func dataSourceImageListStacksSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"image_id": {
Type: schema.TypeInt,
Required: true,
Description: "image id",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: dataSourceImageListStackSchemaMake(),
},
Description: "items of stacks list",
},
}
}
func DataSourceImageListStacks() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceImageListStacksRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceImageListStacksSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourceImageListStacksRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
imageListStacks, err := utilityImageListStacksCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenImageListStacks(imageListStacks))
d.Set("entry_count", imageListStacks.EntryCount)
return nil
}
func DataSourceImageListStacks() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourceImageListStacksRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourceImageListStacksSchemaMake(),
}
}

@@ -4,154 +4,269 @@ import (
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenImage(d *schema.ResourceData, img *image.RecordImage) {
d.Set("name", img.Name)
d.Set("drivers", img.Drivers)
d.Set("url", img.URL)
d.Set("gid", img.GID)
log.Debugf("flattenImageID %d", img.ID)
d.Set("image_id", img.ID)
d.Set("unc_path", img.UNCPath)
d.Set("ckey", img.CKey)
d.Set("meta", flattens.FlattenMeta(img.Meta))
d.Set("account_id", img.AccountID)
d.Set("acl", flattenAcl(img.ACL))
d.Set("architecture", img.Architecture)
d.Set("boot_type", img.BootType)
d.Set("image_type", img.Type)
d.Set("bootable", img.Bootable)
d.Set("sep_id", img.SEPID)
d.Set("unc_path", img.UNCPath)
d.Set("link_to", img.LinkTo)
d.Set("status", img.Status)
d.Set("tech_status", img.TechStatus)
d.Set("version", img.Version)
d.Set("size", img.Size)
d.Set("enabled", img.Enabled)
d.Set("computeci_id", img.ComputeCIID)
d.Set("pool_name", img.Pool)
d.Set("username", img.Username)
// d.Set("username_dl", img.UsernameDL)
d.Set("password", img.Password)
// d.Set("password_dl", img.PasswordDL)
d.Set("account_id", img.AccountID)
d.Set("deleted_time", img.DeletedTime)
d.Set("desc", img.Description)
d.Set("drivers", img.Drivers)
d.Set("enabled", img.Enabled)
d.Set("gid", img.GID)
d.Set("guid", img.GUID)
d.Set("history", flattenHistory(img.History))
d.Set("hot_resize", img.HotResize)
d.Set("last_modified", img.LastModified)
d.Set("link_to", img.LinkTo)
d.Set("milestones", img.Milestones)
d.Set("name", img.Name)
d.Set("password", img.Password)
d.Set("pool_name", img.Pool)
d.Set("present_to", img.PresentTo)
d.Set("provider_name", img.ProviderName)
d.Set("purge_attempts", img.PurgeAttempts)
d.Set("reference_id", img.ReferenceID)
d.Set("res_id", img.ResID)
d.Set("res_name", img.ResName)
d.Set("rescuecd", img.RescueCD)
d.Set("architecture", img.Architecture)
d.Set("hot_resize", img.HotResize)
d.Set("history", flattenHistory(img.History))
d.Set("last_modified", img.LastModified)
d.Set("meta", flattens.FlattenMeta(img.Meta))
d.Set("desc", img.Description)
d.Set("sep_id", img.SEPID)
d.Set("shared_with", img.SharedWith)
d.Set("size", img.Size)
d.Set("status", img.Status)
d.Set("tech_status", img.TechStatus)
d.Set("image_type", img.Type)
d.Set("url", img.URL)
d.Set("username", img.Username)
d.Set("version", img.Version)
}
func flattenMeta(m []interface{}) []string {
output := []string{}
for _, item := range m {
switch d := item.(type) {
case string:
output = append(output, d)
case int:
output = append(output, strconv.Itoa(d))
case int64:
output = append(output, strconv.FormatInt(d, 10))
case float64:
output = append(output, strconv.FormatInt(int64(d), 10))
default:
output = append(output, "")
func flattenAcl(acl image.ListACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(acl))
for _, val := range acl {
temp := map[string]interface{}{
"explicit": val.Explicit,
"guid": val.GUID,
"right": val.Right,
"status": val.Status,
"type": val.Type,
"user_group_id": val.UserGroupID,
}
res = append(res, temp)
}
return output
return res
}
func flattenHistory(history []image.History) []map[string]interface{} {
temp := make([]map[string]interface{}, 0)
func flattenHistory(history image.ListHistory) []map[string]interface{} {
temp := make([]map[string]interface{}, 0, len(history))
for _, item := range history {
t := map[string]interface{}{
"id": item.ID,
"guid": item.GUID,
"timestamp": item.Timestamp,
}
temp = append(temp, t)
}
return temp
}
func flattenImageList(il *image.ListImages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
log.Debug("flattenImageList")
res := make([]map[string]interface{}, 0, len(il.Data))
for _, item := range il.Data {
temp := map[string]interface{}{
"name": item.Name,
"url": item.URL,
"gid": item.GID,
"guid": item.GUID,
"drivers": item.Drivers,
"image_id": item.ID,
"boot_type": item.BootType,
"bootable": item.Bootable,
"image_type": item.Type,
"status": item.Status,
"tech_status": item.TechStatus,
"version": item.Version,
"username": item.Username,
// "username_dl": item.UsernameDL,
"password": item.Password,
// "password_dl": item.PasswordDL,
"purge_attempts": item.PurgeAttempts,
"architecture": item.Architecture,
"image_id": item.ID,
"unc_path": item.UNCPath,
"ckey": item.CKey,
"meta": flattens.FlattenMeta(item.Meta),
"account_id": item.AccountID,
"acl": flattenAcl(item.ACL),
"architecture": item.Architecture,
"boot_type": item.BootType,
"bootable": item.Bootable,
"computeci_id": item.ComputeCIID,
"deleted_time": item.DeletedTime,
"desc": item.Description,
"drivers": item.Drivers,
"enabled": item.Enabled,
"gid": item.GID,
"guid": item.GUID,
"history": flattenHistory(item.History),
"hot_resize": item.HotResize,
"last_modified": item.LastModified,
"link_to": item.LinkTo,
"milestones": item.Milestones,
"name": item.Name,
"password": item.Password,
"pool_name": item.Pool,
"present_to": item.PresentTo,
"provider_name": item.ProviderName,
"purge_attempts": item.PurgeAttempts,
"reference_id": item.ReferenceID,
"res_id": item.ResID,
"res_name": item.ResName,
"rescuecd": item.RescueCD,
"provider_name": item.ProviderName,
"milestones": item.Milestones,
"size": item.Size,
"sep_id": item.SEPID,
"link_to": item.LinkTo,
"unc_path": item.UNCPath,
"pool_name": item.Pool,
"hot_resize": item.HotResize,
"history": flattenHistory(item.History),
"last_modified": item.LastModified,
"meta": flattenMeta(item.Meta),
"desc": item.Description,
"shared_with": item.SharedWith,
"size": item.Size,
"status": item.Status,
"tech_status": item.TechStatus,
"image_type": item.Type,
"url": item.URL,
"username": item.Username,
"version": item.Version,
"virtual": item.Virtual,
}
res = append(res, temp)
}
return res
}
func flattenImageListStacks(_ *schema.ResourceData, stack *image.ListStacks) []map[string]interface{} {
temp := make([]map[string]interface{}, 0)
func flattenEco(m interface{}) string {
log.Debug("flattenEco")
output := ""
switch d := m.(type) {
case string:
output = d
case int:
output = strconv.Itoa(d)
case int64:
output = strconv.FormatInt(d, 10)
case float64:
output = strconv.FormatInt(int64(d), 10)
default:
}
return output
}
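
flattenEco normalizes a value of unknown dynamic type into a string, so a small table-driven test makes its contract explicit. A minimal sketch, assuming it sits in the same `image` package; the test file itself is not part of this diff:

```go
package image

import "testing"

// TestFlattenEco mirrors the type switch in flattenEco above.
func TestFlattenEco(t *testing.T) {
	cases := []struct {
		in   interface{}
		want string
	}{
		{"kvm", "kvm"},  // strings pass through unchanged
		{42, "42"},      // int is rendered with strconv.Itoa
		{int64(7), "7"}, // int64 goes through strconv.FormatInt
		{3.9, "3"},      // float64 is truncated to its integer part
		{nil, ""},       // anything else falls into the default branch
	}
	for _, c := range cases {
		if got := flattenEco(c.in); got != c.want {
			t.Errorf("flattenEco(%v) = %q, want %q", c.in, got, c.want)
		}
	}
}
```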
func flattenImageListStacks(stack *image.ListStacks) []map[string]interface{} {
log.Debug("flattenImageListStacks")
temp := make([]map[string]interface{}, 0, len(stack.Data))
for _, item := range stack.Data {
t := map[string]interface{}{
"api_url": item.APIURL,
"api_key": item.APIKey,
"app_id": item.AppID,
"desc": item.Description,
"drivers": item.Drivers,
"error": item.Error,
"guid": item.GUID,
"id": item.ID,
"images": item.Images,
"login": item.Login,
"name": item.Name,
"passwd": item.Password,
"reference_id": item.ReferenceID,
"status": item.Status,
"type": item.Type,
"ckey": item.CKey,
"meta": flattens.FlattenMeta(item.Meta),
"api_url": item.APIURL,
"api_key": item.APIKey,
"app_id": item.AppID,
"cpu_allocation_ratio": item.CPUAllocationRatio,
"desc": item.Description,
"descr": item.Descr,
"drivers": item.Drivers,
"eco": flattenEco(item.Eco),
"error": item.Error,
"gid": item.GID,
"guid": item.GUID,
"id": item.ID,
"images": item.Images,
"login": item.Login,
"mem_allocation_ratio": item.MemAllocationRatio,
"name": item.Name,
"packages": flattenPackages(item.Packages),
"passwd": item.Password,
"reference_id": item.ReferenceID,
"status": item.Status,
"type": item.Type,
}
temp = append(temp, t)
}
return temp
}
func flattenPackages(pg image.Packages) []map[string]interface{} {
log.Debug("flattenPackages")
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"libvirt_bin": flattenLibvirtBin(pg),
"libvirt_daemon": flattenLibvirtDaemon(pg),
"lvm2_lockd": flattenLvm2Lockd(pg),
"openvswitch_common": flattenOpenvswitchCommon(pg),
"openvswitch_switch": flattenOpenvswitchSwitch(pg),
"qemu_system_x86": flattenQemuSystemX86(pg),
"sanlock": flattenSanlock(pg),
}
res = append(res, temp)
return res
}
func flattenLibvirtBin(lb image.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": lb.LibvirtBin.InstalledSize,
"ver": lb.LibvirtBin.Ver,
}
res = append(res, temp)
return res
}
func flattenLibvirtDaemon(ld image.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": ld.LibvirtDaemon.InstalledSize,
"ver": ld.LibvirtDaemon.Ver,
}
res = append(res, temp)
return res
}
func flattenLvm2Lockd(ll image.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": ll.Lvm2Lockd.InstalledSize,
"ver": ll.Lvm2Lockd.Ver,
}
res = append(res, temp)
return res
}
func flattenOpenvswitchCommon(oc image.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": oc.OpenvswitchCommon.InstalledSize,
"ver": oc.OpenvswitchCommon.Ver,
}
res = append(res, temp)
return res
}
func flattenOpenvswitchSwitch(os image.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": os.OpenvswitchSwitch.InstalledSize,
"ver": os.OpenvswitchSwitch.Ver,
}
res = append(res, temp)
return res
}
func flattenQemuSystemX86(qs image.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": qs.QemuSystemX86.InstalledSize,
"ver": qs.QemuSystemX86.Ver,
}
res = append(res, temp)
return res
}
func flattenSanlock(sl image.Packages) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"installed_size": sl.Sanlock.InstalledSize,
"ver": sl.Sanlock.Ver,
}
res = append(res, temp)
return res
}
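
The seven package flatteners above differ only in which sub-struct of image.Packages they read; each one returns a single-element list with the same two keys. A sketch of how they could be collapsed into one helper; only the InstalledSize and Ver field names are taken from this hunk, the helper itself is illustrative:

```go
package image

// flattenPackageInfo builds the same single-element list that each per-package
// helper above produces; callers pass the relevant sub-struct fields, e.g.
// flattenPackageInfo(pg.LibvirtBin.InstalledSize, pg.LibvirtBin.Ver).
func flattenPackageInfo(installedSize, ver interface{}) []map[string]interface{} {
	return []map[string]interface{}{
		{
			"installed_size": installedSize,
			"ver":            ver,
		},
	}
}
```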

@@ -1,382 +1,257 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func resourceCDROMImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceCDROMImageCreate: called for image %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
req := image.CreateCDROMImageRequest{
Name: d.Get("name").(string),
URL: d.Get("url").(string),
GID: uint64(d.Get("gid").(int)),
}
drivers := []string{}
for _, driver := range d.Get("drivers").([]interface{}) {
drivers = append(drivers, driver.(string))
}
req.Drivers = drivers
if username, ok := d.GetOk("username_dl"); ok {
req.UsernameDL = username.(string)
}
if password, ok := d.GetOk("password_dl"); ok {
req.PasswordDl = password.(string)
}
if accountId, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(accountId.(int))
}
if sepId, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepId.(int))
}
if poolName, ok := d.GetOk("pool_name"); ok {
req.PoolName = poolName.(string)
}
if architecture, ok := d.GetOk("architecture"); ok {
req.Architecture = architecture.(string)
}
imageId, err := c.CloudBroker().Image().CreateCDROMImage(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(imageId, 10))
d.Set("image_id", imageId)
image, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
d.Set("bootable", image.Bootable)
diagnostics := resourceImageRead(ctx, d, m)
if diagnostics != nil {
return diagnostics
}
return nil
}
func resourceCDROMImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceCDROMImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id())
imageData, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := image.DeleteCDROMImageRequest{
ImageID: imageData.ID,
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().DeleteCDROMImage(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func resourceCDROMImageSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of the rescue disk",
},
"url": {
Type: schema.TypeString,
Required: true,
Description: "URL where to download ISO from",
},
"gid": {
Type: schema.TypeInt,
Required: true,
Description: "grid (platform) ID where this template should be create in",
},
"boot_type": {
Type: schema.TypeString,
Computed: true,
Description: "Boot type of image bios or uefi",
},
"image_type": {
Type: schema.TypeString,
Computed: true,
Description: "Image type linux, windows or other",
},
"drivers": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"hot_resize": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this machine supports hot resize",
},
// "username": {
// Type: schema.TypeString,
// Optional: true,
// Computed: true,
// Description: "Optional username for the image",
// },
// "password": {
// Type: schema.TypeString,
// Optional: true,
// Computed: true,
// Description: "Optional password for the image",
// },
"account_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "AccountId to make the image exclusive",
},
"username_dl": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "username for upload binary media",
},
"password_dl": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "password for upload binary media",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "storage endpoint provider ID",
},
"pool_name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "pool for image create",
},
"architecture": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "image id",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this image boot OS",
},
"unc_path": {
Type: schema.TypeString,
Computed: true,
Description: "unc path",
},
"link_to": {
Type: schema.TypeInt,
Computed: true,
Description: "",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "tech atatus",
},
"version": {
Type: schema.TypeString,
Computed: true,
Description: "version",
},
"size": {
Type: schema.TypeInt,
Computed: true,
Description: "image size",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"computeci_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"provider_name": {
Type: schema.TypeString,
Computed: true,
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"rescuecd": {
Type: schema.TypeBool,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"shared_with": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"enabled_stacks": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"history": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
}
}
func ResourceCDROMImage() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceCDROMImageCreate,
ReadContext: resourceImageRead,
UpdateContext: resourceImageUpdate,
DeleteContext: resourceCDROMImageDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s,
Read: &constants.Timeout30s,
Update: &constants.Timeout60s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: resourceCDROMImageSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
func resourceCDROMImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceCDROMImageCreate: called for image %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
req := image.CreateCDROMImageRequest{
Name: d.Get("name").(string),
URL: d.Get("url").(string),
}
if err := ic.ExistGID(ctx, uint64(d.Get("gid").(int)), c); err != nil {
return diag.FromErr(err)
}
req.GID = uint64(d.Get("gid").(int))
drivers := []string{}
for _, driver := range d.Get("drivers").([]interface{}) {
drivers = append(drivers, driver.(string))
}
req.Drivers = drivers
if username, ok := d.GetOk("username_dl"); ok {
req.UsernameDL = username.(string)
}
if password, ok := d.GetOk("password_dl"); ok {
req.PasswordDl = password.(string)
}
if accountId, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(accountId.(int))
}
if sepId, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepId.(int))
}
if poolName, ok := d.GetOk("pool_name"); ok {
req.PoolName = poolName.(string)
}
if architecture, ok := d.GetOk("architecture"); ok {
req.Architecture = architecture.(string)
}
imageId, err := c.CloudBroker().Image().CreateCDROMImage(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(imageId, 10))
return resourceImageRead(ctx, d, m)
}
func resourceCDROMImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceCDROMImageRead: called for %s id: %s", d.Get("name").(string), d.Id())
img, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
switch img.Status {
case status.Modeled:
return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
case status.Destroyed, status.Purged:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
}
flattenImage(d, img)
return nil
}
func resourceCDROMImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceCDROMImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id())
imageData, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := image.DeleteCDROMImageRequest{
ImageID: imageData.ID,
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().DeleteCDROMImage(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func resourceCDROMImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceCDROMImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
img, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
switch img.Status {
case status.Modeled:
return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
case status.Destroyed, status.Purged:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
}
if d.HasChange("enabled") {
err := resourceImageChangeEnabled(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("shared_with") {
err := resourceImageShare(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("computeci_id") {
err := resourceImageChangeComputeci(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("enabled_stacks") {
err := resourceImageUpdateNodes(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChanges("name", "password_dl", "username_dl", "account_id", "bootable", "hot_resize") {
err := resourceImageCDROMEdit(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
return nil
}
func resourceImageCDROMEdit(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.EditRequest{}
req.ImageID = uint64(d.Get("image_id").(int))
if d.HasChange("name") {
req.Name = d.Get("name").(string)
}
if d.HasChange("username_dl") {
req.Username = d.Get("username_dl").(string)
}
if d.HasChange("password_dl") {
req.Password = d.Get("password_dl").(string)
}
if d.HasChange("account_id") {
req.AccountID = uint64(d.Get("account_id").(int))
}
if d.HasChange("bootable") {
req.Bootable = d.Get("bootable").(bool)
}
if d.HasChange("hot_resize") {
req.HotResize = d.Get("hot_resize").(bool)
}
_, err := c.CloudBroker().Image().Edit(ctx, req)
if err != nil {
return err
}
return nil
}
func ResourceCDROMImage() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceCDROMImageCreate,
ReadContext: resourceCDROMImageRead,
UpdateContext: resourceCDROMImageUpdate,
DeleteContext: resourceCDROMImageDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s,
Read: &constants.Timeout30s,
Update: &constants.Timeout60s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: resourceCDROMImageSchemaMake(),
}
}
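
The new read and update handlers above guard on img.Status using constants from internal/status, a package that is not part of this diff. A hypothetical sketch of its shape, inferred only from how the constants are compared against img.Status; the concrete string values and any additional states are assumptions:

```go
// Package status is sketched here solely to make the guards above readable on
// their own; the real package ships with the provider and defines more states.
package status

// Status is assumed to be interchangeable with the plain string status field
// returned by the cloudbroker API.
type Status = string

const (
	Modeled   Status = "MODELED"   // assumed value
	Destroyed Status = "DESTROYED" // assumed value
	Purged    Status = "PURGED"    // assumed value
)
```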

@@ -1,611 +1,341 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
req := image.CreateRequest{
Name: d.Get("name").(string),
URL: d.Get("url").(string),
GID: uint64(d.Get("gid").(int)),
BootType: d.Get("boot_type").(string),
ImageType: d.Get("image_type").(string),
}
drivers := []string{}
for _, driver := range d.Get("drivers").([]interface{}) {
drivers = append(drivers, driver.(string))
}
req.Drivers = drivers
if hotresize, ok := d.GetOk("hot_resize"); ok {
req.HotResize = hotresize.(bool)
}
if username, ok := d.GetOk("username"); ok {
req.Username = username.(string)
}
if password, ok := d.GetOk("password"); ok {
req.Password = password.(string)
}
if accountId, ok := d.GetOk("account_id"); ok {
req.AccountID = uint64(accountId.(int))
}
if usernameDL, ok := d.GetOk("username_dl"); ok {
req.UsernameDL = usernameDL.(string)
}
if passwordDL, ok := d.GetOk("password_dl"); ok {
req.PasswordDL = passwordDL.(string)
}
if sepId, ok := d.GetOk("sep_id"); ok {
req.SEPID = uint64(sepId.(int))
}
if poolName, ok := d.GetOk("pool_name"); ok {
req.PoolName = poolName.(string)
}
if architecture, ok := d.GetOk("architecture"); ok {
req.Architecture = architecture.(string)
}
imageId, err := c.CloudBroker().Image().CreateImage(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(imageId, 10))
d.Set("image_id", imageId)
return resourceImageRead(ctx, d, m)
}
func resourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageRead: called for %s id: %s", d.Get("name").(string), d.Id())
image, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
flattenImage(d, image)
return nil
}
func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id())
_, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := image.DeleteRequest{
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func resourceImageEditName(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceImageEditName: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.RenameRequest{
ImageID: uint64(d.Get("image_id").(int)),
Name: d.Get("name").(string),
}
_, err := c.CloudBroker().Image().Rename(ctx, req)
if err != nil {
return err
}
return nil
}
func resourceImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
if d.HasChange("enabled") {
err := resourceImageChangeEnabled(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("name") {
err := resourceImageEditName(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("shared_with") {
err := resourceImageShare(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("computeci_id") {
err := resourceImageChangeComputeci(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("enabled_stacks") {
err := resourceImageUpdateNodes(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("link_to") {
err := resourceImageLink(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChanges("name", "username", "password", "account_id", "bootable", "hot_resize") {
req := image.EditRequest{}
req.ImageID = uint64(d.Get("image_id").(int))
req.Name = d.Get("name").(string)
req.Username = d.Get("username").(string)
req.Password = d.Get("password").(string)
req.AccountID = uint64(d.Get("account_id").(int))
req.Bootable = d.Get("bootable").(bool)
req.HotResize = d.Get("hot_resize").(bool)
_, err := c.CloudBroker().Image().Edit(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
return nil
}
func resourceImageChangeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
imageId := uint64(d.Get("image_id").(int))
if d.Get("enabled").(bool) {
req := image.EnableRequest{
ImageID: imageId,
}
_, err := c.CloudBroker().Image().Enable(ctx, req)
if err != nil {
return err
}
} else {
req := image.DisableRequest{
ImageID: imageId,
}
_, err := c.CloudBroker().Image().Disable(ctx, req)
if err != nil {
return err
}
}
return nil
}
func resourceImageLink(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceVirtualImageLink: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.LinkRequest{
ImageID: uint64(d.Get("image_id").(int)),
TargetID: uint64(d.Get("link_to").(int)),
}
_, err := c.CloudBroker().Image().Link(ctx, req)
if err != nil {
return err
}
return nil
}
func resourceImageShare(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceImageShare: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.ShareRequest{
ImageId: uint64(d.Get("image_id").(int)),
}
accIds := []uint64{}
for _, accId := range d.Get("shared_with").([]interface{}) {
accIds = append(accIds, uint64(accId.(int)))
}
req.AccountIDs = accIds
_, err := c.CloudBroker().Image().Share(ctx, req)
if err != nil {
return err
}
return nil
}
func resourceImageChangeComputeci(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
imageId := uint64(d.Get("image_id").(int))
computeci := uint64(d.Get("computeci_id").(int))
if computeci == 0 {
req := image.ComputeCIUnsetRequest{
ImageID: imageId,
}
_, err := c.CloudBroker().Image().ComputeCIUnset(ctx, req)
if err != nil {
return err
}
} else {
req := image.ComputeCISetRequest{
ImageID: imageId,
ComputeCIID: computeci,
}
_, err := c.CloudBroker().Image().ComputeCISet(ctx, req)
if err != nil {
return err
}
}
return nil
}
func resourceImageUpdateNodes(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceImageUpdateNodes: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.UpdateNodesRequest{
ImageID: uint64(d.Get("image_id").(int)),
}
enabledStacks := []uint64{}
for _, stack := range d.Get("enabled_stacks").([]interface{}) {
enabledStacks = append(enabledStacks, uint64(stack.(int)))
}
req.EnabledStacks = enabledStacks
_, err := c.CloudBroker().Image().UpdateNodes(ctx, req)
if err != nil {
return err
}
return nil
}
func resourceImageSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of the rescue disk",
},
"url": {
Type: schema.TypeString,
Required: true,
Description: "URL where to download media from",
},
"gid": {
Type: schema.TypeInt,
Required: true,
Description: "grid (platform) ID where this template should be create in",
},
"boot_type": {
Type: schema.TypeString,
Required: true,
Description: "Boot type of image bios or uefi",
},
"image_type": {
Type: schema.TypeString,
Required: true,
Description: "Image type linux, windows or other",
},
"drivers": {
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"hot_resize": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this machine supports hot resize",
},
"username": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Optional username for the image",
},
"password": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Optional password for the image",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "AccountId to make the image exclusive",
},
"username_dl": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "username for upload binary media",
},
"password_dl": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "password for upload binary media",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "storage endpoint provider ID",
},
"pool_name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "pool for image create",
},
"architecture": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
},
"image_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "image id",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this image boot OS",
},
"unc_path": {
Type: schema.TypeString,
Computed: true,
Description: "unc path",
},
"link_to": {
Type: schema.TypeInt,
Computed: true,
Description: "",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "tech atatus",
},
"version": {
Type: schema.TypeString,
Computed: true,
Description: "version",
},
"size": {
Type: schema.TypeInt,
Computed: true,
Description: "image size",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"computeci_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"provider_name": {
Type: schema.TypeString,
Computed: true,
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"rescuecd": {
Type: schema.TypeBool,
Computed: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"last_modified": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"shared_with": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
"sync": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Create image from a media identified by URL (in synchronous mode)",
},
"enabled_stacks": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"history": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
}
}
func ResourceImage() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceImageCreate,
ReadContext: resourceImageRead,
UpdateContext: resourceImageUpdate,
DeleteContext: resourceImageDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s,
Read: &constants.Timeout30s,
Update: &constants.Timeout60s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: resourceImageSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
func resourceImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
syncMode := d.Get("sync_mode").(bool)
var imageId uint64
if syncMode {
req, err := SyncCreateRequest(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
imageId, err = c.CloudBroker().Image().SyncCreate(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
req, err := CreateRequest(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
imageId, err = c.CloudBroker().Image().CreateImage(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
d.SetId(strconv.FormatUint(imageId, 10))
return resourceImageRead(ctx, d, m)
}
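
The rewritten create handler above delegates request assembly to SyncCreateRequest and CreateRequest, which live in a utilities file outside this hunk. A sketch of what CreateRequest presumably does, reusing only the image.CreateRequest fields that the previous version of resourceImageCreate (earlier in this diff) populated; the signature and the error return are inferred from the call site and remain assumptions:

```go
package image

import (
	"context"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
)

// CreateRequest is sketched from the field assignments of the old create handler;
// the real helper may perform extra validation (for example via the ic package).
func CreateRequest(ctx context.Context, d *schema.ResourceData, m interface{}) (image.CreateRequest, error) {
	req := image.CreateRequest{
		Name:      d.Get("name").(string),
		URL:       d.Get("url").(string),
		GID:       uint64(d.Get("gid").(int)),
		BootType:  d.Get("boot_type").(string),
		ImageType: d.Get("image_type").(string),
	}

	drivers := make([]string, 0)
	for _, driver := range d.Get("drivers").([]interface{}) {
		drivers = append(drivers, driver.(string))
	}
	req.Drivers = drivers

	if hotResize, ok := d.GetOk("hot_resize"); ok {
		req.HotResize = hotResize.(bool)
	}
	// The optional username, password, account_id, sep_id, pool_name and
	// architecture fields would follow the same d.GetOk pattern as before.
	return req, nil
}
```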
func resourceImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageRead: called for %s id: %s", d.Get("name").(string), d.Id())
img, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
switch img.Status {
case status.Modeled:
return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
case status.Destroyed, status.Purged:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
}
flattenImage(d, img)
return nil
}
func resourceImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id())
_, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := image.DeleteRequest{
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func resourceImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
img, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
switch img.Status {
case status.Modeled:
return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
case status.Destroyed, status.Purged:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
}
if d.HasChange("enabled") {
err := resourceImageChangeEnabled(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("shared_with") {
err := resourceImageShare(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("computeci_id") {
err := resourceImageChangeComputeci(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("enabled_stacks") {
err := resourceImageUpdateNodes(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChanges("name", "username", "password", "account_id", "bootable", "hot_resize") {
err := resourceImageEdit(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
return nil
}
func resourceImageChangeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
imageId := uint64(d.Get("image_id").(int))
if d.Get("enabled").(bool) {
req := image.EnableRequest{
ImageID: imageId,
}
_, err := c.CloudBroker().Image().Enable(ctx, req)
if err != nil {
return err
}
} else {
req := image.DisableRequest{
ImageID: imageId,
}
_, err := c.CloudBroker().Image().Disable(ctx, req)
if err != nil {
return err
}
}
return nil
}
func resourceImageShare(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceImageShare: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.ShareRequest{
ImageId: uint64(d.Get("image_id").(int)),
}
accIds := []uint64{}
for _, accId := range d.Get("shared_with").([]interface{}) {
accIds = append(accIds, uint64(accId.(int)))
}
req.AccountIDs = accIds
_, err := c.CloudBroker().Image().Share(ctx, req)
if err != nil {
return err
}
return nil
}
func resourceImageChangeComputeci(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
imageId := uint64(d.Get("image_id").(int))
computeci := uint64(d.Get("computeci_id").(int))
if computeci == 0 {
req := image.ComputeCIUnsetRequest{
ImageID: imageId,
}
_, err := c.CloudBroker().Image().ComputeCIUnset(ctx, req)
if err != nil {
return err
}
} else {
req := image.ComputeCISetRequest{
ImageID: imageId,
ComputeCIID: computeci,
}
_, err := c.CloudBroker().Image().ComputeCISet(ctx, req)
if err != nil {
return err
}
}
return nil
}
func resourceImageUpdateNodes(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceImageUpdateNodes: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.UpdateNodesRequest{
ImageID: uint64(d.Get("image_id").(int)),
}
enabledStacks := []uint64{}
for _, stack := range d.Get("enabled_stacks").([]interface{}) {
enabledStacks = append(enabledStacks, uint64(stack.(int)))
}
req.EnabledStacks = enabledStacks
_, err := c.CloudBroker().Image().UpdateNodes(ctx, req)
if err != nil {
return err
}
return nil
}
func resourceImageEdit(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.EditRequest{}
req.ImageID = uint64(d.Get("image_id").(int))
if d.HasChange("name") {
req.Name = d.Get("name").(string)
}
if d.HasChange("username") {
req.Username = d.Get("username").(string)
}
if d.HasChange("password") {
req.Password = d.Get("password").(string)
}
if d.HasChange("account_id") {
req.AccountID = uint64(d.Get("account_id").(int))
}
if d.HasChange("bootable") {
req.Bootable = d.Get("bootable").(bool)
}
if d.HasChange("hot_resize") {
req.HotResize = d.Get("hot_resize").(bool)
}
_, err := c.CloudBroker().Image().Edit(ctx, req)
if err != nil {
return err
}
return nil
}
func ResourceImage() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceImageCreate,
ReadContext: resourceImageRead,
UpdateContext: resourceImageUpdate,
DeleteContext: resourceImageDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s,
Read: &constants.Timeout30s,
Update: &constants.Timeout60s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: resourceImageSchemaMake(),
}
}
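
Every resource and data source in this diff wires its Timeouts block to addresses of variables from internal/constants (for example &constants.Timeout30s). That package is not shown here; since schema.ResourceTimeout fields are *time.Duration, the referenced names are presumably addressable time.Duration values along these lines (the names come from the usages above, the values are assumed from the names):

```go
// Package constants is sketched from its usages in this diff; only the two
// durations referenced above are listed.
package constants

import "time"

var (
	Timeout30s = 30 * time.Second
	Timeout60s = 60 * time.Second
)
```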

@@ -1,331 +1,220 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func resourceVirtualImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
req := image.CreateVirtualRequest{
Name: d.Get("name").(string),
TargetID: uint64(d.Get("target_id").(int)),
}
imageId, err := c.CloudBroker().Image().CreateVirtual(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(imageId, 10))
d.Set("image_id", imageId)
return resourceImageRead(ctx, d, m)
}
func resourceVirtualImageSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
Description: "name of the virtual image to create",
},
"target_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of real image to link this virtual image to upon creation",
},
"history": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"guid": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeInt,
Computed: true,
},
"timestamp": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"url": {
Type: schema.TypeString,
Computed: true,
Description: "URL where to download media from",
},
"gid": {
Type: schema.TypeInt,
Computed: true,
Description: "grid (platform) ID where this template should be create in",
},
"boot_type": {
Type: schema.TypeString,
Computed: true,
Description: "Boot type of image bios or uefi",
},
"image_type": {
Type: schema.TypeString,
Computed: true,
Description: "Image type linux, windows or other",
},
"drivers": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "List of types of compute suitable for image. Example: [ \"KVM_X86\" ]",
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Description: "meta",
},
"hot_resize": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this machine supports hot resize",
},
"username": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Optional username for the image",
},
"password": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "Optional password for the image",
},
"account_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "AccountId to make the image exclusive",
},
"username_dl": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "username for upload binary media",
},
"password_dl": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "password for upload binary media",
},
"sep_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "storage endpoint provider ID",
},
"pool_name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "pool for image create",
},
"architecture": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "binary architecture of this image, one of X86_64 of PPC64_LE",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "image id",
},
"permanently": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Whether to completely delete the image",
},
"bootable": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: "Does this image boot OS",
},
"unc_path": {
Type: schema.TypeString,
Computed: true,
Description: "unc path",
},
"link_to": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "status",
},
"tech_status": {
Type: schema.TypeString,
Computed: true,
Description: "tech atatus",
},
"version": {
Type: schema.TypeString,
Computed: true,
Description: "version",
},
"size": {
Type: schema.TypeInt,
Computed: true,
Description: "image size",
},
"enabled": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"computeci_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"milestones": {
Type: schema.TypeInt,
Computed: true,
},
"provider_name": {
Type: schema.TypeString,
Computed: true,
},
"purge_attempts": {
Type: schema.TypeInt,
Computed: true,
},
"reference_id": {
Type: schema.TypeString,
Computed: true,
},
"res_id": {
Type: schema.TypeString,
Computed: true,
},
"res_name": {
Type: schema.TypeString,
Computed: true,
},
"rescuecd": {
Type: schema.TypeBool,
Computed: true,
},
"reason": {
Type: schema.TypeString,
Optional: true,
},
"last_modified": {
Type: schema.TypeInt,
Computed: true,
},
"desc": {
Type: schema.TypeString,
Computed: true,
},
"enabled_stacks": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"shared_with": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
},
}
}
func ResourceVirtualImage() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceVirtualImageCreate,
ReadContext: resourceImageRead,
UpdateContext: resourceImageUpdate,
DeleteContext: resourceImageDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s,
Read: &constants.Timeout30s,
Update: &constants.Timeout60s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: resourceVirtualImageSchemaMake(),
}
}
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
func resourceVirtualImageCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceImageCreate: called for image %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
req := image.CreateVirtualRequest{
Name: d.Get("name").(string),
TargetID: uint64(d.Get("link_to").(int)),
}
imageId, err := c.CloudBroker().Image().CreateVirtual(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(imageId, 10))
return resourceImageRead(ctx, d, m)
}
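// resourceVirtualImageRead reloads the image record from the CloudBroker API; images in the
// Modeled, Destroyed or Purged status are reported as errors instead of being flattened into state.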
func resourceVirtualImageRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceVirtualImageRead: called for %s id: %s", d.Get("name").(string), d.Id())
img, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
switch img.Status {
case status.Modeled:
return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
case status.Destroyed, status.Purged:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
}
flattenImage(d, img)
return nil
}
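// resourceVirtualImageDelete removes the virtual image; the reason and permanently
// attributes are passed through to the API when they are set in the configuration.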
func resourceVirtualImageDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceVirtualImageDelete: called for %s, id: %s", d.Get("name").(string), d.Id())
_, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
req := image.DeleteRequest{
ImageID: uint64(d.Get("image_id").(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
if permanently, ok := d.GetOk("permanently"); ok {
req.Permanently = permanently.(bool)
}
_, err = c.CloudBroker().Image().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func resourceVirtualImageUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceVirtualImageEdit: called for %s, id: %s", d.Get("name").(string), d.Id())
img, err := utilityImageCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
switch img.Status {
case status.Modeled:
return diag.Errorf("The image is in status: %s, please, contact support for more information", img.Status)
case status.Destroyed, status.Purged:
d.SetId("")
return diag.Errorf("The resource cannot be updated because it has been destroyed")
}
if d.HasChange("enabled") {
err := resourceImageChangeEnabled(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("shared_with") {
err := resourceImageShare(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("computeci_id") {
err := resourceImageChangeComputeci(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("enabled_stacks") {
err := resourceImageUpdateNodes(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChanges("name", "username", "password", "account_id", "bootable", "hot_resize") {
err := resourceImageEdit(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("link_to") {
err := resourceImageLink(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
return nil
}
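// resourceImageLink relinks the virtual image (image_id) to the real image referenced by link_to.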
func resourceImageLink(ctx context.Context, d *schema.ResourceData, m interface{}) error {
log.Debugf("resourceVirtualImageLink: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
req := image.LinkRequest{
ImageID: uint64(d.Get("image_id").(int)),
TargetID: uint64(d.Get("link_to").(int)),
}
_, err := c.CloudBroker().Image().Link(ctx, req)
if err != nil {
return err
}
return nil
}
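// ResourceVirtualImage assembles the schema and the CRUD handlers above into the
// virtual image resource. A minimal configuration sketch (the resource type name
// below is an assumption; check the provider registration for the exact name):
//
//	resource "decort_cb_image_virtual" "example" {
//	  name    = "my-virtual-image"
//	  link_to = 1234 # ID of an existing real image to link to
//	}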
func ResourceVirtualImage() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourceVirtualImageCreate,
ReadContext: resourceVirtualImageRead,
UpdateContext: resourceVirtualImageUpdate,
DeleteContext: resourceVirtualImageDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s,
Read: &constants.Timeout30s,
Update: &constants.Timeout60s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: resourceVirtualImageSchemaMake(),
}
}

@ -1,60 +1,60 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
func utilityImageCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.RecordImage, error) {
c := m.(*controller.ControllerCfg)
req := image.GetRequest{}
if (strconv.Itoa(d.Get("image_id").(int))) != "0" {
req.ImageID = uint64(d.Get("image_id").(int))
} else {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.ImageID = id
}
image, err := c.CloudBroker().Image().Get(ctx, req)
if err != nil {
return nil, err
}
return image, nil
}
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
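// utilityImageCheckPresence resolves the image ID (preferring the resource ID when it is set,
// falling back to the image_id attribute) and fetches the image record from the CloudBroker API.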
func utilityImageCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.RecordImage, error) {
c := m.(*controller.ControllerCfg)
req := image.GetRequest{}
if d.Id() != "" {
id, _ := strconv.ParseUint(d.Id(), 10, 64)
req.ImageID = id
} else {
req.ImageID = uint64(d.Get("image_id").(int))
}
image, err := c.CloudBroker().Image().Get(ctx, req)
if err != nil {
return nil, err
}
return image, nil
}

@ -1,66 +1,98 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.ListImages, error) {
c := m.(*controller.ControllerCfg)
req := image.ListRequest{}
if sepId, ok := d.GetOk("sep_id"); ok {
req.SepID = uint64(sepId.(int))
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityImageListCheckPresence: load image list")
imageList, err := c.CloudBroker().Image().List(ctx, req)
if err != nil {
return nil, err
}
return imageList, nil
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
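// utilityImageListCheckPresence builds an image ListRequest from the optional data source filters
// (sep_id, by_id, name, status, architecture, type_image, image_size, sep_name, pool, public,
// hot_resize, bootable, page, size) and returns the matching image list.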
func utilityImageListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.ListImages, error) {
c := m.(*controller.ControllerCfg)
req := image.ListRequest{}
if sepId, ok := d.GetOk("sep_id"); ok {
req.SepID = uint64(sepId.(int))
}
if byId, ok := d.GetOk("by_id"); ok {
req.ByID = uint64(byId.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if architecture, ok := d.GetOk("architecture"); ok {
req.Architecture = architecture.(string)
}
if typeImage, ok := d.GetOk("type_image"); ok {
req.TypeImage = typeImage.(string)
}
if imageSize, ok := d.GetOk("image_size"); ok {
req.ImageSize = uint64(imageSize.(int))
}
if sepName, ok := d.GetOk("sep_name"); ok {
req.SEPName = sepName.(string)
}
if pool, ok := d.GetOk("pool"); ok {
req.Pool = pool.(string)
}
if public, ok := d.GetOk("public"); ok {
req.Public = public.(bool)
}
if hotResize, ok := d.GetOk("hot_resize"); ok {
req.HotResize = hotResize.(bool)
}
if bootable, ok := d.GetOk("bootable"); ok {
req.Bootable = bootable.(bool)
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
log.Debugf("utilityImageListCheckPresence: load image list")
imageList, err := c.CloudBroker().Image().List(ctx, req)
if err != nil {
return nil, err
}
return imageList, nil
}

@ -1,57 +1,73 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityImageListStacksCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.ListStacks, error) {
c := m.(*controller.ControllerCfg)
req := image.ListStacksRequest{
ImageID: uint64(d.Get("image_id").(int)),
}
log.Debugf("utilityImageListStacksCheckPresence: load image list")
imageListStacks, err := c.CloudBroker().Image().ListStacks(ctx, req)
if err != nil {
return nil, err
}
return imageListStacks, nil
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package image
import (
"context"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
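// utilityImageListStacksCheckPresence lists the stacks an image is deployed on,
// applying the optional page, size, name, status and type_image filters.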
func utilityImageListStacksCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*image.ListStacks, error) {
c := m.(*controller.ControllerCfg)
req := image.ListStacksRequest{
ImageID: uint64(d.Get("image_id").(int)),
}
if page, ok := d.GetOk("page"); ok {
req.Page = uint64(page.(int))
}
if size, ok := d.GetOk("size"); ok {
req.Size = uint64(size.(int))
}
if name, ok := d.GetOk("name"); ok {
req.Name = name.(string)
}
if status, ok := d.GetOk("status"); ok {
req.Status = status.(string)
}
if typeImage, ok := d.GetOk("type_image"); ok {
req.Type = typeImage.(string)
}
log.Debugf("utilityImageListStacksCheckPresence: load image list")
imageListStacks, err := c.CloudBroker().Image().ListStacks(ctx, req)
if err != nil {
return nil, err
}
return imageListStacks, nil
}

@ -33,26 +33,65 @@ package k8s
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/tasks"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
)
func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceK8sWgCreate: called with k8s id %d", d.Get("k8s_id").(int))
c := m.(*controller.ControllerCfg)
if err := ic.ExistK8s(ctx, uint64(d.Get("k8s_id").(int)), c); err != nil {
return diag.FromErr(err)
}
req := k8s.WorkersGroupAddRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
Name: d.Get("name").(string),
WorkerNum: uint64(d.Get("num").(int)),
WorkerCPU: uint64(d.Get("cpu").(int)),
WorkerRAM: uint64(d.Get("ram").(int)),
WorkerDisk: uint64(d.Get("disk").(int)),
K8SID: uint64(d.Get("k8s_id").(int)),
Name: d.Get("name").(string),
WorkerNum: uint64(d.Get("num").(int)),
WorkerCPU: uint64(d.Get("cpu").(int)),
WorkerRAM: uint64(d.Get("ram").(int)),
WorkerSEPID: uint64(d.Get("worker_sep_id").(int)),
WorkerSEPPool: d.Get("worker_sep_pool").(string),
}
if d.Get("disk") == nil {
req.WorkerDisk = 0
} else {
req.WorkerDisk = uint64(d.Get("disk").(int))
}
labels, _ := d.Get("labels").([]interface{})
for _, label := range labels {
if !strings.HasPrefix(label.(string), "workersGroupName") {
req.Labels = append(req.Labels, label.(string))
}
}
annotations, _ := d.Get("annotations").([]interface{})
for _, annotation := range annotations {
req.Annotations = append(req.Annotations, annotation.(string))
}
taints, _ := d.Get("taints").([]interface{})
for _, taint := range taints {
req.Taints = append(req.Taints, taint.(string))
}
if cloudInit, ok := d.GetOk("cloud_init"); ok {
req.UserData = cloudInit.(string)
}
resp, err := c.CloudBroker().K8S().WorkersGroupAdd(ctx, req)
@ -60,9 +99,31 @@ func resourceK8sWgCreate(ctx context.Context, d *schema.ResourceData, m interfac
return diag.FromErr(err)
}
d.SetId(resp)
taskReq := tasks.GetRequest{
AuditID: strings.Trim(resp, `"`),
}
return nil
for {
task, err := c.CloudBroker().Tasks().Get(ctx, taskReq)
if err != nil {
return diag.FromErr(err)
}
log.Debugf("resourceK8sWgCreate: instance creating - %s", task.Stage)
if task.Completed {
if task.Error != "" {
return diag.FromErr(fmt.Errorf("cannot create k8sWg instance: %v", task.Error))
}
d.SetId(strconv.Itoa(int(task.Result)))
break
}
time.Sleep(time.Second * 20)
}
return resourceK8sWgRead(ctx, d, m)
}
func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@ -74,11 +135,30 @@ func resourceK8sWgRead(ctx context.Context, d *schema.ResourceData, m interface{
return diag.FromErr(err)
}
d.Set("name", wg.Name)
d.Set("num", wg.Num)
d.Set("cpu", wg.CPU)
d.Set("ram", wg.RAM)
d.Set("disk", wg.Disk)
workersComputeList := make([]compute.RecordCompute, 0)
for _, info := range wg.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID)
if err != nil {
return diag.FromErr(err)
}
workersComputeList = append(workersComputeList, *compute)
}
d.Set("wg_id", wg.ID)
if strings.Contains(d.Id(), "#") {
k8sId, err := strconv.Atoi(strings.Split(d.Id(), "#")[1])
if err != nil {
return diag.FromErr(err)
}
d.Set("k8s_id", k8sId)
} else {
d.Set("k8s_id", d.Get("k8s_id"))
}
d.SetId(strings.Split(d.Id(), "#")[0])
flattenWg(d, wg, workersComputeList)
return nil
}
@ -88,35 +168,56 @@ func resourceK8sWgUpdate(ctx context.Context, d *schema.ResourceData, m interfac
c := m.(*controller.ControllerCfg)
if err := ic.ExistK8s(ctx, uint64(d.Get("k8s_id").(int)), c); err != nil {
return diag.FromErr(err)
}
wg, err := utilityK8sWgCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
wgId, _ := strconv.ParseUint(d.Id(), 10, 64)
if d.HasChange("num") {
if newNum := d.Get("num").(int); uint64(newNum) > wg.Num {
req := k8s.WorkerAddRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wgId,
Num: uint64(newNum) - wg.Num,
}
addReq := k8s.WorkerAddRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wg.ID,
_, err := c.CloudBroker().K8S().WorkerAdd(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
for i := int(wg.Num) - 1; i >= newNum; i-- {
req := k8s.DeleteWorkerFromGroupRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wgId,
WorkerID: wg.DetailedInfo[i].ID,
}
_, err := c.CloudBroker().K8S().DeleteWorkerFromGroup(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
}
delReq := k8s.DeleteWorkerFromGroupRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wg.ID,
}
if d.HasChange("cloud_init") {
req := k8s.UpdateWorkerNodesMetaDataRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: wgId,
UserData: d.Get("cloud_init").(string),
}
if newNum := uint64(d.Get("num").(int)); newNum > wg.Num {
addReq.Num = newNum - wg.Num
_, err := c.CloudBroker().K8S().WorkerAdd(ctx, addReq)
_, err := c.CloudBroker().K8S().UpdateWorkerNodesMetaData(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
for i := wg.Num - 1; i >= newNum; i-- {
delReq.WorkerID = wg.DetailedInfo[i].ID
_, err := c.CloudBroker().K8S().DeleteWorkerFromGroup(ctx, delReq)
if err != nil {
return diag.FromErr(err)
}
}
}
return nil
@ -146,55 +247,6 @@ func resourceK8sWgDelete(ctx context.Context, d *schema.ResourceData, m interfac
return nil
}
func resourceK8sWgSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"k8s_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
Description: "ID of k8s instance.",
},
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Name of the worker group.",
},
"num": {
Type: schema.TypeInt,
Optional: true,
Default: 1,
Description: "Number of worker nodes to create.",
},
"cpu": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Default: 1,
Description: "Worker node CPU count.",
},
"ram": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Default: 1024,
Description: "Worker node RAM in MB.",
},
"disk": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Default: 0,
Description: "Worker node boot disk size. If unspecified or 0, size is defined by OS image size.",
},
}
}
func ResourceK8sWg() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,

@ -1,5 +1,5 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -36,6 +36,7 @@ import (
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
@ -43,9 +44,13 @@ import (
func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8S, error) {
c := m.(*controller.ControllerCfg)
req := k8s.GetRequest{}
k8sId, _ := strconv.ParseUint(d.Id(), 10, 64)
req.K8SID = k8sId
if d.Id() != "" {
k8sId, _ := strconv.ParseUint(d.Id(), 10, 64)
req.K8SID = k8sId
} else {
req.K8SID = uint64(d.Get("k8s_id").(int))
}
k8s, err := c.CloudBroker().K8S().Get(ctx, req)
if err != nil {
@ -54,3 +59,17 @@ func utilityK8sCheckPresence(ctx context.Context, d *schema.ResourceData, m inte
return k8s, nil
}
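// utilityComputeCheckPresence fetches a single compute record by its ID via the CloudBroker API;
// it is used to collect detailed information about worker nodes.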
func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}, computeID uint64) (*compute.RecordCompute, error) {
c := m.(*controller.ControllerCfg)
req := compute.GetRequest{
ComputeID: computeID,
}
compute, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil {
return nil, err
}
return compute, nil
}

@ -1,5 +1,5 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -33,31 +33,136 @@ package k8s
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/k8s"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
)
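// utilityK8sWgCheckPresence locates a worker group either by a composite resource ID of the
// form "wgId#k8sId" or by the plain wg ID combined with the k8s_id attribute, and returns
// its record from the parent cluster.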
func utilityK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8SGroup, error) {
c := m.(*controller.ControllerCfg)
var wgId int
var k8sId int
var err error
if strings.Contains(d.Id(), "#") {
wgId, err = strconv.Atoi(strings.Split(d.Id(), "#")[0])
if err != nil {
return nil, err
}
k8sId, err = strconv.Atoi(strings.Split(d.Id(), "#")[1])
if err != nil {
return nil, err
}
} else {
wgId, err = strconv.Atoi(d.Id())
if err != nil {
return nil, err
}
k8sId = d.Get("k8s_id").(int)
}
req := k8s.GetRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
K8SID: uint64(k8sId),
}
k8sData, err := c.CloudBroker().K8S().Get(ctx, req)
cluster, err := c.CloudBroker().K8S().Get(ctx, req)
if err != nil {
return nil, err
}
k8sWgID, _ := strconv.ParseUint(d.Id(), 10, 64)
for _, wg := range k8sData.K8SGroups.Workers {
if wg.ID == k8sWgID {
for _, wg := range cluster.K8SGroups.Workers {
if wg.ID == uint64(wgId) {
return &wg, nil
}
}
return nil, nil
return nil, fmt.Errorf("not found wg with id: %v in k8s cluster: %v", wgId, cluster.ID)
}
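// utilityDataK8sWgCheckPresence loads the worker group identified by k8s_id/wg_id together
// with the detailed compute records of its worker nodes.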
func utilityDataK8sWgCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.RecordK8SGroup, []compute.RecordCompute, error) {
c := m.(*controller.ControllerCfg)
k8sId := uint64(d.Get("k8s_id").(int))
wgId := uint64(d.Get("wg_id").(int))
k8sGetReq := k8s.GetRequest{
K8SID: k8sId,
}
cluster, err := c.CloudBroker().K8S().Get(ctx, k8sGetReq)
if err != nil {
return nil, nil, err
}
curWg := k8s.RecordK8SGroup{}
for _, wg := range cluster.K8SGroups.Workers {
if wg.ID == wgId {
curWg = wg
break
}
}
if curWg.ID == 0 {
return nil, nil, fmt.Errorf("WG with id %v in k8s cluster %v not found", wgId, k8sId)
}
workersComputeList := make([]compute.RecordCompute, 0)
for _, info := range curWg.DetailedInfo {
compute, err := utilityComputeCheckPresence(ctx, d, m, info.ID)
if err != nil {
return nil, nil, err
}
workersComputeList = append(workersComputeList, *compute)
}
return &curWg, workersComputeList, nil
}
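// utilityK8sWgListCheckPresence returns all worker groups of a cluster, resolving the cluster ID
// from the resource ID or the k8s_id attribute.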
func utilityK8sWgListCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (*k8s.ListK8SGroup, error) {
c := m.(*controller.ControllerCfg)
req := k8s.GetRequest{}
if d.Id() != "" {
k8sId, _ := strconv.ParseUint(d.Id(), 10, 64)
req.K8SID = k8sId
} else {
req.K8SID = uint64(d.Get("k8s_id").(int))
}
cluster, err := c.CloudBroker().K8S().Get(ctx, req)
if err != nil {
return nil, err
}
return &cluster.K8SGroups.Workers, nil
}
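// utilityK8sWgCloudInitCheckPresence fetches the cloud-init (user data) metadata configured
// for the worker group nodes.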
func utilityK8sWgCloudInitCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (string, error) {
c := m.(*controller.ControllerCfg)
req := k8s.GetWorkerNodesMetaDataRequest{
K8SID: uint64(d.Get("k8s_id").(int)),
WorkersGroupID: uint64(d.Get("wg_id").(int)),
}
cloudInit, err := c.CloudBroker().K8S().GetWorkerNodesMetaData(ctx, req)
if err != nil {
return "", err
}
return cloudInit, nil
}
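// cloudInitDiffSupperss suppresses a diff on the cloud_init attribute unless a new,
// non-empty value differs from the stored one.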
func cloudInitDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool {
if newVal != "" && newVal != oldVal {
log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
return false
}
log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=TRUE", key, oldVal, newVal)
return true // suppress difference
}

@ -1,5 +1,5 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -33,12 +33,8 @@ package kvmvm
import (
"context"
"fmt"
// "net/url"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@ -46,121 +42,6 @@ import (
// "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
// Parse list of all disks from API compute/get into a list of "extra disks" attached to this compute
// Extra disks are all compute disks but a boot disk.
func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} {
// this return value will be used to d.Set("extra_disks",) item of dataSourceCompute schema,
// which is a simple list of integer disk IDs excluding boot disk ID
length := len(disks)
log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", length)
if length == 0 || (length == 1 && disks[0].Type == "B") {
// the disk list is empty (which is kind of strange - diskless compute?), or
// there is only one disk in the list and it is a boot disk;
// as we skip boot disks, the result will be of 0 length anyway
return make([]interface{}, 0)
}
result := make([]interface{}, length-1)
idx := 0
for _, value := range disks {
if value.Type == "B" {
// skip boot disk when iterating over the list of disks
continue
}
result[idx] = value.ID
idx++
}
return result
}
// Parse the list of interfaces from compute/get response into a list of networks
// attached to this compute
func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} {
// return value will be used to d.Set("network") item of dataSourceCompute schema
length := len(ifaces)
log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
result := []interface{}{}
for _, value := range ifaces {
elem := make(map[string]interface{})
// Keys in this map should correspond to the Schema definition
// as returned by networkSubresourceSchemaMake()
elem["net_id"] = value.NetID
elem["net_type"] = value.NetType
elem["ip_address"] = value.IPAddress
elem["mac"] = value.MAC
// log.Debugf(" element %d: net_id=%d, net_type=%s", i, value.NetID, value.NetType)
result = append(result, elem)
}
return result
}
func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error {
// This function expects that compFacts string contains response from API compute/get,
// i.e. detailed information about compute instance.
//
// NOTE: this function modifies ResourceData argument - as such it should never be called
// from resourceComputeExists(...) method
log.Debugf("flattenCompute: ID %d, RG ID %d", compFacts.ID, compFacts.RGID)
d.SetId(fmt.Sprintf("%d", compFacts.ID))
d.Set("name", compFacts.Name)
d.Set("rg_id", compFacts.RGID)
d.Set("rg_name", compFacts.RGName)
d.Set("account_id", compFacts.AccountID)
d.Set("account_name", compFacts.AccountName)
d.Set("driver", compFacts.Driver)
d.Set("cpu", compFacts.CPUs)
d.Set("ram", compFacts.RAM)
d.Set("image_id", compFacts.ImageID)
d.Set("description", compFacts.Description)
d.Set("cloud_init", "applied")
if compFacts.TechStatus == "STARTED" {
d.Set("started", true)
} else {
d.Set("started", false)
}
bootDisk := findBootDisk(compFacts.Disks)
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations
d.Set("sep_id", bootDisk.SEPID)
d.Set("pool", bootDisk.Pool)
if len(compFacts.Disks) > 0 {
log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(compFacts.Disks))
if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(compFacts.Disks)); err != nil {
return err
}
}
if len(compFacts.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(compFacts.Interfaces))
if err := d.Set("network", parseComputeInterfacesToNetworks(compFacts.Interfaces)); err != nil {
return err
}
}
if len(compFacts.OSUsers) > 0 {
log.Debugf("flattenCompute: calling parseOsUsers for %d logins", len(compFacts.OSUsers))
if err := d.Set("os_users", parseOsUsers(compFacts.OSUsers)); err != nil {
return err
}
}
return nil
}
func dataSourceComputeRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
compFacts, err := utilityComputeCheckPresence(ctx, d, m)
if compFacts == nil {
@ -186,143 +67,6 @@ func DataSourceCompute() *schema.Resource {
Default: &constants.Timeout60s,
},
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Optional: true,
Description: "Name of this compute instance. NOTE: this parameter is case sensitive.",
},
// TODO: consider removing compute_id from the schema, as it not practical to call this data provider if
// corresponding compute ID is already known
"compute_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the compute instance. If ID is specified, name and resource group ID are ignored.",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "ID of the resource group where this compute instance is located.",
},
"rg_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the resource group where this compute instance is located.",
},
"account_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the account this compute instance belongs to.",
},
"account_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the account this compute instance belongs to.",
},
"driver": {
Type: schema.TypeString,
Computed: true,
Description: "Hardware architecture of this compute instance.",
},
"cpu": {
Type: schema.TypeInt,
Computed: true,
Description: "Number of CPUs allocated for this compute instance.",
},
"ram": {
Type: schema.TypeInt,
Computed: true,
Description: "Amount of RAM in MB allocated for this compute instance.",
},
"image_id": {
Type: schema.TypeInt,
Computed: true,
Description: "ID of the OS image this compute instance is based on.",
},
"image_name": {
Type: schema.TypeString,
Computed: true,
Description: "Name of the OS image this compute instance is based on.",
},
"boot_disk_size": {
Type: schema.TypeInt,
Computed: true,
Description: "This compute instance boot disk size in GB.",
},
"boot_disk_id": {
Type: schema.TypeInt,
Computed: true,
Description: "This compute instance boot disk ID.",
},
"extra_disks": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeInt,
},
Description: "IDs of the extra disk(s) attached to this compute.",
},
/*
"disks": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: dataSourceDiskSchemaMake(), // ID, type, name, size, account ID, SEP ID, SEP type, pool, status, tech status, compute ID, image ID
},
Description: "Detailed specification for all disks attached to this compute instance (including bood disk).",
},
*/
// "network": {
// Type: schema.TypeSet,
// Optional: true,
// MaxItems: constants.MaxNetworksPerCompute,
// Elem: &schema.Resource{
// Schema: networkSubresourceSchemaMake(),
// },
// Description: "Network connection(s) for this compute.",
// },
"os_users": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: osUsersSubresourceSchemaMake(),
},
Description: "Guest OS users provisioned on this compute instance.",
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: "User-defined text description of this compute instance.",
},
"cloud_init": {
Type: schema.TypeString,
Computed: true,
Description: "Placeholder for cloud_init parameters.",
},
"started": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: "Is compute started.",
},
},
Schema: dataSourceComputeSchemaMake(),
}
}

@ -2,11 +2,14 @@ package kvmvm
import (
"encoding/json"
"fmt"
"sort"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) error {
@ -14,18 +17,35 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
customFields, _ := json.Marshal(computeRec.CustomFields)
devices, _ := json.Marshal(computeRec.Devices)
userData, _ := json.Marshal(computeRec.Userdata)
bootDisk := findBootDisk(computeRec.Disks)
//extra fields setting
if len(computeRec.Disks) > 0 {
log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(computeRec.Disks))
if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(computeRec.Disks)); err != nil {
return err
}
}
if len(computeRec.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(computeRec.Interfaces))
if err := d.Set("network", parseComputeInterfacesToNetworks(computeRec.Interfaces)); err != nil {
return err
}
}
d.Set("account_id", computeRec.AccountID)
d.Set("account_name", computeRec.AccountName)
d.Set("acl", flattenListACLInterface(computeRec.ACL))
d.Set("affinity_label", computeRec.AffinityLabel)
d.Set("affinity_weight", computeRec.AffinityWeight)
d.Set("affinity_rules", flattenAffinityRules(computeRec.AffinityRules))
d.Set("anti_affinity_rules", flattenAffinityRules(computeRec.AntiAffinityRules))
d.Set("arch", computeRec.Arch)
d.Set("boot_order", computeRec.BootOrder)
d.Set("boot_disk_id", bootDisk.ID)
d.Set("boot_disk_size", computeRec.BootDiskSize)
d.Set("cd_image_id", computeRec.CdImageId)
d.Set("clone_reference", computeRec.CloneReference)
d.Set("clones", computeRec.Clones)
d.Set("computeci_id", computeRec.ComputeCIID)
@ -36,13 +56,11 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
d.Set("deleted_time", computeRec.DeletedTime)
d.Set("description", computeRec.Description)
d.Set("devices", string(devices))
d.Set("disks",
flattenComputeDisks(
computeRec.Disks,
d.Get("extra_disks").(*schema.Set).List(),
bootDisk.ID,
),
)
err := d.Set("disks", flattenComputeDisks(computeRec.Disks, d.Get("extra_disks").(*schema.Set).List(), bootDisk.ID))
if err != nil {
return err
}
d.Set("driver", computeRec.Driver)
d.Set("gid", computeRec.GID)
d.Set("guid", computeRec.GUID)
d.Set("compute_id", computeRec.ID)
@ -53,6 +71,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
d.Set("manager_type", computeRec.ManagerType)
d.Set("migrationjob", computeRec.MigrationJob)
d.Set("milestones", computeRec.Milestones)
d.Set("need_reboot", computeRec.NeedReboot)
d.Set("os_users", flattenOSUsers(computeRec.OSUsers))
d.Set("pinned", computeRec.Pinned)
d.Set("reference_id", computeRec.ReferenceID)
@ -69,6 +88,7 @@ func flattenCompute(d *schema.ResourceData, computeRec *compute.RecordCompute) e
d.Set("tech_status", computeRec.TechStatus)
d.Set("updated_by", computeRec.UpdatedBy)
d.Set("updated_time", computeRec.UpdatedTime)
d.Set("user_data", string(userData))
d.Set("user_managed", computeRec.UserManaged)
d.Set("vgpus", computeRec.VGPUs)
d.Set("virtual_image_id", computeRec.VirtualImageID)
@ -175,6 +195,7 @@ func flattenComputeDisks(disksList compute.ListDisks, extraDisks []interface{},
"disk_id": disk.ID,
"shareable": disk.Shareable,
"size_used": disk.SizeUsed,
"size_max": disk.SizeMax,
}
res = append(res, temp)
}
@ -193,15 +214,6 @@ func findInExtraDisks(diskId uint, extraDisks []interface{}) bool {
return false
}
func findBootDisk(disks compute.ListDisks) *compute.ItemDisk {
for _, disk := range disks {
if disk.Type == "B" {
return &disk
}
}
return nil
}
func flattenAffinityRules(rules compute.ListRules) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(rules))
@ -217,3 +229,541 @@ func flattenAffinityRules(rules compute.ListRules) []map[string]interface{} {
return res
}
func flattenComputeList(computes *compute.ListComputes) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computes.Data))
for _, computeItem := range computes.Data {
customFields, _ := json.Marshal(computeItem.CustomFields)
devices, _ := json.Marshal(computeItem.Devices)
userData, _ := json.Marshal(computeItem.Userdata)
temp := map[string]interface{}{
"acl": flattenListACLInterface(computeItem.ACL),
"account_id": computeItem.AccountID,
"account_name": computeItem.AccountName,
"affinity_label": computeItem.AffinityLabel,
"affinity_rules": flattenListRules(computeItem.AffinityRules),
"affinity_weight": computeItem.AffinityWeight,
"anti_affinity_rules": flattenListRules(computeItem.AntiAffinityRules),
"arch": computeItem.Arch,
"cd_image_id": computeItem.CdImageId,
"boot_order": computeItem.BootOrder,
"bootdisk_size": computeItem.BootDiskSize,
"clone_reference": computeItem.CloneReference,
"clones": computeItem.Clones,
"computeci_id": computeItem.ComputeCIID,
"cpus": computeItem.CPUs,
"created_by": computeItem.CreatedBy,
"created_time": computeItem.CreatedTime,
"custom_fields": string(customFields),
"deleted_by": computeItem.DeletedBy,
"deleted_time": computeItem.DeletedTime,
"desc": computeItem.Description,
"devices": string(devices),
"disks": flattenDisks(computeItem.Disks),
"driver": computeItem.Driver,
"gid": computeItem.GID,
"guid": computeItem.GUID,
"compute_id": computeItem.ID,
"image_id": computeItem.ImageID,
"interfaces": flattenInterfaces(computeItem.Interfaces),
"lock_status": computeItem.LockStatus,
"manager_id": computeItem.ManagerID,
"manager_type": computeItem.ManagerType,
"migrationjob": computeItem.MigrationJob,
"milestones": computeItem.Milestones,
"name": computeItem.Name,
"need_reboot": computeItem.NeedReboot,
"os_users": flattenOSUsers(computeItem.OSUsers),
"pinned": computeItem.Pinned,
"ram": computeItem.RAM,
"reference_id": computeItem.ReferenceID,
"registered": computeItem.Registered,
"res_name": computeItem.ResName,
"rg_id": computeItem.RGID,
"rg_name": computeItem.RGName,
"snap_sets": flattenSnapSets(computeItem.SnapSets),
"stack_id": computeItem.StackID,
"stateless_sep_id": computeItem.StatelessSEPID,
"stateless_sep_type": computeItem.StatelessSEPType,
"status": computeItem.Status,
"tags": flattenTags(computeItem.Tags),
"tech_status": computeItem.TechStatus,
"total_disk_size": computeItem.TotalDiskSize,
"updated_by": computeItem.UpdatedBy,
"updated_time": computeItem.UpdatedTime,
"user_data": string(userData),
"user_managed": computeItem.UserManaged,
"vgpus": computeItem.VGPUs,
"vins_connected": computeItem.VINSConnected,
"virtual_image_id": computeItem.VirtualImageID,
}
res = append(res, temp)
}
return res
}
func flattenListACLInterface(listAcl []interface{}) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(listAcl))
for _, aclInterface := range listAcl {
acl := aclInterface.(map[string]interface{})
temp := map[string]interface{}{
"explicit": acl["explicit"],
"guid": acl["guid"],
"right": acl["right"],
"status": acl["status"],
"type": acl["type"],
"user_group_id": acl["user_group_id"],
}
res = append(res, temp)
}
return res
}
func flattenListACL(listAcl compute.ListACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(listAcl))
for _, acl := range listAcl {
temp := map[string]interface{}{
"explicit": acl.Explicit,
"guid": acl.GUID,
"right": acl.Right,
"status": acl.Status,
"type": acl.Type,
"user_group_id": acl.UserGroupID,
}
res = append(res, temp)
}
return res
}
func flattenListComputeACL(listAcl []compute.ItemComputeACL) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(listAcl))
for _, acl := range listAcl {
temp := map[string]interface{}{
"explicit": acl.Explicit,
"guid": acl.GUID,
"right": acl.Right,
"status": acl.Status,
"type": acl.Type,
"user_group_id": acl.UserGroupID,
}
res = append(res, temp)
}
return res
}
func flattenListRules(listRules compute.ListRules) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(listRules))
for _, rule := range listRules {
temp := map[string]interface{}{
"guid": rule.GUID,
"key": rule.Key,
"mode": rule.Mode,
"policy": rule.Policy,
"topology": rule.Topology,
"value": rule.Value,
}
res = append(res, temp)
}
return res
}
func flattenDisks(disks []compute.InfoDisk) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
for _, disk := range disks {
temp := map[string]interface{}{
"disk_id": disk.ID,
"pci_slot": disk.PCISlot,
}
res = append(res, temp)
}
return res
}
func flattenComputeAudits(computeAudits compute.ListDetailedAudits) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computeAudits))
for _, computeAudit := range computeAudits {
temp := map[string]interface{}{
"call": computeAudit.Call,
"responsetime": computeAudit.ResponseTime,
"statuscode": computeAudit.StatusCode,
"timestamp": computeAudit.Timestamp,
"user": computeAudit.User,
}
res = append(res, temp)
}
return res
}
func flattenComputeGetAudits(computeAudits compute.ListAudits) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computeAudits))
for _, computeAudit := range computeAudits {
temp := map[string]interface{}{
"epoch": computeAudit.Epoch,
"message": computeAudit.Message,
}
res = append(res, temp)
}
return res
}
func flattenPfwList(computePfws *compute.ListPFW) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computePfws.Data))
for _, computePfw := range computePfws.Data {
temp := map[string]interface{}{
"pfw_id": computePfw.ID,
"local_ip": computePfw.LocalIP,
"local_port": computePfw.LocalPort,
"protocol": computePfw.Protocol,
"public_port_end": computePfw.PublicPortEnd,
"public_port_start": computePfw.PublicPortStart,
"vm_id": computePfw.VMID,
}
res = append(res, temp)
}
return res
}
func flattenUserList(d *schema.ResourceData, userList *compute.ListUsers) {
d.Set("account_acl", flattenListACL(userList.Data.AccountACL))
d.Set("compute_acl", flattenListComputeACL(userList.Data.ComputeACL))
d.Set("rg_acl", flattenListACL(userList.Data.RGACL))
}
func flattenSnapshotList(computeSnapshots *compute.ListSnapShot) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computeSnapshots.Data))
for _, snp := range computeSnapshots.Data {
temp := map[string]interface{}{
"disks": snp.Disks,
"guid": snp.GUID,
"label": snp.Label,
"timestamp": snp.Timestamp,
}
res = append(res, temp)
}
return res
}
func flattenAffinityRelations(d *schema.ResourceData, ar *compute.RecordAffinityRelations) {
d.Set("other_node", flattenNodes(ar.OtherNode))
d.Set("other_node_indirect", flattenNodes(ar.OtherNodeIndirect))
d.Set("other_node_indirect_soft", flattenNodes(ar.OtherNodeIndirectSoft))
d.Set("other_node_soft", flattenNodes(ar.OtherNodeSoft))
d.Set("same_node", flattenNodes(ar.SameNode))
d.Set("same_node_soft", flattenNodes(ar.SameNodeSoft))
}
func flattenSnapshotUsage(computeSnapshotUsages compute.ListSnapshotUsage) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(computeSnapshotUsages))
for _, computeUsage := range computeSnapshotUsages {
temp := map[string]interface{}{
"count": computeUsage.Count,
"stored": computeUsage.Stored,
"label": computeUsage.Label,
"timestamp": computeUsage.Timestamp,
}
res = append(res, temp)
}
return res
}
func flattenPCIDevice(deviceList []compute.ItemPCIDevice) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(deviceList))
for _, dev := range deviceList {
temp := map[string]interface{}{
"ckey": dev.CKey,
"meta": flattens.FlattenMeta(dev.Meta),
"compute_id": dev.ComputeID,
"description": dev.Description,
"guid": dev.GUID,
"hwpath": dev.HwPath,
"device_id": dev.ID,
"name": dev.Name,
"rg_id": dev.RGID,
"stack_id": dev.StackID,
"status": dev.Status,
"system_name": dev.SystemName,
}
res = append(res, temp)
}
return res
}
func flattenVGPU(m []interface{}) []string {
var output []string
for _, item := range m {
switch d := item.(type) {
case string:
output = append(output, d)
case int:
output = append(output, strconv.Itoa(d))
case int64:
output = append(output, strconv.FormatInt(d, 10))
case float64:
output = append(output, strconv.FormatInt(int64(d), 10))
default:
output = append(output, "")
}
}
return output
}
func flattenNodes(m []interface{}) []string {
var output []string
for _, item := range m {
switch d := item.(type) {
case string:
output = append(output, d)
case int:
output = append(output, strconv.Itoa(d))
case int64:
output = append(output, strconv.FormatInt(d, 10))
case float64:
output = append(output, strconv.FormatInt(int64(d), 10))
default:
output = append(output, "")
}
}
return output
}
func flattenDataCompute(d *schema.ResourceData, compFacts *compute.RecordCompute) error {
// This function expects compFacts to contain the response from the compute/get API,
// i.e. detailed information about the compute instance.
//
// NOTE: this function modifies the ResourceData argument, so it must never be called
// from the resourceComputeExists(...) method.
log.Debugf("flattenCompute: ID %d, RG ID %d", compFacts.ID, compFacts.RGID)
customFields, _ := json.Marshal(compFacts.CustomFields)
devices, _ := json.Marshal(compFacts.Devices)
userData, _ := json.Marshal(compFacts.Userdata)
// general fields setting
d.SetId(fmt.Sprintf("%d", compFacts.ID))
d.Set("account_id", compFacts.AccountID)
d.Set("account_name", compFacts.AccountName)
d.Set("acl", flattenListACLInterface(compFacts.ACL))
d.Set("affinity_label", compFacts.AffinityLabel)
d.Set("affinity_rules", flattenAffinityRules(compFacts.AffinityRules))
d.Set("affinity_weight", compFacts.AffinityWeight)
d.Set("anti_affinity_rules", flattenAffinityRules(compFacts.AntiAffinityRules))
d.Set("arch", compFacts.Arch)
d.Set("boot_order", compFacts.BootOrder)
d.Set("cd_image_id", compFacts.CdImageId)
d.Set("clone_reference", compFacts.CloneReference)
d.Set("clones", compFacts.Clones)
d.Set("computeci_id", compFacts.ComputeCIID)
d.Set("cpus", compFacts.CPUs)
d.Set("created_by", compFacts.CreatedBy)
d.Set("created_time", compFacts.CreatedTime)
d.Set("custom_fields", string(customFields))
d.Set("deleted_by", compFacts.DeletedBy)
d.Set("deleted_time", compFacts.DeletedTime)
d.Set("desc", compFacts.Description)
d.Set("devices", string(devices))
d.Set("disks", flattenDisk(compFacts.Disks))
d.Set("driver", compFacts.Driver)
d.Set("gid", compFacts.GID)
d.Set("guid", compFacts.GUID)
d.Set("image_id", compFacts.ImageID)
d.Set("interfaces", flattenInterfaces(compFacts.Interfaces))
d.Set("lock_status", compFacts.LockStatus)
d.Set("manager_id", compFacts.ManagerID)
d.Set("manager_type", compFacts.ManagerType)
d.Set("migrationjob", compFacts.MigrationJob)
d.Set("milestones", compFacts.Milestones)
d.Set("name", compFacts.Name)
d.Set("need_reboot", compFacts.NeedReboot)
d.Set("os_users", flattenOSUsers(compFacts.OSUsers))
d.Set("pinned", compFacts.Pinned)
d.Set("ram", compFacts.RAM)
d.Set("reference_id", compFacts.ReferenceID)
d.Set("registered", compFacts.Registered)
d.Set("res_name", compFacts.ResName)
d.Set("rg_id", compFacts.RGID)
d.Set("rg_name", compFacts.RGName)
d.Set("snap_sets", flattenSnapSets(compFacts.SnapSets))
d.Set("stack_id", compFacts.StackID)
d.Set("stack_name", compFacts.StackName)
d.Set("stateless_sep_id", compFacts.StatelessSEPID)
d.Set("stateless_sep_type", compFacts.StatelessSEPType)
d.Set("status", compFacts.Status)
d.Set("tags", flattenTags(compFacts.Tags))
d.Set("tech_status", compFacts.TechStatus)
d.Set("updated_by", compFacts.UpdatedBy)
d.Set("updated_time", compFacts.UpdatedTime)
d.Set("user_data", string(userData))
d.Set("user_managed", compFacts.UserManaged)
d.Set("vgpus", compFacts.VGPUs)
d.Set("virtual_image_id", compFacts.VirtualImageID)
//extra fields setting
bootDisk := findBootDisk(compFacts.Disks)
if bootDisk != nil {
d.Set("boot_disk_size", bootDisk.SizeMax)
d.Set("boot_disk_id", bootDisk.ID) // we may need boot disk ID in resize operations
d.Set("sep_id", bootDisk.SEPID)
d.Set("pool", bootDisk.Pool)
}
if len(compFacts.Disks) > 0 {
log.Debugf("flattenCompute: calling parseComputeDisksToExtraDisks for %d disks", len(compFacts.Disks))
if err := d.Set("extra_disks", parseComputeDisksToExtraDisks(compFacts.Disks)); err != nil {
return err
}
}
if len(compFacts.Interfaces) > 0 {
log.Debugf("flattenCompute: calling parseComputeInterfacesToNetworks for %d interfaces", len(compFacts.Interfaces))
if err := d.Set("network", parseComputeInterfacesToNetworks(compFacts.Interfaces)); err != nil {
return err
}
}
return nil
}
// Parse the list of all disks from the compute/get API response into the list of "extra disks"
// attached to this compute. Extra disks are all compute disks except the boot disk.
// See the illustrative sketch after this function.
func parseComputeDisksToExtraDisks(disks compute.ListDisks) []interface{} {
// the return value is used for d.Set("extra_disks", ...) in the dataSourceCompute schema,
// i.e. a plain list of integer disk IDs excluding the boot disk ID
length := len(disks)
log.Debugf("parseComputeDisksToExtraDisks: called for %d disks", length)
if length == 0 || (length == 1 && disks[0].Type == "B") {
// the disk list is empty (unexpected for a compute, but possible), or
// the only disk in the list is the boot disk;
// since boot disks are skipped, the result would be empty either way
return make([]interface{}, 0)
}
result := make([]interface{}, length-1)
idx := 0
for _, value := range disks {
if value.Type == "B" {
// skip boot disk when iterating over the list of disks
continue
}
result[idx] = value.ID
idx++
}
return result
}
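// Illustrative sketch (added for this document, not part of the provider code):
// the expected input/output shape of parseComputeDisksToExtraDisks. The helper name
// and all disk IDs below are hypothetical; only the ID and Type fields matter here.
func exampleExtraDisks() []interface{} {
	disks := compute.ListDisks{
		{ID: 100, Type: "B"}, // boot disk - excluded from "extra_disks"
		{ID: 101, Type: "D"}, // data disk - kept
		{ID: 102, Type: "D"}, // data disk - kept
	}
	// the result holds the two data-disk IDs (101 and 102), never the boot disk ID
	return parseComputeDisksToExtraDisks(disks)
}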
// Parse the list of interfaces from compute/get response into a list of networks
// attached to this compute
func parseComputeInterfacesToNetworks(ifaces compute.ListInterfaces) []interface{} {
// the return value is used for d.Set("network", ...) in the dataSourceCompute schema;
// see the illustrative sketch after this function
length := len(ifaces)
log.Debugf("parseComputeInterfacesToNetworks: called for %d ifaces", length)
result := []interface{}{}
for _, value := range ifaces {
elem := make(map[string]interface{})
// Keys in this map should correspond to the Schema definition for "network"
elem["net_id"] = value.NetID
elem["net_type"] = value.NetType
elem["ip_address"] = value.IPAddress
elem["mac"] = value.MAC
result = append(result, elem)
}
return result
}
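// Illustrative sketch (added for this document, not part of the provider code): the
// shape of a single element produced by parseComputeInterfacesToNetworks and stored
// under the "network" key. The helper name and all field values are hypothetical;
// the keys mirror the "network" schema definition.
func exampleNetworkElement() map[string]interface{} {
	return map[string]interface{}{
		"net_id":     1234,                // ID of the network the interface is plugged into
		"net_type":   "VINS",              // network type string as reported by compute/get
		"ip_address": "192.168.1.10",      // address assigned to the interface
		"mac":        "52:54:00:aa:bb:cc", // MAC address of the interface
	}
}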
func flattenDisk(diskList compute.ListDisks) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(diskList))
for _, disk := range diskList {
temp := map[string]interface{}{
"ckey": disk.CKey,
"meta": flattens.FlattenMeta(disk.Meta),
"account_id": disk.AccountID,
"boot_partition": disk.BootPartition,
"created_time": disk.CreatedTime,
"deleted_time": disk.DeletedTime,
"desc": disk.Description,
"destruction_time": disk.DestructionTime,
"disk_path": disk.DiskPath,
"gid": disk.GID,
"guid": disk.GUID,
"disk_id": disk.ID,
"image_id": disk.ImageID,
"images": disk.Images,
"iotune": flattenIOTune(disk.IOTune),
"iqn": disk.IQN,
"login": disk.Login,
"milestones": disk.Milestones,
"name": disk.Name,
"order": disk.Order,
"params": disk.Params,
"parent_id": disk.ParentID,
"passwd": disk.Password,
"pci_slot": disk.PCISlot,
"pool": disk.Pool,
"purge_attempts": disk.PurgeAttempts,
"present_to": disk.PresentTo,
"purge_time": disk.PurgeTime,
"reality_device_number": disk.RealityDeviceNumber,
"reference_id": disk.ReferenceID,
"res_id": disk.ResID,
"res_name": disk.ResName,
"role": disk.Role,
"sep_id": disk.SEPID,
"shareable": disk.Shareable,
"size_max": disk.SizeMax,
"size_used": disk.SizeUsed,
"snapshots": flattendDiskSnapshotList(disk.Snapshots),
"status": disk.Status,
"tech_status": disk.TechStatus,
"type": disk.Type,
"vmid": disk.VMID,
}
res = append(res, temp)
}
return res
}
func flattenIOTune(iot compute.IOTune) []map[string]interface{} {
res := make([]map[string]interface{}, 0)
temp := map[string]interface{}{
"read_bytes_sec": iot.ReadBytesSec,
"read_bytes_sec_max": iot.ReadBytesSecMax,
"read_iops_sec": iot.ReadIOPSSec,
"read_iops_sec_max": iot.ReadIOPSSecMax,
"size_iops_sec": iot.SizeIOPSSec,
"total_bytes_sec": iot.TotalBytesSec,
"total_bytes_sec_max": iot.TotalBytesSecMax,
"total_iops_sec": iot.TotalIOPSSec,
"total_iops_sec_max": iot.TotalIOPSSecMax,
"write_bytes_sec": iot.WriteBytesSec,
"write_bytes_sec_max": iot.WriteBytesSecMax,
"write_iops_sec": iot.WriteIOPSSec,
"write_iops_sec_max": iot.WriteIOPSSecMax,
}
res = append(res, temp)
return res
}
func flattendDiskSnapshotList(sl compute.ListDetailedSnapshots) []interface{} {
res := make([]interface{}, 0)
for _, snapshot := range sl {
temp := map[string]interface{}{
"guid": snapshot.GUID,
"label": snapshot.Label,
"res_id": snapshot.ResID,
"snap_set_guid": snapshot.SnapSetGUID,
"snap_set_time": snapshot.SnapSetTime,
"timestamp": snapshot.TimeStamp,
}
res = append(res, temp)
}
return res
}

File diff suppressed because it is too large

@ -34,6 +34,7 @@ package kvmvm
import (
"context"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
@ -42,6 +43,260 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func utilityComputeEnabled(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
enabled := d.Get("enabled").(bool)
if enabled {
req := compute.EnableRequest{
ComputeID: computeId,
}
if _, err := c.CloudBroker().Compute().Enable(ctx, req); err != nil {
return err
}
} else {
req := compute.DisableRequest{
ComputeID: computeId,
}
if _, err := c.CloudBroker().Compute().Disable(ctx, req); err != nil {
return err
}
}
log.Debugf("resourceComputeUpdate: enable=%v Compute ID %s after completing its resource configuration", enabled, d.Id())
return nil
}
func utilityComputeStarted(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if d.Get("started").(bool) {
req := compute.StartRequest{
ComputeID: computeId,
}
if altBootId, ok := d.Get("alt_boot_id").(int); ok {
req.AltBootID = uint64(altBootId)
}
if stackId, ok := d.Get("stack_id").(int); ok {
req.StackID = uint64(stackId)
}
if _, err := c.CloudBroker().Compute().Start(ctx, req); err != nil {
return err
}
} else {
req := compute.StopRequest{
ComputeID: computeId,
}
if force, ok := d.Get("force_stop").(bool); ok {
req.Force = force
}
if _, err := c.CloudBroker().Compute().Stop(ctx, req); err != nil {
return err
}
}
return nil
}
func utilityComputeResize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
resizeReq := compute.ResizeRequest{
ComputeID: computeId,
Force: true,
}
doUpdate := false
oldCpu, newCpu := d.GetChange("cpu")
if oldCpu.(int) != newCpu.(int) {
resizeReq.CPU = uint64(newCpu.(int))
doUpdate = true
} else {
resizeReq.CPU = 0
}
oldRam, newRam := d.GetChange("ram")
if oldRam.(int) != newRam.(int) {
resizeReq.RAM = uint64(newRam.(int))
doUpdate = true
} else {
resizeReq.RAM = 0
}
if doUpdate {
log.Debugf("resourceComputeUpdate: changing CPU %d -> %d and/or RAM %d -> %d",
oldCpu.(int), newCpu.(int),
oldRam.(int), newRam.(int))
_, err := c.CloudBroker().Compute().Resize(ctx, resizeReq)
if err != nil {
return err
}
}
return nil
}
func utilityComputeBootDiskResize(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldSize, newSize := d.GetChange("boot_disk_size")
if oldSize.(int) < newSize.(int) {
req := compute.DiskResizeRequest{ComputeID: computeId, Size: uint64(newSize.(int))}
if diskId, ok := d.GetOk("boot_disk_id"); ok {
req.DiskID = uint64(diskId.(int))
} else {
bootDisk, err := utilityComputeBootDiskCheckPresence(ctx, d, m)
if err != nil {
return err
}
req.DiskID = bootDisk.ID
}
log.Debugf("resourceComputeUpdate: compute ID %s, boot disk ID %d resize %d -> %d",
d.Id(), d.Get("boot_disk_id").(int), oldSize.(int), newSize.(int))
_, err := c.CloudBroker().Compute().DiskResize(ctx, req)
if err != nil {
return err
}
} else if oldSize.(int) > newSize.(int) {
log.Warnf("resourceComputeUpdate: compute ID %s - shrinking boot disk is not allowed", d.Id())
}
return nil
}
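// Minimal sketch (added for this document): the grow-only rule enforced above,
// expressed as a hypothetical helper. DiskResize is requested only when the new
// boot disk size is strictly larger than the old one; shrinking is merely logged.
func shouldResizeBootDisk(oldSize, newSize int) bool {
	// e.g. shouldResizeBootDisk(10, 20) == true, shouldResizeBootDisk(20, 10) == false
	return newSize > oldSize
}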
func utilityComputeUpdateDisks(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
deletedDisks := make([]interface{}, 0)
addedDisks := make([]interface{}, 0)
updatedDisks := make([]interface{}, 0)
oldDisks, newDisks := d.GetChange("disks")
oldConv := oldDisks.([]interface{})
newConv := newDisks.([]interface{})
for _, el := range oldConv {
if !isContainsDisk(newConv, el) {
deletedDisks = append(deletedDisks, el)
}
}
for _, el := range newConv {
if !isContainsDisk(oldConv, el) {
addedDisks = append(addedDisks, el)
} else {
if isChangeDisk(oldConv, el) {
updatedDisks = append(updatedDisks, el)
}
}
}
if len(deletedDisks) > 0 {
stopReq := compute.StopRequest{
ComputeID: computeId,
Force: false,
}
_, err := c.CloudBroker().Compute().Stop(ctx, stopReq)
if err != nil {
return err
}
for _, disk := range deletedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
}
req := compute.DiskDelRequest{
ComputeID: computeId,
DiskID: uint64(diskConv["disk_id"].(int)),
Permanently: diskConv["permanently"].(bool),
}
_, err := c.CloudBroker().Compute().DiskDel(ctx, req)
if err != nil {
return err
}
}
req := compute.StartRequest{
ComputeID: computeId,
AltBootID: 0,
}
_, err = c.CloudBroker().Compute().Start(ctx, req)
if err != nil {
return err
}
}
if len(addedDisks) > 0 {
for _, disk := range addedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
}
req := compute.DiskAddRequest{
ComputeID: computeId,
DiskName: diskConv["disk_name"].(string),
Size: uint64(diskConv["size"].(int)),
}
if diskConv["sep_id"].(int) != 0 {
req.SepID = uint64(diskConv["sep_id"].(int))
}
if diskConv["disk_type"].(string) != "" {
req.DiskType = diskConv["disk_type"].(string)
}
if diskConv["pool"].(string) != "" {
req.Pool = diskConv["pool"].(string)
}
if diskConv["desc"].(string) != "" {
req.Description = diskConv["desc"].(string)
}
if diskConv["image_id"].(int) != 0 {
req.ImageID = uint64(diskConv["image_id"].(int))
}
_, err := c.CloudBroker().Compute().DiskAdd(ctx, req)
if err != nil {
return err
}
}
}
if len(updatedDisks) > 0 {
for _, disk := range updatedDisks {
diskConv := disk.(map[string]interface{})
if diskConv["disk_type"].(string) == "B" {
continue
}
req := compute.DiskResizeRequest{
ComputeID: computeId,
DiskID: uint64(diskConv["disk_id"].(int)),
Size: uint64(diskConv["size"].(int)),
}
_, err := c.CloudBroker().Compute().DiskResize(ctx, req)
if err != nil {
return err
}
}
}
return nil
}
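// Illustrative sketch (added for this document, not part of the provider code): the
// classification idea behind utilityComputeUpdateDisks, shown with a hypothetical
// helper that compares disk blocks by their "disk_id" key. The real code relies on
// isContainsDisk/isChangeDisk, whose exact comparison rules are not reproduced here.
func classifyDiskChanges(oldDisks, newDisks []map[string]interface{}) (deleted, added []map[string]interface{}) {
	hasID := func(list []map[string]interface{}, id int) bool {
		for _, disk := range list {
			if disk["disk_id"].(int) == id {
				return true
			}
		}
		return false
	}
	for _, disk := range oldDisks {
		if !hasID(newDisks, disk["disk_id"].(int)) {
			deleted = append(deleted, disk) // present before, absent now -> DiskDel
		}
	}
	for _, disk := range newDisks {
		if !hasID(oldDisks, disk["disk_id"].(int)) {
			added = append(added, disk) // absent before, present now -> DiskAdd
		}
	}
	return deleted, added
}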
func utilityComputeExtraDisksConfigure(ctx context.Context, d *schema.ResourceData, m interface{}, do_delta bool) error {
c := m.(*controller.ControllerCfg)
@ -154,6 +409,10 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
req.ComputeID = uint64(d.Get("compute_id").(int))
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
res, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil {
return nil, err
@ -162,6 +421,15 @@ func utilityComputeCheckPresence(ctx context.Context, d *schema.ResourceData, m
return res, nil
}
func findBootDisk(disks compute.ListDisks) *compute.ItemDisk {
for _, disk := range disks {
if disk.Type == "B" {
return &disk
}
}
return nil
}
func networkSubresIPAddreDiffSupperss(key, oldVal, newVal string, d *schema.ResourceData) bool {
if newVal != "" && newVal != oldVal {
log.Debugf("networkSubresIPAddreDiffSupperss: key=%s, oldVal=%q, newVal=%q -> suppress=FALSE", key, oldVal, newVal)
@ -288,6 +556,659 @@ func utilityComputeNetworksConfigure(ctx context.Context, d *schema.ResourceData
return nil
}
func utilityComputeUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
req := compute.UpdateRequest{
ComputeID: computeId,
}
if d.HasChange("name") {
req.Name = d.Get("name").(string)
}
if d.HasChange("desc") {
req.Description = d.Get("desc").(string)
}
if _, err := c.CloudBroker().Compute().Update(ctx, req); err != nil {
return err
}
return nil
}
func utilityComputeUpdateAffinityLabel(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
affinityLabel := d.Get("affinity_label").(string)
if affinityLabel == "" {
req := compute.AffinityLabelRemoveRequest{
ComputeIDs: []uint64{computeId},
}
_, err := c.CloudBroker().Compute().AffinityLabelRemove(ctx, req)
if err != nil {
return err
}
// the label has been removed; there is nothing left to set
return nil
}
req := compute.AffinityLabelSetRequest{
ComputeIDs: []uint64{computeId},
AffinityLabel: affinityLabel,
}
_, err := c.CloudBroker().Compute().AffinityLabelSet(ctx, req)
if err != nil {
return err
}
return nil
}
func utilityComputeUpdateAffinityRules(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
deletedAR := make([]interface{}, 0)
addedAR := make([]interface{}, 0)
oldAR, newAR := d.GetChange("affinity_rules")
oldConv := oldAR.([]interface{})
newConv := newAR.([]interface{})
if len(newConv) == 0 {
req := compute.AffinityRulesClearRequest{
ComputeIDs: []uint64{computeId},
}
_, err := c.CloudBroker().Compute().AffinityRulesClear(ctx, req)
if err != nil {
return err
}
} else {
for _, el := range oldConv {
if !isContainsAR(newConv, el) {
deletedAR = append(deletedAR, el)
}
}
for _, el := range newConv {
if !isContainsAR(oldConv, el) {
addedAR = append(addedAR, el)
}
}
if len(deletedAR) > 0 {
for _, ar := range deletedAR {
arConv := ar.(map[string]interface{})
req := compute.AffinityRuleRemoveRequest{
ComputeIDs: []uint64{computeId},
Topology: arConv["topology"].(string),
Policy: arConv["policy"].(string),
Mode: arConv["mode"].(string),
Key: arConv["key"].(string),
Value: arConv["value"].(string),
}
_, err := c.CloudBroker().Compute().AffinityRuleRemove(ctx, req)
if err != nil {
return err
}
}
}
if len(addedAR) > 0 {
for _, ar := range addedAR {
arConv := ar.(map[string]interface{})
req := compute.AffinityRuleAddRequest{
ComputeIDs: []uint64{computeId},
Topology: arConv["topology"].(string),
Policy: arConv["policy"].(string),
Mode: arConv["mode"].(string),
Key: arConv["key"].(string),
Value: arConv["value"].(string),
}
_, err := c.CloudBroker().Compute().AffinityRuleAdd(ctx, req)
if err != nil {
return err
}
}
}
}
return nil
}
func utilityComputeUpdateAntiAffinityRules(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
deletedAR := make([]interface{}, 0)
addedAR := make([]interface{}, 0)
oldAR, newAR := d.GetChange("anti_affinity_rules")
oldConv := oldAR.([]interface{})
newConv := newAR.([]interface{})
if len(newConv) == 0 {
req := compute.AntiAffinityRulesClearRequest{
ComputeIDs: []uint64{computeId},
}
_, err := c.CloudBroker().Compute().AntiAffinityRulesClear(ctx, req)
if err != nil {
return err
}
} else {
for _, el := range oldConv {
if !isContainsAR(newConv, el) {
deletedAR = append(deletedAR, el)
}
}
for _, el := range newConv {
if !isContainsAR(oldConv, el) {
addedAR = append(addedAR, el)
}
}
if len(deletedAR) > 0 {
for _, ar := range deletedAR {
arConv := ar.(map[string]interface{})
req := compute.AntiAffinityRuleRemoveRequest{
ComputeIDs: []uint64{computeId},
Topology: arConv["topology"].(string),
Policy: arConv["policy"].(string),
Mode: arConv["mode"].(string),
Key: arConv["key"].(string),
Value: arConv["value"].(string),
}
_, err := c.CloudBroker().Compute().AntiAffinityRuleRemove(ctx, req)
if err != nil {
return err
}
}
}
if len(addedAR) > 0 {
for _, ar := range addedAR {
arConv := ar.(map[string]interface{})
req := compute.AntiAffinityRuleAddRequest{
ComputeIDs: []uint64{computeId},
Topology: arConv["topology"].(string),
Policy: arConv["policy"].(string),
Mode: arConv["mode"].(string),
Key: arConv["key"].(string),
Value: arConv["value"].(string),
}
_, err := c.CloudBroker().Compute().AntiAffinityRuleAdd(ctx, req)
if err != nil {
return err
}
}
}
}
return nil
}
func utilityComputeUpdateTags(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldSet, newSet := d.GetChange("tags")
deletedTags := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
if len(deletedTags) > 0 {
for _, tagInterface := range deletedTags {
tagItem := tagInterface.(map[string]interface{})
req := compute.TagRemoveRequest{
ComputeIDs: []uint64{computeId},
Key: tagItem["key"].(string),
}
_, err := c.CloudBroker().Compute().TagRemove(ctx, req)
if err != nil {
return err
}
}
}
addedTags := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
if len(addedTags) > 0 {
for _, tagInterface := range addedTags {
tagItem := tagInterface.(map[string]interface{})
req := compute.TagAddRequest{
ComputeIDs: []uint64{computeId},
Key: tagItem["key"].(string),
Value: tagItem["value"].(string),
}
_, err := c.CloudBroker().Compute().TagAdd(ctx, req)
if err != nil {
return err
}
}
}
return nil
}
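// Minimal sketch (added for this document): how the two Set differences above split a
// "tags" change into removals and additions. For brevity the example uses plain strings
// hashed with schema.HashString, while the real "tags" set holds key/value blocks.
func exampleTagDiff() (removed, added []interface{}) {
	oldTags := schema.NewSet(schema.HashString, []interface{}{"env=test", "team=dev"})
	newTags := schema.NewSet(schema.HashString, []interface{}{"env=prod", "team=dev"})
	removed = oldTags.Difference(newTags).List() // ["env=test"] -> compute.TagRemoveRequest
	added = newTags.Difference(oldTags).List()   // ["env=prod"] -> compute.TagAddRequest
	return removed, added
}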
func utilityComputeUpdatePFW(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldSet, newSet := d.GetChange("port_forwarding")
deletedPfws := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
if len(deletedPfws) > 0 {
for _, pfwInterface := range deletedPfws {
pfwItem := pfwInterface.(map[string]interface{})
req := compute.PFWDelRequest{
ComputeID: computeId,
PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
LocalBasePort: uint64(pfwItem["local_port"].(int)),
Proto: pfwItem["proto"].(string),
RuleID: uint64(pfwItem["rule_id"].(int)),
}
if pfwItem["public_port_end"].(int) == -1 {
req.PublicPortEnd = req.PublicPortStart
} else {
req.PublicPortEnd = uint64(pfwItem["public_port_end"].(int))
}
if pfwItem["reason"].(string) != "" {
req.Reason = pfwItem["reason"].(string)
}
_, err := c.CloudBroker().Compute().PFWDel(ctx, req)
if err != nil {
return err
}
}
}
addedPfws := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
if len(addedPfws) > 0 {
for _, pfwInterface := range addedPfws {
pfwItem := pfwInterface.(map[string]interface{})
req := compute.PFWAddRequest{
ComputeID: computeId,
PublicPortStart: uint64(pfwItem["public_port_start"].(int)),
PublicPortEnd: int64(pfwItem["public_port_end"].(int)),
LocalBasePort: uint64(pfwItem["local_port"].(int)),
Proto: pfwItem["proto"].(string),
}
if pfwItem["reason"].(string) != "" {
req.Reason = pfwItem["reason"].(string)
}
pfwId, err := c.CloudBroker().Compute().PFWAdd(ctx, req)
if err != nil {
return err
}
d.Set("rule_id", pwfId)
}
}
return nil
}
func utilityComputeRestore(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := compute.RestoreRequest{ComputeID: computeId}
_, err := c.CloudBroker().Compute().Restore(ctx, restoreReq)
if err != nil {
return err
}
if _, ok := d.GetOk("enabled"); ok {
if err := utilityComputeEnabled(ctx, d, m); err != nil {
return err
}
}
if _, ok := d.GetOk("started"); ok {
if err := utilityComputeStarted(ctx, d, m); err != nil {
return err
}
}
return nil
}
func utilityComputeUpdateUserAccess(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldSet, newSet := d.GetChange("user_access")
deletedUserAccess := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
if len(deletedUserAccess) > 0 {
for _, userAccessInterface := range deletedUserAccess {
userAccessItem := userAccessInterface.(map[string]interface{})
req := compute.UserRevokeRequest{
ComputeID: computeId,
Username: userAccessItem["username"].(string),
}
_, err := c.CloudBroker().Compute().UserRevoke(ctx, req)
if err != nil {
return err
}
}
}
addedUserAccess := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
if len(addedUserAccess) > 0 {
for _, userAccessInterface := range addedUserAccess {
userAccessItem := userAccessInterface.(map[string]interface{})
req := compute.UserGrantRequest{
ComputeID: computeId,
Username: userAccessItem["username"].(string),
AccessType: userAccessItem["access_type"].(string),
}
_, err := c.CloudBroker().Compute().UserGrant(ctx, req)
if err != nil {
return err
}
}
}
return nil
}
func utilityComputeUpdateSnapshot(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldSet, newSet := d.GetChange("snapshot")
deletedSnapshots := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
if len(deletedSnapshots) > 0 {
for _, snapshotInterface := range deletedSnapshots {
snapshotItem := snapshotInterface.(map[string]interface{})
req := compute.SnapshotDeleteRequest{
ComputeID: computeId,
Label: snapshotItem["label"].(string),
}
_, err := c.CloudBroker().Compute().SnapshotDelete(ctx, req)
if err != nil {
return err
}
}
}
addedSnapshots := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
if len(addedSnapshots) > 0 {
for _, snapshotInterface := range addedSnapshots {
snapshotItem := snapshotInterface.(map[string]interface{})
req := compute.SnapshotCreateRequest{
ComputeID: computeId,
Label: snapshotItem["label"].(string),
}
_, err := c.CloudBroker().Compute().SnapshotCreate(ctx, req)
if err != nil {
return err
}
}
}
return nil
}
func utilityComputeRollback(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
if rollback, ok := d.GetOk("rollback"); ok {
req := compute.StopRequest{
ComputeID: computeId,
Force: false,
}
_, err := c.CloudBroker().Compute().Stop(ctx, req)
if err != nil {
return err
}
rollbackInterface := rollback.(*schema.Set).List()[0]
rollbackItem := rollbackInterface.(map[string]interface{})
rollbackReq := compute.SnapshotRollbackRequest{
ComputeID: computeId,
Label: rollbackItem["label"].(string),
}
_, err = c.CloudBroker().Compute().SnapshotRollback(ctx, rollbackReq)
if err != nil {
return err
}
startReq := compute.StartRequest{ComputeID: computeId}
log.Debugf("utilityComputeRollback: starting compute %d", computeId)
_, err = c.CloudBroker().Compute().Start(ctx, startReq)
if err != nil {
return err
}
}
return nil
}
func utilityComputeUpdateCD(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldSet, newSet := d.GetChange("cd")
deletedCd := (oldSet.(*schema.Set).Difference(newSet.(*schema.Set))).List()
if len(deletedCd) > 0 {
req := compute.CDEjectRequest{
ComputeID: computeId,
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().Compute().CDEject(ctx, req)
if err != nil {
return err
}
}
addedCd := (newSet.(*schema.Set).Difference(oldSet.(*schema.Set))).List()
if len(addedCd) > 0 {
cdItem := addedCd[0].(map[string]interface{})
req := compute.CDInsertRequest{
ComputeID: computeId,
CDROMID: uint64(cdItem["cdrom_id"].(int)),
}
if reason, ok := d.GetOk("reason"); ok {
req.Reason = reason.(string)
}
_, err := c.CloudBroker().Compute().CDInsert(ctx, req)
if err != nil {
return err
}
}
return nil
}
func utilityComputePinToStack(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldPin, newPin := d.GetChange("pin_to_stack")
if oldPin.(bool) && !newPin.(bool) {
req := compute.UnpinFromStackRequest{
ComputeID: computeId,
}
_, err := c.CloudBroker().Compute().UnpinFromStack(ctx, req)
if err != nil {
return err
}
}
if !oldPin.(bool) && newPin.(bool) {
req := compute.PinToStackRequest{
ComputeID: computeId,
TargetStackID: uint64(d.Get("target_stack_id").(int)),
}
if force, ok := d.Get("force_pin").(bool); ok {
req.Force = force
}
_, err := c.CloudBroker().Compute().PinToStack(ctx, req)
if err != nil {
return err
}
}
return nil
}
func utilityComputePause(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldPause, newPause := d.GetChange("pause")
if oldPause.(bool) && !newPause.(bool) {
req := compute.ResumeRequest{
ComputeID: computeId,
}
_, err := c.CloudBroker().Compute().Resume(ctx, req)
if err != nil {
return err
}
}
if !oldPause.(bool) && newPause.(bool) {
req := compute.PauseRequest{
ComputeID: computeId,
}
_, err := c.CloudBroker().Compute().Pause(ctx, req)
if err != nil {
return err
}
}
return nil
}
func utilityComputeReset(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldReset, newReset := d.GetChange("reset")
if !oldReset.(bool) && newReset.(bool) {
req := compute.ResetRequest{
ComputeID: computeId,
}
_, err := c.CloudBroker().Compute().Reset(ctx, req)
if err != nil {
return err
}
}
return nil
}
func utilityComputeUpdateImage(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
oldImage, newImage := d.GetChange("image_id")
stopReq := compute.StopRequest{
ComputeID: computeId,
Force: false,
}
_, err := c.CloudBroker().Compute().Stop(ctx, stopReq)
if err != nil {
return err
}
if oldImage.(int) != newImage.(int) {
req := compute.RedeployRequest{
ComputeID: computeId,
ImageID: uint64(newImage.(int)),
}
if diskSize, ok := d.GetOk("boot_disk_size"); ok {
req.DiskSize = uint64(diskSize.(int))
}
if dataDisks, ok := d.GetOk("data_disks"); ok {
req.DataDisks = dataDisks.(string)
}
if autoStart, ok := d.GetOk("auto_start"); ok {
req.AutoStart = autoStart.(bool)
}
if forceStop, ok := d.GetOk("force_stop"); ok {
req.ForceStop = forceStop.(bool)
}
_, err := c.CloudBroker().Compute().Redeploy(ctx, req)
if err != nil {
return err
}
}
return nil
}
func utilityComputeUpdateCustomFields(ctx context.Context, d *schema.ResourceData, m interface{}) error {
c := m.(*controller.ControllerCfg)
computeId, _ := strconv.ParseUint(d.Id(), 10, 64)
val := d.Get("custom_fields").(string)
val = strings.ReplaceAll(val, "\\", "")
val = strings.ReplaceAll(val, "\n", "")
val = strings.ReplaceAll(val, "\t", "")
val = strings.TrimSpace(val)
if len(val) > 0 {
req := compute.SetCustomFieldsRequest{
ComputeID: computeId,
CustomFields: val,
}
_, err := c.CloudBroker().Compute().SetCustomFields(ctx, req)
if err != nil {
return err
}
}
// } else {
// // req := compute.DeleteCustomFieldsRequest{
// // ComputeID: computeId,
// // }
// // _, err := c.CloudBroker().Compute().DeleteCustomFields(ctx, req)
// // if err != nil {
// // return err
// // }
// }
return nil
}
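// Minimal sketch (added for this document): the normalization applied to "custom_fields"
// above - backslashes, newlines and tabs are stripped and the result is trimmed before
// being sent in SetCustomFieldsRequest. The sample value is hypothetical.
func exampleNormalizeCustomFields() string {
	raw := "{\n\t\"purpose\": \"demo\"\n}"
	val := strings.ReplaceAll(raw, "\\", "")
	val = strings.ReplaceAll(val, "\n", "")
	val = strings.ReplaceAll(val, "\t", "")
	return strings.TrimSpace(val) // -> {"purpose": "demo"}
}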
func isChangeDisk(els []interface{}, el interface{}) bool {
for _, elOld := range els {
elOldConv := elOld.(map[string]interface{})

@ -1,5 +1,5 @@
/*
Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
@ -10,7 +10,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

@ -9,7 +9,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@ -44,11 +44,11 @@ import (
func dataSourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
lb, err := utilityLBCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(lb.ID, 10))
flattenLB(d, lb)
return nil

@ -44,8 +44,10 @@ import (
func dataSourceLBListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
lbList, err := utilityLBListCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenLBList(lbList))

@ -44,8 +44,10 @@ import (
func dataSourceLBListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
lbList, err := utilityLBListDeletedCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
id := uuid.New()
d.SetId(id.String())
d.Set("items", flattenLBList(lbList))

@ -96,11 +96,14 @@ func flattenLB(d *schema.ResourceData, lb *lb.RecordLB) {
d.Set("ha_mode", lb.HAMode)
d.Set("ckey", lb.CKey)
d.Set("meta", flattens.FlattenMeta(lb.Meta))
d.Set("acl", flattenACl(lb.ACL))
d.Set("backend_haip", lb.BackendHAIP)
d.Set("backends", flattenLBBackends(lb.Backends))
d.Set("desc", lb.Description)
d.Set("dp_api_user", lb.DPAPIUser)
d.Set("dp_api_password", lb.DPAPIPassword)
d.Set("extnet_id", lb.ExtNetID)
d.Set("frontend_haip", lb.FrontendHAIP)
d.Set("frontends", flattenFrontends(lb.Frontends))
d.Set("gid", lb.GID)
d.Set("guid", lb.GUID)
@ -108,6 +111,7 @@ func flattenLB(d *schema.ResourceData, lb *lb.RecordLB) {
d.Set("image_id", lb.ImageID)
d.Set("milestones", lb.Milestones)
d.Set("name", lb.Name)
d.Set("part_k8s", lb.PartK8s)
d.Set("primary_node", flattenNode(lb.PrimaryNode))
d.Set("rg_id", lb.RGID)
d.Set("secondary_node", flattenNode(lb.SecondaryNode))
@ -216,6 +220,7 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} {
temp := map[string]interface{}{
"ha_mode": lb.HAMode,
"acl": flattenACl(lb.ACL),
"backend_haip": lb.BackendHAIP,
"backends": flattenLBBackends(lb.Backends),
"created_by": lb.CreatedBy,
"created_time": lb.CreatedTime,
@ -225,6 +230,7 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} {
"dp_api_user": lb.DPAPIUser,
"dp_api_password": lb.DPAPIPassword,
"extnet_id": lb.ExtNetID,
"frontend_haip": lb.FrontendHAIP,
"frontends": flattenFrontends(lb.Frontends),
"gid": lb.GID,
"guid": lb.GUID,
@ -247,6 +253,7 @@ func flattenLBList(lbl *lb.ListLB) []map[string]interface{} {
}
func flattenACl(m interface{}) string {
switch d := m.(type) {
case string:
return d
@ -259,4 +266,5 @@ func flattenACl(m interface{}) string {
default:
return ""
}
}

@ -3,66 +3,32 @@ package lb
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
)
func existLBID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
lbId := uint64(d.Get("lb_id").(int))
func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
errs := []error{}
req := lb.ListRequest{}
lbList, err := c.CloudBroker().LB().List(ctx, req)
if err != nil {
return false, err
if err := ic.ExistRG(ctx, uint64(d.Get("rg_id").(int)), c); err != nil {
errs = append(errs, err)
}
return len(lbList.FilterByID(lbId).Data) != 0, nil
}
func existRGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
rgId := uint64(d.Get("rg_id").(int))
req := rg.ListRequest{}
rgList, err := c.CloudBroker().RG().List(ctx, req)
if err != nil {
return false, err
if err := ic.ExistExtNetInLb(ctx, uint64(d.Get("extnet_id").(int)), c); err != nil {
errs = append(errs, err)
}
return len(rgList.FilterByID(rgId).Data) != 0, nil
}
func existExtNetID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
extNetID := uint64(d.Get("extnet_id").(int))
req := extnet.ListRequest{}
extNetList, err := c.CloudBroker().ExtNet().List(ctx, req)
if err != nil {
return false, err
if err := ic.ExistVinsInLb(ctx, uint64(d.Get("vins_id").(int)), c); err != nil {
errs = append(errs, err)
}
return len(extNetList.FilterByID(extNetID).Data) != 0, nil
return dc.ErrorsToDiagnostics(errs)
}
func existViNSID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
vinsID := uint64(d.Get("vins_id").(int))
req := vins.ListRequest{}
vinsList, err := c.CloudBroker().VINS().List(ctx, req)
if err != nil {
return false, err
}
return len(vinsList.FilterByID(vinsID).Data) != 0, nil
func checkParamsExistenceLb(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
err := ic.ExistLB(ctx, uint64(d.Get("lb_id").(int)), c)
return diag.FromErr(err)
}

@ -34,6 +34,7 @@ package lb
import (
"context"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@ -46,36 +47,13 @@ import (
)
func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBCreate")
haveRGID, err := existRGID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourceLBCreate: can't create LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveExtNetID, err := existExtNetID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveExtNetID {
return diag.Errorf("resourceLBCreate: can't create LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int))
}
haveVins, err := existViNSID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
log.Debugf("resourceLBCreate called with name: %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
if !haveVins {
return diag.Errorf("resourceLBCreate: can't create LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int))
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
c := m.(*controller.ControllerCfg)
req := lb.CreateRequest{
Name: d.Get("name").(string),
RGID: uint64(d.Get("rg_id").(int)),
@ -88,49 +66,40 @@ func resourceLBCreate(ctx context.Context, d *schema.ResourceData, m interface{}
if desc, ok := d.GetOk("desc"); ok {
req.Description = desc.(string)
}
if haMode, ok := d.GetOk("ha_mode"); ok {
req.HighlyAvailable = haMode.(bool)
}
lbId, err := c.CloudBroker().LB().Create(ctx, req)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(lbId, 10))
d.Set("lb_id", lbId)
_, err = utilityLBCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
var warnings dc.Warnings
if enable, ok := d.GetOk("enable"); ok {
lbId := uint64(d.Get("lb_id").(int))
if enable.(bool) {
req := lb.EnableRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Enable(ctx, req)
if err != nil {
return diag.FromErr(err)
if err := resourceLbEnable(ctx, lbId, m); err != nil {
warnings.Add(err)
}
} else {
req := lb.DisableRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Disable(ctx, req)
if err != nil {
return diag.FromErr(err)
if err := resourceLbDisable(ctx, lbId, m); err != nil {
warnings.Add(err)
}
}
}
return resourceLBRead(ctx, d, m)
return append(warnings.Get(), resourceLBRead(ctx, d, m)...)
}
func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBRead")
log.Debugf("resourceLBRead called for lb_id %s", d.Id())
c := m.(*controller.ControllerCfg)
// c := m.(*controller.ControllerCfg)
lbRec, err := utilityLBCheckPresence(ctx, d, m)
if lbRec == nil {
@ -147,43 +116,44 @@ func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{})
case status.Created:
case status.Deleting:
case status.Deleted:
lbId, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := lb.RestoreRequest{LBID: lbId}
_, err := c.CloudBroker().LB().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
}
if enable := d.Get("enable"); enable.(bool) {
req := lb.EnableRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Enable(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
if start := d.Get("start"); start.(bool) {
if enable := d.Get("enable"); enable.(bool) {
req := lb.StartRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Start(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
return diag.Errorf("To start the LB, please, enable LB first.")
}
}
hasChanged = true
// lbId, _ := strconv.ParseUint(d.Id(), 10, 64)
// restoreReq := lb.RestoreRequest{LBID: lbId}
// _, err := c.CloudBroker().LB().Restore(ctx, restoreReq)
// if err != nil {
// return diag.FromErr(err)
// }
// if enable := d.Get("enable"); enable.(bool) {
// req := lb.EnableRequest{
// LBID: lbId,
// }
// _, err := c.CloudBroker().LB().Enable(ctx, req)
// if err != nil {
// return diag.FromErr(err)
// }
// }
// if start := d.Get("start"); start.(bool) {
// if enable := d.Get("enable"); enable.(bool) {
// req := lb.StartRequest{
// LBID: lbId,
// }
// _, err := c.CloudBroker().LB().Start(ctx, req)
// if err != nil {
// return diag.FromErr(err)
// }
// } else {
// return diag.Errorf("To start the LB, please, enable LB first.")
// }
// }
// hasChanged = true
case status.Destroying:
return diag.Errorf("The LB is in progress with status: %s", lbRec.Status)
case status.Destroyed:
d.SetId("")
return resourceLBCreate(ctx, d, m)
return diag.Errorf("The resource cannot be read because it has been destroyed")
// return resourceLBCreate(ctx, d, m)
case status.Enabled:
case status.Enabling:
case status.Disabling:
@ -206,10 +176,11 @@ func resourceLBRead(ctx context.Context, d *schema.ResourceData, m interface{})
}
func resourceLBDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBDelete")
log.Debugf("resourceLBDelete called with lb id: %v", d.Get("lb_id").(int))
_, err := utilityLBCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -233,34 +204,11 @@ func resourceLBDelete(ctx context.Context, d *schema.ResourceData, m interface{}
}
func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBUpdate")
log.Debugf("resourceLBUpdate called for lb_id %s", d.Id())
c := m.(*controller.ControllerCfg)
haveRGID, err := existRGID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourceLBUpdate: can't update LB because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveExtNetID, err := existExtNetID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveExtNetID {
return diag.Errorf("resourceLBUpdate: can't update LB because ExtNetID %d is not allowed or does not exist", d.Get("extnet_id").(int))
}
haveVins, err := existViNSID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveVins {
return diag.Errorf("resourceLBUpdate: can't update LB because ViNSID %d is not allowed or does not exist", d.Get("vins_id").(int))
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
lbRec, err := utilityLBCheckPresence(ctx, d, m)
@ -278,34 +226,28 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}
case status.Created:
case status.Deleting:
case status.Deleted:
lbId, _ := strconv.ParseUint(d.Id(), 10, 64)
restoreReq := lb.RestoreRequest{LBID: lbId}
_, err := c.CloudBroker().LB().Restore(ctx, restoreReq)
if err != nil {
return diag.FromErr(err)
restore, ok := d.GetOk("restore")
if ok && restore.(bool) {
if err := resourceLbRestore(ctx, lbRec.ID, m); err != nil {
return diag.FromErr(err)
}
}
if enable := d.Get("enable"); enable.(bool) {
req := lb.EnableRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Enable(ctx, req)
if err != nil {
enable, ok := d.GetOk("enable")
if ok && enable.(bool) {
if err := resourceLbEnable(ctx, lbRec.ID, m); err != nil {
return diag.FromErr(err)
}
}
if start := d.Get("start"); start.(bool) {
if enable := d.Get("enable"); enable.(bool) {
req := lb.StartRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Start(ctx, req)
if err != nil {
start, ok := d.GetOk("start")
if ok && start.(bool) {
if enable.(bool) {
if err := resourceLbStart(ctx, lbRec.ID, m); err != nil {
return diag.FromErr(err)
}
} else {
return diag.Errorf("To start the LB, please, enable LB first.")
return diag.Errorf("to start the LB, please, enable LB first.")
}
}
@ -314,7 +256,8 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}
return diag.Errorf("The LB is in progress with status: %s", lbRec.Status)
case status.Destroyed:
d.SetId("")
return resourceLBCreate(ctx, d, m)
return diag.Errorf("The resource cannot be updated because it has been destroyed")
// return resourceLBCreate(ctx, d, m)
case status.Enabled:
case status.Enabling:
case status.Disabling:
@ -332,100 +275,201 @@ func resourceLBUpdate(ctx context.Context, d *schema.ResourceData, m interface{}
}
if d.HasChange("enable") {
enable := d.Get("enable").(bool)
if enable {
req := lb.EnableRequest{
LBID: uint64(d.Get("lb_id").(int)),
}
_, err := c.CloudBroker().LB().Enable(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
req := lb.DisableRequest{
LBID: uint64(d.Get("lb_id").(int)),
}
_, err := c.CloudBroker().LB().Disable(ctx, req)
if err != nil {
return diag.FromErr(err)
}
if err := resourceLbChangeEnable(ctx, d, lbRec.ID, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("ha_mode") {
if err := resourceLbChangeHaMode(ctx, d, lbRec.ID, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("start") {
start := d.Get("start").(bool)
lbId := uint64(d.Get("lb_id").(int))
if start {
req := lb.StartRequest{LBID: lbId}
_, err := c.CloudBroker().LB().Start(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
req := lb.StopRequest{LBID: lbId}
_, err := c.CloudBroker().LB().Stop(ctx, req)
if err != nil {
return diag.FromErr(err)
}
if err := resourceLbChangeStart(ctx, d, lbRec.ID, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("desc") {
req := lb.UpdateRequest{
LBID: uint64(d.Get("lb_id").(int)),
Description: d.Get("desc").(string),
if err := resourceLbChangeDesc(ctx, d, lbRec.ID, m); err != nil {
return diag.FromErr(err)
}
}
_, err := c.CloudBroker().LB().Update(ctx, req)
if err != nil {
if d.HasChange("restart") {
if err := resourceLbChangeRestart(ctx, d, lbRec.ID, m); err != nil {
return diag.FromErr(err)
}
}
if d.HasChange("restart") {
restart := d.Get("restart").(bool)
if restart {
req := lb.RestartRequest{
LBID: uint64(d.Get("lb_id").(int)),
}
if d.HasChange("config_reset") {
if err := resourceLbChangeConfigReset(ctx, d, lbRec.ID, m); err != nil {
return diag.FromErr(err)
}
}
_, err := c.CloudBroker().LB().Restart(ctx, req)
if err != nil {
return diag.FromErr(err)
}
return resourceLBRead(ctx, d, m)
}
func resourceLbEnable(ctx context.Context, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := lb.EnableRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Enable(ctx, req)
return err
}
func resourceLbDisable(ctx context.Context, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := lb.DisableRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Disable(ctx, req)
return err
}
func resourceLbRestore(ctx context.Context, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
restoreReq := lb.RestoreRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Restore(ctx, restoreReq)
return err
}
func resourceLbStart(ctx context.Context, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
req := lb.StartRequest{
LBID: lbId,
}
_, err := c.CloudBroker().LB().Start(ctx, req)
return err
}
func resourceLbChangeEnable(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
enable := d.Get("enable").(bool)
if enable {
if err := resourceLbEnable(ctx, lbId, m); err != nil {
return err
}
} else {
if err := resourceLbDisable(ctx, lbId, m); err != nil {
return err
}
}
if d.HasChange("restore") {
restore := d.Get("restore").(bool)
if restore {
req := lb.RestoreRequest{
LBID: uint64(d.Get("lb_id").(int)),
}
return nil
}
_, err := c.CloudBroker().LB().Restore(ctx, req)
if err != nil {
return diag.FromErr(err)
}
func resourceLbChangeHaMode(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
haModeOn := d.Get("ha_mode").(bool)
if haModeOn {
req := lb.HighlyAvailableRequest{
LBID: lbId,
}
if _, err := c.CloudBroker().LB().HighlyAvailable(ctx, req); err != nil {
return err
}
}
if d.HasChange("config_reset") {
cfgReset := d.Get("config_reset").(bool)
if cfgReset {
req := lb.ConfigResetRequest{
LBID: uint64(d.Get("lb_id").(int)),
}
return nil
}
_, err := c.CloudBroker().LB().ConfigReset(ctx, req)
if err != nil {
return diag.FromErr(err)
}
func resourceLbChangeStart(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
start := d.Get("start").(bool)
if start {
req := lb.StartRequest{LBID: lbId}
if _, err := c.CloudBroker().LB().Start(ctx, req); err != nil {
return err
}
} else {
req := lb.StopRequest{LBID: lbId}
if _, err := c.CloudBroker().LB().Stop(ctx, req); err != nil {
return err
}
}
return resourceLBRead(ctx, d, m)
return nil
}
func resourceLbChangeDesc(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
desc := d.Get("desc").(string)
req := lb.UpdateRequest{
LBID: lbId,
Description: desc,
}
if _, err := c.CloudBroker().LB().Update(ctx, req); err != nil {
return err
}
return nil
}
func resourceLbChangeRestart(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
restart := d.Get("restart").(bool)
if restart {
req := lb.RestartRequest{
LBID: lbId,
}
if _, err := c.CloudBroker().LB().Restart(ctx, req); err != nil {
return err
}
}
return nil
}
func resourceLbChangeRestore(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
restore := d.Get("restore").(bool)
if restore {
req := lb.RestoreRequest{
LBID: lbId,
}
if _, err := c.CloudBroker().LB().Restore(ctx, req); err != nil {
return err
}
}
return nil
}
func resourceLbChangeConfigReset(ctx context.Context, d *schema.ResourceData, lbId uint64, m interface{}) error {
c := m.(*controller.ControllerCfg)
cfgReset := d.Get("config_reset").(bool)
if cfgReset {
req := lb.ConfigResetRequest{
LBID: lbId,
}
if _, err := c.CloudBroker().LB().ConfigReset(ctx, req); err != nil {
return err
}
}
return nil
}
func ResourceLB() *schema.Resource {

@ -39,7 +39,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
@ -47,23 +46,18 @@ import (
)
func resourceLBBackendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendCreate")
log.Debugf("resourceLBBackendCreate: call for lb_backend id %s", d.Id())
c := m.(*controller.ControllerCfg)
haveLBID, err := existLBID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
return diags
}
if !haveLBID {
return diag.Errorf("resourceLBBackendCreate: can't create LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
req := lb.BackendCreateRequest{
LBID: uint64(d.Get("lb_id").(int)),
BackendName: d.Get("name").(string),
}
c := m.(*controller.ControllerCfg)
req := lb.BackendCreateRequest{}
req.BackendName = d.Get("name").(string)
req.LBID = uint64(d.Get("lb_id").(int))
if algorithm, ok := d.GetOk("algorithm"); ok {
req.Algorithm = algorithm.(string)
}
@ -92,23 +86,18 @@ func resourceLBBackendCreate(ctx context.Context, d *schema.ResourceData, m inte
req.Weight = uint64(weight.(int))
}
_, err = c.CloudBroker().LB().BackendCreate(ctx, req)
_, err := c.CloudBroker().LB().BackendCreate(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string))
_, err = utilityLBBackendCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
return resourceLBBackendRead(ctx, d, m)
}
func resourceLBBackendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendRead")
log.Debugf("resourceLBBackendRead: call for lb_backend id %s", d.Id())
b, err := utilityLBBackendCheckPresence(ctx, d, m)
if b == nil {
@ -124,10 +113,11 @@ func resourceLBBackendRead(ctx context.Context, d *schema.ResourceData, m interf
}
func resourceLBBackendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendDelete")
log.Debugf("resourceLBBackendDelete: call for lb_backend id %s", d.Id())
_, err := utilityLBBackendCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -148,16 +138,11 @@ func resourceLBBackendDelete(ctx context.Context, d *schema.ResourceData, m inte
}
func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendEdit")
log.Debugf("resourceLBBackendUpdate: call for lb_backend id %s", d.Id())
c := m.(*controller.ControllerCfg)
haveLBID, err := existLBID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveLBID {
return diag.Errorf("resourceLBBackendUpdate: can't update LB backend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
return diags
}
req := lb.BackendUpdateRequest{
@ -193,7 +178,7 @@ func resourceLBBackendUpdate(ctx context.Context, d *schema.ResourceData, m inte
req.Weight = uint64(d.Get("weight").(int))
}
_, err = c.CloudBroker().LB().BackendUpdate(ctx, req)
_, err := c.CloudBroker().LB().BackendUpdate(ctx, req)
if err != nil {
return diag.FromErr(err)
}
@ -222,153 +207,6 @@ func ResourceLBBackend() *schema.Resource {
Default: &constants.Timeout300s,
},
Schema: map[string]*schema.Schema{
"lb_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the LB instance to backendCreate",
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Must be unique among all backends of this LB - name of the new backend to create",
},
"algorithm": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{"roundrobin", "static-rr", "leastconn"}, false),
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"downinter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"fall": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"inter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxconn": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxqueue": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"rise": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"slowstart": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"weight": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"servers": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"address": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"check": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"port": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"server_settings": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"downinter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"fall": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"inter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxconn": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxqueue": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"rise": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"slowstart": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"weight": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
},
},
},
},
},
},
Schema: resourceLbBackendSchemaMake(),
}
}

@ -39,7 +39,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
@ -47,18 +46,16 @@ import (
)
func resourceLBBackendServerCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendServerCreate")
haveLBID, err := existLBID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
log.Debugf("resourceLBBackendServerCreate: call for lb_id %d, backend_name %s, server_name %s",
d.Get("lb_id").(int),
d.Get("backend_name").(string),
d.Get("name").(string))
c := m.(*controller.ControllerCfg)
if !haveLBID {
return diag.Errorf("resourceLBBackendServerCreate: can't create LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
return diags
}
c := m.(*controller.ControllerCfg)
req := lb.BackendServerAddRequest{
BackendName: d.Get("backend_name").(string),
ServerName: d.Get("name").(string),
@ -95,23 +92,19 @@ func resourceLBBackendServerCreate(ctx context.Context, d *schema.ResourceData,
req.Weight = uint64(weight.(int))
}
_, err = c.CloudBroker().LB().BackendServerAdd(ctx, req)
_, err := c.CloudBroker().LB().BackendServerAdd(ctx, req)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
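// The resource ID is a composite key: "<lb_id>#<backend_name>#<server_name>".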
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("backend_name").(string) + "#" + d.Get("name").(string))
_, err = utilityLBBackendServerCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
return resourceLBBackendServerRead(ctx, d, m)
}
func resourceLBBackendServerRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendServerRead")
log.Debugf("resourceLBBackendServerRead: call for id %s", d.Id())
s, err := utilityLBBackendServerCheckPresence(ctx, d, m)
if err != nil {
@ -128,10 +121,11 @@ func resourceLBBackendServerRead(ctx context.Context, d *schema.ResourceData, m
}
func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendServerDelete")
log.Debugf("resourceLBBackendServerDelete: call for id %s", d.Id())
_, err := utilityLBBackendServerCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -152,16 +146,11 @@ func resourceLBBackendServerDelete(ctx context.Context, d *schema.ResourceData,
}
func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBBackendServerEdit")
log.Debugf("resourceLBBackendServerEdit: call for id %s", d.Id())
c := m.(*controller.ControllerCfg)
haveLBID, err := existLBID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveLBID {
return diag.Errorf("resourceLBBackendServerUpdate: can't update LB backend server because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
return diags
}
req := lb.BackendServerUpdateRequest{
@ -200,7 +189,7 @@ func resourceLBBackendServerUpdate(ctx context.Context, d *schema.ResourceData,
req.Weight = uint64(d.Get("weight").(int))
}
_, err = c.CloudBroker().LB().BackendServerUpdate(ctx, req)
_, err := c.CloudBroker().LB().BackendServerUpdate(ctx, req)
if err != nil {
return diag.FromErr(err)
}
@ -229,83 +218,6 @@ func ResourceLBBackendServer() *schema.Resource {
Default: &constants.Timeout300s,
},
Schema: map[string]*schema.Schema{
"lb_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the LB instance to backendCreate",
},
"backend_name": {
Type: schema.TypeString,
Required: true,
Description: "Must be unique among all backends of this LB - name of the new backend to create",
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Must be unique among all servers defined for this backend - name of the server definition to add.",
},
"address": {
Type: schema.TypeString,
Required: true,
Description: "IP address of the server.",
},
"port": {
Type: schema.TypeInt,
Required: true,
Description: "Port number on the server",
},
"check": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringInSlice([]string{"disabled", "enabled"}, false),
Description: "set to disabled if this server should be used regardless of its state.",
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"downinter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"fall": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"inter": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxconn": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxqueue": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"rise": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"slowstart": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"weight": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
},
Schema: resourceLbBackendServerSchemaMake(),
}
}

@ -46,41 +46,36 @@ import (
)
func resourceLBFrontendCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendCreate")
log.Debugf("resourceLBFrontendCreate: call for lb_id %d, backend %s to create frontend %s",
d.Get("lb_id").(int),
d.Get("backend_name").(string),
d.Get("name").(string))
haveLBID, err := existLBID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
if !haveLBID {
return diag.Errorf("resourceLBFrontendCreate: can't create LB frontend because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
return diags
}
c := m.(*controller.ControllerCfg)
req := lb.FrontendCreateRequest{
BackendName: d.Get("backend_name").(string),
LBID: uint64(d.Get("lb_id").(int)),
FrontendName: d.Get("name").(string),
}
_, err = c.CloudBroker().LB().FrontendCreate(ctx, req)
_, err := c.CloudBroker().LB().FrontendCreate(ctx, req)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("name").(string))
_, err = utilityLBFrontendCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
return resourceLBFrontendRead(ctx, d, m)
}
func resourceLBFrontendRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendRead")
log.Debugf("resourceLBFrontendRead: call for id %s", d.Id())
f, err := utilityLBFrontendCheckPresence(ctx, d, m)
if err != nil {
@ -96,10 +91,11 @@ func resourceLBFrontendRead(ctx context.Context, d *schema.ResourceData, m inter
}
func resourceLBFrontendDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendDelete")
log.Debugf("resourceLBFrontendDelete: call for id %s", d.Id())
_, err := utilityLBFrontendCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -144,48 +140,6 @@ func ResourceLBFrontend() *schema.Resource {
Default: &constants.Timeout300s,
},
Schema: map[string]*schema.Schema{
"lb_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the LB instance to backendCreate",
},
"backend_name": {
Type: schema.TypeString,
Required: true,
},
"name": {
Type: schema.TypeString,
Required: true,
},
"bindings": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"address": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"port": {
Type: schema.TypeInt,
Computed: true,
},
},
},
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
},
Schema: resourceLbFrontendSchemaMake(),
}
}

@ -46,18 +46,17 @@ import (
)
func resourceLBFrontendBindCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendBindCreate")
log.Debugf("resourceLBFrontendBindCreate: call for lb_id %d, frontend %s to create bind %s",
d.Get("lb_id").(int),
d.Get("frontend_name").(string),
d.Get("name").(string))
haveLBID, err := existLBID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
c := m.(*controller.ControllerCfg)
if !haveLBID {
return diag.Errorf("resourceLBFrontendBindCreate: can't create LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
return diags
}
c := m.(*controller.ControllerCfg)
req := lb.FrontendBindRequest{
LBID: uint64(d.Get("lb_id").(int)),
FrontendName: d.Get("frontend_name").(string),
@ -66,23 +65,18 @@ func resourceLBFrontendBindCreate(ctx context.Context, d *schema.ResourceData, m
BindingPort: uint64(d.Get("port").(int)),
}
_, err = c.CloudBroker().LB().FrontendBind(ctx, req)
_, err := c.CloudBroker().LB().FrontendBind(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.Itoa(d.Get("lb_id").(int)) + "#" + d.Get("frontend_name").(string) + "#" + d.Get("name").(string))
_, err = utilityLBFrontendBindCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
return resourceLBFrontendBindRead(ctx, d, m)
}
func resourceLBFrontendBindRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendBindRead")
log.Debugf("resourceLBFrontendBindRead: call for %s", d.Id())
b, err := utilityLBFrontendBindCheckPresence(ctx, d, m)
if err != nil {
@ -99,10 +93,11 @@ func resourceLBFrontendBindRead(ctx context.Context, d *schema.ResourceData, m i
}
func resourceLBFrontendBindDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendBindDelete")
log.Debugf("resourceLBFrontendBindDelete: call for %s", d.Id())
_, err := utilityLBFrontendBindCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
@ -124,16 +119,11 @@ func resourceLBFrontendBindDelete(ctx context.Context, d *schema.ResourceData, m
}
func resourceLBFrontendBindUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourceLBFrontendBindEdit")
log.Debugf("resourceLBFrontendBindEdit: call for %s", d.Id())
c := m.(*controller.ControllerCfg)
haveLBID, err := existLBID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveLBID {
return diag.Errorf("resourceLBFrontendBindUpdate: can't update LB frontend bind because LBID %d is not allowed or does not exist", d.Get("lb_id").(int))
if diags := checkParamsExistenceLb(ctx, d, c); diags != nil {
return diags
}
req := lb.FrontendBindUpdateRequest{
@ -142,12 +132,12 @@ func resourceLBFrontendBindUpdate(ctx context.Context, d *schema.ResourceData, m
LBID: uint64(d.Get("lb_id").(int)),
}
if d.HasChange("address") || d.HasChange("port") {
if d.HasChange("address") || d.HasChange("port") {
req.BindingAddress = d.Get("address").(string)
req.BindingPort = uint64(d.Get("port").(int))
}
_, err = c.CloudBroker().LB().FrontendBindUpdate(ctx, req)
_, err := c.CloudBroker().LB().FrontendBindUpdate(ctx, req)
if err != nil {
return diag.FromErr(err)
}
@ -176,33 +166,6 @@ func ResourceLBFrontendBind() *schema.Resource {
Default: &constants.Timeout300s,
},
Schema: map[string]*schema.Schema{
"lb_id": {
Type: schema.TypeInt,
Required: true,
Description: "ID of the LB instance to backendCreate",
},
"frontend_name": {
Type: schema.TypeString,
Required: true,
Description: "Must be unique among all backends of this LB - name of the new backend to create",
},
"address": {
Type: schema.TypeString,
Required: true,
},
"guid": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Required: true,
},
"port": {
Type: schema.TypeInt,
Required: true,
},
},
Schema: resourceLbFrontendBindSchemaMake(),
}
}

@ -45,14 +45,13 @@ func utilityLBCheckPresence(ctx context.Context, d *schema.ResourceData, m inter
c := m.(*controller.ControllerCfg)
req := lb.GetRequest{}
if d.Id() != "" {
rgId, _ := strconv.ParseUint(d.Id(), 10, 64)
req.LBID = rgId
} else {
req.LBID = uint64(d.Get("lb_id").(int))
}
lb, err := c.CloudBroker().LB().Get(ctx, req)
if err != nil {
return nil, err

@ -57,17 +57,17 @@ func utilityLBBackendCheckPresence(ctx context.Context, d *schema.ResourceData,
req.LBID = uint64(d.Get("lb_id").(int))
}
lb, err := c.CloudBroker().LB().Get(ctx, req)
lbRec, err := c.CloudBroker().LB().Get(ctx, req)
if err != nil {
return nil, err
}
backends := lb.Backends
backends := lbRec.Backends
for _, b := range backends {
if b.Name == bName {
return &b, nil
}
}
return nil, fmt.Errorf("can not find backend with name: %s for lb: %d", bName, lb.ID)
return nil, fmt.Errorf("can not find backend with name: %s for lb: %d", bName, lbRec.ID)
}

@ -1,140 +1,69 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package pcidevice
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func dataSourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
pcidevice, err := utilityPcideviceCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
d.Set("ckey", pcidevice.CKey)
d.Set("meta", flattens.FlattenMeta(pcidevice.Meta))
d.Set("compute_id", pcidevice.ComputeID)
d.Set("description", pcidevice.Description)
d.Set("guid", pcidevice.GUID)
d.Set("hw_path", pcidevice.HwPath)
d.Set("device_id",pcidevice.ID)
d.Set("name", pcidevice.Name)
d.Set("rg_id", pcidevice.RGID)
d.Set("stack_id", pcidevice.StackID)
d.Set("status", pcidevice.Status)
d.Set("system_name", pcidevice.SystemName)
d.SetId(strconv.Itoa(d.Get("device_id").(int)))
return nil
}
func dataSourcePcideviceSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"device_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
},
"description": {
Type: schema.TypeString,
Computed: true,
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"hw_path": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"rg_id": {
Type: schema.TypeInt,
Computed: true,
},
"stack_id": {
Type: schema.TypeInt,
Computed: true,
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"system_name": {
Type: schema.TypeString,
Computed: true,
},
}
return rets
}
func DataSourcePcidevice() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourcePcideviceRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourcePcideviceSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package pcidevice
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
pcidevice, err := utilityPcideviceCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
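// The inline per-field d.Set calls were extracted into flattenPcidevice,
// which is shared with the pcidevice resource read.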
flattenPcidevice(d, pcidevice)
d.SetId(strconv.Itoa(d.Get("device_id").(int)))
return nil
}
func DataSourcePcidevice() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourcePcideviceRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourcePcideviceSchemaMake(),
}
}

@ -1,149 +1,72 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package pcidevice
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
)
func flattenPcideviceList(pl *pcidevice.ListPCIDevices) []map[string]interface{} {
res := make([]map[string]interface{}, 0, len(pl.Data))
for _, item := range pl.Data {
temp := map[string]interface{}{
"ckey": item.CKey,
"meta": flattens.FlattenMeta(item.Meta),
"compute_id": item.ComputeID,
"description": item.Description,
"guid": item.GUID,
"hw_path": item.HwPath,
"device_id": item.ID,
"rg_id": item.RGID,
"name": item.Name,
"stack_id": item.StackID,
"status": item.Status,
"system_name": item.SystemName,
}
res = append(res, temp)
}
return res
}
func dataSourcePcideviceListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
pcideviceList, err := utilityPcideviceListCheckPresence(ctx, m)
if err != nil {
return diag.FromErr(err)
}
d.Set("items", flattenPcideviceList(pcideviceList))
d.Set("entry_count", pcideviceList.EntryCount)
id := uuid.New()
d.SetId(id.String())
return nil
}
func dataSourcePcideviceListSchemaMake() map[string]*schema.Schema {
rets := map[string]*schema.Schema{
"by_id": {
Type: schema.TypeInt,
Optional: true,
Description: "by_id",
},
"compute_id": {
Type: schema.TypeInt,
Optional: true,
Description: "compute_id",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "name",
},
"rg_id": {
Type: schema.TypeInt,
Optional: true,
Description: "rg_id",
},
"status": {
Type: schema.TypeString,
Optional: true,
Description: "status",
},
"page": {
Type: schema.TypeInt,
Optional: true,
Description: "page number",
},
"size": {
Type: schema.TypeInt,
Optional: true,
Description: "page size",
},
"items": {
Type: schema.TypeList,
Computed: true,
Description: "pcidevice list",
Elem: &schema.Resource{
Schema: dataSourcePcideviceSchemaMake(),
},
},
"entry_count": {
Type: schema.TypeInt,
Computed: true,
Description: "entry count",
},
}
return rets
}
func DataSourcePcideviceList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourcePcideviceListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourcePcideviceListSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package pcidevice
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
)
func dataSourcePcideviceListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
pcideviceList, err := utilityPcideviceListCheckPresence(ctx, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.Set("items", flattenPcideviceList(pcideviceList))
d.Set("entry_count", pcideviceList.EntryCount)
id := uuid.New()
d.SetId(id.String())
return nil
}
func DataSourcePcideviceList() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
ReadContext: dataSourcePcideviceListRead,
Timeouts: &schema.ResourceTimeout{
Read: &constants.Timeout30s,
Default: &constants.Timeout60s,
},
Schema: dataSourcePcideviceListSchemaMake(),
}
}

@ -34,55 +34,26 @@ package pcidevice
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/compute"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/service/cloudbroker/ic"
)
func existStackID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
StackID := uint64(d.Get("stack_id").(int))
RGID := uint64(d.Get("rg_id").(int))
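// checkParamsExistence runs pre-flight validation for pcidevice operations:
// it verifies that the referenced resource group (rg_id) and stack (stack_id)
// exist, accumulating all failures and returning them as a single
// diag.Diagnostics value via dc.ErrorsToDiagnostics.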
func checkParamsExistence(ctx context.Context, d *schema.ResourceData, c *controller.ControllerCfg) diag.Diagnostics {
var errs []error
req := rg.ListRequest{
IncludeDeleted: false,
}
rgList, err := c.CloudBroker().RG().List(ctx, req)
if err != nil {
return false, err
}
for _, v := range rgList.FilterByID(RGID).Data {
for _, idVM := range v.VMs {
req := compute.GetRequest{
ComputeID: idVM,
}
checkStackID, err := c.CloudBroker().Compute().Get(ctx, req)
if err != nil {
return false, err
}
if checkStackID.StackID == StackID {
return true, nil
}
}
}
return false, err
}
stackId := uint64(d.Get("stack_id").(int))
rgId := uint64(d.Get("rg_id").(int))
func existRGID(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) {
c := m.(*controller.ControllerCfg)
RGID := uint64(d.Get("rg_id").(int))
req := rg.ListRequest{
IncludeDeleted: false,
if err := ic.ExistRG(ctx, rgId, c); err != nil {
errs = append(errs, err)
}
rgList, err := c.CloudBroker().RG().List(ctx, req)
if err != nil {
return false, err
if err := ic.ExistStackInPcidevice(ctx, stackId, rgId, c); err != nil {
errs = append(errs, err)
}
return len(rgList.FilterByID(RGID).Data) != 0, nil
return dc.ErrorsToDiagnostics(errs)
}
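// Editorial note: dc.ErrorsToDiagnostics is not shown in this diff. A minimal
// sketch of the assumed behaviour (field names per terraform-plugin-sdk/v2 diag):
//
//	func ErrorsToDiagnostics(errs []error) diag.Diagnostics {
//		if len(errs) == 0 {
//			return nil
//		}
//		diags := make(diag.Diagnostics, 0, len(errs))
//		for _, err := range errs {
//			diags = append(diags, diag.Diagnostic{
//				Severity: diag.Error,
//				Summary:  err.Error(),
//			})
//		}
//		return diags
//	}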

@ -1,292 +1,229 @@
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package pcidevice
import (
"context"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
log "github.com/sirupsen/logrus"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/flattens"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
func resourcePcideviceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourcePcideviceCreate: called for pcidevice %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
req := pcidevice.CreateRequest{}
haveRGID, err := existRGID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourcePcideviceCreate: can't create Pcidevice because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveStackID, err := existStackID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveStackID {
return diag.Errorf("resourcePcideviceCreate: can't create Pcidevice because StackID %d is not allowed or does not exist", d.Get("stack_id").(int))
}
req.StackID = uint64(d.Get("stack_id").(int))
req.RGID = uint64(d.Get("rg_id").(int))
req.Name = d.Get("name").(string)
req.HWPath = d.Get("hw_path").(string)
if description, ok := d.GetOk("description"); ok {
req.Description = description.(string)
}
pcideviceId, err := c.CloudBroker().PCIDevice().Create(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(pcideviceId, 10))
d.Set("device_id", pcideviceId)
return resourcePcideviceRead(ctx, d, m)
}
func resourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
pcidevice, err := utilityPcideviceCheckPresence(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
d.Set("ckey", pcidevice.CKey)
d.Set("meta", flattens.FlattenMeta(pcidevice.Meta))
d.Set("compute_id", pcidevice.ComputeID)
d.Set("description", pcidevice.Description)
d.Set("guid", pcidevice.GUID)
d.Set("hw_path", pcidevice.HwPath)
d.Set("device_id", pcidevice.ID)
d.Set("rg_id", pcidevice.RGID)
d.Set("name", pcidevice.Name)
d.Set("stack_id", pcidevice.StackID)
d.Set("status", pcidevice.Status)
d.Set("system_name", pcidevice.SystemName)
return nil
}
func resourcePcideviceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
haveRGID, err := existRGID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveRGID {
return diag.Errorf("resourcePcideviceCreate: can't update Pcidevice because RGID %d is not allowed or does not exist", d.Get("rg_id").(int))
}
haveStackID, err := existStackID(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
if !haveStackID {
return diag.Errorf("resourcePcideviceCreate: can't update Pcidevice because StackID %d is not allowed or does not exist", d.Get("stack_id").(int))
}
if d.HasChange("enable") {
state := d.Get("enable").(bool)
c := m.(*controller.ControllerCfg)
if state {
req := pcidevice.EnableRequest{
DeviceID: uint64(d.Get("device_id").(int)),
}
_, err := c.CloudBroker().PCIDevice().Enable(ctx, req)
if err != nil {
return diag.FromErr(err)
}
} else {
req := pcidevice.DisableRequest{
DeviceID: uint64(d.Get("device_id").(int)),
}
if force, ok := d.GetOk("force"); ok {
req.Force = force.(bool)
}
_, err := c.CloudBroker().PCIDevice().Disable(ctx, req)
if err != nil {
return diag.FromErr(err)
}
}
}
return resourcePcideviceRead(ctx, d, m)
}
func resourcePcideviceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourcePcideviceDelete: called for %s, id: %s", d.Get("name").(string), d.Id())
pciDevice, err := utilityPcideviceCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
if pciDevice.Status == status.Destroyed || pciDevice.Status == status.Purged {
return nil
}
c := m.(*controller.ControllerCfg)
req := pcidevice.DeleteRequest{
DeviceID: pciDevice.ID,
}
if force, ok := d.GetOk("force"); ok {
req.Force = force.(bool)
}
_, err = c.CloudBroker().PCIDevice().Delete(ctx, req)
if err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func resourcePcideviceSchemaMake() map[string]*schema.Schema {
return map[string]*schema.Schema{
"ckey": {
Type: schema.TypeString,
Computed: true,
},
"meta": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"compute_id": {
Type: schema.TypeInt,
Computed: true,
},
"description": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: "description, just for information",
},
"guid": {
Type: schema.TypeInt,
Computed: true,
},
"hw_path": {
Type: schema.TypeString,
Required: true,
Description: "PCI address of the device",
},
"device_id": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"name": {
Type: schema.TypeString,
Required: true,
Description: "Name of Device",
},
"rg_id": {
Type: schema.TypeInt,
Required: true,
Description: "Resource GROUP",
},
"stack_id": {
Type: schema.TypeInt,
Required: true,
Description: "stackId",
},
"status": {
Type: schema.TypeString,
Computed: true,
},
"system_name": {
Type: schema.TypeString,
Computed: true,
},
"force": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Force delete",
},
"enable": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Enable pci device",
},
}
}
func ResourcePcidevice() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourcePcideviceCreate,
ReadContext: resourcePcideviceRead,
UpdateContext: resourcePcideviceUpdate,
DeleteContext: resourcePcideviceDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s,
Read: &constants.Timeout30s,
Update: &constants.Timeout60s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: resourcePcideviceSchemaMake(),
}
}
/*
Copyright (c) 2019-2023 Digital Energy Cloud Solutions LLC. All Rights Reserved.
Authors:
Petr Krutov, <petr.krutov@digitalenergy.online>
Stanislav Solovev, <spsolovev@digitalenergy.online>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud
Orchestration Technology) with Terraform by Hashicorp.
Source code: https://repository.basistech.ru/BASIS/terraform-provider-decort
Please see README.md to learn where to place source code so that it
builds seamlessly.
Documentation: https://repository.basistech.ru/BASIS/terraform-provider-decort/wiki
*/
package pcidevice
import (
"context"
log "github.com/sirupsen/logrus"
"strconv"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/pcidevice"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/constants"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/controller"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/dc"
"repository.basistech.ru/BASIS/terraform-provider-decort/internal/status"
)
func resourcePcideviceCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourcePcideviceCreate: called for pcidevice %s", d.Get("name").(string))
c := m.(*controller.ControllerCfg)
createReq := pcidevice.CreateRequest{}
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
createReq.StackID = uint64(d.Get("stack_id").(int))
createReq.RGID = uint64(d.Get("rg_id").(int))
createReq.Name = d.Get("name").(string)
createReq.HWPath = d.Get("hw_path").(string)
if description, ok := d.GetOk("description"); ok {
createReq.Description = description.(string)
}
pcideviceId, err := c.CloudBroker().PCIDevice().Create(ctx, createReq)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
d.SetId(strconv.FormatUint(pcideviceId, 10))
d.Set("device_id", pcideviceId)
warnings := dc.Warnings{}
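// Create-time enable/disable failures are treated as non-fatal: they are
// collected in dc.Warnings and appended to the read diagnostics below, so the
// created device is still recorded in state.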
if enable, ok := d.GetOk("enable"); ok {
log.Debugf("resourcePcideviceCreate: enable=%t device_id %d after completing its resource configuration", enable, pcideviceId)
if enable.(bool) {
req := pcidevice.EnableRequest{DeviceID: pcideviceId}
if _, err := c.CloudBroker().PCIDevice().Enable(ctx, req); err != nil {
warnings.Add(err)
}
} else {
req := pcidevice.DisableRequest{
DeviceID: pcideviceId,
}
if force, ok := d.GetOk("force_disable"); ok {
req.Force = force.(bool)
log.Debugf("force_disable=%v", force)
}
if _, err := c.CloudBroker().PCIDevice().Disable(ctx, req); err != nil {
warnings.Add(err)
}
}
}
return append(resourcePcideviceRead(ctx, d, m), warnings.Get()...)
}
func resourcePcideviceRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourcePcideviceRead: called for pci_device id %d, name %s",
d.Id(), d.Get("name").(string))
pcideviceRec, err := utilityPcideviceCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
flattenPcidevice(d, pcideviceRec)
log.Debugf("resourcePcideviceRead: after flattenPcidevice: device_id %s, name %s",
d.Id(), d.Get("name").(string))
return nil
}
func resourcePcideviceUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourcePcideviceUpdate: called for pcidevice id %s, name %s", d.Id(), d.Get("name").(string))
c := m.(*controller.ControllerCfg)
if diags := checkParamsExistence(ctx, d, c); diags != nil {
return diags
}
if d.HasChange("enable") {
err := resourcePcideviceChangeEnable(ctx, d, m)
if err != nil {
return diag.FromErr(err)
}
}
return resourcePcideviceRead(ctx, d, m)
}
func resourcePcideviceDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
log.Debugf("resourcePcideviceDelete: called for %s, id: %s", d.Get("name").(string), d.Id())
c := m.(*controller.ControllerCfg)
pciDevice, err := utilityPcideviceCheckPresence(ctx, d, m)
if err != nil {
d.SetId("")
return diag.FromErr(err)
}
if pciDevice.Status == status.Destroyed || pciDevice.Status == status.Purged {
return nil
}
req := pcidevice.DeleteRequest{
DeviceID: pciDevice.ID,
}
if force, ok := d.GetOk("force_delete"); ok {
req.Force = force.(bool)
}
if _, err = c.CloudBroker().PCIDevice().Delete(ctx, req); err != nil {
return diag.FromErr(err)
}
d.SetId("")
return nil
}
func ResourcePcidevice() *schema.Resource {
return &schema.Resource{
SchemaVersion: 1,
CreateContext: resourcePcideviceCreate,
ReadContext: resourcePcideviceRead,
UpdateContext: resourcePcideviceUpdate,
DeleteContext: resourcePcideviceDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: &constants.Timeout60s,
Read: &constants.Timeout30s,
Update: &constants.Timeout60s,
Delete: &constants.Timeout60s,
Default: &constants.Timeout60s,
},
Schema: resourcePcideviceSchemaMake(),
}
}
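// resourcePcideviceChangeEnable reconciles the "enable" attribute with the
// platform: it enables the device or disables it, honouring the optional
// force_disable flag.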
func resourcePcideviceChangeEnable(ctx context.Context, d *schema.ResourceData, m interface{}) error {
enable := d.Get("enable").(bool)
log.Debugf("resourcePcideviceChangeEnable: enable=%t device_id %s after completing its resource configuration", enable, d.Id())
pcideviceRec, err := utilityPcideviceCheckPresence(ctx, d, m)
if err != nil {
return err
}
c := m.(*controller.ControllerCfg)
if enable {
req := pcidevice.EnableRequest{
DeviceID: pcideviceRec.ID,
}
if _, err := c.CloudBroker().PCIDevice().Enable(ctx, req); err != nil {
return err
}
} else {
req := pcidevice.DisableRequest{
DeviceID: pcideviceRec.ID,
}
if force, ok := d.GetOk("force_disable"); ok {
req.Force = force.(bool)
}
if _, err := c.CloudBroker().PCIDevice().Disable(ctx, req); err != nil {
return err
}
}
return nil
}
